/**************************************************************************
*
* Copyright 2010, VMware.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
* IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
/*
* Binning code for triangles
*/
#include "util/u_math.h"
#include "util/u_memory.h"
#include "lp_perf.h"
#include "lp_setup_context.h"
#include "lp_setup_coef.h"
#include "lp_rast.h"
#include "lp_state_fs.h"
#if !defined(PIPE_ARCH_SSE)
/**
* Compute a0 for a constant-valued coefficient (GL_FLAT shading).
*/
static void constant_coef( struct lp_rast_shader_inputs *inputs,
                           unsigned slot,
                           const float value,
                           unsigned i )
{
   inputs->a0[slot][i] = value;
   inputs->dadx[slot][i] = 0.0f;
   inputs->dady[slot][i] = 0.0f;
}
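/**
* Compute a0, dadx and dady for a linearly interpolated coefficient
* (no perspective correction), for a triangle.
*/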
static void linear_coef( struct lp_rast_shader_inputs *inputs,
                         const struct lp_tri_info *info,
                         unsigned slot,
                         unsigned vert_attr,
                         unsigned i)
{
   float a0 = info->v0[vert_attr][i];
   float a1 = info->v1[vert_attr][i];
   float a2 = info->v2[vert_attr][i];
   float da01 = a0 - a1;
   float da20 = a2 - a0;
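   /* Solve the 2x2 system
    *    da01 = dadx * dx01 + dady * dy01
    *    da20 = dadx * dx20 + dady * dy20
    * for dadx/dady with Cramer's rule; the common determinant
    * (dx01 * dy20 - dx20 * dy01) is already folded into the *_ooa terms.
    */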
   float dadx = (da01 * info->dy20_ooa - info->dy01_ooa * da20);
   float dady = (da20 * info->dx01_ooa - info->dx20_ooa * da01);
   inputs->dadx[slot][i] = dadx;
   inputs->dady[slot][i] = dady;
   /* Calculate a0 as the value which would be sampled for the
    * fragment at (0,0), taking into account that we want to sample at
    * pixel centers, in other words (0.5, 0.5).
    *
    * This is neat but unfortunately not a good way to do things for
    * triangles with very large values of dadx or dady, as it will
    * result in the subtraction and re-addition from a0 of a very
    * large number, which means we'll end up losing a lot of the
    * fractional bits and precision from a0.  The way to fix this is
    * to define a0 as the sample at a pixel center somewhere near vmin
    * instead - I'll switch to this later.
    */
   inputs->a0[slot][i] = a0 - (dadx * info->x0_center +
                               dady * info->y0_center);
}
/**
* Compute a0, dadx and dady for a perspective-corrected interpolant,
* for a triangle.
* We basically multiply the vertex value by 1/w before computing
* the plane coefficients (a0, dadx, dady).
* Later, when we compute the value at a particular fragment position we'll
* divide the interpolated value by the interpolated W at that fragment.
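* (I.e. standard perspective-correct interpolation: the fragment value is
* lerp(attr * 1/w) / lerp(1/w), with both terms evaluated from their
* screen-space plane equations.)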
*/
static void perspective_coef( struct lp_rast_shader_inputs *inputs,
                              const struct lp_tri_info *info,
                              unsigned slot,
                              unsigned vert_attr,
                              unsigned i)
{
   /* premultiply by 1/w  (v[0][3] is always 1/w):
    */
   float a0 = info->v0[vert_attr][i] * info->v0[0][3];
   float a1 = info->v1[vert_attr][i] * info->v1[0][3];
   float a2 = info->v2[vert_attr][i] * info->v2[0][3];
   float da01 = a0 - a1;
   float da20 = a2 - a0;
   float dadx = da01 * info->dy20_ooa - info->dy01_ooa * da20;
   float dady = da20 * info->dx01_ooa - info->dx20_ooa * da01;
   inputs->dadx[slot][i] = dadx;
   inputs->dady[slot][i] = dady;
   inputs->a0[slot][i] = a0 - (dadx * info->x0_center +
                               dady * info->y0_center);
}
/**
* Special coefficient setup for gl_FragCoord.
* X and Y are trivial
* Z and W are copied from position_coef which should have already been computed.
* We could do a bit less work if we'd examine gl_FragCoord's swizzle mask.
*/
static void
setup_fragcoord_coef(struct lp_rast_shader_inputs *inputs,
                     const struct lp_tri_info *info,
                     unsigned slot,
                     unsigned usage_mask)
{
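   /* For X and Y the plane equation is trivial: with a0 = 0 and a unit
    * gradient along the respective axis, the interpolated value is just
    * the fragment's window x or y coordinate.
    */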
   /*X*/
   if (usage_mask & TGSI_WRITEMASK_X) {
      inputs->a0[slot][0] = 0.0;
      inputs->dadx[slot][0] = 1.0;
      inputs->dady[slot][0] = 0.0;
   }
   /*Y*/
   if (usage_mask & TGSI_WRITEMASK_Y) {
      inputs->a0[slot][1] = 0.0;
      inputs->dadx[slot][1] = 0.0;
      inputs->dady[slot][1] = 1.0;
   }
   /*Z*/
   if (usage_mask & TGSI_WRITEMASK_Z) {
      linear_coef(inputs, info, slot, 0, 2);
   }
   /*W*/
   if (usage_mask & TGSI_WRITEMASK_W) {
      linear_coef(inputs, info, slot, 0, 3);
   }
}
/**
* Setup the fragment input attribute with the front-facing value.
* \param frontface is the triangle front facing?
*/
static void setup_facing_coef( struct lp_rast_shader_inputs *inputs,
                               unsigned slot,
                               boolean frontface,
                               unsigned usage_mask)
{
   /* convert TRUE to 1.0 and FALSE to -1.0 */
   if (usage_mask & TGSI_WRITEMASK_X)
      constant_coef( inputs, slot, 2.0f * frontface - 1.0f, 0 );
   if (usage_mask & TGSI_WRITEMASK_Y)
      constant_coef( inputs, slot, 0.0f, 1 ); /* wasted */
   if (usage_mask & TGSI_WRITEMASK_Z)
      constant_coef( inputs, slot, 0.0f, 2 ); /* wasted */
   if (usage_mask & TGSI_WRITEMASK_W)
      constant_coef( inputs, slot, 0.0f, 3 ); /* wasted */
}
/**
* Compute the a0, dadx and dady coefficients (inputs->a0/dadx/dady) for
* all the fragment shader inputs of a triangle.
*/
void lp_setup_tri_coef( struct lp_setup_context *setup,
                        struct lp_rast_shader_inputs *inputs,
                        const float (*v0)[4],
                        const float (*v1)[4],
                        const float (*v2)[4],
                        boolean frontfacing)
{
   unsigned fragcoord_usage_mask = TGSI_WRITEMASK_XYZ;
   unsigned slot;
   unsigned i;
   struct lp_tri_info info;
   float dx01 = v0[0][0] - v1[0][0];
   float dy01 = v0[0][1] - v1[0][1];
   float dx20 = v2[0][0] - v0[0][0];
   float dy20 = v2[0][1] - v0[0][1];
   float oneoverarea = 1.0f / (dx01 * dy20 - dx20 * dy01);
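   /* dx01 * dy20 - dx20 * dy01 is (up to sign) twice the triangle's
    * screen-space area; scaling the edge deltas by its reciprocal below
    * turns them into per-pixel attribute gradients.
    */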
   info.v0 = v0;
   info.v1 = v1;
   info.v2 = v2;
   info.frontfacing = frontfacing;
   info.x0_center = v0[0][0] - setup->pixel_offset;
   info.y0_center = v0[0][1] - setup->pixel_offset;
   info.dx01_ooa = dx01 * oneoverarea;
   info.dx20_ooa = dx20 * oneoverarea;
   info.dy01_ooa = dy01 * oneoverarea;
   info.dy20_ooa = dy20 * oneoverarea;
   /* setup interpolation for all the remaining attributes:
    */
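   /* Note: shader inputs land in coefficient slots 1..nr_inputs; slot 0 is
    * reserved for the position/fragcoord coefficients set up at the end.
    */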
   for (slot = 0; slot < setup->fs.nr_inputs; slot++) {
      unsigned vert_attr = setup->fs.input[slot].src_index;
      unsigned usage_mask = setup->fs.input[slot].usage_mask;
      switch (setup->fs.input[slot].interp) {
      case LP_INTERP_CONSTANT:
         if (setup->flatshade_first) {
            for (i = 0; i < NUM_CHANNELS; i++)
               if (usage_mask & (1 << i))
                  constant_coef(inputs, slot+1, info.v0[vert_attr][i], i);
         }
         else {
            for (i = 0; i < NUM_CHANNELS; i++)
               if (usage_mask & (1 << i))
                  constant_coef(inputs, slot+1, info.v2[vert_attr][i], i);
         }
         break;
      case LP_INTERP_LINEAR:
         for (i = 0; i < NUM_CHANNELS; i++)
            if (usage_mask & (1 << i))
               linear_coef(inputs, &info, slot+1, vert_attr, i);
         break;
      case LP_INTERP_PERSPECTIVE:
         for (i = 0; i < NUM_CHANNELS; i++)
            if (usage_mask & (1 << i))
               perspective_coef(inputs, &info, slot+1, vert_attr, i);
         fragcoord_usage_mask |= TGSI_WRITEMASK_W;
         break;
      case LP_INTERP_POSITION:
         /*
          * The generated pixel interpolators will pick up the coeffs from
          * slot 0, so we need to ensure that the usage mask covers all
          * the usages.
          */
         fragcoord_usage_mask |= usage_mask;
         break;
      case LP_INTERP_FACING:
         setup_facing_coef(inputs, slot+1, info.frontfacing, usage_mask);
         break;
      default:
         assert(0);
      }
   }
   /* The internal position input is in slot zero:
    */
   setup_fragcoord_coef(inputs, &info, 0, fragcoord_usage_mask);
}
#else
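/* When PIPE_ARCH_SSE is defined the generic coefficient setup above is not
* built here (an SSE path provides it instead); emit a dummy symbol so that
* this translation unit is not empty.
*/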
extern void lp_setup_coef_dummy(void);
void lp_setup_coef_dummy(void)
{
}
#endif