Compare commits


70 Commits

Author SHA1 Message Date
Thomas Hellström
814cae6a45 Update drm expected version. 2006-09-28 09:34:33 +00:00
Thomas Hellström
410673f64c Driver date. 2006-09-28 09:18:45 +00:00
Keith Whitwell
a83a3cfa1c remove duplicate 2006-09-27 20:23:34 +00:00
Keith Whitwell
da89238b13 pull npot mipmap test from trunk 2006-09-27 20:18:36 +00:00
Keith Whitwell
984387b89c Fix mipmap positioning for 2D ARB_npot textures.
Fix typo in 3D image layout for i915.
2006-09-27 19:01:12 +00:00
Thomas Hellström
93bc5e8402 Fix another single-drawable-multiple-contexts error that shows up now that the
drm_lock_transfer() function has been removed from drm.
The error can cause the wrong context number to be used in the lock, but
otherwise should've done no serious harm.
We can probably throw away the i915 lock_mutex now.
2006-09-27 17:17:35 +00:00
Thomas Hellström
43722cae42 Valgrind error fix. 2006-09-26 14:15:08 +00:00
Keith Whitwell
f397a2a1e5 Move get_dirty() call to after reserving space in the batchbuffer as
this may trigger a batchbuffer flush and raise new dirty state.

Add assert_not_dirty() callback required by recent changes.
2006-09-26 11:36:32 +00:00
Keith Whitwell
87a0312068 Add asserts to catch primitives being emitted with state still dirty,
or extended when there is no primitive to extend.

Turn lock/unlock macros into proper functions and add a debug flag to
print out their activity.
2006-09-26 10:44:52 +00:00
Keith Whitwell
ee525436cc Move get_dirty() call to after reserving space in the batchbuffer as
this may trigger a batchbuffer flush and raise new dirty state.
2006-09-26 10:39:42 +00:00
Keith Whitwell
520ba25dc3 Ensure that we don't wrap batchbuffers right at the beginning of a
primitive, otherwise vertices can be emitted without the full-state
preamble.  Fixes gears + texobj lockup.
2006-09-26 09:27:54 +00:00
Thomas Hellström
6f9dc91045 Small COW break on destruction optimization.
Fix a comment.
2006-09-26 09:25:47 +00:00
Thomas Hellström
d17637d47f Fix up buffer object and region refcounting
Silence some compilation warnings.
2006-09-26 09:00:03 +00:00
Thomas Hellström
980a25cd92 Make regions screen-based instead of context based. 2006-09-25 18:22:21 +00:00
Keith Whitwell
607c474f2d Import Brian's FBO bugfix from trunk. 2006-09-25 16:11:14 +00:00
Thomas Hellström
2f54146c1f Add some more buffer usage refcounting.
Otherwise these texture buffers will disappear under us.
2006-09-22 18:09:46 +00:00
Keith Whitwell
72aeeef7c3 Remove redundant 3d-state validation prior to blit copypixels. These
were causing some issues.
2006-09-22 16:45:52 +00:00
Keith Whitwell
dadc8e2a50 Fix a longstanding memory leak - intelObj->mt refcount was not deleted
when the texture was deleted.
2006-09-22 16:33:10 +00:00
Keith Whitwell
02d401b475 Turn some printfs into DBG's.
Fix a little mess left by indent.
2006-09-22 16:32:24 +00:00
Keith Whitwell
9d695abcac rationalize debugging flags 2006-09-22 16:05:09 +00:00
Keith Whitwell
e070007d16 Silence warning 2006-09-22 16:03:36 +00:00
Keith Whitwell
39c4c8d54b intel_batch_ioctl() lost its return type somehow 2006-09-22 16:03:06 +00:00
Keith Whitwell
4fa3cf225f Fix initialization of pfPitch - measured in pixels, not bytes. 2006-09-22 15:22:32 +00:00
Thomas Hellström
1c2c1c4560 Another obscure bug causing an infinite loop when multiple contexts are
bound to a drawable.
2006-09-22 14:52:31 +00:00
Keith Whitwell
e3904516bb bring in recent texture swizzle changes from trunk 2006-09-22 14:18:56 +00:00
Keith Whitwell
ceb222798b - Make point_smooth, polygon_smooth fallbacks per-primitive rather than total.
- Also - disable those fallbacks unless INTEL_STRICT_CONFORMANCE is set.
- Gate some FBO debugging on INTEL_DEBUG=fbo
2006-09-22 14:16:33 +00:00
Keith Whitwell
6abcf6a3d3 Fix bug detecting overlapping regions in texture copypixels.
Import code from trunk to allow blit copypixels when scissor is active.
2006-09-22 14:10:39 +00:00
Keith Whitwell
db0ed8942f Check for FragmentProgram._Enabled rather than _Active. _Active is
set when a driver is using an internally generated fragment program to
emulate fixed function behaviour.  For the software rasterizer, we're
better off using the fixed function code directly.
2006-09-22 11:43:19 +00:00
Keith Whitwell
bd9f38ccaa Flush driver, not just tnl module. 2006-09-22 11:39:31 +00:00
Keith Whitwell
b15421c22b resize buffers in MakeCurrent 2006-09-22 11:37:39 +00:00
Thomas Hellström
5db0e131ef Fix intel texture image buffer leak. 2006-09-22 10:55:05 +00:00
Michel Dänzer
b249ff8a86 Fix INTEL_DEBUG=fall. 2006-09-20 16:07:39 +00:00
Michel Dänzer
9d6e0f5d64 Revert INTEL_FIREVERTICES to flush primitives instead of asserting none pending.
The assertion was triggered in some cases, e.g. by the xscreensaver hack
stonerview.
2006-09-20 15:29:36 +00:00
Thomas Hellström
4f8549634e Fix a fencing bug. 2006-09-18 18:55:29 +00:00
Keith Whitwell
3345ab8ccf fix bzflag crashes 2006-09-18 18:11:42 +00:00
Thomas Hellström
cd3c9febda Check for NULL in intelSwapBuffers() 2006-09-18 14:18:14 +00:00
Thomas Hellström
8c58a32360 Fix fencing when submitting empty batchbuffers.
Add a proper buffer waitidle method.
2006-09-18 14:11:00 +00:00
Thomas Hellström
0d646ea3a8 Relax the requirement in pool_takedown in the intel batchbuffer pool
that all allocated buffers should've been handed back before releasing the
pool.
This is to account for the case where the app calls DestroyScreen() without
first destroying all contexts.
2006-09-18 08:41:22 +00:00
Thomas Hellström
6a33e6d221 Wait for buffer idle unlocked before mapping in some cases.
Greatly improves responsiveness.
Add an MI_FLUSH after each batchbuffer and tell the kernel we're doing so with
the new DRM_I915_FENCE_FLAG_FLUSHED (Requires drm update).
This can be done on a per-batchbuffer basis.
The DRM handles all fence accounting and signals earlier fences that also need
a flush.
2006-09-15 14:52:31 +00:00
Thomas Hellström
fc4bc6fc97 Adapt to updated libdrm. 2006-09-15 09:22:08 +00:00
Keith Whitwell
4239cfd534 Turn back on zcopy uploads - my timings were incorrect. 2006-09-14 14:48:34 +00:00
Keith Whitwell
b0902a4158 Silence debug, cope with calling intel_region_cow() with lock already held. 2006-09-14 14:48:09 +00:00
Keith Whitwell
269219dc05 Disable zero-copy texturing for now as it isn't such a win with the
newest memory manager code.
2006-09-14 12:11:46 +00:00
Thomas Hellström
5dbadd418c Fix a FBO render offset bug.
The current code failed if the dri drawable was updated before the call to
intelFlush(), and typically rendered into an FBO using the back buffer
cliprects.
2006-09-13 15:10:09 +00:00
Keith Whitwell
71bce51324 update driver date and branch debug 2006-09-13 12:45:42 +00:00
Keith Whitwell
308d377ca8 Bring in writemask + TEX fix from trunk 2006-09-13 12:45:21 +00:00
Thomas Hellström
4f39d22c29 texmem_0_3_branch with adaptations to the drm-ttm-0-2 branch.
Indent the i915 driver directory.
2006-09-12 14:13:36 +00:00
Keith Whitwell
8dab7963b7 Add a demo illustrating various techniques for uploading dynamic (i.e.
streaming) texture images via PBOs.  Two of these currently seem to
trigger a memory leak in the i915 driver, but at least one
(PBO_PINGPONG) illustrates the good speedups available with PBOs on
the i915 driver, particularly zero-copy uploads.
2006-08-30 20:41:18 +00:00
Keith Whitwell
ec30116c9f Must lock hardware around call to intelEmitCopyBlit() 2006-08-30 20:18:33 +00:00
Keith Whitwell
5ac3ad7722 implement zero-copy uploads for glTexImage from suitable pixel buffer objects 2006-08-30 19:55:32 +00:00
Keith Whitwell
fe239744aa Bring in improved version of the texdown demo from the original texmem branch 2006-08-30 09:15:40 +00:00
Keith Whitwell
137dcd4a46 remove orphan file 2006-08-29 12:11:22 +00:00
Keith Whitwell
0d7755fc73 remove i915_texprog.c as it is no longer needed 2006-08-29 11:58:23 +00:00
Keith Whitwell
c863e63549 remove hashing for bm buffers, use an opaque pointer instead of an integer 2006-08-29 11:45:13 +00:00
Keith Whitwell
62920e2ab2 revert last commit 2006-08-28 15:14:55 +00:00
Keith Whitwell
ce3885fc5f dependent texture read demo 2006-08-28 15:11:10 +00:00
Keith Whitwell
bf0c1ca618 use mesa's texenvprogram in preference to i915 version. Enable tex_env_crossbar 2006-08-28 11:50:10 +00:00
Keith Whitwell
64bc9caa1c various texture tweaks 2006-08-28 11:49:30 +00:00
Keith Whitwell
89a2ea6fd3 Add missing wait for busy buffers in bmMapBuffer() 2006-08-28 11:37:07 +00:00
Keith Whitwell
41123a85ec Fixes for i945 texture layouts 2006-08-28 11:35:20 +00:00
Keith Whitwell
f10469abe8 add dependent texture read test 2006-08-17 13:25:14 +00:00
Keith Whitwell
4a74de797c Make bgra format more explicit 2006-08-17 13:24:57 +00:00
Dave Airlie
c745394242 fix from i965 driver for compressed textures on texman 2006-08-16 03:33:00 +00:00
Keith Whitwell
f9f3de8c31 first pass at texture uploads from pbo's with the blitter 2006-08-03 15:41:10 +00:00
Keith Whitwell
3d0a073a71 trivial test for pbo-based texture uploads 2006-08-03 15:40:19 +00:00
Keith Whitwell
219ee91fa8 Get basic texturing working again on i945 2006-08-03 11:22:09 +00:00
Keith Whitwell
632eae3fec Correct pitch / cpp calculation 2006-08-03 10:31:47 +00:00
Keith Whitwell
f378bcd8bf drmOpen/Close once 2006-08-02 10:24:01 +00:00
Keith Whitwell
ded29089f3 new files from merge of texmem driver 2006-08-02 10:08:58 +00:00
Keith Whitwell
527c05eb2a Pull the texmem i915 driver onto a new branch closer to the current trunk.
Compiles but otherwise untested.
2006-08-02 10:01:03 +00:00
94 changed files with 17262 additions and 10920 deletions
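
Several of the commits above (520ba25dc3, 87a0312068, f397a2a1e5/ee525436cc) revolve around one ordering rule: reserve batchbuffer space before reading the dirty state, and never start a primitive so close to the end of a batchbuffer that a wrap could separate the vertices from their full-state preamble. The following is a minimal, self-contained sketch of that rule with made-up names; it is not the actual intel driver code.

/* Hypothetical sketch -- not the actual intel driver code. */
#include <assert.h>

struct batch {
   unsigned used, size;     /* bytes consumed / total */
   int dirty;               /* some state needs re-emitting */
};

static void flush(struct batch *b)            { b->used = 0; b->dirty = 1; }
static void emit(struct batch *b, unsigned n) { b->used += n; }

static void emit_primitive(struct batch *b, unsigned state_sz, unsigned vert_sz)
{
   /* Reserve space for state + vertices *before* reading the dirty flag:
    * flushing may raise new dirty state (commits f397a2a1e5, ee525436cc),
    * and a wrap in the middle of a primitive would emit vertices without
    * their full-state preamble (commit 520ba25dc3).
    */
   if (b->size - b->used < state_sz + vert_sz)
      flush(b);

   if (b->dirty) {
      emit(b, state_sz);                /* full-state preamble */
      b->dirty = 0;
   }

   assert(!b->dirty);                   /* cf. the asserts added in 87a0312068 */
   emit(b, vert_sz);                    /* vertices stay with their state */
}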

View File

@@ -47,6 +47,7 @@ PROGS = \
renormal \
shadowtex \
singlebuffer \
streaming_rect \
spectex \
spriteblast \
stex3d \

View File

@@ -0,0 +1,407 @@
/*
* GL_ARB_multitexture demo
*
* Command line options:
* -info print GL implementation information
*
*
* Brian Paul November 1998 This program is in the public domain.
* Modified on 12 Feb 2002 for > 2 texture units.
*/
#define GL_GLEXT_PROTOTYPES
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <GL/glut.h>
#include "readtex.h"
enum {
WHOLERECT,
RENDER,
SWAPBUF,
UPLOAD_CONVENTIONAL,
UPLOAD_PBO_NAIVE,
UPLOAD_PBO_PINGPONG,
UPLOAD_PBO_WITH_RESET,
UPLOAD_NOOP,
QUIT
};
static GLint upload_style = UPLOAD_CONVENTIONAL;
static GLboolean whole_rect = 1;
static GLboolean do_render = 1;
static GLboolean do_swapbuffers = 1;
static GLuint col = 0x0;
static GLfloat Xrot = 20.0, Yrot = 30.0;
static GLuint Width = 1024;
static GLuint Height = 512;
#define NR_PBO 2
static GLuint texObj, DrawPBO[NR_PBO];
static void Idle( void )
{
col++;
glutPostRedisplay();
}
static int min( int a, int b ) { return a < b ? a : b; }
static void DrawObject()
{
GLint size = Width * Height * 4;
static char *static_image = NULL;
static int current = 0;
switch (upload_style) {
case UPLOAD_PBO_NAIVE:
/* Continually upload from the one pbo without any attempt to
* decouple from hardware trying to access the same data:
*/
glBindBufferARB(GL_PIXEL_UNPACK_BUFFER_EXT, DrawPBO[0]);
{
char *image = glMapBufferARB(GL_PIXEL_UNPACK_BUFFER_EXT, GL_WRITE_ONLY_ARB);
memset(image, col&0xff, size);
glUnmapBufferARB(GL_PIXEL_UNPACK_BUFFER_EXT);
}
glTexImage2D(GL_TEXTURE_RECTANGLE_ARB, 0, GL_RGBA, Width, Height, 0,
GL_BGRA, GL_UNSIGNED_BYTE, NULL);
glBindBufferARB(GL_PIXEL_UNPACK_BUFFER_EXT, 0);
break;
case UPLOAD_PBO_PINGPONG:
/* Cycle through the available upload buffers to avoid waiting
* on buffers currently enqueued on the hardware. Avoids
* waiting on buffer contents that may still be referenced by
* hardware.
*/
current = (current + 1) % NR_PBO;
glBindBufferARB(GL_PIXEL_UNPACK_BUFFER_EXT, DrawPBO[current]);
{
char *image = glMapBufferARB(GL_PIXEL_UNPACK_BUFFER_EXT, GL_WRITE_ONLY_ARB);
memset(image, col&0xff, size);
glUnmapBufferARB(GL_PIXEL_UNPACK_BUFFER_EXT);
}
glTexImage2D(GL_TEXTURE_RECTANGLE_ARB, 0, GL_RGBA, Width, Height, 0,
GL_BGRA, GL_UNSIGNED_BYTE, NULL);
glBindBufferARB(GL_PIXEL_UNPACK_BUFFER_EXT, 0);
break;
case UPLOAD_PBO_WITH_RESET:
/* Alternate approach, uses BufferDataARB(NULL) to explicitly
* release the old image contents from the pbo prior to upload.
* Similar effect to double buffering pbos as above, but a
* little more subtle as the implications of the NULL data are
* not entirely clear. I don't know if all drivers will take
* full advantage of the optimization or not.
*/
glBindBufferARB(GL_PIXEL_UNPACK_BUFFER_EXT, DrawPBO[0]);
/* XXX: This is extremely important - semantically makes the buffer
* contents undefined, but in practice means that the driver can
* release the old copy of the texture and allocate a new one
* without waiting for outstanding rendering to complete.
*/
glBufferDataARB(GL_PIXEL_UNPACK_BUFFER_EXT, size, NULL, GL_STREAM_DRAW_ARB);
{
char *image = glMapBufferARB(GL_PIXEL_UNPACK_BUFFER_EXT, GL_WRITE_ONLY_ARB);
memset(image, col&0xff, size);
glUnmapBufferARB(GL_PIXEL_UNPACK_BUFFER_EXT);
}
glTexImage2D(GL_TEXTURE_RECTANGLE_ARB, 0, GL_RGBA, Width, Height, 0,
GL_BGRA, GL_UNSIGNED_BYTE, NULL);
glBindBufferARB(GL_PIXEL_UNPACK_BUFFER_EXT, 0);
break;
case UPLOAD_CONVENTIONAL:
if (static_image == NULL)
static_image = malloc(size);
memset(static_image, col&0xff, size);
/* BGRA should be the fast path for regular uploads as well.
*/
glTexImage2D(GL_TEXTURE_RECTANGLE_ARB, 0, GL_RGBA, Width, Height, 0,
GL_BGRA, GL_UNSIGNED_BYTE, static_image);
break;
case UPLOAD_NOOP:
break;
}
if (do_render)
{
int x,y,w,h;
if (whole_rect) {
x = y = 0;
w = Width;
h = Height;
}
else {
x = y = 0;
w = min(10, Width);
h = min(10, Height);
}
glBegin(GL_QUADS);
glTexCoord2f( x, y);
glVertex2f( x, y );
glTexCoord2f( x, y + h);
glVertex2f( x, y + h);
glTexCoord2f( x + w, y + h);
glVertex2f( x + w, y + h );
glTexCoord2f( x + w, y);
glVertex2f( x + w, y );
glEnd();
}
}
static void Display( void )
{
static GLint T0 = 0;
static GLint Frames = 0;
GLint t;
glClear( GL_COLOR_BUFFER_BIT );
glPushMatrix();
DrawObject();
glPopMatrix();
if (do_swapbuffers)
glutSwapBuffers();
else
glFlush();
Frames++;
t = glutGet(GLUT_ELAPSED_TIME);
if (t - T0 >= 1000) {
GLfloat seconds = (t - T0) / 1000.0;
GLfloat fps = Frames / seconds;
printf("%d frames in %6.3f seconds = %6.3f FPS\n", Frames, seconds, fps);
T0 = t;
Frames = 0;
}
}
static void Reshape( int width, int height )
{
glViewport( 0, 0, width, height );
glMatrixMode( GL_PROJECTION );
glLoadIdentity();
/* glFrustum( -1.0, 1.0, -1.0, 1.0, 10.0, 100.0 ); */
gluOrtho2D( 0, width, height, 0 );
glMatrixMode( GL_MODELVIEW );
glLoadIdentity();
/* glTranslatef(0.375, 0.375, 0); */
}
static void ModeMenu(int entry)
{
switch (entry) {
case WHOLERECT:
whole_rect = !whole_rect;
break;
case RENDER:
do_render = !do_render;
break;
case SWAPBUF:
do_swapbuffers = !do_swapbuffers;
break;
case UPLOAD_CONVENTIONAL:
case UPLOAD_PBO_NAIVE:
case UPLOAD_PBO_PINGPONG:
case UPLOAD_PBO_WITH_RESET:
case UPLOAD_NOOP:
upload_style = entry;
break;
case QUIT:
exit(0);
break;
}
glutPostRedisplay();
}
static void Key( unsigned char key, int x, int y )
{
(void) x;
(void) y;
switch (key) {
case 27:
exit(0);
break;
}
glutPostRedisplay();
}
static void SpecialKey( int key, int x, int y )
{
float step = 3.0;
(void) x;
(void) y;
switch (key) {
case GLUT_KEY_UP:
Xrot += step;
break;
case GLUT_KEY_DOWN:
Xrot -= step;
break;
case GLUT_KEY_LEFT:
Yrot += step;
break;
case GLUT_KEY_RIGHT:
Yrot -= step;
break;
}
glutPostRedisplay();
}
static void Init( int argc, char *argv[] )
{
const char *exten = (const char *) glGetString(GL_EXTENSIONS);
GLint size;
if (!strstr(exten, "GL_ARB_multitexture")) {
printf("Sorry, GL_ARB_multitexture not supported by this renderer.\n");
exit(1);
}
glGetIntegerv(GL_MAX_TEXTURE_SIZE, &size);
printf("%d x %d max texture size\n", size, size);
glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
/* allocate a texture object */
glGenTextures(1, &texObj);
/* set up the texture object */
glActiveTextureARB(GL_TEXTURE0_ARB);
glBindTexture(GL_TEXTURE_RECTANGLE_ARB, texObj);
glTexParameteri(GL_TEXTURE_RECTANGLE_ARB, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_RECTANGLE_ARB, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glGenBuffersARB(NR_PBO, DrawPBO);
{
int i;
for (i = 0; i < NR_PBO; i++) {
glBindBufferARB(GL_PIXEL_UNPACK_BUFFER_EXT, DrawPBO[i]);
glBufferDataARB(GL_PIXEL_UNPACK_BUFFER_EXT, Width * Height * 4, NULL, GL_STREAM_DRAW_ARB);
}
glBindBufferARB(GL_PIXEL_UNPACK_BUFFER_EXT, 0);
}
glTexEnvi(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_REPLACE);
glEnable(GL_TEXTURE_RECTANGLE_ARB);
glShadeModel(GL_SMOOTH);
glClearColor(0.3, 0.3, 0.4, 1.0);
if (argc > 1 && strcmp(argv[1], "-info")==0) {
printf("GL_RENDERER = %s\n", (char *) glGetString(GL_RENDERER));
printf("GL_VERSION = %s\n", (char *) glGetString(GL_VERSION));
printf("GL_VENDOR = %s\n", (char *) glGetString(GL_VENDOR));
printf("GL_EXTENSIONS = %s\n", (char *) glGetString(GL_EXTENSIONS));
}
}
int main( int argc, char *argv[] )
{
GLint i;
glutInit( &argc, argv );
for (i = 1; i < argc; i++) {
if (strcmp(argv[i], "-w") == 0) {
Width = atoi(argv[i+1]);
if (Width <= 0) {
printf("Error, bad width\n");
exit(1);
}
i++;
}
else if (strcmp(argv[i], "-h") == 0) {
Height = atoi(argv[i+1]);
if (Height <= 0) {
printf("Error, bad height\n");
exit(1);
}
i++;
}
}
glutInitWindowSize( Width, Height );
glutInitWindowPosition( 0, 0 );
glutInitDisplayMode( GLUT_RGB | GLUT_DOUBLE );
glutCreateWindow(argv[0] );
Init( argc, argv );
glutReshapeFunc( Reshape );
glutKeyboardFunc( Key );
glutSpecialFunc( SpecialKey );
glutDisplayFunc( Display );
glutIdleFunc( Idle );
glutCreateMenu(ModeMenu);
glutAddMenuEntry("Wholerect", WHOLERECT);
glutAddMenuEntry("Render", RENDER);
glutAddMenuEntry("Swapbuf", SWAPBUF);
glutAddMenuEntry("Upload style CONVENTIONAL", UPLOAD_CONVENTIONAL);
glutAddMenuEntry("Upload style PBO_NAIVE", UPLOAD_PBO_NAIVE);
glutAddMenuEntry("Upload style PBO_PINGPONG", UPLOAD_PBO_PINGPONG);
glutAddMenuEntry("Upload style PBO_WITH_RESET", UPLOAD_PBO_WITH_RESET);
glutAddMenuEntry("Upload style NOOP", UPLOAD_NOOP);
glutAddMenuEntry("Quit", QUIT);
glutAttachMenu(GLUT_RIGHT_BUTTON);
glutMainLoop();
return 0;
}

View File

@@ -38,8 +38,8 @@
#include <GL/glut.h>
static GLsizei MaxSize = 1024;
static GLsizei TexWidth = 256, TexHeight = 256, TexBorder = 0;
static GLsizei MaxSize = 2048;
static GLsizei TexWidth = 1024, TexHeight = 1024, TexBorder = 0;
static GLboolean ScaleAndBias = GL_FALSE;
static GLboolean SubImage = GL_FALSE;
static GLdouble DownloadRate = 0.0; /* texels/sec */
@@ -47,6 +47,32 @@ static GLdouble DownloadRate = 0.0; /* texels/sec */
static GLuint Mode = 0;
/* Try and avoid L2 cache effects by cycling through a small number of
* textures.
*
* At the initial size of 1024x1024x4 == 4mbyte, say 8 textures will
* keep us out of most caches at 32mb total.
*
* This turns into a fairly interesting question of what exactly you
* expect to be in cache in normal usage, and what you think should be
* outside. There's no rules for this, no reason to favour one usage
* over another except what the application you care about happens to
* resemble most closely.
*
* - Should the client texture image be in L2 cache? Has it just been
* generated or read from disk?
* - Does the application really use >1 texture, or is it constantly
* updating one image in-place?
*
* Different answers will favour different texture upload mechanisms.
* To upload an image that is purely outside of cache, a DMA-based
* upload will probably win, whereas for small, in-cache textures,
* copying looks good.
*/
#define NR_TEXOBJ 4
static GLuint TexObj[NR_TEXOBJ];
struct FormatRec {
GLenum Format;
GLenum Type;
@@ -116,25 +142,57 @@ TypeStr(GLenum type)
}
}
/* On x86, there is a performance cliff for memcpy to texture memory
* for sources below 64 byte alignment. We do our best with this in
* the driver, but it is better if the images are correctly aligned to
* start with:
*/
#define ALIGN (1<<12)
static unsigned align(unsigned value, unsigned a)
{
return (value + a - 1) & ~(a-1);
}
static int MIN2(int a, int b)
{
return a < b ? a : b;
}
static void
MeasureDownloadRate(void)
{
const int w = TexWidth + 2 * TexBorder;
const int h = TexHeight + 2 * TexBorder;
const int bytes = w * h * BytesPerTexel(Format);
const int image_bytes = align(w * h * BytesPerTexel(Format), ALIGN);
const int bytes = image_bytes * NR_TEXOBJ;
GLubyte *orig_texImage, *orig_getImage;
GLubyte *texImage, *getImage;
GLdouble t0, t1, time;
int count;
int i;
int offset = 0;
GLdouble total = 0; /* ints will tend to overflow */
texImage = (GLubyte *) malloc(bytes);
getImage = (GLubyte *) malloc(bytes);
if (!texImage || !getImage) {
printf("allocating %d bytes for %d %dx%d images\n",
bytes, NR_TEXOBJ, w, h);
orig_texImage = (GLubyte *) malloc(bytes + ALIGN);
orig_getImage = (GLubyte *) malloc(image_bytes + ALIGN);
if (!orig_texImage || !orig_getImage) {
DownloadRate = 0.0;
return;
}
printf("alloc %p %p\n", orig_texImage, orig_getImage);
texImage = (GLubyte *)align((unsigned)orig_texImage, ALIGN);
getImage = (GLubyte *)align((unsigned)orig_getImage, ALIGN);
for (i = 1; !(((unsigned)texImage) & i); i<<=1)
;
printf("texture image alignment: %d bytes (%p)\n", i, texImage);
for (i = 0; i < bytes; i++) {
texImage[i] = i & 0xff;
}
@@ -166,16 +224,50 @@ MeasureDownloadRate(void)
count = 0;
t0 = glutGet(GLUT_ELAPSED_TIME) * 0.001;
do {
int img = count%NR_TEXOBJ;
GLubyte *img_ptr = texImage + img * image_bytes;
glBindTexture(GL_TEXTURE_2D, TexObj[img]);
if (SubImage && count > 0) {
glTexSubImage2D(GL_TEXTURE_2D, 0, -TexBorder, -TexBorder, w, h,
/* Only update a portion of the image each iteration. This
* is presumably why you'd want to use texsubimage, otherwise
* you may as well just call teximage again.
*
* A bigger question is whether to use a pointer that moves
* with each call, ie does the incoming data come from L2
* cache under normal circumstances, or is it pulled from
* uncached memory?
*
* There's a good argument to say L2 cache, ie you'd expect
* the data to have been recently generated. It's possible
* that it could have come from a file read, which may or may
* not have gone through the cpu.
*/
glTexSubImage2D(GL_TEXTURE_2D, 0,
-TexBorder,
-TexBorder + offset * h/8,
w,
h/8,
FormatTable[Format].Format,
FormatTable[Format].Type, texImage);
FormatTable[Format].Type,
#if 1
texImage /* likely in L2$ */
#else
img_ptr + offset * bytes/8 /* unlikely in L2$ */
#endif
);
offset += 1;
offset %= 8;
total += w * h / 8;
}
else {
glTexImage2D(GL_TEXTURE_2D, 0,
FormatTable[Format].IntFormat, w, h, TexBorder,
FormatTable[Format].Format,
FormatTable[Format].Type, texImage);
FormatTable[Format].Type,
img_ptr);
total += w*h;
}
/* draw a tiny polygon to force texture into texram */
@@ -192,25 +284,12 @@ MeasureDownloadRate(void)
glDisable(GL_TEXTURE_2D);
printf("w*h=%d count=%d time=%f\n", w*h, count, time);
DownloadRate = w * h * count / time;
printf("total texels=%f time=%f\n", total, time);
DownloadRate = total / time;
#if 0
if (!ScaleAndBias) {
/* verify texture readback */
glGetTexImage(GL_TEXTURE_2D, 0,
FormatTable[Format].Format,
FormatTable[Format].Type, getImage);
for (i = 0; i < w * h; i++) {
if (texImage[i] != getImage[i]) {
printf("[%d] %d != %d\n", i, texImage[i], getImage[i]);
}
}
}
#endif
free(texImage);
free(getImage);
free(orig_texImage);
free(orig_getImage);
{
GLint err = glGetError();

View File

@@ -0,0 +1,184 @@
/* Copyright (c) Mark J. Kilgard, 1994. */
/*
* (c) Copyright 1993, Silicon Graphics, Inc.
* ALL RIGHTS RESERVED
* Permission to use, copy, modify, and distribute this software for
* any purpose and without fee is hereby granted, provided that the above
* copyright notice appear in all copies and that both the copyright notice
* and this permission notice appear in supporting documentation, and that
* the name of Silicon Graphics, Inc. not be used in advertising
* or publicity pertaining to distribution of the software without specific,
* written prior permission.
*
* THE MATERIAL EMBODIED ON THIS SOFTWARE IS PROVIDED TO YOU "AS-IS"
* AND WITHOUT WARRANTY OF ANY KIND, EXPRESS, IMPLIED OR OTHERWISE,
* INCLUDING WITHOUT LIMITATION, ANY WARRANTY OF MERCHANTABILITY OR
* FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT SHALL SILICON
* GRAPHICS, INC. BE LIABLE TO YOU OR ANYONE ELSE FOR ANY DIRECT,
* SPECIAL, INCIDENTAL, INDIRECT OR CONSEQUENTIAL DAMAGES OF ANY
* KIND, OR ANY DAMAGES WHATSOEVER, INCLUDING WITHOUT LIMITATION,
* LOSS OF PROFIT, LOSS OF USE, SAVINGS OR REVENUE, OR THE CLAIMS OF
* THIRD PARTIES, WHETHER OR NOT SILICON GRAPHICS, INC. HAS BEEN
* ADVISED OF THE POSSIBILITY OF SUCH LOSS, HOWEVER CAUSED AND ON
* ANY THEORY OF LIABILITY, ARISING OUT OF OR IN CONNECTION WITH THE
* POSSESSION, USE OR PERFORMANCE OF THIS SOFTWARE.
*
* US Government Users Restricted Rights
* Use, duplication, or disclosure by the Government is subject to
* restrictions set forth in FAR 52.227.19(c)(2) or subparagraph
* (c)(1)(ii) of the Rights in Technical Data and Computer Software
* clause at DFARS 252.227-7013 and/or in similar or successor
* clauses in the FAR or the DOD or NASA FAR Supplement.
* Unpublished-- rights reserved under the copyright laws of the
* United States. Contractor/manufacturer is Silicon Graphics,
* Inc., 2011 N. Shoreline Blvd., Mountain View, CA 94039-7311.
*
* OpenGL(TM) is a trademark of Silicon Graphics, Inc.
*/
/* mipmap.c
* This program demonstrates using mipmaps for texture maps.
* To overtly show the effect of mipmaps, each mipmap reduction
* level has a solidly colored, contrasting texture image.
* Thus, the quadrilateral which is drawn is drawn with several
* different colors.
*/
#include <stdlib.h>
#include <stdio.h>
#include <GL/glut.h>
GLubyte mipmapImage32[40][46][3];
GLubyte mipmapImage16[20][23][3];
GLubyte mipmapImage8[10][11][3];
GLubyte mipmapImage4[5][5][3];
GLubyte mipmapImage2[2][2][3];
GLubyte mipmapImage1[1][1][3];
static void makeImages(void)
{
int i, j;
for (i = 0; i < 40; i++) {
for (j = 0; j < 46; j++) {
mipmapImage32[i][j][0] = 255;
mipmapImage32[i][j][1] = 255;
mipmapImage32[i][j][2] = 0;
}
}
for (i = 0; i < 20; i++) {
for (j = 0; j < 23; j++) {
mipmapImage16[i][j][0] = 255;
mipmapImage16[i][j][1] = 0;
mipmapImage16[i][j][2] = 255;
}
}
for (i = 0; i < 10; i++) {
for (j = 0; j < 11; j++) {
mipmapImage8[i][j][0] = 255;
mipmapImage8[i][j][1] = 0;
mipmapImage8[i][j][2] = 0;
}
}
for (i = 0; i < 5; i++) {
for (j = 0; j < 5; j++) {
mipmapImage4[i][j][0] = 0;
mipmapImage4[i][j][1] = 255;
mipmapImage4[i][j][2] = 0;
}
}
for (i = 0; i < 2; i++) {
for (j = 0; j < 2; j++) {
mipmapImage2[i][j][0] = 0;
mipmapImage2[i][j][1] = 0;
mipmapImage2[i][j][2] = 255;
}
}
mipmapImage1[0][0][0] = 255;
mipmapImage1[0][0][1] = 255;
mipmapImage1[0][0][2] = 255;
}
static void myinit(void)
{
if (!glutExtensionSupported("GL_ARB_texture_non_power_of_two")) {
printf("Sorry, this program requires GL_ARB_texture_non_power_of_two\n");
exit(1);
}
glEnable(GL_DEPTH_TEST);
glDepthFunc(GL_LESS);
glShadeModel(GL_FLAT);
glTranslatef(0.0, 0.0, -3.6);
makeImages();
glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
glTexImage2D(GL_TEXTURE_2D, 0, 3, 40, 46, 0,
GL_RGB, GL_UNSIGNED_BYTE, &mipmapImage32[0][0][0]);
glTexImage2D(GL_TEXTURE_2D, 1, 3, 20, 23, 0,
GL_RGB, GL_UNSIGNED_BYTE, &mipmapImage16[0][0][0]);
glTexImage2D(GL_TEXTURE_2D, 2, 3, 10, 11, 0,
GL_RGB, GL_UNSIGNED_BYTE, &mipmapImage8[0][0][0]);
glTexImage2D(GL_TEXTURE_2D, 3, 3, 5, 5, 0,
GL_RGB, GL_UNSIGNED_BYTE, &mipmapImage4[0][0][0]);
glTexImage2D(GL_TEXTURE_2D, 4, 3, 2, 2, 0,
GL_RGB, GL_UNSIGNED_BYTE, &mipmapImage2[0][0][0]);
glTexImage2D(GL_TEXTURE_2D, 5, 3, 1, 1, 0,
GL_RGB, GL_UNSIGNED_BYTE, &mipmapImage1[0][0][0]);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER,
GL_NEAREST_MIPMAP_NEAREST);
glTexEnvf(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_DECAL);
glEnable(GL_TEXTURE_2D);
}
static void display(void)
{
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glBegin(GL_QUADS);
glTexCoord2f(0.0, 0.0); glVertex3f(-2.0, -1.0, 0.0);
glTexCoord2f(0.0, 8.0); glVertex3f(-2.0, 1.0, 0.0);
glTexCoord2f(8.0, 8.0); glVertex3f(2000.0, 1.0, -6000.0);
glTexCoord2f(8.0, 0.0); glVertex3f(2000.0, -1.0, -6000.0);
glEnd();
glFlush();
}
static void myReshape(int w, int h)
{
glViewport(0, 0, w, h);
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
gluPerspective(60.0, 1.0*(GLfloat)w/(GLfloat)h, 1.0, 30000.0);
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
}
static void
key(unsigned char k, int x, int y)
{
switch (k) {
case 27: /* Escape */
exit(0);
break;
default:
return;
}
glutPostRedisplay();
}
int main(int argc, char** argv)
{
glutInit(&argc, argv);
glutInitDisplayMode (GLUT_SINGLE | GLUT_RGB | GLUT_DEPTH);
glutInitWindowSize (500, 500);
glutCreateWindow (argv[0]);
myinit();
glutReshapeFunc (myReshape);
glutDisplayFunc(display);
glutKeyboardFunc(key);
glutMainLoop();
return 0; /* ANSI C requires main to return int. */
}

View File

@@ -38,7 +38,9 @@ SOURCES = \
quad-offset-unfilled.c \
quad-unfilled.c \
quad-tex-2d.c \
quad-tex-pbo.c \
quad-tex-3d.c \
quad-tex-dep.c \
quad.c \
quads.c \
quadstrip.c \

View File

@@ -0,0 +1,245 @@
#include <assert.h>
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#define GL_GLEXT_PROTOTYPES
#include <GL/glut.h>
GLenum doubleBuffer;
/* Demonstrate an equivalent to paletted texturing using fragment
* programs and dependent texturing. This requires at least one
* texture unit to be reserved for the palette lookup.
*/
static void Init( void )
{
static const char *modulate2D =
"!!ARBfp1.0\n"
"TEMP index; \n"
"TEX index, fragment.texcoord[0], texture[0], 2D; \n"
"TEX result.color, index, texture[1], 1D; \n"
"END"
;
GLuint modulateProg;
GLuint Texture;
if (!glutExtensionSupported("GL_ARB_fragment_program")) {
printf("Error: GL_ARB_fragment_program not supported!\n");
exit(1);
}
printf("GL_RENDERER = %s\n", (char *) glGetString(GL_RENDERER));
/* Setup the fragment program */
glGenProgramsARB(1, &modulateProg);
glBindProgramARB(GL_FRAGMENT_PROGRAM_ARB, modulateProg);
glProgramStringARB(GL_FRAGMENT_PROGRAM_ARB, GL_PROGRAM_FORMAT_ASCII_ARB,
strlen(modulate2D), (const GLubyte *)modulate2D);
printf("glGetError = 0x%x\n", (int) glGetError());
printf("glError(GL_PROGRAM_ERROR_STRING_ARB) = %s\n",
(char *) glGetString(GL_PROGRAM_ERROR_STRING_ARB));
assert(glIsProgramARB(modulateProg));
glEnable(GL_FRAGMENT_PROGRAM_ARB);
{
#define HEIGHT 8
#define WIDTH 32
#define B 0
#define G 1
#define R 2
#define A 3
static char texture[HEIGHT * WIDTH + 1] =
" "
" MMM EEEE SSS AAA "
" M M M E S S A A "
" M M M EEEE SS A A "
" M M M E SS AAAAA "
" M M E S S A A "
" M M EEEE SSS A A ";
GLubyte table[256][4];
/* load the color table for each texel-index */
memset(table, 0xff, 256*4);
table[' '][B] = 255;
table[' '][G] = 255;
table[' '][R] = 255;
table[' '][A] = 64;
table['M'][B] = 0;
table['M'][G] = 0;
table['M'][R] = 255;
table['M'][A] = 255;
table['E'][B] = 0;
table['E'][G] = 255;
table['E'][R] = 0;
table['E'][A] = 255;
table['S'][B] = 255;
table['S'][G] = 0;
table['S'][R] = 0;
table['S'][A] = 255;
table['A'][B] = 0;
table['A'][G] = 255;
table['A'][R] = 255;
table['A'][A] = 255;
glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
/* Load indexed texture as a 2D I8 texture */
glActiveTextureARB(GL_TEXTURE0_ARB);
glGenTextures(1, &Texture);
glBindTexture(GL_TEXTURE_2D, Texture);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexImage2D(GL_TEXTURE_2D, /* target */
0, /* level */
GL_INTENSITY, /* internal format */
WIDTH, HEIGHT, /* width, height */
0, /* border */
GL_LUMINANCE, /* texture format */
GL_UNSIGNED_BYTE, /* texture type */
texture); /* the texture */
printf("glGetError = 0x%x\n", (int) glGetError());
printf("glError(GL_PROGRAM_ERROR_STRING_ARB) = %s\n",
(char *) glGetString(GL_PROGRAM_ERROR_STRING_ARB));
glEnable(GL_TEXTURE_2D);
/* Load "pallete" as a 1D BGRA8888 texture */
glActiveTextureARB(GL_TEXTURE1_ARB);
glGenTextures(1, &Texture);
glBindTexture(GL_TEXTURE_1D, Texture);
glTexParameteri(GL_TEXTURE_1D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_1D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glTexImage1D(GL_TEXTURE_1D, /* target */
0, /* level */
GL_RGBA, /* internal format */
256, /* width */
0, /* border */
GL_BGRA, /* texture format */
GL_UNSIGNED_BYTE, /* texture type */
table); /* the texture */
glEnable(GL_TEXTURE_1D);
/* glEnable(GL_BLEND); */
/* glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA); */
#undef HEIGHT
#undef WIDTH
}
glClearColor(.3, .3, .3, 0);
}
static void Reshape(int width, int height)
{
glViewport(0, 0, (GLint)width, (GLint)height);
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
glOrtho(-1.0, 1.0, -1.0, 1.0, -0.5, 1000.0);
glMatrixMode(GL_MODELVIEW);
}
static void Key(unsigned char key, int x, int y)
{
switch (key) {
case 27:
exit(1);
default:
return;
}
glutPostRedisplay();
}
static void Draw(void)
{
glClear(GL_COLOR_BUFFER_BIT);
glBegin(GL_QUADS);
glTexCoord2f(1,1);
glVertex3f( 0.9, -0.9, -30.0);
glTexCoord2f(1,0);
glVertex3f( 0.9, 0.9, -30.0);
glTexCoord2f(0,0);
glVertex3f(-0.9, 0.9, -30.0);
glTexCoord2f(0,1);
glVertex3f(-0.9, -0.9, -30.0);
glEnd();
glFlush();
if (doubleBuffer) {
glutSwapBuffers();
}
}
static GLenum Args(int argc, char **argv)
{
GLint i;
doubleBuffer = GL_FALSE;
for (i = 1; i < argc; i++) {
if (strcmp(argv[i], "-sb") == 0) {
doubleBuffer = GL_FALSE;
} else if (strcmp(argv[i], "-db") == 0) {
doubleBuffer = GL_TRUE;
} else {
fprintf(stderr, "%s (Bad option).\n", argv[i]);
return GL_FALSE;
}
}
return GL_TRUE;
}
int main(int argc, char **argv)
{
GLenum type;
glutInit(&argc, argv);
if (Args(argc, argv) == GL_FALSE) {
exit(1);
}
glutInitWindowPosition(0, 0); glutInitWindowSize( 250, 250);
type = GLUT_RGB;
type |= (doubleBuffer) ? GLUT_DOUBLE : GLUT_SINGLE;
glutInitDisplayMode(type);
if (glutCreateWindow("First Tri") == GL_FALSE) {
exit(1);
}
Init();
glutReshapeFunc(Reshape);
glutKeyboardFunc(Key);
glutDisplayFunc(Draw);
glutMainLoop();
return 0;
}

View File

@@ -0,0 +1,181 @@
/*
* Copyright (c) 1991, 1992, 1993 Silicon Graphics, Inc.
*
* Permission to use, copy, modify, distribute, and sell this software and
* its documentation for any purpose is hereby granted without fee, provided
* that (i) the above copyright notices and this permission notice appear in
* all copies of the software and related documentation, and (ii) the name of
* Silicon Graphics may not be used in any advertising or
* publicity relating to the software without the specific, prior written
* permission of Silicon Graphics.
*
* THE SOFTWARE IS PROVIDED "AS-IS" AND WITHOUT WARRANTY OF
* ANY KIND,
* EXPRESS, IMPLIED OR OTHERWISE, INCLUDING WITHOUT LIMITATION, ANY
* WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE.
*
* IN NO EVENT SHALL SILICON GRAPHICS BE LIABLE FOR
* ANY SPECIAL, INCIDENTAL, INDIRECT OR CONSEQUENTIAL DAMAGES OF ANY KIND,
* OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
* WHETHER OR NOT ADVISED OF THE POSSIBILITY OF DAMAGE, AND ON ANY THEORY OF
* LIABILITY, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
* OF THIS SOFTWARE.
*/
#define GL_GLEXT_PROTOTYPES
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <GL/glut.h>
#define CI_OFFSET_1 16
#define CI_OFFSET_2 32
GLenum doubleBuffer;
static GLuint DrawPBO;
static void Init(void)
{
fprintf(stderr, "GL_RENDERER = %s\n", (char *) glGetString(GL_RENDERER));
fprintf(stderr, "GL_VERSION = %s\n", (char *) glGetString(GL_VERSION));
fprintf(stderr, "GL_VENDOR = %s\n", (char *) glGetString(GL_VENDOR));
glClearColor(0.0, 0.0, 1.0, 0.0);
#define SIZE 16
{
GLubyte tex2d[SIZE][SIZE][4];
GLint s, t;
for (s = 0; s < SIZE; s++) {
for (t = 0; t < SIZE; t++) {
/* bgra:
*/
tex2d[t][s][0] = 0x30;
tex2d[t][s][1] = t*255/(SIZE-1);
tex2d[t][s][2] = s*255/(SIZE-1);
tex2d[t][s][3] = 0xff;
}
}
/* put image into DrawPBO */
glGenBuffersARB(1, &DrawPBO);
glBindBufferARB(GL_PIXEL_PACK_BUFFER_EXT, DrawPBO);
glBufferDataARB(GL_PIXEL_PACK_BUFFER_EXT,
SIZE * SIZE * 4, tex2d, GL_STATIC_DRAW);
glBindBufferARB(GL_PIXEL_PACK_BUFFER_EXT, 0);
glTexEnvf(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_REPLACE);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_R, GL_REPEAT);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
glBindBufferARB(GL_PIXEL_UNPACK_BUFFER_EXT, DrawPBO);
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, SIZE, SIZE, 0,
GL_BGRA, GL_UNSIGNED_BYTE, NULL);
glBindBufferARB(GL_PIXEL_UNPACK_BUFFER_EXT, 0);
glPixelStorei(GL_UNPACK_ALIGNMENT, 4);
glEnable(GL_TEXTURE_2D);
}
}
static void Reshape(int width, int height)
{
glViewport(0, 0, (GLint)width, (GLint)height);
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
glOrtho(-1.0, 1.0, -1.0, 1.0, -0.5, 1000.0);
glMatrixMode(GL_MODELVIEW);
}
static void Key(unsigned char key, int x, int y)
{
switch (key) {
case 27:
exit(1);
default:
return;
}
glutPostRedisplay();
}
static void Draw(void)
{
glClear(GL_COLOR_BUFFER_BIT);
glBegin(GL_QUADS);
glTexCoord2f(1,0);
glVertex3f( 0.9, -0.9, -30.0);
glTexCoord2f(1,1);
glVertex3f( 0.9, 0.9, -30.0);
glTexCoord2f(0,1);
glVertex3f(-0.9, 0.9, -30.0);
glTexCoord2f(0,0);
glVertex3f(-0.9, -0.9, -30.0);
glEnd();
glFlush();
if (doubleBuffer) {
glutSwapBuffers();
}
}
static GLenum Args(int argc, char **argv)
{
GLint i;
doubleBuffer = GL_FALSE;
for (i = 1; i < argc; i++) {
if (strcmp(argv[i], "-sb") == 0) {
doubleBuffer = GL_FALSE;
} else if (strcmp(argv[i], "-db") == 0) {
doubleBuffer = GL_TRUE;
} else {
fprintf(stderr, "%s (Bad option).\n", argv[i]);
return GL_FALSE;
}
}
return GL_TRUE;
}
int main(int argc, char **argv)
{
GLenum type;
glutInit(&argc, argv);
if (Args(argc, argv) == GL_FALSE) {
exit(1);
}
glutInitWindowPosition(0, 0); glutInitWindowSize( 250, 250);
type = GLUT_RGB;
type |= (doubleBuffer) ? GLUT_DOUBLE : GLUT_SINGLE;
glutInitDisplayMode(type);
if (glutCreateWindow("First Tri") == GL_FALSE) {
exit(1);
}
Init();
glutReshapeFunc(Reshape);
glutKeyboardFunc(Key);
glutDisplayFunc(Draw);
glutMainLoop();
return 0;
}

View File

@@ -722,6 +722,68 @@ static const __DRIinterfaceMethods interface_methods = {
glXGetMscRateOML,
};
#define DRM_MAX_FDS 16
static struct {
char *BusID;
int fd;
int refcount;
} connection[DRM_MAX_FDS];
static int nr_fds = 0;
int drmOpenOnce(void *unused,
const char *BusID,
int *newlyopened)
{
int i;
int fd;
for (i = 0; i < nr_fds; i++)
if (strcmp(BusID, connection[i].BusID) == 0) {
connection[i].refcount++;
*newlyopened = 0;
return connection[i].fd;
}
fd = drmOpen(unused, BusID);
if (fd <= 0 || nr_fds == DRM_MAX_FDS)
return fd;
connection[nr_fds].BusID = strdup(BusID);
connection[nr_fds].fd = fd;
connection[nr_fds].refcount = 1;
*newlyopened = 1;
fprintf(stderr, "saved connection %d for %s %d\n",
nr_fds, connection[nr_fds].BusID,
strcmp(BusID, connection[nr_fds].BusID));
nr_fds++;
return fd;
}
void drmCloseOnce(int fd)
{
int i;
for (i = 0; i < nr_fds; i++) {
if (fd == connection[i].fd) {
if (--connection[i].refcount == 0) {
drmClose(connection[i].fd);
free(connection[i].BusID);
if (i < --nr_fds)
connection[i] = connection[nr_fds];
return;
}
}
}
}
/**
* Perform the required libGL-side initialization and call the client-side
@@ -773,7 +835,8 @@ CallCreateNewScreen(Display *dpy, int scrn, __DRIscreen *psc,
framebuffer.dev_priv = NULL;
if (XF86DRIOpenConnection(dpy, scrn, &hSAREA, &BusID)) {
fd = drmOpen(NULL,BusID);
int newlyopened;
fd = drmOpenOnce(NULL,BusID, &newlyopened);
Xfree(BusID); /* No longer needed */
err_msg = "open DRM";
@@ -800,7 +863,7 @@ CallCreateNewScreen(Display *dpy, int scrn, __DRIscreen *psc,
}
err_msg = "XF86DRIAuthConnection";
if (XF86DRIAuthConnection(dpy, scrn, magic)) {
if (!newlyopened || XF86DRIAuthConnection(dpy, scrn, magic)) {
char *driverName;
/*
@@ -904,7 +967,7 @@ CallCreateNewScreen(Display *dpy, int scrn, __DRIscreen *psc,
}
if ( fd >= 0 ) {
(void)drmClose(fd);
(void)drmCloseOnce(fd);
}
(void)XF86DRICloseConnection(dpy, scrn);
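
The drmOpenOnce()/drmCloseOnce() pair added above keeps one file descriptor per BusID and reference-counts it. A small sketch of the intended calling pattern, with error handling elided; everything other than the two new functions is only illustrative.

/* Sketch of the calling pattern only; the real callers are the
 * CallCreateNewScreen() hunks above.
 */
int newlyopened;
int fd = drmOpenOnce(NULL, BusID, &newlyopened);

/* newlyopened == 1: the device was really opened and still needs
 * XF86DRIAuthConnection().  newlyopened == 0: an existing, already
 * authenticated connection for this BusID was reused -- the same fd is
 * handed to every user and only the refcount is bumped.
 */

/* ... use the device ... */

drmCloseOnce(fd);   /* drmClose() happens only when the last reference is dropped */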

View File

@@ -9,7 +9,10 @@ COMMON_SOURCES = \
../common/vblank.c \
../common/dri_util.c \
../common/xmlconfig.c \
../common/drirenderbuffer.c
../common/drirenderbuffer.c \
../common/dri_bufmgr.c \
../common/dri_drmpool.c
ifeq ($(WINDOW_SYSTEM),dri)
WINOBJ=

View File

@@ -0,0 +1,493 @@
/**************************************************************************
*
* Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
*
**************************************************************************/
/*
* Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
* Keith Whitwell <keithw-at-tungstengraphics-dot-com>
*/
#include <xf86drm.h>
#include <stdlib.h>
#include "glthread.h"
#include "errno.h"
#include "dri_bufmgr.h"
#include "string.h"
#include "imports.h"
#include "dri_bufpool.h"
_glthread_DECLARE_STATIC_MUTEX(bmMutex);
/*
* TODO: Introduce fence pools in the same way as
* buffer object pools.
*/
typedef struct _DriFenceObject
{
int fd;
_glthread_Mutex mutex;
int refCount;
const char *name;
drmFence fence;
} DriFenceObject;
typedef struct _DriBufferObject
{
DriBufferPool *pool;
_glthread_Mutex mutex;
int refCount;
const char *name;
unsigned flags;
unsigned hint;
unsigned alignment;
void *private;
} DriBufferObject;
void
bmError(int val, const char *file, const char *function, int line)
{
_mesa_printf("Fatal video memory manager error \"%s\".\n"
"Check kernel logs or set the LIBGL_DEBUG\n"
"environment variable to \"verbose\" for more info.\n"
"Detected in file %s, line %d, function %s.\n",
strerror(-val), file, line, function);
#ifndef NDEBUG
abort();
#else
abort();
#endif
}
DriFenceObject *
driFenceBuffers(int fd, char *name, unsigned flags)
{
DriFenceObject *fence = (DriFenceObject *) malloc(sizeof(*fence));
int ret;
if (!fence)
BM_CKFATAL(-EINVAL);
_glthread_LOCK_MUTEX(bmMutex);
fence->refCount = 1;
fence->name = name;
fence->fd = fd;
_glthread_INIT_MUTEX(fence->mutex);
ret = drmFenceBuffers(fd, flags, &fence->fence);
_glthread_UNLOCK_MUTEX(bmMutex);
if (ret) {
free(fence);
BM_CKFATAL(ret);
}
return fence;
}
unsigned
driFenceType(DriFenceObject * fence)
{
unsigned ret;
_glthread_LOCK_MUTEX(bmMutex);
ret = fence->fence.flags;
_glthread_UNLOCK_MUTEX(bmMutex);
return ret;
}
DriFenceObject *
driFenceReference(DriFenceObject * fence)
{
_glthread_LOCK_MUTEX(bmMutex);
++fence->refCount;
_glthread_UNLOCK_MUTEX(bmMutex);
return fence;
}
void
driFenceUnReference(DriFenceObject * fence)
{
if (!fence)
return;
_glthread_LOCK_MUTEX(bmMutex);
if (--fence->refCount == 0) {
drmFenceDestroy(fence->fd, &fence->fence);
free(fence);
}
_glthread_UNLOCK_MUTEX(bmMutex);
}
void
driFenceFinish(DriFenceObject * fence, unsigned type, int lazy)
{
int ret;
unsigned flags = (lazy) ? DRM_FENCE_FLAG_WAIT_LAZY : 0;
_glthread_LOCK_MUTEX(fence->mutex);
ret = drmFenceWait(fence->fd, flags, &fence->fence, type);
_glthread_UNLOCK_MUTEX(fence->mutex);
BM_CKFATAL(ret);
}
int
driFenceSignaled(DriFenceObject * fence, unsigned type)
{
int signaled;
int ret;
if (fence == NULL)
return GL_TRUE;
_glthread_LOCK_MUTEX(fence->mutex);
ret = drmFenceSignaled(fence->fd, &fence->fence, type, &signaled);
_glthread_UNLOCK_MUTEX(fence->mutex);
BM_CKFATAL(ret);
return signaled;
}
extern drmBO *
driBOKernel(struct _DriBufferObject *buf)
{
drmBO *ret;
assert(buf->private != NULL);
ret = buf->pool->kernel(buf->pool, buf->private);
if (!ret)
BM_CKFATAL(-EINVAL);
return ret;
}
void
driBOWaitIdle(struct _DriBufferObject *buf, int lazy)
{
assert(buf->private != NULL);
_glthread_LOCK_MUTEX(buf->mutex);
BM_CKFATAL(buf->pool->waitIdle(buf->pool, buf->private, lazy));
_glthread_UNLOCK_MUTEX(buf->mutex);
}
void *
driBOMap(struct _DriBufferObject *buf, unsigned flags, unsigned hint)
{
void *virtual;
assert(buf->private != NULL);
_glthread_LOCK_MUTEX(buf->mutex);
BM_CKFATAL(buf->pool->map(buf->pool, buf->private, flags, hint, &virtual));
_glthread_UNLOCK_MUTEX(buf->mutex);
return virtual;
}
void
driBOUnmap(struct _DriBufferObject *buf)
{
assert(buf->private != NULL);
buf->pool->unmap(buf->pool, buf->private);
}
unsigned long
driBOOffset(struct _DriBufferObject *buf)
{
unsigned long ret;
assert(buf->private != NULL);
_glthread_LOCK_MUTEX(buf->mutex);
ret = buf->pool->offset(buf->pool, buf->private);
_glthread_UNLOCK_MUTEX(buf->mutex);
return ret;
}
unsigned
driBOFlags(struct _DriBufferObject *buf)
{
unsigned ret;
assert(buf->private != NULL);
_glthread_LOCK_MUTEX(buf->mutex);
ret = buf->pool->flags(buf->pool, buf->private);
_glthread_UNLOCK_MUTEX(buf->mutex);
return ret;
}
struct _DriBufferObject *
driBOReference(struct _DriBufferObject *buf)
{
_glthread_LOCK_MUTEX(bmMutex);
if (++buf->refCount == 1) {
BM_CKFATAL(-EINVAL);
}
_glthread_UNLOCK_MUTEX(bmMutex);
return buf;
}
void
driBOUnReference(struct _DriBufferObject *buf)
{
int tmp;
if (!buf)
return;
_glthread_LOCK_MUTEX(bmMutex);
tmp = --buf->refCount;
_glthread_UNLOCK_MUTEX(bmMutex);
if (!tmp) {
buf->pool->destroy(buf->pool, buf->private);
free(buf);
}
}
void
driBOData(struct _DriBufferObject *buf,
unsigned size, const void *data, unsigned flags)
{
void *virtual;
int newBuffer;
struct _DriBufferPool *pool;
_glthread_LOCK_MUTEX(buf->mutex);
pool = buf->pool;
if (!pool->create) {
_mesa_error(NULL, GL_INVALID_OPERATION,
"driBOData called on invalid buffer\n");
BM_CKFATAL(-EINVAL);
}
newBuffer = !buf->private || (pool->size(pool, buf->private) < size) ||
pool->map(pool, buf->private, DRM_BO_FLAG_WRITE,
DRM_BO_HINT_DONT_BLOCK, &virtual);
if (newBuffer) {
if (buf->private)
pool->destroy(pool, buf->private);
if (!flags)
flags = buf->flags;
buf->private = pool->create(pool, size, flags, 0, buf->alignment);
if (!buf->private)
BM_CKFATAL(-ENOMEM);
BM_CKFATAL(pool->map(pool, buf->private,
DRM_BO_FLAG_WRITE,
DRM_BO_HINT_DONT_BLOCK, &virtual));
}
if (data != NULL)
memcpy(virtual, data, size);
BM_CKFATAL(pool->unmap(pool, buf->private));
_glthread_UNLOCK_MUTEX(buf->mutex);
}
void
driBOSubData(struct _DriBufferObject *buf,
unsigned long offset, unsigned long size, const void *data)
{
void *virtual;
_glthread_LOCK_MUTEX(buf->mutex);
if (size && data) {
BM_CKFATAL(buf->pool->map(buf->pool, buf->private,
DRM_BO_FLAG_WRITE, 0, &virtual));
memcpy((unsigned char *) virtual + offset, data, size);
BM_CKFATAL(buf->pool->unmap(buf->pool, buf->private));
}
_glthread_UNLOCK_MUTEX(buf->mutex);
}
void
driBOGetSubData(struct _DriBufferObject *buf,
unsigned long offset, unsigned long size, void *data)
{
void *virtual;
_glthread_LOCK_MUTEX(buf->mutex);
if (size && data) {
BM_CKFATAL(buf->pool->map(buf->pool, buf->private,
DRM_BO_FLAG_READ, 0, &virtual));
memcpy(data, (unsigned char *) virtual + offset, size);
BM_CKFATAL(buf->pool->unmap(buf->pool, buf->private));
}
_glthread_UNLOCK_MUTEX(buf->mutex);
}
void
driBOSetStatic(struct _DriBufferObject *buf,
unsigned long offset,
unsigned long size, void *virtual, unsigned flags)
{
_glthread_LOCK_MUTEX(buf->mutex);
if (buf->private != NULL) {
_mesa_error(NULL, GL_INVALID_OPERATION,
"Invalid buffer for setStatic\n");
BM_CKFATAL(-EINVAL);
}
if (buf->pool->setstatic == NULL) {
_mesa_error(NULL, GL_INVALID_OPERATION,
"Invalid buffer pool for setStatic\n");
BM_CKFATAL(-EINVAL);
}
if (!flags)
flags = buf->flags;
buf->private = buf->pool->setstatic(buf->pool, offset, size,
virtual, flags);
if (!buf->private) {
_mesa_error(NULL, GL_OUT_OF_MEMORY,
"Invalid buffer pool for setStatic\n");
BM_CKFATAL(-ENOMEM);
}
_glthread_UNLOCK_MUTEX(buf->mutex);
}
void
driGenBuffers(struct _DriBufferPool *pool,
const char *name,
unsigned n,
struct _DriBufferObject *buffers[],
unsigned alignment, unsigned flags, unsigned hint)
{
struct _DriBufferObject *buf;
int i;
flags = (flags) ? flags : DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_MEM_VRAM |
DRM_BO_FLAG_MEM_LOCAL | DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE;
for (i = 0; i < n; ++i) {
buf = (struct _DriBufferObject *) calloc(1, sizeof(*buf));
if (!buf)
BM_CKFATAL(-ENOMEM);
_glthread_INIT_MUTEX(buf->mutex);
_glthread_LOCK_MUTEX(buf->mutex);
_glthread_LOCK_MUTEX(bmMutex);
buf->refCount = 1;
_glthread_UNLOCK_MUTEX(bmMutex);
buf->flags = flags;
buf->hint = hint;
buf->name = name;
buf->alignment = alignment;
buf->pool = pool;
_glthread_UNLOCK_MUTEX(buf->mutex);
buffers[i] = buf;
}
}
void
driDeleteBuffers(unsigned n, struct _DriBufferObject *buffers[])
{
int i;
for (i = 0; i < n; ++i) {
driBOUnReference(buffers[i]);
}
}
void
driInitBufMgr(int fd)
{
;
}
void
driBOCreateList(int target, drmBOList * list)
{
_glthread_LOCK_MUTEX(bmMutex);
BM_CKFATAL(drmBOCreateList(20, list));
_glthread_UNLOCK_MUTEX(bmMutex);
}
void
driBOResetList(drmBOList * list)
{
_glthread_LOCK_MUTEX(bmMutex);
BM_CKFATAL(drmBOResetList(list));
_glthread_UNLOCK_MUTEX(bmMutex);
}
void
driBOAddListItem(drmBOList * list, struct _DriBufferObject *buf,
unsigned flags, unsigned mask)
{
int newItem;
_glthread_LOCK_MUTEX(buf->mutex);
_glthread_LOCK_MUTEX(bmMutex);
BM_CKFATAL(drmAddValidateItem(list, driBOKernel(buf),
flags, mask, &newItem));
_glthread_UNLOCK_MUTEX(bmMutex);
/*
* Tell userspace pools to validate the buffer. This should be a
* noop if the pool is already validated.
* FIXME: We should have a list for this as well.
*/
if (buf->pool->validate) {
BM_CKFATAL(buf->pool->validate(buf->pool, buf->private));
}
_glthread_UNLOCK_MUTEX(buf->mutex);
}
void
driBOFence(struct _DriBufferObject *buf, struct _DriFenceObject *fence)
{
_glthread_LOCK_MUTEX(buf->mutex);
BM_CKFATAL(buf->pool->fence(buf->pool, buf->private, fence));
_glthread_UNLOCK_MUTEX(buf->mutex);
}
void
driBOValidateList(int fd, drmBOList * list)
{
_glthread_LOCK_MUTEX(bmMutex);
BM_CKFATAL(drmBOValidateList(fd, list));
_glthread_UNLOCK_MUTEX(bmMutex);
}
void
driPoolTakeDown(struct _DriBufferPool *pool)
{
pool->takeDown(pool);
}

View File

@@ -0,0 +1,99 @@
/**************************************************************************
*
* Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
*
**************************************************************************/
/*
* Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
* Keith Whitwell <keithw-at-tungstengraphics-dot-com>
*/
#ifndef _DRI_BUFMGR_H_
#define _DRI_BUFMGR_H_
#include <xf86drm.h>
struct _DriFenceObject;
struct _DriBufferObject;
struct _DriBufferPool;
extern struct _DriFenceObject *driFenceBuffers(int fd, char *name,
unsigned flags);
extern struct _DriFenceObject *driFenceReference(struct _DriFenceObject *fence);
extern void driFenceUnReference(struct _DriFenceObject *fence);
extern void
driFenceFinish(struct _DriFenceObject *fence, unsigned type, int lazy);
extern int driFenceSignaled(struct _DriFenceObject *fence, unsigned type);
extern unsigned driFenceType(struct _DriFenceObject *fence);
/*
* Return a pointer to the libdrm buffer object this DriBufferObject
* uses.
*/
extern drmBO *driBOKernel(struct _DriBufferObject *buf);
extern void *driBOMap(struct _DriBufferObject *buf, unsigned flags,
unsigned hint);
extern void driBOUnmap(struct _DriBufferObject *buf);
extern unsigned long driBOOffset(struct _DriBufferObject *buf);
extern unsigned driBOFlags(struct _DriBufferObject *buf);
extern struct _DriBufferObject *driBOReference(struct _DriBufferObject *buf);
extern void driBOUnReference(struct _DriBufferObject *buf);
extern void driBOData(struct _DriBufferObject *r_buf,
unsigned size, const void *data, unsigned flags);
extern void driBOSubData(struct _DriBufferObject *buf,
unsigned long offset, unsigned long size,
const void *data);
extern void driBOGetSubData(struct _DriBufferObject *buf,
unsigned long offset, unsigned long size,
void *data);
extern void driGenBuffers(struct _DriBufferPool *pool,
const char *name,
unsigned n,
struct _DriBufferObject *buffers[],
unsigned alignment, unsigned flags, unsigned hint);
extern void driDeleteBuffers(unsigned n, struct _DriBufferObject *buffers[]);
extern void driInitBufMgr(int fd);
extern void driBOCreateList(int target, drmBOList * list);
extern void driBOResetList(drmBOList * list);
extern void driBOAddListItem(drmBOList * list, struct _DriBufferObject *buf,
unsigned flags, unsigned mask);
extern void driBOValidateList(int fd, drmBOList * list);
extern void driBOFence(struct _DriBufferObject *buf,
struct _DriFenceObject *fence);
extern void driPoolTakeDown(struct _DriBufferPool *pool);
extern void driBOSetStatic(struct _DriBufferObject *buf,
unsigned long offset,
unsigned long size, void *virtual, unsigned flags);
extern void driBOWaitIdle(struct _DriBufferObject *buf, int lazy);
extern void driPoolTakeDown(struct _DriBufferPool *pool);
#endif
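
dri_bufmgr.h above, together with the dri_bufmgr.c implementation earlier in this diff, defines the driver-side buffer-manager API. Below is a rough, self-contained sketch of how a driver might drive it end to end, assuming an already-open DRM fd; the pool comes from driDRMPoolInit() in dri_bufpool.h, and the buffer name, size and flag choices are made up for the example.

/* Illustrative sketch only; not taken from the i915 driver. */
#include "dri_bufmgr.h"
#include "dri_bufpool.h"

static void example_upload(int fd, const void *data, unsigned size)
{
   struct _DriBufferPool *pool = driDRMPoolInit(fd);
   struct _DriBufferObject *bo;
   struct _DriFenceObject *fence;
   void *virt;

   driGenBuffers(pool, "example", 1, &bo, 0 /* alignment */,
                 DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE, 0);
   driBOData(bo, size, data, 0);            /* (re)allocate and fill the buffer */

   virt = driBOMap(bo, DRM_BO_FLAG_READ, 0);
   /* ... read back or verify the contents via virt ... */
   driBOUnmap(bo);

   /* After queuing hardware commands that reference the buffer: */
   fence = driFenceBuffers(fd, "example", 0);
   driBOFence(bo, fence);
   driFenceUnReference(fence);

   driBOUnReference(bo);                    /* destroyed when the refcount drops to 0 */
   driPoolTakeDown(pool);
}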

View File

@@ -0,0 +1,86 @@
/**************************************************************************
*
* Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
*
**************************************************************************/
/*
* Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
*/
#ifndef _DRI_BUFPOOL_H_
#define _DRI_BUFPOOL_H_
#include <xf86drm.h>
struct _DriFenceObject;
typedef struct _DriBufferPool
{
int fd;
int (*map) (struct _DriBufferPool * pool, void *private,
unsigned flags, int hint, void **virtual);
int (*unmap) (struct _DriBufferPool * pool, void *private);
int (*destroy) (struct _DriBufferPool * pool, void *private);
unsigned long (*offset) (struct _DriBufferPool * pool, void *private);
unsigned (*flags) (struct _DriBufferPool * pool, void *private);
unsigned long (*size) (struct _DriBufferPool * pool, void *private);
void *(*create) (struct _DriBufferPool * pool, unsigned long size,
unsigned flags, unsigned hint, unsigned alignment);
int (*fence) (struct _DriBufferPool * pool, void *private,
struct _DriFenceObject * fence);
drmBO *(*kernel) (struct _DriBufferPool * pool, void *private);
int (*validate) (struct _DriBufferPool * pool, void *private);
void *(*setstatic) (struct _DriBufferPool * pool, unsigned long offset,
unsigned long size, void *virtual, unsigned flags);
int (*waitIdle) (struct _DriBufferPool *pool, void *private,
int lazy);
void (*takeDown) (struct _DriBufferPool * pool);
void *data;
} DriBufferPool;
extern void bmError(int val, const char *file, const char *function,
int line);
#define BM_CKFATAL(val) \
do{ \
int tstVal = (val); \
if (tstVal) \
bmError(tstVal, __FILE__, __FUNCTION__, __LINE__); \
} while(0);
/*
* Builtin pools.
*/
/*
* Kernel buffer objects. Size in multiples of page size. Page size aligned.
*/
extern struct _DriBufferPool *driDRMPoolInit(int fd);
extern struct _DriBufferPool *driDRMStaticPoolInit(int fd);
#endif
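
The two headers above together define the new user-space buffer manager. A minimal usage sketch of that API (hypothetical; not part of any file in this change — the placement flags, alignment and list target are placeholders, since the real values come from the drm headers this branch expects) could look like:

static void
example_bo_usage(int fd, const void *vertices, unsigned size)
{
   struct _DriBufferPool *pool;
   struct _DriBufferObject *bo[1];
   drmBOList list;
   unsigned flags = 0;               /* placeholder: real placement flags come from xf86drm.h */

   driInitBufMgr(fd);
   pool = driDRMPoolInit(fd);        /* kernel-managed buffer objects */

   driGenBuffers(pool, "example vertices", 1, bo, 4096 /* alignment, placeholder */, flags, 0);
   driBOData(bo[0], size, vertices, 0);

   driBOCreateList(20 /* placeholder target */, &list);
   driBOAddListItem(&list, bo[0], flags, flags);
   driBOValidateList(fd, &list);     /* pin the buffers before submitting a batch */

   driBOUnReference(bo[0]);
   driPoolTakeDown(pool);
}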

View File

@@ -0,0 +1,221 @@
/**************************************************************************
*
* Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
*
**************************************************************************/
/*
* Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
*/
#include <xf86drm.h>
#include <stdlib.h>
#include "dri_bufpool.h"
/*
* Buffer pool implementation using DRM buffer objects as DRI buffer objects.
*/
static void *
pool_create(struct _DriBufferPool *pool,
unsigned long size, unsigned flags, unsigned hint,
unsigned alignment)
{
drmBO *buf = (drmBO *) malloc(sizeof(*buf));
int ret;
if (!buf)
return NULL;
if (alignment && ((4096 % alignment) != 0))
return NULL;
ret = drmBOCreate(pool->fd, NULL, 0, size, NULL, drm_bo_type_dc,
flags, hint, buf);
if (ret) {
free(buf);
return NULL;
}
return (void *) buf;
}
static int
pool_destroy(struct _DriBufferPool *pool, void *private)
{
drmBO *buf = (drmBO *) private;
return drmBODestroy(pool->fd, buf);
}
static int
pool_map(struct _DriBufferPool *pool, void *private, unsigned flags,
int hint, void **virtual)
{
drmBO *buf = (drmBO *) private;
return drmBOMap(pool->fd, buf, flags, hint, virtual);
}
static int
pool_unmap(struct _DriBufferPool *pool, void *private)
{
drmBO *buf = (drmBO *) private;
return drmBOUnmap(pool->fd, buf);
}
static unsigned long
pool_offset(struct _DriBufferPool *pool, void *private)
{
drmBO *buf = (drmBO *) private;
return buf->offset;
}
static unsigned
pool_flags(struct _DriBufferPool *pool, void *private)
{
drmBO *buf = (drmBO *) private;
return buf->flags;
}
static unsigned long
pool_size(struct _DriBufferPool *pool, void *private)
{
drmBO *buf = (drmBO *) private;
return buf->size;
}
static int
pool_fence(struct _DriBufferPool *pool, void *private,
struct _DriFenceObject *fence)
{
/*
* Noop. The kernel handles all fencing.
*/
return 0;
}
static drmBO *
pool_kernel(struct _DriBufferPool *pool, void *private)
{
return (drmBO *) private;
}
static int
pool_waitIdle(struct _DriBufferPool *pool, void *private, int lazy)
{
drmBO *buf = (drmBO *) private;
return drmBOWaitIdle(pool->fd, buf, (lazy) ? DRM_BO_HINT_WAIT_LAZY:0);
}
static void
pool_takedown(struct _DriBufferPool *pool)
{
free(pool);
}
struct _DriBufferPool *
driDRMPoolInit(int fd)
{
struct _DriBufferPool *pool;
pool = (struct _DriBufferPool *) malloc(sizeof(*pool));
if (!pool)
return NULL;
pool->fd = fd;
pool->map = &pool_map;
pool->unmap = &pool_unmap;
pool->destroy = &pool_destroy;
pool->offset = &pool_offset;
pool->flags = &pool_flags;
pool->size = &pool_size;
pool->create = &pool_create;
pool->fence = &pool_fence;
pool->kernel = &pool_kernel;
pool->validate = NULL;
pool->setstatic = NULL;
pool->waitIdle = &pool_waitIdle;
pool->takeDown = &pool_takedown;
pool->data = NULL;
return pool;
}
static void *
pool_setstatic(struct _DriBufferPool *pool, unsigned long offset,
unsigned long size, void *virtual, unsigned flags)
{
drmBO *buf = (drmBO *) malloc(sizeof(*buf));
int ret;
if (!buf)
return NULL;
ret = drmBOCreate(pool->fd, NULL, offset, size, NULL, drm_bo_type_fake,
flags, 0, buf);
if (ret) {
free(buf);
return NULL;
}
buf->virtual = virtual;
return (void *) buf;
}
struct _DriBufferPool *
driDRMStaticPoolInit(int fd)
{
struct _DriBufferPool *pool;
pool = (struct _DriBufferPool *) malloc(sizeof(*pool));
if (!pool)
return NULL;
pool->fd = fd;
pool->map = &pool_map;
pool->unmap = &pool_unmap;
pool->destroy = &pool_destroy;
pool->offset = &pool_offset;
pool->flags = &pool_flags;
pool->size = &pool_size;
pool->create = NULL;
pool->fence = &pool_fence;
pool->kernel = &pool_kernel;
pool->validate = NULL;
pool->setstatic = &pool_setstatic;
pool->waitIdle = &pool_waitIdle;
pool->takeDown = &pool_takedown;
pool->data = NULL;
return pool;
}
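
driDRMStaticPoolInit() differs from driDRMPoolInit() only in which hooks it fills in: it has no create(), but it offers setstatic(), which wraps an already-placed memory range (typically fixed screen memory) in a drm_bo_type_fake object built from an offset/size/virtual triple. A hypothetical sketch of that path, calling the hook directly with placeholder values:

   struct _DriBufferPool *static_pool = driDRMStaticPoolInit(fd);
   void *fb_bo = static_pool->setstatic(static_pool,
                                        fb_offset,    /* placeholder */
                                        fb_size,      /* placeholder */
                                        fb_virtual,   /* placeholder */
                                        0 /* flags, placeholder */);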

View File

@@ -403,8 +403,9 @@ __driUtilUpdateDrawableInfo(__DRIdrawablePrivate *pdp)
__DRIcontextPrivate *pcp = pdp->driContextPriv;
if (!pcp || (pdp != pcp->driDrawablePriv)) {
/* ERROR!!! */
return;
/* ERROR!!!
* ..but we must ignore it. There can be many contexts bound to a drawable.
*/
}
psp = pdp->driScreenPriv;
@@ -435,6 +436,7 @@ __driUtilUpdateDrawableInfo(__DRIdrawablePrivate *pdp)
/* Error -- eg the window may have been destroyed. Keep going
* with no cliprects.
*/
fprintf(stderr, "Drawable destroyed");
pdp->pStamp = &pdp->lastStamp; /* prevent endless loop */
pdp->numClipRects = 0;
pdp->pClipRects = NULL;
@@ -841,7 +843,7 @@ static void driDestroyScreen(__DRInativeDisplay *dpy, int scrn, void *screenPriv
(void)drmUnmap((drmAddress)psp->pSAREA, SAREA_MAX);
(void)drmUnmap((drmAddress)psp->pFB, psp->fbSize);
_mesa_free(psp->pDevPriv);
(void)drmClose(psp->fd);
(void)drmCloseOnce(psp->fd);
if ( psp->modes != NULL ) {
(*dri_interface->destroyContextModes)( psp->modes );
}

View File

@@ -87,15 +87,15 @@ typedef struct __DRIutilversionRec2 __DRIutilversion2;
#define DRI_VALIDATE_DRAWABLE_INFO(psp, pdp) \
do { \
while (*(pdp->pStamp) != pdp->lastStamp) { \
DRM_UNLOCK(psp->fd, &psp->pSAREA->lock, \
pdp->driContextPriv->hHWContext); \
register unsigned int hwContext = psp->pSAREA->lock.lock & \
~(DRM_LOCK_HELD | DRM_LOCK_CONT); \
DRM_UNLOCK(psp->fd, &psp->pSAREA->lock, hwContext); \
\
DRM_SPINLOCK(&psp->pSAREA->drawable_lock, psp->drawLockID); \
DRI_VALIDATE_DRAWABLE_INFO_ONCE(pdp); \
DRM_SPINUNLOCK(&psp->pSAREA->drawable_lock, psp->drawLockID); \
\
DRM_LIGHT_LOCK(psp->fd, &psp->pSAREA->lock, \
pdp->driContextPriv->hHWContext); \
DRM_LIGHT_LOCK(psp->fd, &psp->pSAREA->lock, hwContext); \
} \
} while (0)
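
The rewritten macro is part of the multiple-contexts-per-drawable fixes in this branch: pdp->driContextPriv is not necessarily the context that currently holds the heavyweight lock, so unlocking and relocking with its hHWContext could act on behalf of the wrong context. Instead the owner is read back from the SAREA lock word, whose two high bits carry the held/contended flags and whose remaining bits are the hardware context number; schematically:

   unsigned int lock  = psp->pSAREA->lock.lock;
   unsigned int owner = lock & ~(DRM_LOCK_HELD | DRM_LOCK_CONT);   /* current lock holder */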

View File

@@ -7,16 +7,6 @@ LIBNAME = i915_dri.so
MINIGLX_SOURCES = server/intel_dri.c
DRIVER_SOURCES = \
i915_context.c \
i915_debug.c \
i915_fragprog.c \
i915_metaops.c \
i915_program.c \
i915_state.c \
i915_tex.c \
i915_texprog.c \
i915_texstate.c \
i915_vtbl.c \
i830_context.c \
i830_metaops.c \
i830_state.c \
@@ -24,19 +14,43 @@ DRIVER_SOURCES = \
i830_tex.c \
i830_texstate.c \
i830_vtbl.c \
intel_render.c \
intel_regions.c \
intel_buffer_objects.c \
intel_batchbuffer.c \
intel_mipmap_tree.c \
i915_tex_layout.c \
intel_tex_image.c \
intel_tex_subimage.c \
intel_tex_copy.c \
intel_tex_validate.c \
intel_tex_format.c \
intel_tex.c \
intel_pixel.c \
intel_pixel_copy.c \
intel_pixel_read.c \
intel_pixel_draw.c \
intel_buffers.c \
intel_blit.c \
i915_tex.c \
i915_texstate.c \
i915_context.c \
i915_debug.c \
i915_fragprog.c \
i915_metaops.c \
i915_program.c \
i915_state.c \
i915_vtbl.c \
intel_context.c \
intel_ioctl.c \
intel_pixel.c \
intel_render.c \
intel_rotate.c \
intel_screen.c \
intel_span.c \
intel_state.c \
intel_tex.c \
intel_texmem.c \
intel_tris.c
intel_tris.c \
intel_fbo.c \
intel_depthstencil.c \
intel_batchpool.c
C_SOURCES = \
$(COMMON_SOURCES) \
$(DRIVER_SOURCES)

View File

@@ -38,37 +38,38 @@
* Mesa's Driver Functions
***************************************/
static const struct dri_extension i830_extensions[] =
{
{ "GL_ARB_texture_env_crossbar", NULL },
{ NULL, NULL }
static const struct dri_extension i830_extensions[] = {
{"GL_ARB_texture_env_crossbar", NULL},
{NULL, NULL}
};
static void i830InitDriverFunctions( struct dd_function_table *functions )
static void
i830InitDriverFunctions(struct dd_function_table *functions)
{
intelInitDriverFunctions( functions );
i830InitStateFuncs( functions );
i830InitTextureFuncs( functions );
intelInitDriverFunctions(functions);
i830InitStateFuncs(functions);
i830InitTextureFuncs(functions);
}
GLboolean i830CreateContext( const __GLcontextModes *mesaVis,
__DRIcontextPrivate *driContextPriv,
void *sharedContextPrivate)
GLboolean
i830CreateContext(const __GLcontextModes * mesaVis,
__DRIcontextPrivate * driContextPriv,
void *sharedContextPrivate)
{
struct dd_function_table functions;
i830ContextPtr i830 = (i830ContextPtr) CALLOC_STRUCT(i830_context);
intelContextPtr intel = &i830->intel;
struct i830_context *i830 = CALLOC_STRUCT(i830_context);
struct intel_context *intel = &i830->intel;
GLcontext *ctx = &intel->ctx;
GLuint i;
if (!i830) return GL_FALSE;
if (!i830)
return GL_FALSE;
i830InitVtbl( i830 );
i830InitDriverFunctions( &functions );
i830InitVtbl(i830);
i830InitDriverFunctions(&functions);
if (!intelInitContext( intel, mesaVis, driContextPriv,
sharedContextPrivate, &functions )) {
if (!intelInitContext(intel, mesaVis, driContextPriv,
sharedContextPrivate, &functions)) {
FREE(i830);
return GL_FALSE;
}
@@ -77,48 +78,27 @@ GLboolean i830CreateContext( const __GLcontextModes *mesaVis,
intel->ctx.Const.MaxTextureImageUnits = I830_TEX_UNITS;
intel->ctx.Const.MaxTextureCoordUnits = I830_TEX_UNITS;
intel->nr_heaps = 1;
intel->texture_heaps[0] =
driCreateTextureHeap( 0, intel,
intel->intelScreen->tex.size,
12,
I830_NR_TEX_REGIONS,
intel->sarea->texList,
(unsigned *) & intel->sarea->texAge,
& intel->swapped,
sizeof( struct i830_texture_object ),
(destroy_texture_object_t *)intelDestroyTexObj );
/* FIXME: driCalculateMaxTextureLevels assumes that mipmaps are tightly
* FIXME: packed, but they're not in Intel graphics hardware.
/* Advertise the full hardware capabilities. The new memory
* manager should cope much better with overload situations:
*/
intel->ctx.Const.MaxTextureUnits = I830_TEX_UNITS;
i = driQueryOptioni( &intel->intelScreen->optionCache, "allow_large_textures");
driCalculateMaxTextureLevels( intel->texture_heaps,
intel->nr_heaps,
&intel->ctx.Const,
4,
11, /* max 2D texture size is 2048x2048 */
8, /* max 3D texture size is 256^3 */
10, /* max CUBE texture size is 1024x1024 */
11, /* max RECT. supported */
12,
GL_FALSE,
i );
ctx->Const.MaxTextureLevels = 12;
ctx->Const.Max3DTextureLevels = 9;
ctx->Const.MaxCubeTextureLevels = 11;
ctx->Const.MaxTextureRectSize = (1 << 11);
ctx->Const.MaxTextureUnits = I830_TEX_UNITS;
_tnl_init_vertices( ctx, ctx->Const.MaxArrayLockSize + 12,
18 * sizeof(GLfloat) );
_tnl_init_vertices(ctx, ctx->Const.MaxArrayLockSize + 12,
18 * sizeof(GLfloat));
intel->verts = TNL_CONTEXT(ctx)->clipspace.vertex_buf;
driInitExtensions( ctx, i830_extensions, GL_FALSE );
driInitExtensions(ctx, i830_extensions, GL_FALSE);
i830InitState( i830 );
i830InitState(i830);
i830InitMetaFuncs(i830);
_tnl_allow_vertex_fog( ctx, 1 );
_tnl_allow_pixel_fog( ctx, 0 );
_tnl_allow_vertex_fog(ctx, 1);
_tnl_allow_pixel_fog(ctx, 0);
return GL_TRUE;
}
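
For reference, Mesa's *Levels limits are one more than the log2 of the largest supported dimension, so the hard-coded values above match what the removed driCalculateMaxTextureLevels() call used to advertise:

   MaxTextureLevels     = 12  ->  2^(12-1) = 2048   (2D, and RECT via 1<<11)
   Max3DTextureLevels   =  9  ->  2^(9-1)  =  256   (256^3 3D textures)
   MaxCubeTextureLevels = 11  ->  2^(11-1) = 1024   (1024x1024 cube faces)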

View File

@@ -39,6 +39,7 @@
#define I830_UPLOAD_CTX 0x1
#define I830_UPLOAD_BUFFERS 0x2
#define I830_UPLOAD_STIPPLE 0x4
#define I830_UPLOAD_INVARIENT 0x8
#define I830_UPLOAD_TEX(i) (0x10<<(i))
#define I830_UPLOAD_TEXBLEND(i) (0x100<<(i))
#define I830_UPLOAD_TEX_ALL (0x0f0)
@@ -48,17 +49,15 @@
*/
#define I830_DESTREG_CBUFADDR0 0
#define I830_DESTREG_CBUFADDR1 1
#define I830_DESTREG_CBUFADDR2 2
#define I830_DESTREG_DBUFADDR0 3
#define I830_DESTREG_DBUFADDR1 4
#define I830_DESTREG_DBUFADDR2 5
#define I830_DESTREG_DV0 6
#define I830_DESTREG_DV1 7
#define I830_DESTREG_SENABLE 8
#define I830_DESTREG_SR0 9
#define I830_DESTREG_SR1 10
#define I830_DESTREG_SR2 11
#define I830_DEST_SETUP_SIZE 12
#define I830_DESTREG_DBUFADDR0 2
#define I830_DESTREG_DBUFADDR1 3
#define I830_DESTREG_DV0 4
#define I830_DESTREG_DV1 5
#define I830_DESTREG_SENABLE 6
#define I830_DESTREG_SR0 7
#define I830_DESTREG_SR1 8
#define I830_DESTREG_SR2 9
#define I830_DEST_SETUP_SIZE 10
#define I830_CTXREG_STATE1 0
#define I830_CTXREG_STATE2 1
@@ -72,7 +71,7 @@
#define I830_CTXREG_AA 9
#define I830_CTXREG_FOGCOLOR 10
#define I830_CTXREG_BLENDCOLOR0 11
#define I830_CTXREG_BLENDCOLOR1 12
#define I830_CTXREG_BLENDCOLOR1 12
#define I830_CTXREG_VF 13
#define I830_CTXREG_VF2 14
#define I830_CTXREG_MCSB0 15
@@ -83,17 +82,16 @@
#define I830_STPREG_ST1 1
#define I830_STP_SETUP_SIZE 2
#define I830_TEXREG_TM0LI 0 /* load immediate 2 texture map n */
#define I830_TEXREG_TM0S0 1
#define I830_TEXREG_TM0S1 2
#define I830_TEXREG_TM0S2 3
#define I830_TEXREG_TM0S3 4
#define I830_TEXREG_TM0S4 5
#define I830_TEXREG_MCS 6 /* _3DSTATE_MAP_COORD_SETS */
#define I830_TEXREG_CUBE 7 /* _3DSTATE_MAP_SUBE */
#define I830_TEX_SETUP_SIZE 8
#define I830_TEXREG_TM0LI 0 /* load immediate 2 texture map n */
#define I830_TEXREG_TM0S1 1
#define I830_TEXREG_TM0S2 2
#define I830_TEXREG_TM0S3 3
#define I830_TEXREG_TM0S4 4
#define I830_TEXREG_MCS 5 /* _3DSTATE_MAP_COORD_SETS */
#define I830_TEXREG_CUBE 6 /* _3DSTATE_MAP_SUBE */
#define I830_TEX_SETUP_SIZE 7
#define I830_TEXBLEND_SIZE 12 /* (4 args + op) * 2 + COLOR_FACTOR */
#define I830_TEXBLEND_SIZE 12 /* (4 args + op) * 2 + COLOR_FACTOR */
struct i830_texture_object
{
@@ -103,30 +101,39 @@ struct i830_texture_object
#define I830_TEX_UNITS 4
struct i830_hw_state {
struct i830_hw_state
{
GLuint Ctx[I830_CTX_SETUP_SIZE];
GLuint Buffer[I830_DEST_SETUP_SIZE];
GLuint Stipple[I830_STP_SETUP_SIZE];
GLuint Tex[I830_TEX_UNITS][I830_TEX_SETUP_SIZE];
GLuint TexBlend[I830_TEX_UNITS][I830_TEXBLEND_SIZE];
GLuint TexBlendWordsUsed[I830_TEX_UNITS];
GLuint emitted; /* I810_UPLOAD_* */
struct intel_region *draw_region;
struct intel_region *depth_region;
/* Regions aren't actually that appropriate here as the memory may
* be from a PBO or FBO. Just use the buffer id. Will have to do
* this for draw and depth for FBO's...
*/
struct _DriBufferObject *tex_buffer[I830_TEX_UNITS];
GLuint tex_offset[I830_TEX_UNITS];
GLuint emitted; /* I810_UPLOAD_* */
GLuint active;
};
struct i830_context
struct i830_context
{
struct intel_context intel;
DECLARE_RENDERINPUTS(last_index_bitset);
GLuint lodbias_tm0s3[MAX_TEXTURE_UNITS];
DECLARE_RENDERINPUTS(last_index_bitset);
struct i830_hw_state meta, initial, state, *current;
};
typedef struct i830_context *i830ContextPtr;
typedef struct i830_texture_object *i830TextureObjectPtr;
#define I830_CONTEXT(ctx) ((i830ContextPtr)(ctx))
@@ -147,71 +154,55 @@ do { \
/* i830_vtbl.c
*/
extern void
i830InitVtbl( i830ContextPtr i830 );
extern void i830InitVtbl(struct i830_context *i830);
/* i830_context.c
*/
extern GLboolean
i830CreateContext( const __GLcontextModes *mesaVis,
__DRIcontextPrivate *driContextPriv,
void *sharedContextPrivate);
extern GLboolean
i830CreateContext(const __GLcontextModes * mesaVis,
__DRIcontextPrivate * driContextPriv,
void *sharedContextPrivate);
/* i830_tex.c, i830_texstate.c
*/
extern void
i830UpdateTextureState( intelContextPtr intel );
extern void i830UpdateTextureState(struct intel_context *intel);
extern void
i830InitTextureFuncs( struct dd_function_table *functions );
extern intelTextureObjectPtr
i830AllocTexObj( struct gl_texture_object *tObj );
extern void i830InitTextureFuncs(struct dd_function_table *functions);
/* i830_texblend.c
*/
extern GLuint i830SetTexEnvCombine(i830ContextPtr i830,
const struct gl_tex_env_combine_state * combine, GLint blendUnit,
GLuint texel_op, GLuint *state, const GLfloat *factor );
extern GLuint i830SetTexEnvCombine(struct i830_context *i830,
const struct gl_tex_env_combine_state
*combine, GLint blendUnit, GLuint texel_op,
GLuint * state, const GLfloat * factor);
extern void
i830EmitTextureBlend( i830ContextPtr i830 );
extern void i830EmitTextureBlend(struct i830_context *i830);
/* i830_state.c
*/
extern void
i830InitStateFuncs( struct dd_function_table *functions );
extern void i830InitStateFuncs(struct dd_function_table *functions);
extern void
i830EmitState( i830ContextPtr i830 );
extern void i830EmitState(struct i830_context *i830);
extern void
i830InitState( i830ContextPtr i830 );
extern void i830InitState(struct i830_context *i830);
/* i830_metaops.c
*/
extern GLboolean
i830TryTextureReadPixels( GLcontext *ctx,
GLint x, GLint y, GLsizei width, GLsizei height,
GLenum format, GLenum type,
const struct gl_pixelstore_attrib *pack,
GLvoid *pixels );
extern GLboolean
i830TryTextureDrawPixels( GLcontext *ctx,
GLint x, GLint y, GLsizei width, GLsizei height,
GLenum format, GLenum type,
const struct gl_pixelstore_attrib *unpack,
const GLvoid *pixels );
extern void
i830ClearWithTris( intelContextPtr intel, GLbitfield mask,
GLboolean all, GLint cx, GLint cy, GLint cw, GLint ch);
extern void i830InitMetaFuncs(struct i830_context *i830);
extern void
i830RotateWindow(intelContextPtr intel, __DRIdrawablePrivate *dPriv,
i830RotateWindow(struct intel_context *intel, __DRIdrawablePrivate * dPriv,
GLuint srcBuf);
#endif
/*======================================================================
* Inline conversion functions. These are better-typed than the
* macros used previously:
*/
static INLINE struct i830_context *
i830_context(GLcontext * ctx)
{
return (struct i830_context *) ctx;
}
#endif

File diff suppressed because it is too large

View File

@@ -407,7 +407,7 @@
#define LOGICOP_SET 0xf
#define MODE4_ENABLE_STENCIL_TEST_MASK ((1<<17)|(0xff00))
#define ENABLE_STENCIL_TEST_MASK (1<<17)
#define STENCIL_TEST_MASK(x) ((x)<<8)
#define STENCIL_TEST_MASK(x) (((x)&0xff)<<8)
#define MODE4_ENABLE_STENCIL_WRITE_MASK ((1<<16)|(0x00ff))
#define ENABLE_STENCIL_WRITE_MASK (1<<16)
#define STENCIL_WRITE_MASK(x) ((x)&0xff)
@@ -554,8 +554,8 @@
#define MAPSURF_4BIT_INDEXED (7<<6)
#define TM0S1_MT_FORMAT_MASK (0x7 << 3)
#define TM0S1_MT_FORMAT_SHIFT 3
#define MT_4BIT_IDX_ARGB8888 (7<<3) /* SURFACE_4BIT_INDEXED */
#define MT_8BIT_IDX_RGB565 (0<<3) /* SURFACE_8BIT_INDEXED */
#define MT_4BIT_IDX_ARGB8888 (7<<3) /* SURFACE_4BIT_INDEXED */
#define MT_8BIT_IDX_RGB565 (0<<3) /* SURFACE_8BIT_INDEXED */
#define MT_8BIT_IDX_ARGB1555 (1<<3)
#define MT_8BIT_IDX_ARGB4444 (2<<3)
#define MT_8BIT_IDX_AY88 (3<<3)
@@ -563,9 +563,9 @@
#define MT_8BIT_IDX_BUMP_88DVDU (5<<3)
#define MT_8BIT_IDX_BUMP_655LDVDU (6<<3)
#define MT_8BIT_IDX_ARGB8888 (7<<3)
#define MT_8BIT_I8 (0<<3) /* SURFACE_8BIT */
#define MT_8BIT_I8 (0<<3) /* SURFACE_8BIT */
#define MT_8BIT_L8 (1<<3)
#define MT_16BIT_RGB565 (0<<3) /* SURFACE_16BIT */
#define MT_16BIT_RGB565 (0<<3) /* SURFACE_16BIT */
#define MT_16BIT_ARGB1555 (1<<3)
#define MT_16BIT_ARGB4444 (2<<3)
#define MT_16BIT_AY88 (3<<3)
@@ -573,16 +573,16 @@
#define MT_16BIT_BUMP_88DVDU (5<<3)
#define MT_16BIT_BUMP_655LDVDU (6<<3)
#define MT_16BIT_DIB_RGB565_8888 (7<<3)
#define MT_32BIT_ARGB8888 (0<<3) /* SURFACE_32BIT */
#define MT_32BIT_ARGB8888 (0<<3) /* SURFACE_32BIT */
#define MT_32BIT_ABGR8888 (1<<3)
#define MT_32BIT_BUMP_XLDVDU_8888 (6<<3)
#define MT_32BIT_DIB_8888 (7<<3)
#define MT_411_YUV411 (0<<3) /* SURFACE_411 */
#define MT_422_YCRCB_SWAPY (0<<3) /* SURFACE_422 */
#define MT_411_YUV411 (0<<3) /* SURFACE_411 */
#define MT_422_YCRCB_SWAPY (0<<3) /* SURFACE_422 */
#define MT_422_YCRCB_NORMAL (1<<3)
#define MT_422_YCRCB_SWAPUV (2<<3)
#define MT_422_YCRCB_SWAPUVY (3<<3)
#define MT_COMPRESS_DXT1 (0<<3) /* SURFACE_COMPRESSED */
#define MT_COMPRESS_DXT1 (0<<3) /* SURFACE_COMPRESSED */
#define MT_COMPRESS_DXT2_3 (1<<3)
#define MT_COMPRESS_DXT4_5 (2<<3)
#define MT_COMPRESS_FXT1 (3<<3)
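
The STENCIL_TEST_MASK() change earlier in this file is a containment fix: in the MODE4 word the test mask lives in bits 8..15, with the write-mask enable at bit 16 and the test-mask enable at bit 17. A worked example with a hypothetical over-wide mask value shows what the added & 0xff prevents:

   mask = 0x1ff
   old:  (mask << 8)          = 0x1ff00   /* bit 16 set: clobbers ENABLE_STENCIL_WRITE_MASK */
   new:  ((mask & 0xff) << 8) = 0x0ff00   /* confined to the 8-bit test-mask field */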

File diff suppressed because it is too large

View File

@@ -45,261 +45,13 @@
/**
* Set the texture wrap modes.
*
* The i830M (and related graphics cores) do not support GL_CLAMP. The Intel
* drivers for "other operating systems" implement GL_CLAMP as
* GL_CLAMP_TO_EDGE, so the same is done here.
*
* \param t Texture object whose wrap modes are to be set
* \param swrap Wrap mode for the \a s texture coordinate
* \param twrap Wrap mode for the \a t texture coordinate
*/
static void i830SetTexWrapping(i830TextureObjectPtr tex,
GLenum swrap,
GLenum twrap)
static void
i830TexEnv(GLcontext * ctx, GLenum target,
GLenum pname, const GLfloat * param)
{
tex->Setup[I830_TEXREG_MCS] &= ~(TEXCOORD_ADDR_U_MASK|TEXCOORD_ADDR_V_MASK);
switch( swrap ) {
case GL_REPEAT:
tex->Setup[I830_TEXREG_MCS] |= TEXCOORD_ADDR_U_MODE(TEXCOORDMODE_WRAP);
break;
case GL_CLAMP:
case GL_CLAMP_TO_EDGE:
tex->Setup[I830_TEXREG_MCS] |= TEXCOORD_ADDR_U_MODE(TEXCOORDMODE_CLAMP);
break;
case GL_CLAMP_TO_BORDER:
tex->Setup[I830_TEXREG_MCS] |=
TEXCOORD_ADDR_U_MODE(TEXCOORDMODE_CLAMP_BORDER);
break;
case GL_MIRRORED_REPEAT:
tex->Setup[I830_TEXREG_MCS] |=
TEXCOORD_ADDR_U_MODE(TEXCOORDMODE_MIRROR);
break;
default:
break;
}
switch( twrap ) {
case GL_REPEAT:
tex->Setup[I830_TEXREG_MCS] |= TEXCOORD_ADDR_V_MODE(TEXCOORDMODE_WRAP);
break;
case GL_CLAMP:
case GL_CLAMP_TO_EDGE:
tex->Setup[I830_TEXREG_MCS] |= TEXCOORD_ADDR_V_MODE(TEXCOORDMODE_CLAMP);
break;
case GL_CLAMP_TO_BORDER:
tex->Setup[I830_TEXREG_MCS] |=
TEXCOORD_ADDR_V_MODE(TEXCOORDMODE_CLAMP_BORDER);
break;
case GL_MIRRORED_REPEAT:
tex->Setup[I830_TEXREG_MCS] |=
TEXCOORD_ADDR_V_MODE(TEXCOORDMODE_MIRROR);
break;
default:
break;
}
}
/**
* Set the texture magnification and minification modes.
*
* \param t Texture whose filter modes are to be set
* \param minf Texture minification mode
* \param magf Texture magnification mode
* \param bias LOD bias for this texture unit.
*/
static void i830SetTexFilter( i830TextureObjectPtr t, GLenum minf, GLenum magf,
GLfloat maxanisotropy )
{
int minFilt = 0, mipFilt = 0, magFilt = 0;
if(INTEL_DEBUG&DEBUG_DRI)
fprintf(stderr, "%s\n", __FUNCTION__);
if ( maxanisotropy > 1.0 ) {
minFilt = FILTER_ANISOTROPIC;
magFilt = FILTER_ANISOTROPIC;
}
else {
switch (minf) {
case GL_NEAREST:
minFilt = FILTER_NEAREST;
mipFilt = MIPFILTER_NONE;
break;
case GL_LINEAR:
minFilt = FILTER_LINEAR;
mipFilt = MIPFILTER_NONE;
break;
case GL_NEAREST_MIPMAP_NEAREST:
minFilt = FILTER_NEAREST;
mipFilt = MIPFILTER_NEAREST;
break;
case GL_LINEAR_MIPMAP_NEAREST:
minFilt = FILTER_LINEAR;
mipFilt = MIPFILTER_NEAREST;
break;
case GL_NEAREST_MIPMAP_LINEAR:
minFilt = FILTER_NEAREST;
mipFilt = MIPFILTER_LINEAR;
break;
case GL_LINEAR_MIPMAP_LINEAR:
minFilt = FILTER_LINEAR;
mipFilt = MIPFILTER_LINEAR;
break;
default:
break;
}
switch (magf) {
case GL_NEAREST:
magFilt = FILTER_NEAREST;
break;
case GL_LINEAR:
magFilt = FILTER_LINEAR;
break;
default:
break;
}
}
t->Setup[I830_TEXREG_TM0S3] &= ~TM0S3_MIN_FILTER_MASK;
t->Setup[I830_TEXREG_TM0S3] &= ~TM0S3_MIP_FILTER_MASK;
t->Setup[I830_TEXREG_TM0S3] &= ~TM0S3_MAG_FILTER_MASK;
t->Setup[I830_TEXREG_TM0S3] |= ((minFilt << TM0S3_MIN_FILTER_SHIFT) |
(mipFilt << TM0S3_MIP_FILTER_SHIFT) |
(magFilt << TM0S3_MAG_FILTER_SHIFT));
}
static void i830SetTexBorderColor(i830TextureObjectPtr t, GLubyte color[4])
{
if(INTEL_DEBUG&DEBUG_DRI)
fprintf(stderr, "%s\n", __FUNCTION__);
t->Setup[I830_TEXREG_TM0S4] =
INTEL_PACKCOLOR8888(color[0],color[1],color[2],color[3]);
}
/**
* Allocate space for and load the mesa images into the texture memory block.
* This will happen before drawing with a new texture, or drawing with a
* texture after it was swapped out or teximaged again.
*/
intelTextureObjectPtr i830AllocTexObj( struct gl_texture_object *texObj )
{
i830TextureObjectPtr t = CALLOC_STRUCT( i830_texture_object );
if ( !t )
return NULL;
texObj->DriverData = t;
t->intel.base.tObj = texObj;
t->intel.dirty = I830_UPLOAD_TEX_ALL;
make_empty_list( &t->intel.base );
t->Setup[I830_TEXREG_TM0LI] = 0; /* not used */
t->Setup[I830_TEXREG_TM0S0] = 0;
t->Setup[I830_TEXREG_TM0S1] = 0;
t->Setup[I830_TEXREG_TM0S2] = 0;
t->Setup[I830_TEXREG_TM0S3] = 0;
t->Setup[I830_TEXREG_MCS] = (_3DSTATE_MAP_COORD_SET_CMD |
MAP_UNIT(0) |
ENABLE_TEXCOORD_PARAMS |
TEXCOORDS_ARE_NORMAL |
TEXCOORDTYPE_CARTESIAN |
ENABLE_ADDR_V_CNTL |
TEXCOORD_ADDR_V_MODE(TEXCOORDMODE_WRAP) |
ENABLE_ADDR_U_CNTL |
TEXCOORD_ADDR_U_MODE(TEXCOORDMODE_WRAP));
i830SetTexWrapping( t, texObj->WrapS, texObj->WrapT );
i830SetTexFilter( t, texObj->MinFilter, texObj->MagFilter,
texObj->MaxAnisotropy );
i830SetTexBorderColor( t, texObj->_BorderChan );
return &t->intel;
}
static void i830TexParameter( GLcontext *ctx, GLenum target,
struct gl_texture_object *tObj,
GLenum pname, const GLfloat *params )
{
i830TextureObjectPtr t = (i830TextureObjectPtr) tObj->DriverData;
if (!t)
return;
switch (pname) {
case GL_TEXTURE_MIN_FILTER:
case GL_TEXTURE_MAG_FILTER:
case GL_TEXTURE_MAX_ANISOTROPY_EXT:
i830SetTexFilter( t, tObj->MinFilter, tObj->MagFilter,
tObj->MaxAnisotropy);
break;
case GL_TEXTURE_WRAP_S:
case GL_TEXTURE_WRAP_T:
i830SetTexWrapping( t, tObj->WrapS, tObj->WrapT );
break;
case GL_TEXTURE_BORDER_COLOR:
i830SetTexBorderColor( t, tObj->_BorderChan );
break;
case GL_TEXTURE_BASE_LEVEL:
case GL_TEXTURE_MAX_LEVEL:
case GL_TEXTURE_MIN_LOD:
case GL_TEXTURE_MAX_LOD:
/* The i830 and its successors can do a lot of this without
* reloading the textures. A project for someone?
*/
intelFlush( ctx );
driSwapOutTextureObject( (driTextureObject *) t );
break;
default:
return;
}
t->intel.dirty = I830_UPLOAD_TEX_ALL;
}
static void i830TexEnv( GLcontext *ctx, GLenum target,
GLenum pname, const GLfloat *param )
{
i830ContextPtr i830 = I830_CONTEXT( ctx );
GLuint unit = ctx->Texture.CurrentUnit;
switch (pname) {
case GL_TEXTURE_ENV_COLOR:
#if 0
{
GLubyte r, g, b, a;
GLuint col;
UNCLAMPED_FLOAT_TO_UBYTE(r, param[RCOMP]);
UNCLAMPED_FLOAT_TO_UBYTE(g, param[GCOMP]);
UNCLAMPED_FLOAT_TO_UBYTE(b, param[BCOMP]);
UNCLAMPED_FLOAT_TO_UBYTE(a, param[ACOMP]);
col = ((a << 24) | (r << 16) | (g << 8) | b);
if (col != i830->state.TexEnv[unit][I830_TEXENVREG_COL1]) {
I830_STATECHANGE(i830, I830_UPLOAD_TEXENV);
i830->state.TexEnv[unit][I830_TEXENVREG_COL1] = col;
}
break;
}
#endif
case GL_TEXTURE_ENV_COLOR:
case GL_TEXTURE_ENV_MODE:
case GL_COMBINE_RGB:
case GL_COMBINE_ALPHA:
@@ -319,38 +71,30 @@ static void i830TexEnv( GLcontext *ctx, GLenum target,
case GL_ALPHA_SCALE:
break;
case GL_TEXTURE_LOD_BIAS: {
int b = (int) ((*param) * 16.0);
if (b > 63) b = 63;
if (b < -64) b = -64;
I830_STATECHANGE(i830, I830_UPLOAD_TEX(unit));
i830->state.Tex[unit][I830_TEXREG_TM0S3] &= ~TM0S3_LOD_BIAS_MASK;
i830->state.Tex[unit][I830_TEXREG_TM0S3] |=
((b << TM0S3_LOD_BIAS_SHIFT) & TM0S3_LOD_BIAS_MASK);
break;
}
case GL_TEXTURE_LOD_BIAS:{
struct i830_context *i830 = i830_context(ctx);
GLuint unit = ctx->Texture.CurrentUnit;
int b = (int) ((*param) * 16.0);
if (b > 63)
b = 63;
if (b < -64)
b = -64;
I830_STATECHANGE(i830, I830_UPLOAD_TEX(unit));
i830->lodbias_tm0s3[unit] =
((b << TM0S3_LOD_BIAS_SHIFT) & TM0S3_LOD_BIAS_MASK);
break;
}
default:
break;
}
}
static void i830BindTexture( GLcontext *ctx, GLenum target,
struct gl_texture_object *texObj )
void
i830InitTextureFuncs(struct dd_function_table *functions)
{
i830TextureObjectPtr tex;
if (!texObj->DriverData)
i830AllocTexObj( texObj );
tex = (i830TextureObjectPtr)texObj->DriverData;
}
void i830InitTextureFuncs( struct dd_function_table *functions )
{
functions->BindTexture = i830BindTexture;
functions->TexEnv = i830TexEnv;
functions->TexParameter = i830TexParameter;
functions->TexEnv = i830TexEnv;
}
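
The GL_TEXTURE_LOD_BIAS case above encodes the bias as signed fixed point with four fractional bits, which is why the value is scaled by 16 and clamped to [-64, 63] (roughly -4.0 to +3.94). A worked example, assuming an application sets a bias of -1.5:

   b     = (int)(-1.5 * 16.0) = -24
   field = ((-24) << TM0S3_LOD_BIAS_SHIFT) & TM0S3_LOD_BIAS_MASK

The per-unit value is cached in i830->lodbias_tm0s3[] and only folded back into TM0S3 when the unit is next validated in i830_update_tex_unit() (i830_texstate.c), since the rest of the TM0S3 word is now rebuilt from scratch there.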

View File

@@ -46,46 +46,42 @@
/* ================================================================
* Texture combine functions
*/
static GLuint pass_through( GLuint *state, GLuint blendUnit )
static GLuint
pass_through(GLuint * state, GLuint blendUnit)
{
state[0] = (_3DSTATE_MAP_BLEND_OP_CMD(blendUnit) |
TEXPIPE_COLOR |
ENABLE_TEXOUTPUT_WRT_SEL |
TEXOP_OUTPUT_CURRENT |
DISABLE_TEX_CNTRL_STAGE |
TEXOP_SCALE_1X |
TEXOP_MODIFY_PARMS |
TEXBLENDOP_ARG1);
TEXPIPE_COLOR |
ENABLE_TEXOUTPUT_WRT_SEL |
TEXOP_OUTPUT_CURRENT |
DISABLE_TEX_CNTRL_STAGE |
TEXOP_SCALE_1X | TEXOP_MODIFY_PARMS | TEXBLENDOP_ARG1);
state[1] = (_3DSTATE_MAP_BLEND_OP_CMD(blendUnit) |
TEXPIPE_ALPHA |
ENABLE_TEXOUTPUT_WRT_SEL |
TEXOP_OUTPUT_CURRENT |
TEXOP_SCALE_1X |
TEXOP_MODIFY_PARMS |
TEXBLENDOP_ARG1);
TEXPIPE_ALPHA |
ENABLE_TEXOUTPUT_WRT_SEL |
TEXOP_OUTPUT_CURRENT |
TEXOP_SCALE_1X | TEXOP_MODIFY_PARMS | TEXBLENDOP_ARG1);
state[2] = (_3DSTATE_MAP_BLEND_ARG_CMD(blendUnit) |
TEXPIPE_COLOR |
TEXBLEND_ARG1 |
TEXBLENDARG_MODIFY_PARMS |
TEXBLENDARG_CURRENT);
TEXPIPE_COLOR |
TEXBLEND_ARG1 |
TEXBLENDARG_MODIFY_PARMS | TEXBLENDARG_CURRENT);
state[3] = (_3DSTATE_MAP_BLEND_ARG_CMD(blendUnit) |
TEXPIPE_ALPHA |
TEXBLEND_ARG1 |
TEXBLENDARG_MODIFY_PARMS |
TEXBLENDARG_CURRENT);
TEXPIPE_ALPHA |
TEXBLEND_ARG1 |
TEXBLENDARG_MODIFY_PARMS | TEXBLENDARG_CURRENT);
return 4;
}
static GLuint emit_factor( GLuint blendUnit, GLuint *state, GLuint count,
const GLfloat *factor )
static GLuint
emit_factor(GLuint blendUnit, GLuint * state, GLuint count,
const GLfloat * factor)
{
GLubyte r, g, b, a;
GLuint col;
if (0)
fprintf(stderr, "emit constant %d: %.2f %.2f %.2f %.2f\n",
blendUnit, factor[0], factor[1], factor[2], factor[3]);
blendUnit, factor[0], factor[1], factor[2], factor[3]);
UNCLAMPED_FLOAT_TO_UBYTE(r, factor[0]);
UNCLAMPED_FLOAT_TO_UBYTE(g, factor[1]);
@@ -94,21 +90,27 @@ static GLuint emit_factor( GLuint blendUnit, GLuint *state, GLuint count,
col = ((a << 24) | (r << 16) | (g << 8) | b);
state[count++] = _3DSTATE_COLOR_FACTOR_N_CMD(blendUnit);
state[count++] = _3DSTATE_COLOR_FACTOR_N_CMD(blendUnit);
state[count++] = col;
return count;
}
static __inline__ GLuint GetTexelOp(GLint unit)
static INLINE GLuint
GetTexelOp(GLint unit)
{
switch(unit) {
case 0: return TEXBLENDARG_TEXEL0;
case 1: return TEXBLENDARG_TEXEL1;
case 2: return TEXBLENDARG_TEXEL2;
case 3: return TEXBLENDARG_TEXEL3;
default: return TEXBLENDARG_TEXEL0;
switch (unit) {
case 0:
return TEXBLENDARG_TEXEL0;
case 1:
return TEXBLENDARG_TEXEL1;
case 2:
return TEXBLENDARG_TEXEL2;
case 3:
return TEXBLENDARG_TEXEL3;
default:
return TEXBLENDARG_TEXEL0;
}
}
@@ -132,12 +134,10 @@ static __inline__ GLuint GetTexelOp(GLint unit)
* partial support for the extension?
*/
GLuint
i830SetTexEnvCombine(i830ContextPtr i830,
const struct gl_tex_env_combine_state * combine,
GLint blendUnit,
GLuint texel_op,
GLuint *state,
const GLfloat *factor )
i830SetTexEnvCombine(struct i830_context * i830,
const struct gl_tex_env_combine_state * combine,
GLint blendUnit,
GLuint texel_op, GLuint * state, const GLfloat * factor)
{
const GLuint numColorArgs = combine->_NumArgsRGB;
const GLuint numAlphaArgs = combine->_NumArgsA;
@@ -162,7 +162,7 @@ i830SetTexEnvCombine(i830ContextPtr i830,
TEXPIPE_ALPHA | TEXBLEND_ARG0 | TEXBLENDARG_MODIFY_PARMS,
};
if(INTEL_DEBUG&DEBUG_TEXTURE)
if (INTEL_DEBUG & DEBUG_TEXTURE)
fprintf(stderr, "%s\n", __FUNCTION__);
@@ -188,23 +188,23 @@ i830SetTexEnvCombine(i830ContextPtr i830,
}
switch(combine->ModeRGB) {
case GL_REPLACE:
switch (combine->ModeRGB) {
case GL_REPLACE:
blendop = TEXBLENDOP_ARG1;
break;
case GL_MODULATE:
case GL_MODULATE:
blendop = TEXBLENDOP_MODULATE;
break;
case GL_ADD:
case GL_ADD:
blendop = TEXBLENDOP_ADD;
break;
case GL_ADD_SIGNED:
blendop = TEXBLENDOP_ADDSIGNED;
blendop = TEXBLENDOP_ADDSIGNED;
break;
case GL_INTERPOLATE:
blendop = TEXBLENDOP_BLEND;
blendop = TEXBLENDOP_BLEND;
break;
case GL_SUBTRACT:
case GL_SUBTRACT:
blendop = TEXBLENDOP_SUBTRACT;
break;
case GL_DOT3_RGB_EXT:
@@ -215,55 +215,54 @@ i830SetTexEnvCombine(i830ContextPtr i830,
case GL_DOT3_RGBA:
blendop = TEXBLENDOP_DOT3;
break;
default:
return pass_through( state, blendUnit );
default:
return pass_through(state, blendUnit);
}
blendop |= (rgb_shift << TEXOP_SCALE_SHIFT);
/* Handle RGB args */
for(i = 0; i < 3; i++) {
switch(combine->SourceRGB[i]) {
case GL_TEXTURE:
args_RGB[i] = texel_op;
break;
for (i = 0; i < 3; i++) {
switch (combine->SourceRGB[i]) {
case GL_TEXTURE:
args_RGB[i] = texel_op;
break;
case GL_TEXTURE0:
case GL_TEXTURE1:
case GL_TEXTURE2:
case GL_TEXTURE3:
args_RGB[i] = GetTexelOp( combine->SourceRGB[i] - GL_TEXTURE0 );
break;
args_RGB[i] = GetTexelOp(combine->SourceRGB[i] - GL_TEXTURE0);
break;
case GL_CONSTANT:
args_RGB[i] = TEXBLENDARG_FACTOR_N;
need_factor = 1;
break;
args_RGB[i] = TEXBLENDARG_FACTOR_N;
need_factor = 1;
break;
case GL_PRIMARY_COLOR:
args_RGB[i] = TEXBLENDARG_DIFFUSE;
break;
args_RGB[i] = TEXBLENDARG_DIFFUSE;
break;
case GL_PREVIOUS:
args_RGB[i] = TEXBLENDARG_CURRENT;
break;
default:
return pass_through( state, blendUnit );
args_RGB[i] = TEXBLENDARG_CURRENT;
break;
default:
return pass_through(state, blendUnit);
}
switch(combine->OperandRGB[i]) {
case GL_SRC_COLOR:
args_RGB[i] |= 0;
break;
case GL_ONE_MINUS_SRC_COLOR:
args_RGB[i] |= TEXBLENDARG_INV_ARG;
break;
case GL_SRC_ALPHA:
args_RGB[i] |= TEXBLENDARG_REPLICATE_ALPHA;
break;
case GL_ONE_MINUS_SRC_ALPHA:
args_RGB[i] |= (TEXBLENDARG_REPLICATE_ALPHA |
TEXBLENDARG_INV_ARG);
break;
default:
return pass_through( state, blendUnit );
switch (combine->OperandRGB[i]) {
case GL_SRC_COLOR:
args_RGB[i] |= 0;
break;
case GL_ONE_MINUS_SRC_COLOR:
args_RGB[i] |= TEXBLENDARG_INV_ARG;
break;
case GL_SRC_ALPHA:
args_RGB[i] |= TEXBLENDARG_REPLICATE_ALPHA;
break;
case GL_ONE_MINUS_SRC_ALPHA:
args_RGB[i] |= (TEXBLENDARG_REPLICATE_ALPHA | TEXBLENDARG_INV_ARG);
break;
default:
return pass_through(state, blendUnit);
}
}
@@ -275,76 +274,76 @@ i830SetTexEnvCombine(i830ContextPtr i830,
* Note - the global factor is set up with alpha == .5, so
* the alpha part of the DOT4 calculation should be zero.
*/
if ( combine->ModeRGB == GL_DOT3_RGBA_EXT ||
combine->ModeRGB == GL_DOT3_RGBA ) {
if (combine->ModeRGB == GL_DOT3_RGBA_EXT ||
combine->ModeRGB == GL_DOT3_RGBA) {
ablendop = TEXBLENDOP_DOT4;
args_A[0] = TEXBLENDARG_FACTOR; /* the global factor */
args_A[0] = TEXBLENDARG_FACTOR; /* the global factor */
args_A[1] = TEXBLENDARG_FACTOR;
args_A[2] = TEXBLENDARG_FACTOR;
}
else {
switch(combine->ModeA) {
case GL_REPLACE:
ablendop = TEXBLENDOP_ARG1;
break;
case GL_MODULATE:
ablendop = TEXBLENDOP_MODULATE;
break;
case GL_ADD:
ablendop = TEXBLENDOP_ADD;
break;
switch (combine->ModeA) {
case GL_REPLACE:
ablendop = TEXBLENDOP_ARG1;
break;
case GL_MODULATE:
ablendop = TEXBLENDOP_MODULATE;
break;
case GL_ADD:
ablendop = TEXBLENDOP_ADD;
break;
case GL_ADD_SIGNED:
ablendop = TEXBLENDOP_ADDSIGNED;
break;
ablendop = TEXBLENDOP_ADDSIGNED;
break;
case GL_INTERPOLATE:
ablendop = TEXBLENDOP_BLEND;
break;
case GL_SUBTRACT:
ablendop = TEXBLENDOP_SUBTRACT;
break;
ablendop = TEXBLENDOP_BLEND;
break;
case GL_SUBTRACT:
ablendop = TEXBLENDOP_SUBTRACT;
break;
default:
return pass_through( state, blendUnit );
return pass_through(state, blendUnit);
}
ablendop |= (alpha_shift << TEXOP_SCALE_SHIFT);
/* Handle A args */
for(i = 0; i < 3; i++) {
switch(combine->SourceA[i]) {
case GL_TEXTURE:
args_A[i] = texel_op;
break;
case GL_TEXTURE0:
case GL_TEXTURE1:
case GL_TEXTURE2:
case GL_TEXTURE3:
args_A[i] = GetTexelOp( combine->SourceA[i] - GL_TEXTURE0 );
break;
case GL_CONSTANT:
args_A[i] = TEXBLENDARG_FACTOR_N;
need_factor = 1;
break;
case GL_PRIMARY_COLOR:
args_A[i] = TEXBLENDARG_DIFFUSE;
break;
case GL_PREVIOUS:
args_A[i] = TEXBLENDARG_CURRENT;
break;
default:
return pass_through( state, blendUnit );
}
for (i = 0; i < 3; i++) {
switch (combine->SourceA[i]) {
case GL_TEXTURE:
args_A[i] = texel_op;
break;
case GL_TEXTURE0:
case GL_TEXTURE1:
case GL_TEXTURE2:
case GL_TEXTURE3:
args_A[i] = GetTexelOp(combine->SourceA[i] - GL_TEXTURE0);
break;
case GL_CONSTANT:
args_A[i] = TEXBLENDARG_FACTOR_N;
need_factor = 1;
break;
case GL_PRIMARY_COLOR:
args_A[i] = TEXBLENDARG_DIFFUSE;
break;
case GL_PREVIOUS:
args_A[i] = TEXBLENDARG_CURRENT;
break;
default:
return pass_through(state, blendUnit);
}
switch(combine->OperandA[i]) {
case GL_SRC_ALPHA:
args_A[i] |= 0;
break;
case GL_ONE_MINUS_SRC_ALPHA:
args_A[i] |= TEXBLENDARG_INV_ARG;
break;
default:
return pass_through( state, blendUnit );
}
switch (combine->OperandA[i]) {
case GL_SRC_ALPHA:
args_A[i] |= 0;
break;
case GL_ONE_MINUS_SRC_ALPHA:
args_A[i] |= TEXBLENDARG_INV_ARG;
break;
default:
return pass_through(state, blendUnit);
}
}
}
@@ -363,86 +362,86 @@ i830SetTexEnvCombine(i830ContextPtr i830,
used = 0;
state[used++] = (_3DSTATE_MAP_BLEND_OP_CMD(blendUnit) |
TEXPIPE_COLOR |
ENABLE_TEXOUTPUT_WRT_SEL |
TEXOP_OUTPUT_CURRENT |
DISABLE_TEX_CNTRL_STAGE |
TEXOP_MODIFY_PARMS |
blendop);
TEXPIPE_COLOR |
ENABLE_TEXOUTPUT_WRT_SEL |
TEXOP_OUTPUT_CURRENT |
DISABLE_TEX_CNTRL_STAGE | TEXOP_MODIFY_PARMS | blendop);
state[used++] = (_3DSTATE_MAP_BLEND_OP_CMD(blendUnit) |
TEXPIPE_ALPHA |
ENABLE_TEXOUTPUT_WRT_SEL |
TEXOP_OUTPUT_CURRENT |
TEXOP_MODIFY_PARMS |
ablendop);
TEXPIPE_ALPHA |
ENABLE_TEXOUTPUT_WRT_SEL |
TEXOP_OUTPUT_CURRENT | TEXOP_MODIFY_PARMS | ablendop);
for ( i = 0 ; i < numColorArgs ; i++ ) {
for (i = 0; i < numColorArgs; i++) {
state[used++] = (_3DSTATE_MAP_BLEND_ARG_CMD(blendUnit) |
tex_blend_rgb[i] | args_RGB[i]);
tex_blend_rgb[i] | args_RGB[i]);
}
for ( i = 0 ; i < numAlphaArgs ; i++ ) {
for (i = 0; i < numAlphaArgs; i++) {
state[used++] = (_3DSTATE_MAP_BLEND_ARG_CMD(blendUnit) |
tex_blend_a[i] | args_A[i]);
tex_blend_a[i] | args_A[i]);
}
if (need_factor)
return emit_factor( blendUnit, state, used, factor );
else
if (need_factor)
return emit_factor(blendUnit, state, used, factor);
else
return used;
}
static void emit_texblend( i830ContextPtr i830, GLuint unit, GLuint blendUnit,
GLboolean last_stage )
static void
emit_texblend(struct i830_context *i830, GLuint unit, GLuint blendUnit,
GLboolean last_stage)
{
struct gl_texture_unit *texUnit = &i830->intel.ctx.Texture.Unit[unit];
GLuint tmp[I830_TEXBLEND_SIZE], tmp_sz;
if (0) fprintf(stderr, "%s unit %d\n", __FUNCTION__, unit);
if (0)
fprintf(stderr, "%s unit %d\n", __FUNCTION__, unit);
/* Update i830->state.TexBlend
*/
tmp_sz = i830SetTexEnvCombine(i830, texUnit->_CurrentCombine, blendUnit,
GetTexelOp(unit), tmp,
texUnit->EnvColor );
*/
tmp_sz = i830SetTexEnvCombine(i830, texUnit->_CurrentCombine, blendUnit,
GetTexelOp(unit), tmp, texUnit->EnvColor);
if (last_stage)
if (last_stage)
tmp[0] |= TEXOP_LAST_STAGE;
if (tmp_sz != i830->state.TexBlendWordsUsed[blendUnit] ||
memcmp( tmp, i830->state.TexBlend[blendUnit], tmp_sz * sizeof(GLuint))) {
I830_STATECHANGE( i830, I830_UPLOAD_TEXBLEND(blendUnit) );
memcpy( i830->state.TexBlend[blendUnit], tmp, tmp_sz * sizeof(GLuint));
memcmp(tmp, i830->state.TexBlend[blendUnit],
tmp_sz * sizeof(GLuint))) {
I830_STATECHANGE(i830, I830_UPLOAD_TEXBLEND(blendUnit));
memcpy(i830->state.TexBlend[blendUnit], tmp, tmp_sz * sizeof(GLuint));
i830->state.TexBlendWordsUsed[blendUnit] = tmp_sz;
}
I830_ACTIVESTATE(i830, I830_UPLOAD_TEXBLEND(blendUnit), GL_TRUE);
}
static void emit_passthrough( i830ContextPtr i830 )
static void
emit_passthrough(struct i830_context *i830)
{
GLuint tmp[I830_TEXBLEND_SIZE], tmp_sz;
GLuint unit = 0;
tmp_sz = pass_through( tmp, unit );
tmp_sz = pass_through(tmp, unit);
tmp[0] |= TEXOP_LAST_STAGE;
if (tmp_sz != i830->state.TexBlendWordsUsed[unit] ||
memcmp( tmp, i830->state.TexBlend[unit], tmp_sz * sizeof(GLuint))) {
I830_STATECHANGE( i830, I830_UPLOAD_TEXBLEND(unit) );
memcpy( i830->state.TexBlend[unit], tmp, tmp_sz * sizeof(GLuint));
memcmp(tmp, i830->state.TexBlend[unit], tmp_sz * sizeof(GLuint))) {
I830_STATECHANGE(i830, I830_UPLOAD_TEXBLEND(unit));
memcpy(i830->state.TexBlend[unit], tmp, tmp_sz * sizeof(GLuint));
i830->state.TexBlendWordsUsed[unit] = tmp_sz;
}
I830_ACTIVESTATE(i830, I830_UPLOAD_TEXBLEND(unit), GL_TRUE);
}
void i830EmitTextureBlend( i830ContextPtr i830 )
void
i830EmitTextureBlend(struct i830_context *i830)
{
GLcontext *ctx = &i830->intel.ctx;
GLuint unit, last_stage = 0, blendunit = 0;
@@ -450,16 +449,15 @@ void i830EmitTextureBlend( i830ContextPtr i830 )
I830_ACTIVESTATE(i830, I830_UPLOAD_TEXBLEND_ALL, GL_FALSE);
if (ctx->Texture._EnabledUnits) {
for (unit = 0 ; unit < ctx->Const.MaxTextureUnits ; unit++)
if (ctx->Texture.Unit[unit]._ReallyEnabled)
last_stage = unit;
for (unit = 0; unit < ctx->Const.MaxTextureUnits; unit++)
if (ctx->Texture.Unit[unit]._ReallyEnabled)
last_stage = unit;
for (unit = 0 ; unit < ctx->Const.MaxTextureUnits ; unit++)
if (ctx->Texture.Unit[unit]._ReallyEnabled)
emit_texblend( i830, unit, blendunit++, last_stage == unit );
for (unit = 0; unit < ctx->Const.MaxTextureUnits; unit++)
if (ctx->Texture.Unit[unit]._ReallyEnabled)
emit_texblend(i830, unit, blendunit++, last_stage == unit);
}
else {
emit_passthrough( i830 );
emit_passthrough(i830);
}
}

View File

@@ -38,446 +38,279 @@
#include "intel_screen.h"
#include "intel_ioctl.h"
#include "intel_tex.h"
#include "intel_mipmap_tree.h"
#include "intel_regions.h"
#include "i830_context.h"
#include "i830_reg.h"
static const GLint initial_offsets[6][2] = { {0,0},
{0,2},
{1,0},
{1,2},
{1,1},
{1,3} };
static const GLint step_offsets[6][2] = { {0,2},
{0,2},
{-1,2},
{-1,2},
{-1,1},
{-1,1} };
#define I830_TEX_UNIT_ENABLED(unit) (1<<unit)
static GLboolean i830SetTexImages( i830ContextPtr i830,
struct gl_texture_object *tObj )
static GLuint
translate_texture_format(GLuint mesa_format)
{
GLuint total_height, pitch, i, textureFormat;
i830TextureObjectPtr t = (i830TextureObjectPtr) tObj->DriverData;
const struct gl_texture_image *baseImage = tObj->Image[0][tObj->BaseLevel];
GLint firstLevel, lastLevel, numLevels;
switch( baseImage->TexFormat->MesaFormat ) {
switch (mesa_format) {
case MESA_FORMAT_L8:
t->intel.texelBytes = 1;
textureFormat = MAPSURF_8BIT | MT_8BIT_L8;
break;
return MAPSURF_8BIT | MT_8BIT_L8;
case MESA_FORMAT_I8:
t->intel.texelBytes = 1;
textureFormat = MAPSURF_8BIT | MT_8BIT_I8;
break;
return MAPSURF_8BIT | MT_8BIT_I8;
case MESA_FORMAT_A8:
t->intel.texelBytes = 1;
textureFormat = MAPSURF_8BIT | MT_8BIT_I8; /* Kludge -- check with conform, glean */
break;
return MAPSURF_8BIT | MT_8BIT_I8; /* Kludge! */
case MESA_FORMAT_AL88:
t->intel.texelBytes = 2;
textureFormat = MAPSURF_16BIT | MT_16BIT_AY88;
break;
return MAPSURF_16BIT | MT_16BIT_AY88;
case MESA_FORMAT_RGB565:
t->intel.texelBytes = 2;
textureFormat = MAPSURF_16BIT | MT_16BIT_RGB565;
break;
return MAPSURF_16BIT | MT_16BIT_RGB565;
case MESA_FORMAT_ARGB1555:
t->intel.texelBytes = 2;
textureFormat = MAPSURF_16BIT | MT_16BIT_ARGB1555;
break;
return MAPSURF_16BIT | MT_16BIT_ARGB1555;
case MESA_FORMAT_ARGB4444:
t->intel.texelBytes = 2;
textureFormat = MAPSURF_16BIT | MT_16BIT_ARGB4444;
break;
return MAPSURF_16BIT | MT_16BIT_ARGB4444;
case MESA_FORMAT_ARGB8888:
t->intel.texelBytes = 4;
textureFormat = MAPSURF_32BIT | MT_32BIT_ARGB8888;
break;
return MAPSURF_32BIT | MT_32BIT_ARGB8888;
case MESA_FORMAT_YCBCR_REV:
t->intel.texelBytes = 2;
textureFormat = (MAPSURF_422 | MT_422_YCRCB_NORMAL |
TM0S1_COLORSPACE_CONVERSION);
break;
return (MAPSURF_422 | MT_422_YCRCB_NORMAL);
case MESA_FORMAT_YCBCR:
t->intel.texelBytes = 2;
textureFormat = (MAPSURF_422 | MT_422_YCRCB_SWAPY | /* ??? */
TM0S1_COLORSPACE_CONVERSION);
break;
return (MAPSURF_422 | MT_422_YCRCB_SWAPY);
case MESA_FORMAT_RGB_FXT1:
case MESA_FORMAT_RGBA_FXT1:
t->intel.texelBytes = 2;
textureFormat = MAPSURF_COMPRESSED | MT_COMPRESS_FXT1;
break;
return (MAPSURF_COMPRESSED | MT_COMPRESS_FXT1);
case MESA_FORMAT_RGBA_DXT1:
case MESA_FORMAT_RGB_DXT1:
/*
* DXTn pitches are Width/4 * blocksize in bytes
* for DXT1: blocksize=8 so Width/4*8 = Width * 2
* for DXT3/5: blocksize=16 so Width/4*16 = Width * 4
*/
t->intel.texelBytes = 2;
textureFormat = (MAPSURF_COMPRESSED | MT_COMPRESS_DXT1);
break;
return (MAPSURF_COMPRESSED | MT_COMPRESS_DXT1);
case MESA_FORMAT_RGBA_DXT3:
t->intel.texelBytes = 4;
textureFormat = (MAPSURF_COMPRESSED | MT_COMPRESS_DXT2_3);
break;
return (MAPSURF_COMPRESSED | MT_COMPRESS_DXT2_3);
case MESA_FORMAT_RGBA_DXT5:
t->intel.texelBytes = 4;
textureFormat = (MAPSURF_COMPRESSED | MT_COMPRESS_DXT4_5);
break;
return (MAPSURF_COMPRESSED | MT_COMPRESS_DXT4_5);
default:
fprintf(stderr, "%s: bad image format\n", __FUNCTION__);
fprintf(stderr, "%s: bad image format %x\n", __FUNCTION__, mesa_format);
abort();
return 0;
}
/* Compute which mipmap levels we really want to send to the hardware.
* This depends on the base image size, GL_TEXTURE_MIN_LOD,
* GL_TEXTURE_MAX_LOD, GL_TEXTURE_BASE_LEVEL, and GL_TEXTURE_MAX_LEVEL.
* Yes, this looks overly complicated, but it's all needed.
*/
driCalculateTextureFirstLastLevel( (driTextureObject *) t );
}
/* Figure out the amount of memory required to hold all the mipmap
* levels. Choose the smallest pitch to accomodate the largest
* mipmap:
*/
firstLevel = t->intel.base.firstLevel;
lastLevel = t->intel.base.lastLevel;
numLevels = lastLevel - firstLevel + 1;
/* All images must be loaded at this pitch. Count the number of
* lines required:
*/
switch (tObj->Target) {
case GL_TEXTURE_CUBE_MAP: {
const GLuint dim = tObj->Image[0][firstLevel]->Width;
GLuint face;
pitch = dim * t->intel.texelBytes;
pitch *= 2; /* double pitch for cube layouts */
pitch = (pitch + 3) & ~3;
total_height = dim * 4;
for ( face = 0 ; face < 6 ; face++) {
GLuint x = initial_offsets[face][0] * dim;
GLuint y = initial_offsets[face][1] * dim;
GLuint d = dim;
t->intel.base.dirty_images[face] = ~0;
assert(tObj->Image[face][firstLevel]->Width == dim);
assert(tObj->Image[face][firstLevel]->Height == dim);
for (i = 0; i < numLevels; i++) {
t->intel.image[face][i].image = tObj->Image[face][firstLevel + i];
if (!t->intel.image[face][i].image) {
fprintf(stderr, "no image %d %d\n", face, i);
break; /* can't happen */
}
t->intel.image[face][i].offset =
y * pitch + x * t->intel.texelBytes;
t->intel.image[face][i].internalFormat = baseImage->_BaseFormat;
d >>= 1;
x += step_offsets[face][0] * d;
y += step_offsets[face][1] * d;
}
}
break;
}
/* The i915 (and related graphics cores) do not support GL_CLAMP. The
* Intel drivers for "other operating systems" implement GL_CLAMP as
* GL_CLAMP_TO_EDGE, so the same is done here.
*/
static GLuint
translate_wrap_mode(GLenum wrap)
{
switch (wrap) {
case GL_REPEAT:
return TEXCOORDMODE_WRAP;
case GL_CLAMP:
case GL_CLAMP_TO_EDGE:
return TEXCOORDMODE_CLAMP; /* not really correct */
case GL_CLAMP_TO_BORDER:
return TEXCOORDMODE_CLAMP_BORDER;
case GL_MIRRORED_REPEAT:
return TEXCOORDMODE_MIRROR;
default:
pitch = tObj->Image[0][firstLevel]->Width * t->intel.texelBytes;
pitch = (pitch + 3) & ~3;
t->intel.base.dirty_images[0] = ~0;
for ( total_height = i = 0 ; i < numLevels ; i++ ) {
t->intel.image[0][i].image = tObj->Image[0][firstLevel + i];
if (!t->intel.image[0][i].image)
break;
t->intel.image[0][i].offset = total_height * pitch;
t->intel.image[0][i].internalFormat = baseImage->_BaseFormat;
if (t->intel.image[0][i].image->IsCompressed)
{
if (t->intel.image[0][i].image->Height > 4)
total_height += t->intel.image[0][i].image->Height/4;
else
total_height += 1;
}
else
total_height += MAX2(2, t->intel.image[0][i].image->Height);
}
break;
return TEXCOORDMODE_WRAP;
}
t->intel.Pitch = pitch;
t->intel.base.totalSize = total_height*pitch;
t->intel.max_level = i-1;
t->Setup[I830_TEXREG_TM0S1] =
(((tObj->Image[0][firstLevel]->Height - 1) << TM0S1_HEIGHT_SHIFT) |
((tObj->Image[0][firstLevel]->Width - 1) << TM0S1_WIDTH_SHIFT) |
textureFormat);
t->Setup[I830_TEXREG_TM0S2] =
(((pitch / 4) - 1) << TM0S2_PITCH_SHIFT) |
TM0S2_CUBE_FACE_ENA_MASK;
t->Setup[I830_TEXREG_TM0S3] &= ~TM0S3_MAX_MIP_MASK;
t->Setup[I830_TEXREG_TM0S3] &= ~TM0S3_MIN_MIP_MASK;
t->Setup[I830_TEXREG_TM0S3] |= ((numLevels - 1)*4) << TM0S3_MIN_MIP_SHIFT;
t->intel.dirty = I830_UPLOAD_TEX_ALL;
return intelUploadTexImages( &i830->intel, &t->intel, 0 );
}
static void i830_import_tex_unit( i830ContextPtr i830,
i830TextureObjectPtr t,
GLuint unit )
/* Recalculate all state from scratch. Perhaps not the most
* efficient, but this has gotten complex enough that we need
* something which is understandable and reliable.
*/
static GLboolean
i830_update_tex_unit(struct intel_context *intel, GLuint unit, GLuint ss3)
{
if(INTEL_DEBUG&DEBUG_TEXTURE)
fprintf(stderr, "%s unit(%d)\n", __FUNCTION__, unit);
if (i830->intel.CurrentTexObj[unit])
i830->intel.CurrentTexObj[unit]->base.bound &= ~(1U << unit);
GLcontext *ctx = &intel->ctx;
struct i830_context *i830 = i830_context(ctx);
struct gl_texture_object *tObj = ctx->Texture.Unit[unit]._Current;
struct intel_texture_object *intelObj = intel_texture_object(tObj);
struct gl_texture_image *firstImage;
GLuint *state = i830->state.Tex[unit];
i830->intel.CurrentTexObj[unit] = (intelTextureObjectPtr)t;
t->intel.base.bound |= (1 << unit);
memset(state, 0, sizeof(state));
I830_STATECHANGE( i830, I830_UPLOAD_TEX(unit) );
i830->state.Tex[unit][I830_TEXREG_TM0LI] = (_3DSTATE_LOAD_STATE_IMMEDIATE_2 |
(LOAD_TEXTURE_MAP0 << unit) | 4);
i830->state.Tex[unit][I830_TEXREG_TM0S0] = (TM0S0_USE_FENCE |
t->intel.TextureOffset);
i830->state.Tex[unit][I830_TEXREG_TM0S1] = t->Setup[I830_TEXREG_TM0S1];
i830->state.Tex[unit][I830_TEXREG_TM0S2] = t->Setup[I830_TEXREG_TM0S2];
i830->state.Tex[unit][I830_TEXREG_TM0S3] &= TM0S3_LOD_BIAS_MASK;
i830->state.Tex[unit][I830_TEXREG_TM0S3] |= (t->Setup[I830_TEXREG_TM0S3] &
~TM0S3_LOD_BIAS_MASK);
i830->state.Tex[unit][I830_TEXREG_TM0S4] = t->Setup[I830_TEXREG_TM0S4];
i830->state.Tex[unit][I830_TEXREG_MCS] = (t->Setup[I830_TEXREG_MCS] &
~MAP_UNIT_MASK);
i830->state.Tex[unit][I830_TEXREG_CUBE] = t->Setup[I830_TEXREG_CUBE];
i830->state.Tex[unit][I830_TEXREG_MCS] |= MAP_UNIT(unit);
t->intel.dirty &= ~I830_UPLOAD_TEX(unit);
}
static GLboolean enable_tex_common( GLcontext *ctx, GLuint unit )
{
i830ContextPtr i830 = I830_CONTEXT(ctx);
struct gl_texture_unit *texUnit = &ctx->Texture.Unit[unit];
struct gl_texture_object *tObj = texUnit->_Current;
i830TextureObjectPtr t = (i830TextureObjectPtr)tObj->DriverData;
if (0) fprintf(stderr, "%s\n", __FUNCTION__);
/* Fallback if there's a texture border */
if ( tObj->Image[0][tObj->BaseLevel]->Border > 0 ) {
fprintf(stderr, "Texture border\n");
if (!intel_finalize_mipmap_tree(intel, unit))
return GL_FALSE;
/* Get first image here, since intelObj->firstLevel will get set in
* the intel_finalize_mipmap_tree() call above.
*/
firstImage = tObj->Image[0][intelObj->firstLevel];
i830->state.tex_buffer[unit] = intelObj->mt->region->buffer;
i830->state.tex_offset[unit] = intel_miptree_image_offset(intelObj->mt, 0,
intelObj->
firstLevel);
state[I830_TEXREG_TM0LI] = (_3DSTATE_LOAD_STATE_IMMEDIATE_2 |
(LOAD_TEXTURE_MAP0 << unit) | 4);
/* state[I830_TEXREG_TM0S0] = (TM0S0_USE_FENCE | */
/* t->intel.TextureOffset); */
state[I830_TEXREG_TM0S1] =
(((firstImage->Height - 1) << TM0S1_HEIGHT_SHIFT) |
((firstImage->Width - 1) << TM0S1_WIDTH_SHIFT) |
translate_texture_format(firstImage->TexFormat->MesaFormat));
state[I830_TEXREG_TM0S2] =
(((((intelObj->mt->pitch * intelObj->mt->cpp) / 4) -
1) << TM0S2_PITCH_SHIFT) | TM0S2_CUBE_FACE_ENA_MASK);
{
if (tObj->Target == GL_TEXTURE_CUBE_MAP)
state[I830_TEXREG_CUBE] = (_3DSTATE_MAP_CUBE | MAP_UNIT(unit) |
CUBE_NEGX_ENABLE |
CUBE_POSX_ENABLE |
CUBE_NEGY_ENABLE |
CUBE_POSY_ENABLE |
CUBE_NEGZ_ENABLE | CUBE_POSZ_ENABLE);
else
state[I830_TEXREG_CUBE] = (_3DSTATE_MAP_CUBE | MAP_UNIT(unit));
}
/* Upload teximages (not pipelined)
*/
if (t->intel.base.dirty_images[0]) {
if (!i830SetTexImages( i830, tObj )) {
return GL_FALSE;
{
GLuint minFilt, mipFilt, magFilt;
switch (tObj->MinFilter) {
case GL_NEAREST:
minFilt = FILTER_NEAREST;
mipFilt = MIPFILTER_NONE;
break;
case GL_LINEAR:
minFilt = FILTER_LINEAR;
mipFilt = MIPFILTER_NONE;
break;
case GL_NEAREST_MIPMAP_NEAREST:
minFilt = FILTER_NEAREST;
mipFilt = MIPFILTER_NEAREST;
break;
case GL_LINEAR_MIPMAP_NEAREST:
minFilt = FILTER_LINEAR;
mipFilt = MIPFILTER_NEAREST;
break;
case GL_NEAREST_MIPMAP_LINEAR:
minFilt = FILTER_NEAREST;
mipFilt = MIPFILTER_LINEAR;
break;
case GL_LINEAR_MIPMAP_LINEAR:
minFilt = FILTER_LINEAR;
mipFilt = MIPFILTER_LINEAR;
break;
default:
return GL_FALSE;
}
if (tObj->MaxAnisotropy > 1.0) {
minFilt = FILTER_ANISOTROPIC;
magFilt = FILTER_ANISOTROPIC;
}
else {
switch (tObj->MagFilter) {
case GL_NEAREST:
magFilt = FILTER_NEAREST;
break;
case GL_LINEAR:
magFilt = FILTER_LINEAR;
break;
default:
return GL_FALSE;
}
}
state[I830_TEXREG_TM0S3] = i830->lodbias_tm0s3[unit];
#if 0
/* YUV conversion:
*/
if (firstImage->TexFormat->MesaFormat == MESA_FORMAT_YCBCR ||
firstImage->TexFormat->MesaFormat == MESA_FORMAT_YCBCR_REV)
state[I830_TEXREG_TM0S3] |= SS2_COLORSPACE_CONVERSION;
#endif
state[I830_TEXREG_TM0S3] |= ((intelObj->lastLevel -
intelObj->firstLevel) *
4) << TM0S3_MIN_MIP_SHIFT;
state[I830_TEXREG_TM0S3] |= ((minFilt << TM0S3_MIN_FILTER_SHIFT) |
(mipFilt << TM0S3_MIP_FILTER_SHIFT) |
(magFilt << TM0S3_MAG_FILTER_SHIFT));
}
/* Update state if this is a different texture object to last
* time.
*/
if (i830->intel.CurrentTexObj[unit] != &t->intel ||
(t->intel.dirty & I830_UPLOAD_TEX(unit))) {
i830_import_tex_unit( i830, t, unit);
{
GLenum ws = tObj->WrapS;
GLenum wt = tObj->WrapT;
/* 3D textures not available on i830
*/
if (tObj->Target == GL_TEXTURE_3D)
return GL_FALSE;
state[I830_TEXREG_MCS] = (_3DSTATE_MAP_COORD_SET_CMD |
MAP_UNIT(unit) |
ENABLE_TEXCOORD_PARAMS |
ss3 |
ENABLE_ADDR_V_CNTL |
TEXCOORD_ADDR_V_MODE(translate_wrap_mode(wt))
| ENABLE_ADDR_U_CNTL |
TEXCOORD_ADDR_U_MODE(translate_wrap_mode
(ws)));
}
state[I830_TEXREG_TM0S4] = INTEL_PACKCOLOR8888(tObj->_BorderChan[0],
tObj->_BorderChan[1],
tObj->_BorderChan[2],
tObj->_BorderChan[3]);
I830_ACTIVESTATE(i830, I830_UPLOAD_TEX(unit), GL_TRUE);
return GL_TRUE;
}
static GLboolean enable_tex_rect( GLcontext *ctx, GLuint unit )
{
i830ContextPtr i830 = I830_CONTEXT(ctx);
GLuint mcs = i830->state.Tex[unit][I830_TEXREG_MCS];
mcs &= ~TEXCOORDS_ARE_NORMAL;
mcs |= TEXCOORDS_ARE_IN_TEXELUNITS;
if ((mcs != i830->state.Tex[unit][I830_TEXREG_MCS])
|| (0 != i830->state.Tex[unit][I830_TEXREG_CUBE])) {
I830_STATECHANGE(i830, I830_UPLOAD_TEX(unit));
i830->state.Tex[unit][I830_TEXREG_MCS] = mcs;
i830->state.Tex[unit][I830_TEXREG_CUBE] = 0;
}
return GL_TRUE;
}
static GLboolean enable_tex_2d( GLcontext *ctx, GLuint unit )
{
i830ContextPtr i830 = I830_CONTEXT(ctx);
GLuint mcs = i830->state.Tex[unit][I830_TEXREG_MCS];
mcs &= ~TEXCOORDS_ARE_IN_TEXELUNITS;
mcs |= TEXCOORDS_ARE_NORMAL;
if ((mcs != i830->state.Tex[unit][I830_TEXREG_MCS])
|| (0 != i830->state.Tex[unit][I830_TEXREG_CUBE])) {
I830_STATECHANGE(i830, I830_UPLOAD_TEX(unit));
i830->state.Tex[unit][I830_TEXREG_MCS] = mcs;
i830->state.Tex[unit][I830_TEXREG_CUBE] = 0;
}
return GL_TRUE;
}
static GLboolean enable_tex_cube( GLcontext *ctx, GLuint unit )
{
i830ContextPtr i830 = I830_CONTEXT(ctx);
struct gl_texture_unit *texUnit = &ctx->Texture.Unit[unit];
struct gl_texture_object *tObj = texUnit->_Current;
i830TextureObjectPtr t = (i830TextureObjectPtr)tObj->DriverData;
GLuint mcs = i830->state.Tex[unit][I830_TEXREG_MCS];
const GLuint cube = CUBE_NEGX_ENABLE | CUBE_POSX_ENABLE
| CUBE_NEGY_ENABLE | CUBE_POSY_ENABLE
| CUBE_NEGZ_ENABLE | CUBE_POSZ_ENABLE;
GLuint face;
mcs &= ~TEXCOORDS_ARE_IN_TEXELUNITS;
mcs |= TEXCOORDS_ARE_NORMAL;
if ((mcs != i830->state.Tex[unit][I830_TEXREG_MCS])
|| (cube != i830->state.Tex[unit][I830_TEXREG_CUBE])) {
I830_STATECHANGE(i830, I830_UPLOAD_TEX(unit));
i830->state.Tex[unit][I830_TEXREG_MCS] = mcs;
i830->state.Tex[unit][I830_TEXREG_CUBE] = cube;
}
/* Upload teximages (not pipelined)
/* memcmp was already disabled, but definitely won't work as the
* region might now change and that wouldn't be detected:
*/
if ( t->intel.base.dirty_images[0] || t->intel.base.dirty_images[1] ||
t->intel.base.dirty_images[2] || t->intel.base.dirty_images[3] ||
t->intel.base.dirty_images[4] || t->intel.base.dirty_images[5] ) {
i830SetTexImages( i830, tObj );
}
I830_STATECHANGE(i830, I830_UPLOAD_TEX(unit));
return GL_TRUE;
}
/* upload (per face) */
for (face = 0; face < 6; face++) {
if (t->intel.base.dirty_images[face]) {
if (!intelUploadTexImages( &i830->intel, &t->intel, face )) {
return GL_FALSE;
}
void
i830UpdateTextureState(struct intel_context *intel)
{
struct i830_context *i830 = i830_context(&intel->ctx);
GLboolean ok = GL_TRUE;
GLuint i;
for (i = 0; i < I830_TEX_UNITS && ok; i++) {
switch (intel->ctx.Texture.Unit[i]._ReallyEnabled) {
case TEXTURE_1D_BIT:
case TEXTURE_2D_BIT:
case TEXTURE_CUBE_BIT:
ok = i830_update_tex_unit(intel, i, TEXCOORDS_ARE_NORMAL);
break;
case TEXTURE_RECT_BIT:
ok = i830_update_tex_unit(intel, i, TEXCOORDS_ARE_IN_TEXELUNITS);
break;
case 0:
if (i830->state.active & I830_UPLOAD_TEX(i))
I830_ACTIVESTATE(i830, I830_UPLOAD_TEX(i), GL_FALSE);
break;
case TEXTURE_3D_BIT:
default:
ok = GL_FALSE;
break;
}
}
FALLBACK(intel, I830_FALLBACK_TEXTURE, !ok);
}
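/* Note (added for clarity): the i830 hardware has no 3D texture support
 * (see the GL_TEXTURE_3D early return above), so TEXTURE_3D_BIT
 * deliberately falls through to the default case and forces the
 * texture fallback.
 */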
static GLboolean disable_tex( GLcontext *ctx, GLuint unit )
{
i830ContextPtr i830 = I830_CONTEXT(ctx);
/* This is happening too often. I need to conditionally send diffuse
* state to the card. Perhaps a diffuse dirty flag of some kind.
* Will need to change this logic if more than 2 texture units are
* used. We need to only do this up to the last unit enabled, or unit
* one if nothing is enabled.
*/
if ( i830->intel.CurrentTexObj[unit] != NULL ) {
/* The old texture is no longer bound to this texture unit.
* Mark it as such.
*/
i830->intel.CurrentTexObj[unit]->base.bound &= ~(1U << 0);
i830->intel.CurrentTexObj[unit] = NULL;
}
return GL_TRUE;
}
static GLboolean i830UpdateTexUnit( GLcontext *ctx, GLuint unit )
{
struct gl_texture_unit *texUnit = &ctx->Texture.Unit[unit];
if (texUnit->_ReallyEnabled &&
INTEL_CONTEXT(ctx)->intelScreen->tex.size < 2048 * 1024)
return GL_FALSE;
switch(texUnit->_ReallyEnabled) {
case TEXTURE_1D_BIT:
case TEXTURE_2D_BIT:
return (enable_tex_common( ctx, unit ) &&
enable_tex_2d( ctx, unit ));
case TEXTURE_RECT_BIT:
return (enable_tex_common( ctx, unit ) &&
enable_tex_rect( ctx, unit ));
case TEXTURE_CUBE_BIT:
return (enable_tex_common( ctx, unit ) &&
enable_tex_cube( ctx, unit ));
case 0:
return disable_tex( ctx, unit );
default:
return GL_FALSE;
}
}
void i830UpdateTextureState( intelContextPtr intel )
{
i830ContextPtr i830 = I830_CONTEXT(intel);
GLcontext *ctx = &intel->ctx;
GLboolean ok;
if (0) fprintf(stderr, "%s\n", __FUNCTION__);
I830_ACTIVESTATE(i830, I830_UPLOAD_TEX_ALL, GL_FALSE);
ok = (i830UpdateTexUnit( ctx, 0 ) &&
i830UpdateTexUnit( ctx, 1 ) &&
i830UpdateTexUnit( ctx, 2 ) &&
i830UpdateTexUnit( ctx, 3 ));
FALLBACK( intel, I830_FALLBACK_TEXTURE, !ok );
FALLBACK(intel, I830_FALLBACK_TEXTURE, !ok);
if (ok)
i830EmitTextureBlend( i830 );
i830EmitTextureBlend(i830);
}


@@ -28,14 +28,15 @@
#include "i830_context.h"
#include "i830_reg.h"
#include "intel_batchbuffer.h"
#include "intel_regions.h"
#include "tnl/t_context.h"
#include "tnl/t_vertex.h"
static GLboolean i830_check_vertex_size( intelContextPtr intel,
GLuint expected );
#define FILE_DEBUG_FLAG DEBUG_STATE
static GLboolean i830_check_vertex_size(struct intel_context *intel,
GLuint expected);
#define SZ_TO_HW(sz) ((sz-2)&0x3)
#define EMIT_SZ(sz) (EMIT_1F + (sz) - 1)
@@ -59,10 +60,11 @@ do { \
#define VRTX_TEX_SET_FMT(n, x) ((x)<<((n)*2))
#define TEXBIND_SET(n, x) ((x)<<((n)*4))
static void i830_render_start( intelContextPtr intel )
static void
i830_render_start(struct intel_context *intel)
{
GLcontext *ctx = &intel->ctx;
i830ContextPtr i830 = I830_CONTEXT(intel);
struct i830_context *i830 = i830_context(ctx);
TNLcontext *tnl = TNL_CONTEXT(ctx);
struct vertex_buffer *VB = &tnl->vb;
DECLARE_RENDERINPUTS(index_bitset);
@@ -70,7 +72,7 @@ static void i830_render_start( intelContextPtr intel )
GLuint v2 = _3DSTATE_VFT1_CMD;
GLuint mcsb1 = 0;
RENDERINPUTS_COPY( index_bitset, tnl->render_inputs_bitset );
RENDERINPUTS_COPY(index_bitset, tnl->render_inputs_bitset);
/* Important:
*/
@@ -80,196 +82,215 @@ static void i830_render_start( intelContextPtr intel )
/* EMIT_ATTR's must be in order as they tell t_vertex.c how to
* build up a hardware vertex.
*/
if (RENDERINPUTS_TEST_RANGE( index_bitset, _TNL_FIRST_TEX, _TNL_LAST_TEX )) {
EMIT_ATTR( _TNL_ATTRIB_POS, EMIT_4F_VIEWPORT, VFT0_XYZW );
if (RENDERINPUTS_TEST_RANGE(index_bitset, _TNL_FIRST_TEX, _TNL_LAST_TEX)) {
EMIT_ATTR(_TNL_ATTRIB_POS, EMIT_4F_VIEWPORT, VFT0_XYZW);
intel->coloroffset = 4;
}
else {
EMIT_ATTR( _TNL_ATTRIB_POS, EMIT_3F_VIEWPORT, VFT0_XYZ );
EMIT_ATTR(_TNL_ATTRIB_POS, EMIT_3F_VIEWPORT, VFT0_XYZ);
intel->coloroffset = 3;
}
if (RENDERINPUTS_TEST( index_bitset, _TNL_ATTRIB_POINTSIZE )) {
EMIT_ATTR( _TNL_ATTRIB_POINTSIZE, EMIT_1F, VFT0_POINT_WIDTH );
if (RENDERINPUTS_TEST(index_bitset, _TNL_ATTRIB_POINTSIZE)) {
EMIT_ATTR(_TNL_ATTRIB_POINTSIZE, EMIT_1F, VFT0_POINT_WIDTH);
}
EMIT_ATTR( _TNL_ATTRIB_COLOR0, EMIT_4UB_4F_BGRA, VFT0_DIFFUSE );
EMIT_ATTR(_TNL_ATTRIB_COLOR0, EMIT_4UB_4F_BGRA, VFT0_DIFFUSE);
intel->specoffset = 0;
if (RENDERINPUTS_TEST( index_bitset, _TNL_ATTRIB_COLOR1 ) ||
RENDERINPUTS_TEST( index_bitset, _TNL_ATTRIB_FOG )) {
if (RENDERINPUTS_TEST( index_bitset, _TNL_ATTRIB_COLOR1 )) {
if (RENDERINPUTS_TEST(index_bitset, _TNL_ATTRIB_COLOR1) ||
RENDERINPUTS_TEST(index_bitset, _TNL_ATTRIB_FOG)) {
if (RENDERINPUTS_TEST(index_bitset, _TNL_ATTRIB_COLOR1)) {
intel->specoffset = intel->coloroffset + 1;
EMIT_ATTR( _TNL_ATTRIB_COLOR1, EMIT_3UB_3F_BGR, VFT0_SPEC );
EMIT_ATTR(_TNL_ATTRIB_COLOR1, EMIT_3UB_3F_BGR, VFT0_SPEC);
}
else
EMIT_PAD( 3 );
EMIT_PAD(3);
if (RENDERINPUTS_TEST( index_bitset, _TNL_ATTRIB_FOG ))
EMIT_ATTR( _TNL_ATTRIB_FOG, EMIT_1UB_1F, VFT0_SPEC );
if (RENDERINPUTS_TEST(index_bitset, _TNL_ATTRIB_FOG))
EMIT_ATTR(_TNL_ATTRIB_FOG, EMIT_1UB_1F, VFT0_SPEC);
else
EMIT_PAD( 1 );
EMIT_PAD(1);
}
if (RENDERINPUTS_TEST_RANGE( index_bitset, _TNL_FIRST_TEX, _TNL_LAST_TEX )) {
if (RENDERINPUTS_TEST_RANGE(index_bitset, _TNL_FIRST_TEX, _TNL_LAST_TEX)) {
int i, count = 0;
for (i = 0; i < I830_TEX_UNITS; i++) {
if (RENDERINPUTS_TEST( index_bitset, _TNL_ATTRIB_TEX(i) )) {
if (RENDERINPUTS_TEST(index_bitset, _TNL_ATTRIB_TEX(i))) {
GLuint sz = VB->TexCoordPtr[i]->size;
GLuint emit;
GLuint mcs = (i830->state.Tex[i][I830_TEXREG_MCS] &
GLuint mcs = (i830->state.Tex[i][I830_TEXREG_MCS] &
~TEXCOORDTYPE_MASK);
switch (sz) {
case 1:
case 2:
emit = EMIT_2F;
sz = 2;
mcs |= TEXCOORDTYPE_CARTESIAN;
break;
case 3:
emit = EMIT_3F;
sz = 3;
mcs |= TEXCOORDTYPE_VECTOR;
break;
case 4:
emit = EMIT_3F_XYW;
sz = 3;
mcs |= TEXCOORDTYPE_HOMOGENEOUS;
break;
default:
continue;
};
switch (sz) {
case 1:
case 2:
emit = EMIT_2F;
sz = 2;
mcs |= TEXCOORDTYPE_CARTESIAN;
break;
case 3:
emit = EMIT_3F;
sz = 3;
mcs |= TEXCOORDTYPE_VECTOR;
break;
case 4:
emit = EMIT_3F_XYW;
sz = 3;
mcs |= TEXCOORDTYPE_HOMOGENEOUS;
break;
default:
continue;
};
EMIT_ATTR( _TNL_ATTRIB_TEX0+i, emit, 0 );
v2 |= VRTX_TEX_SET_FMT(count, SZ_TO_HW(sz));
mcsb1 |= (count+8)<<(i*4);
if (mcs != i830->state.Tex[i][I830_TEXREG_MCS]) {
I830_STATECHANGE(i830, I830_UPLOAD_TEX(i));
i830->state.Tex[i][I830_TEXREG_MCS] = mcs;
}
EMIT_ATTR(_TNL_ATTRIB_TEX0 + i, emit, 0);
v2 |= VRTX_TEX_SET_FMT(count, SZ_TO_HW(sz));
mcsb1 |= (count + 8) << (i * 4);
count++;
}
if (mcs != i830->state.Tex[i][I830_TEXREG_MCS]) {
I830_STATECHANGE(i830, I830_UPLOAD_TEX(i));
i830->state.Tex[i][I830_TEXREG_MCS] = mcs;
}
count++;
}
}
v0 |= VFT0_TEX_COUNT(count);
}
/* Only need to change the vertex emit code if there has been a
* statechange to a new hardware vertex format:
*/
if (v0 != i830->state.Ctx[I830_CTXREG_VF] ||
v2 != i830->state.Ctx[I830_CTXREG_VF2] ||
mcsb1 != i830->state.Ctx[I830_CTXREG_MCSB1] ||
!RENDERINPUTS_EQUAL( index_bitset, i830->last_index_bitset )) {
I830_STATECHANGE( i830, I830_UPLOAD_CTX );
!RENDERINPUTS_EQUAL(index_bitset, i830->last_index_bitset)) {
int k;
I830_STATECHANGE(i830, I830_UPLOAD_CTX);
/* Must do this *after* statechange, so as not to affect
* buffered vertices reliant on the old state:
*/
intel->vertex_size =
_tnl_install_attrs( ctx,
intel->vertex_attrs,
intel->vertex_attr_count,
intel->ViewportMatrix.m, 0 );
intel->vertex_size =
_tnl_install_attrs(ctx,
intel->vertex_attrs,
intel->vertex_attr_count,
intel->ViewportMatrix.m, 0);
intel->vertex_size >>= 2;
i830->state.Ctx[I830_CTXREG_VF] = v0;
i830->state.Ctx[I830_CTXREG_VF2] = v2;
i830->state.Ctx[I830_CTXREG_MCSB1] = mcsb1;
RENDERINPUTS_COPY( i830->last_index_bitset, index_bitset );
RENDERINPUTS_COPY(i830->last_index_bitset, index_bitset);
assert(i830_check_vertex_size( intel, intel->vertex_size ));
k = i830_check_vertex_size(intel, intel->vertex_size);
assert(k);
}
}
static void i830_reduced_primitive_state( intelContextPtr intel,
GLenum rprim )
static void
i830_reduced_primitive_state(struct intel_context *intel, GLenum rprim)
{
i830ContextPtr i830 = I830_CONTEXT(intel);
GLuint st1 = i830->state.Stipple[I830_STPREG_ST1];
struct i830_context *i830 = i830_context(&intel->ctx);
GLuint st1 = i830->state.Stipple[I830_STPREG_ST1];
st1 &= ~ST1_ENABLE;
st1 &= ~ST1_ENABLE;
switch (rprim) {
case GL_TRIANGLES:
if (intel->ctx.Polygon.StippleFlag &&
intel->hw_stipple)
st1 |= ST1_ENABLE;
break;
case GL_LINES:
case GL_POINTS:
default:
break;
}
switch (rprim) {
case GL_TRIANGLES:
if (intel->ctx.Polygon.StippleFlag && intel->hw_stipple)
st1 |= ST1_ENABLE;
break;
case GL_LINES:
case GL_POINTS:
default:
break;
}
i830->intel.reduced_primitive = rprim;
i830->intel.reduced_primitive = rprim;
if (st1 != i830->state.Stipple[I830_STPREG_ST1]) {
I830_STATECHANGE(i830, I830_UPLOAD_STIPPLE);
i830->state.Stipple[I830_STPREG_ST1] = st1;
}
if (st1 != i830->state.Stipple[I830_STPREG_ST1]) {
INTEL_FIREVERTICES(intel);
I830_STATECHANGE(i830, I830_UPLOAD_STIPPLE);
i830->state.Stipple[I830_STPREG_ST1] = st1;
}
}
/* Pull apart the vertex format registers and figure out how large a
* vertex is supposed to be.
*/
static GLboolean i830_check_vertex_size( intelContextPtr intel,
GLuint expected )
static GLboolean
i830_check_vertex_size(struct intel_context *intel, GLuint expected)
{
i830ContextPtr i830 = I830_CONTEXT(intel);
struct i830_context *i830 = i830_context(&intel->ctx);
int vft0 = i830->current->Ctx[I830_CTXREG_VF];
int vft1 = i830->current->Ctx[I830_CTXREG_VF2];
int nrtex = (vft0 & VFT0_TEX_COUNT_MASK) >> VFT0_TEX_COUNT_SHIFT;
int i, sz = 0;
switch (vft0 & VFT0_XYZW_MASK) {
case VFT0_XY: sz = 2; break;
case VFT0_XYZ: sz = 3; break;
case VFT0_XYW: sz = 3; break;
case VFT0_XYZW: sz = 4; break;
default:
case VFT0_XY:
sz = 2;
break;
case VFT0_XYZ:
sz = 3;
break;
case VFT0_XYW:
sz = 3;
break;
case VFT0_XYZW:
sz = 4;
break;
default:
fprintf(stderr, "no xyzw specified\n");
return 0;
}
if (vft0 & VFT0_SPEC) sz++;
if (vft0 & VFT0_DIFFUSE) sz++;
if (vft0 & VFT0_DEPTH_OFFSET) sz++;
if (vft0 & VFT0_POINT_WIDTH) sz++;
for (i = 0 ; i < nrtex ; i++) {
if (vft0 & VFT0_SPEC)
sz++;
if (vft0 & VFT0_DIFFUSE)
sz++;
if (vft0 & VFT0_DEPTH_OFFSET)
sz++;
if (vft0 & VFT0_POINT_WIDTH)
sz++;
for (i = 0; i < nrtex; i++) {
switch (vft1 & VFT1_TEX0_MASK) {
case TEXCOORDFMT_2D: sz += 2; break;
case TEXCOORDFMT_3D: sz += 3; break;
case TEXCOORDFMT_4D: sz += 4; break;
case TEXCOORDFMT_1D: sz += 1; break;
case TEXCOORDFMT_2D:
sz += 2;
break;
case TEXCOORDFMT_3D:
sz += 3;
break;
case TEXCOORDFMT_4D:
sz += 4;
break;
case TEXCOORDFMT_1D:
sz += 1;
break;
}
vft1 >>= VFT1_TEX1_SHIFT;
}
if (sz != expected)
if (sz != expected)
fprintf(stderr, "vertex size mismatch %d/%d\n", sz, expected);
return sz == expected;
}
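/* Worked example (illustrative, not from the original source): a vertex
 * built by i830_render_start() with a VFT0_XYZW position, a diffuse
 * color and one TEXCOORDFMT_2D coordinate set adds up to
 * 4 + 1 + 2 = 7 dwords, so intel->vertex_size would be 7 and the check
 * above passes.
 */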
static void i830_emit_invarient_state( intelContextPtr intel )
static void
i830_emit_invarient_state(struct intel_context *intel)
{
BATCH_LOCALS;
BEGIN_BATCH( 200 );
OUT_BATCH(_3DSTATE_MAP_CUBE | MAP_UNIT(0));
OUT_BATCH(_3DSTATE_MAP_CUBE | MAP_UNIT(1));
OUT_BATCH(_3DSTATE_MAP_CUBE | MAP_UNIT(2));
OUT_BATCH(_3DSTATE_MAP_CUBE | MAP_UNIT(3));
BEGIN_BATCH(40, 0);
OUT_BATCH(_3DSTATE_DFLT_DIFFUSE_CMD);
OUT_BATCH(0);
@@ -282,37 +303,35 @@ static void i830_emit_invarient_state( intelContextPtr intel )
OUT_BATCH(_3DSTATE_FOG_MODE_CMD);
OUT_BATCH(FOGFUNC_ENABLE |
FOG_LINEAR_CONST |
FOGSRC_INDEX_Z |
ENABLE_FOG_DENSITY);
FOG_LINEAR_CONST | FOGSRC_INDEX_Z | ENABLE_FOG_DENSITY);
OUT_BATCH(0);
OUT_BATCH(0);
OUT_BATCH(_3DSTATE_MAP_TEX_STREAM_CMD |
MAP_UNIT(0) |
DISABLE_TEX_STREAM_BUMP |
ENABLE_TEX_STREAM_COORD_SET |
TEX_STREAM_COORD_SET(0) |
ENABLE_TEX_STREAM_MAP_IDX | TEX_STREAM_MAP_IDX(0));
MAP_UNIT(0) |
DISABLE_TEX_STREAM_BUMP |
ENABLE_TEX_STREAM_COORD_SET |
TEX_STREAM_COORD_SET(0) |
ENABLE_TEX_STREAM_MAP_IDX | TEX_STREAM_MAP_IDX(0));
OUT_BATCH(_3DSTATE_MAP_TEX_STREAM_CMD |
MAP_UNIT(1) |
DISABLE_TEX_STREAM_BUMP |
ENABLE_TEX_STREAM_COORD_SET |
TEX_STREAM_COORD_SET(1) |
ENABLE_TEX_STREAM_MAP_IDX | TEX_STREAM_MAP_IDX(1));
MAP_UNIT(1) |
DISABLE_TEX_STREAM_BUMP |
ENABLE_TEX_STREAM_COORD_SET |
TEX_STREAM_COORD_SET(1) |
ENABLE_TEX_STREAM_MAP_IDX | TEX_STREAM_MAP_IDX(1));
OUT_BATCH(_3DSTATE_MAP_TEX_STREAM_CMD |
MAP_UNIT(2) |
DISABLE_TEX_STREAM_BUMP |
ENABLE_TEX_STREAM_COORD_SET |
TEX_STREAM_COORD_SET(2) |
ENABLE_TEX_STREAM_MAP_IDX | TEX_STREAM_MAP_IDX(2));
MAP_UNIT(2) |
DISABLE_TEX_STREAM_BUMP |
ENABLE_TEX_STREAM_COORD_SET |
TEX_STREAM_COORD_SET(2) |
ENABLE_TEX_STREAM_MAP_IDX | TEX_STREAM_MAP_IDX(2));
OUT_BATCH(_3DSTATE_MAP_TEX_STREAM_CMD |
MAP_UNIT(3) |
DISABLE_TEX_STREAM_BUMP |
ENABLE_TEX_STREAM_COORD_SET |
TEX_STREAM_COORD_SET(3) |
ENABLE_TEX_STREAM_MAP_IDX | TEX_STREAM_MAP_IDX(3));
MAP_UNIT(3) |
DISABLE_TEX_STREAM_BUMP |
ENABLE_TEX_STREAM_COORD_SET |
TEX_STREAM_COORD_SET(3) |
ENABLE_TEX_STREAM_MAP_IDX | TEX_STREAM_MAP_IDX(3));
OUT_BATCH(_3DSTATE_MAP_COORD_TRANSFORM);
OUT_BATCH(DISABLE_TEX_TRANSFORM | TEXTURE_SET(0));
@@ -324,21 +343,13 @@ static void i830_emit_invarient_state( intelContextPtr intel )
OUT_BATCH(DISABLE_TEX_TRANSFORM | TEXTURE_SET(3));
OUT_BATCH(_3DSTATE_RASTER_RULES_CMD |
ENABLE_POINT_RASTER_RULE |
OGL_POINT_RASTER_RULE |
ENABLE_LINE_STRIP_PROVOKE_VRTX |
ENABLE_TRI_FAN_PROVOKE_VRTX |
ENABLE_TRI_STRIP_PROVOKE_VRTX |
LINE_STRIP_PROVOKE_VRTX(1) |
TRI_FAN_PROVOKE_VRTX(2) |
TRI_STRIP_PROVOKE_VRTX(2));
OUT_BATCH(_3DSTATE_SCISSOR_ENABLE_CMD |
DISABLE_SCISSOR_RECT);
OUT_BATCH(_3DSTATE_SCISSOR_RECT_0_CMD);
OUT_BATCH(0);
OUT_BATCH(0);
ENABLE_POINT_RASTER_RULE |
OGL_POINT_RASTER_RULE |
ENABLE_LINE_STRIP_PROVOKE_VRTX |
ENABLE_TRI_FAN_PROVOKE_VRTX |
ENABLE_TRI_STRIP_PROVOKE_VRTX |
LINE_STRIP_PROVOKE_VRTX(1) |
TRI_FAN_PROVOKE_VRTX(2) | TRI_STRIP_PROVOKE_VRTX(2));
OUT_BATCH(_3DSTATE_VERTEX_TRANSFORM);
OUT_BATCH(DISABLE_VIEWPORT_TRANSFORM | DISABLE_PERSPECTIVE_DIVIDE);
@@ -349,7 +360,7 @@ static void i830_emit_invarient_state( intelContextPtr intel )
OUT_BATCH(_3DSTATE_COLOR_FACTOR_CMD);
OUT_BATCH(0x80808080); /* .5 required in alpha for GL_DOT3_RGBA_EXT */
OUT_BATCH(0x80808080); /* .5 required in alpha for GL_DOT3_RGBA_EXT */
ADVANCE_BATCH();
}
@@ -358,33 +369,39 @@ static void i830_emit_invarient_state( intelContextPtr intel )
#define emit( intel, state, size ) \
do { \
int k; \
BEGIN_BATCH( size / sizeof(GLuint)); \
for (k = 0 ; k < size / sizeof(GLuint) ; k++) \
BEGIN_BATCH(size / sizeof(GLuint), 0); \
for (k = 0 ; k < size / sizeof(GLuint) ; k++) { \
if (0) _mesa_printf(" 0x%08x\n", state[k]); \
OUT_BATCH(state[k]); \
} \
ADVANCE_BATCH(); \
} while (0);
} while (0)
static GLuint get_state_size( struct i830_hw_state *state )
static GLuint
get_state_size(struct i830_hw_state *state)
{
GLuint dirty = state->active & ~state->emitted;
GLuint sz = 0;
GLuint i;
if (dirty & I830_UPLOAD_CTX)
if (dirty & I830_UPLOAD_INVARIENT)
sz += 40 * sizeof(int);
if (dirty & I830_UPLOAD_CTX)
sz += sizeof(state->Ctx);
if (dirty & I830_UPLOAD_BUFFERS)
if (dirty & I830_UPLOAD_BUFFERS)
sz += sizeof(state->Buffer);
if (dirty & I830_UPLOAD_STIPPLE)
if (dirty & I830_UPLOAD_STIPPLE)
sz += sizeof(state->Stipple);
for (i = 0; i < I830_TEX_UNITS; i++) {
if ((dirty & I830_UPLOAD_TEX(i)))
sz += sizeof(state->Tex[i]);
if ((dirty & I830_UPLOAD_TEX(i)))
sz += sizeof(state->Tex[i]);
if (dirty & I830_UPLOAD_TEXBLEND(i))
sz += state->TexBlendWordsUsed[i] * 4;
if (dirty & I830_UPLOAD_TEXBLEND(i))
sz += state->TexBlendWordsUsed[i] * 4;
}
return sz;
@@ -393,135 +410,197 @@ static GLuint get_state_size( struct i830_hw_state *state )
/* Push the state into the sarea and/or texture memory.
*/
static void i830_emit_state( intelContextPtr intel )
static void
i830_emit_state(struct intel_context *intel)
{
i830ContextPtr i830 = I830_CONTEXT(intel);
struct i830_context *i830 = i830_context(&intel->ctx);
struct i830_hw_state *state = i830->current;
int i;
GLuint dirty = state->active & ~state->emitted;
GLuint counter = intel->batch.counter;
GLuint dirty;
BATCH_LOCALS;
if (intel->batch.space < get_state_size(state)) {
intelFlushBatch(intel, GL_TRUE);
dirty = state->active & ~state->emitted;
counter = intel->batch.counter;
}
/* We don't hold the lock at this point, so want to make sure that
* there won't be a buffer wrap.
*
* It might be better to talk about explicit places where
* scheduling is allowed, rather than assume that it is whenever a
* batchbuffer fills up.
*/
intel_batchbuffer_require_space(intel->batch, get_state_size(state), 0);
/* Do this here as we may have flushed the batchbuffer above,
* causing more state to be dirty!
*/
dirty = state->active & ~state->emitted;
if (dirty & I830_UPLOAD_INVARIENT) {
DBG("I830_UPLOAD_INVARIENT:\n");
i830_emit_invarient_state(intel);
}
if (dirty & I830_UPLOAD_CTX) {
if (VERBOSE) fprintf(stderr, "I830_UPLOAD_CTX:\n");
emit( i830, state->Ctx, sizeof(state->Ctx) );
DBG("I830_UPLOAD_CTX:\n");
emit(i830, state->Ctx, sizeof(state->Ctx));
}
if (dirty & I830_UPLOAD_BUFFERS) {
if (VERBOSE) fprintf(stderr, "I830_UPLOAD_BUFFERS:\n");
emit( i830, state->Buffer, sizeof(state->Buffer) );
DBG("I830_UPLOAD_BUFFERS:\n");
BEGIN_BATCH(I830_DEST_SETUP_SIZE + 2, 0);
OUT_BATCH(state->Buffer[I830_DESTREG_CBUFADDR0]);
OUT_BATCH(state->Buffer[I830_DESTREG_CBUFADDR1]);
OUT_RELOC(state->draw_region->buffer,
DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_WRITE,
DRM_BO_MASK_MEM | DRM_BO_FLAG_WRITE, 0);
if (state->depth_region) {
OUT_BATCH(state->Buffer[I830_DESTREG_DBUFADDR0]);
OUT_BATCH(state->Buffer[I830_DESTREG_DBUFADDR1]);
OUT_RELOC(state->depth_region->buffer,
DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_WRITE,
DRM_BO_MASK_MEM | DRM_BO_FLAG_WRITE, 0);
}
OUT_BATCH(state->Buffer[I830_DESTREG_DV0]);
OUT_BATCH(state->Buffer[I830_DESTREG_DV1]);
OUT_BATCH(state->Buffer[I830_DESTREG_SENABLE]);
OUT_BATCH(state->Buffer[I830_DESTREG_SR0]);
OUT_BATCH(state->Buffer[I830_DESTREG_SR1]);
OUT_BATCH(state->Buffer[I830_DESTREG_SR2]);
ADVANCE_BATCH();
}
if (dirty & I830_UPLOAD_STIPPLE) {
if (VERBOSE) fprintf(stderr, "I830_UPLOAD_STIPPLE:\n");
emit( i830, state->Stipple, sizeof(state->Stipple) );
DBG("I830_UPLOAD_STIPPLE:\n");
emit(i830, state->Stipple, sizeof(state->Stipple));
}
for (i = 0; i < I830_TEX_UNITS; i++) {
if ((dirty & I830_UPLOAD_TEX(i))) {
if (VERBOSE) fprintf(stderr, "I830_UPLOAD_TEX(%d):\n", i);
emit( i830, state->Tex[i], sizeof(state->Tex[i]));
}
if ((dirty & I830_UPLOAD_TEX(i))) {
DBG("I830_UPLOAD_TEX(%d):\n", i);
BEGIN_BATCH(I830_TEX_SETUP_SIZE + 1, 0);
OUT_BATCH(state->Tex[i][I830_TEXREG_TM0LI]);
if (state->tex_buffer[i]) {
OUT_RELOC(state->tex_buffer[i],
DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_READ,
DRM_BO_MASK_MEM | DRM_BO_FLAG_READ,
state->tex_offset[i] | TM0S0_USE_FENCE);
}
else {
assert(i == 0);
assert(state == &i830->meta);
OUT_BATCH(0);
}
OUT_BATCH(state->Tex[i][I830_TEXREG_TM0S1]);
OUT_BATCH(state->Tex[i][I830_TEXREG_TM0S2]);
OUT_BATCH(state->Tex[i][I830_TEXREG_TM0S3]);
OUT_BATCH(state->Tex[i][I830_TEXREG_TM0S4]);
OUT_BATCH(state->Tex[i][I830_TEXREG_MCS]);
OUT_BATCH(state->Tex[i][I830_TEXREG_CUBE]);
}
if (dirty & I830_UPLOAD_TEXBLEND(i)) {
if (VERBOSE) fprintf(stderr, "I830_UPLOAD_TEXBLEND(%d):\n", i);
emit( i830, state->TexBlend[i],
state->TexBlendWordsUsed[i] * 4 );
DBG("I830_UPLOAD_TEXBLEND(%d): %d words\n", i,
state->TexBlendWordsUsed[i]);
emit(i830, state->TexBlend[i], state->TexBlendWordsUsed[i] * 4);
}
}
state->emitted |= dirty;
intel->batch.last_emit_state = counter;
assert(counter == intel->batch.counter);
}
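/* Ordering note (a sketch of the contract above, using only the calls
 * already shown): reserving batchbuffer space can itself trigger a
 * flush, and a flush raises fresh dirty bits, so the dirty mask must
 * only be sampled after space is secured:
 *
 *    intel_batchbuffer_require_space(intel->batch, get_state_size(state), 0);
 *    dirty = state->active & ~state->emitted;  // recompute after a possible flush
 *    ... emit the dirty packets ...
 *    state->emitted |= dirty;
 */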
static void i830_destroy_context( intelContextPtr intel )
static void
i830_destroy_context(struct intel_context *intel)
{
_tnl_free_vertices(&intel->ctx);
}
static void
i830_set_color_region(intelContextPtr intel, const intelRegion *region)
i830_set_draw_region(struct intel_context *intel,
struct intel_region *draw_region,
struct intel_region *depth_region)
{
i830ContextPtr i830 = I830_CONTEXT(intel);
I830_STATECHANGE( i830, I830_UPLOAD_BUFFERS );
struct i830_context *i830 = i830_context(&intel->ctx);
intel_region_release(intel->intelScreen, &i830->state.draw_region);
intel_region_release(intel->intelScreen, &i830->state.depth_region);
intel_region_reference(&i830->state.draw_region, draw_region);
intel_region_reference(&i830->state.depth_region, depth_region);
/* XXX FBO: Need code from i915_set_draw_region() */
I830_STATECHANGE(i830, I830_UPLOAD_BUFFERS);
I830_STATECHANGE(i830, I830_UPLOAD_BUFFERS);
i830->state.Buffer[I830_DESTREG_CBUFADDR1] =
(BUF_3D_ID_COLOR_BACK | BUF_3D_PITCH(region->pitch) | BUF_3D_USE_FENCE);
i830->state.Buffer[I830_DESTREG_CBUFADDR2] = region->offset;
}
static void
i830_set_z_region(intelContextPtr intel, const intelRegion *region)
{
i830ContextPtr i830 = I830_CONTEXT(intel);
I830_STATECHANGE( i830, I830_UPLOAD_BUFFERS );
(BUF_3D_ID_COLOR_BACK | BUF_3D_PITCH(draw_region->pitch) |
BUF_3D_USE_FENCE);
i830->state.Buffer[I830_DESTREG_DBUFADDR1] =
(BUF_3D_ID_DEPTH | BUF_3D_PITCH(region->pitch) | BUF_3D_USE_FENCE);
i830->state.Buffer[I830_DESTREG_DBUFADDR2] = region->offset;
(BUF_3D_ID_DEPTH | BUF_3D_PITCH(depth_region->pitch) |
BUF_3D_USE_FENCE);
}
#if 0
static void
i830_update_color_z_regions(intelContextPtr intel,
const intelRegion *colorRegion,
const intelRegion *depthRegion)
const intelRegion * colorRegion,
const intelRegion * depthRegion)
{
i830ContextPtr i830 = I830_CONTEXT(intel);
i830->state.Buffer[I830_DESTREG_CBUFADDR1] =
(BUF_3D_ID_COLOR_BACK | BUF_3D_PITCH(colorRegion->pitch) | BUF_3D_USE_FENCE);
(BUF_3D_ID_COLOR_BACK | BUF_3D_PITCH(colorRegion->pitch) |
BUF_3D_USE_FENCE);
i830->state.Buffer[I830_DESTREG_CBUFADDR2] = colorRegion->offset;
i830->state.Buffer[I830_DESTREG_DBUFADDR1] =
(BUF_3D_ID_DEPTH | BUF_3D_PITCH(depthRegion->pitch) | BUF_3D_USE_FENCE);
i830->state.Buffer[I830_DESTREG_DBUFADDR2] = depthRegion->offset;
}
#endif
/* This isn't really handled at the moment.
*/
static void i830_lost_hardware( intelContextPtr intel )
static void
i830_lost_hardware(struct intel_context *intel)
{
I830_CONTEXT(intel)->state.emitted = 0;
struct i830_context *i830 = i830_context(&intel->ctx);
i830->state.emitted = 0;
}
static void i830_emit_flush( intelContextPtr intel )
static GLuint
i830_flush_cmd(void)
{
BATCH_LOCALS;
BEGIN_BATCH(2);
OUT_BATCH( MI_FLUSH | FLUSH_MAP_CACHE );
OUT_BATCH( 0 );
ADVANCE_BATCH();
return MI_FLUSH | FLUSH_MAP_CACHE;
}
void i830InitVtbl( i830ContextPtr i830 )
static void
i830_assert_not_dirty( struct intel_context *intel )
{
struct i830_context *i830 = i830_context(&intel->ctx);
struct i830_hw_state *state = i830->current;
GLuint dirty = state->active & ~state->emitted;
assert(!dirty);
}
void
i830InitVtbl(struct i830_context *i830)
{
i830->intel.vtbl.alloc_tex_obj = i830AllocTexObj;
i830->intel.vtbl.check_vertex_size = i830_check_vertex_size;
i830->intel.vtbl.clear_with_tris = i830ClearWithTris;
i830->intel.vtbl.rotate_window = i830RotateWindow;
i830->intel.vtbl.destroy = i830_destroy_context;
i830->intel.vtbl.emit_invarient_state = i830_emit_invarient_state;
i830->intel.vtbl.emit_state = i830_emit_state;
i830->intel.vtbl.lost_hardware = i830_lost_hardware;
i830->intel.vtbl.reduced_primitive_state = i830_reduced_primitive_state;
i830->intel.vtbl.set_color_region = i830_set_color_region;
i830->intel.vtbl.set_z_region = i830_set_z_region;
i830->intel.vtbl.update_color_z_regions = i830_update_color_z_regions;
i830->intel.vtbl.set_draw_region = i830_set_draw_region;
i830->intel.vtbl.update_texture_state = i830UpdateTextureState;
i830->intel.vtbl.emit_flush = i830_emit_flush;
i830->intel.vtbl.flush_cmd = i830_flush_cmd;
i830->intel.vtbl.render_start = i830_render_start;
i830->intel.vtbl.assert_not_dirty = i830_assert_not_dirty;
}


@@ -41,77 +41,88 @@
#include "utils.h"
#include "i915_reg.h"
#include "intel_regions.h"
#include "intel_batchbuffer.h"
/***************************************
* Mesa's Driver Functions
***************************************/
static const struct dri_extension i915_extensions[] =
{
{ "GL_ARB_depth_texture", NULL },
{ "GL_ARB_fragment_program", NULL },
{ "GL_ARB_shadow", NULL },
{ "GL_EXT_shadow_funcs", NULL },
/* ARB extn won't work if not enabled */
{ "GL_SGIX_depth_texture", NULL },
{ NULL, NULL }
static const struct dri_extension i915_extensions[] = {
{"GL_ARB_depth_texture", NULL},
{"GL_ARB_fragment_program", NULL},
{"GL_ARB_shadow", NULL},
{"GL_ARB_texture_env_crossbar", NULL},
{"GL_ARB_texture_non_power_of_two", NULL},
{"GL_EXT_shadow_funcs", NULL},
/* ARB extn won't work if not enabled */
{"GL_SGIX_depth_texture", NULL},
{NULL, NULL}
};
/* Override intel default.
*/
static void i915InvalidateState( GLcontext *ctx, GLuint new_state )
static void
i915InvalidateState(GLcontext * ctx, GLuint new_state)
{
_swrast_InvalidateState( ctx, new_state );
_swsetup_InvalidateState( ctx, new_state );
_ac_InvalidateState( ctx, new_state );
_tnl_InvalidateState( ctx, new_state );
_tnl_invalidate_vertex_state( ctx, new_state );
INTEL_CONTEXT(ctx)->NewGLState |= new_state;
_swrast_InvalidateState(ctx, new_state);
_swsetup_InvalidateState(ctx, new_state);
_ac_InvalidateState(ctx, new_state);
_tnl_InvalidateState(ctx, new_state);
_tnl_invalidate_vertex_state(ctx, new_state);
intel_context(ctx)->NewGLState |= new_state;
/* Todo: gather state values under which tracked parameters become
* invalidated, add callbacks for things like
* ProgramLocalParameters, etc.
*/
{
struct i915_fragment_program *p =
(struct i915_fragment_program *)ctx->FragmentProgram._Current;
struct i915_fragment_program *p =
(struct i915_fragment_program *) ctx->FragmentProgram._Current;
if (p && p->nr_params)
p->params_uptodate = 0;
p->params_uptodate = 0;
}
if (new_state & (_NEW_FOG|_NEW_HINT|_NEW_PROGRAM))
if (new_state & (_NEW_FOG | _NEW_HINT | _NEW_PROGRAM))
i915_update_fog(ctx);
}
static void i915InitDriverFunctions( struct dd_function_table *functions )
static void
i915InitDriverFunctions(struct dd_function_table *functions)
{
intelInitDriverFunctions( functions );
i915InitStateFunctions( functions );
i915InitTextureFuncs( functions );
i915InitFragProgFuncs( functions );
intelInitDriverFunctions(functions);
i915InitStateFunctions(functions);
i915InitTextureFuncs(functions);
i915InitFragProgFuncs(functions);
functions->UpdateState = i915InvalidateState;
}
GLboolean i915CreateContext( const __GLcontextModes *mesaVis,
__DRIcontextPrivate *driContextPriv,
void *sharedContextPrivate)
GLboolean
i915CreateContext(const __GLcontextModes * mesaVis,
__DRIcontextPrivate * driContextPriv,
void *sharedContextPrivate)
{
struct dd_function_table functions;
i915ContextPtr i915 = (i915ContextPtr) CALLOC_STRUCT(i915_context);
intelContextPtr intel = &i915->intel;
struct i915_context *i915 =
(struct i915_context *) CALLOC_STRUCT(i915_context);
struct intel_context *intel = &i915->intel;
GLcontext *ctx = &intel->ctx;
GLuint i;
if (!i915) return GL_FALSE;
if (!i915)
return GL_FALSE;
i915InitVtbl( i915 );
_mesa_printf("\ntexmem-0-3 branch\n\n");
i915InitDriverFunctions( &functions );
i915InitVtbl(i915);
i915InitMetaFuncs(i915);
if (!intelInitContext( intel, mesaVis, driContextPriv,
sharedContextPrivate, &functions )) {
i915InitDriverFunctions(&functions);
if (!intelInitContext(intel, mesaVis, driContextPriv,
sharedContextPrivate, &functions)) {
FREE(i915);
return GL_FALSE;
}
@@ -120,63 +131,44 @@ GLboolean i915CreateContext( const __GLcontextModes *mesaVis,
ctx->Const.MaxTextureImageUnits = I915_TEX_UNITS;
ctx->Const.MaxTextureCoordUnits = I915_TEX_UNITS;
intel->nr_heaps = 1;
intel->texture_heaps[0] =
driCreateTextureHeap( 0, intel,
intel->intelScreen->tex.size,
12,
I830_NR_TEX_REGIONS,
intel->sarea->texList,
(unsigned *) & intel->sarea->texAge,
& intel->swapped,
sizeof( struct i915_texture_object ),
(destroy_texture_object_t *)intelDestroyTexObj );
/* FIXME: driCalculateMaxTextureLevels assumes that mipmaps are
 * tightly packed, but they're not in Intel graphics
 * hardware.
 */
/* Advertise the full hardware capabilities. The new memory
 * manager should cope much better with overload situations:
 */
ctx->Const.MaxTextureLevels = 12;
ctx->Const.Max3DTextureLevels = 9;
ctx->Const.MaxCubeTextureLevels = 12;
ctx->Const.MaxTextureRectSize = (1 << 11);
ctx->Const.MaxTextureUnits = I915_TEX_UNITS;
i = driQueryOptioni( &intel->intelScreen->optionCache, "allow_large_textures");
driCalculateMaxTextureLevels( intel->texture_heaps,
intel->nr_heaps,
&intel->ctx.Const,
4,
11, /* max 2D texture size is 2048x2048 */
8, /* 3D texture */
11, /* cube texture. */
11, /* rect texture */
12,
GL_FALSE,
i );
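/* For reference: MaxTextureLevels = 12 means levels 0..11, i.e. a
 * 2048x2048 base level, consistent with MaxTextureRectSize = (1 << 11)
 * and the "max 2D texture size is 2048x2048" value that the removed
 * driCalculateMaxTextureLevels() call used to pass.
 */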
/* GL_ARB_fragment_program limits - don't think Mesa actually
* validates programs against these, and in any case one ARB
* instruction can translate to more than one HW instruction, so
* we'll still have to check and fallback each time.
*/
ctx->Const.FragmentProgram.MaxNativeTemps = I915_MAX_TEMPORARY;
ctx->Const.FragmentProgram.MaxNativeAttribs = 11; /* 8 tex, 2 color, fog */
ctx->Const.FragmentProgram.MaxNativeAttribs = 11; /* 8 tex, 2 color, fog */
ctx->Const.FragmentProgram.MaxNativeParameters = I915_MAX_CONSTANT;
ctx->Const.FragmentProgram.MaxNativeAluInstructions = I915_MAX_ALU_INSN;
ctx->Const.FragmentProgram.MaxNativeTexInstructions = I915_MAX_TEX_INSN;
ctx->Const.FragmentProgram.MaxNativeInstructions = (I915_MAX_ALU_INSN +
I915_MAX_TEX_INSN);
ctx->Const.FragmentProgram.MaxNativeTexIndirections = I915_MAX_TEX_INDIRECT;
ctx->Const.FragmentProgram.MaxNativeInstructions = (I915_MAX_ALU_INSN +
I915_MAX_TEX_INSN);
ctx->Const.FragmentProgram.MaxNativeTexIndirections =
I915_MAX_TEX_INDIRECT;
ctx->Const.FragmentProgram.MaxNativeAddressRegs = 0; /* I don't think we have one */
ctx->_MaintainTexEnvProgram = 1;
ctx->_UseTexEnvProgram = 1;
driInitExtensions( ctx, i915_extensions, GL_FALSE );
driInitExtensions(ctx, i915_extensions, GL_FALSE);
_tnl_init_vertices( ctx, ctx->Const.MaxArrayLockSize + 12,
36 * sizeof(GLfloat) );
_tnl_init_vertices(ctx, ctx->Const.MaxArrayLockSize + 12,
36 * sizeof(GLfloat));
intel->verts = TNL_CONTEXT(ctx)->clipspace.vertex_buf;
i915InitState( i915 );
i915InitState(i915);
return GL_TRUE;
}


@@ -45,6 +45,8 @@
#define I915_UPLOAD_PROGRAM 0x8
#define I915_UPLOAD_CONSTANTS 0x10
#define I915_UPLOAD_FOG 0x20
#define I915_UPLOAD_INVARIENT 0x40
#define I915_UPLOAD_DEFAULTS 0x80
#define I915_UPLOAD_TEX(i) (0x00010000<<(i))
#define I915_UPLOAD_TEX_ALL (0x00ff0000)
#define I915_UPLOAD_TEX_0_SHIFT 16
@@ -54,10 +56,8 @@
*/
#define I915_DESTREG_CBUFADDR0 0
#define I915_DESTREG_CBUFADDR1 1
#define I915_DESTREG_CBUFADDR2 2
#define I915_DESTREG_DBUFADDR0 3
#define I915_DESTREG_DBUFADDR1 4
#define I915_DESTREG_DBUFADDR2 5
#define I915_DESTREG_DV0 6
#define I915_DESTREG_DV1 7
#define I915_DESTREG_SENABLE 8
@@ -88,7 +88,6 @@
#define I915_STPREG_ST1 1
#define I915_STP_SETUP_SIZE 2
#define I915_TEXREG_MS2 0
#define I915_TEXREG_MS3 1
#define I915_TEXREG_MS4 2
#define I915_TEXREG_SS2 3
@@ -96,6 +95,15 @@
#define I915_TEXREG_SS4 5
#define I915_TEX_SETUP_SIZE 6
#define I915_DEFREG_C0 0
#define I915_DEFREG_C1 1
#define I915_DEFREG_S0 2
#define I915_DEFREG_S1 3
#define I915_DEFREG_Z0 4
#define I915_DEFREG_Z1 5
#define I915_DEF_SETUP_SIZE 6
#define I915_MAX_CONSTANT 32
#define I915_CONSTANT_SIZE (2+(4*I915_MAX_CONSTANT))
@@ -106,13 +114,14 @@
/* Hardware version of a parsed fragment program. "Derived" from the
* mesa fragment_program struct.
*/
struct i915_fragment_program {
struct i915_fragment_program
{
struct gl_fragment_program FragProg;
GLboolean translated;
GLboolean params_uptodate;
GLboolean on_hardware;
GLboolean error; /* If program is malformed for any reason. */
GLboolean error; /* If program is malformed for any reason. */
GLuint nr_tex_indirect;
GLuint nr_tex_insn;
@@ -134,22 +143,22 @@ struct i915_fragment_program {
GLuint constant_flags[I915_MAX_CONSTANT];
GLuint nr_constants;
GLuint *csr; /* Cursor, points into program.
*/
GLuint *csr; /* Cursor, points into program.
*/
GLuint *decl; /* Cursor, points into declarations.
*/
GLuint decl_s; /* flags for which s regs need to be decl'd */
GLuint decl_t; /* flags for which t regs need to be decl'd */
GLuint *decl; /* Cursor, points into declarations.
*/
GLuint temp_flag; /* Tracks temporary regs which are in
* use.
*/
GLuint decl_s; /* flags for which s regs need to be decl'd */
GLuint decl_t; /* flags for which t regs need to be decl'd */
GLuint utemp_flag; /* Tracks TYPE_U temporary regs which are in
* use.
*/
GLuint temp_flag; /* Tracks temporary regs which are in
* use.
*/
GLuint utemp_flag; /* Tracks TYPE_U temporary regs which are in
* use.
*/
@@ -158,26 +167,25 @@ struct i915_fragment_program {
GLuint wpos_tex;
GLboolean depth_written;
struct {
GLuint reg; /* Hardware constant idx */
const GLfloat *values; /* Pointer to tracked values */
struct
{
GLuint reg; /* Hardware constant idx */
const GLfloat *values; /* Pointer to tracked values */
} param[I915_MAX_CONSTANT];
GLuint nr_params;
/* Helpers for i915_texprog.c:
*/
GLuint src_texture; /* Reg containing sampled texture color,
* else UREG_BAD.
*/
GLuint src_texture; /* Reg containing sampled texture color,
* else UREG_BAD.
*/
GLuint src_previous; /* Reg containing color from previous
* stage. May need to be decl'd.
*/
GLuint src_previous; /* Reg containing color from previous
* stage. May need to be decl'd.
*/
GLuint last_tex_stage; /* Number of last enabled texture unit */
GLuint last_tex_stage; /* Number of last enabled texture unit */
struct vertex_buffer *VB;
};
@@ -187,41 +195,53 @@ struct i915_fragment_program {
struct i915_texture_object
{
struct intel_texture_object intel;
GLenum lastTarget;
GLboolean refs_border_color;
GLuint Setup[I915_TEX_SETUP_SIZE];
};
#define I915_TEX_UNITS 8
struct i915_hw_state {
struct i915_hw_state
{
GLuint Ctx[I915_CTX_SETUP_SIZE];
GLuint Buffer[I915_DEST_SETUP_SIZE];
GLuint Stipple[I915_STP_SETUP_SIZE];
GLuint Fog[I915_FOG_SETUP_SIZE];
GLuint Defaults[I915_DEF_SETUP_SIZE];
GLuint Tex[I915_TEX_UNITS][I915_TEX_SETUP_SIZE];
GLuint Constant[I915_CONSTANT_SIZE];
GLuint ConstantSize;
GLuint Program[I915_PROGRAM_SIZE];
GLuint ProgramSize;
GLuint active; /* I915_UPLOAD_* */
GLuint emitted; /* I915_UPLOAD_* */
/* Region pointers for relocation:
*/
struct intel_region *draw_region;
struct intel_region *depth_region;
/* struct intel_region *tex_region[I915_TEX_UNITS]; */
/* Regions aren't actually that appropriate here as the memory may
* be from a PBO or FBO. Just use the buffer id. Will have to do
* this for draw and depth for FBO's...
*/
struct _DriBufferObject *tex_buffer[I915_TEX_UNITS];
GLuint tex_offset[I915_TEX_UNITS];
GLuint active; /* I915_UPLOAD_* */
GLuint emitted; /* I915_UPLOAD_* */
};
#define I915_FOG_PIXEL 2
#define I915_FOG_VERTEX 1
#define I915_FOG_NONE 0
struct i915_context
struct i915_context
{
struct intel_context intel;
GLuint last_ReallyEnabled;
GLuint vertex_fog;
GLuint lodbias_ss2[MAX_TEXTURE_UNITS];
struct i915_fragment_program tex_program;
struct i915_fragment_program *current_program;
@@ -230,24 +250,14 @@ struct i915_context
};
typedef struct i915_context *i915ContextPtr;
typedef struct i915_texture_object *i915TextureObjectPtr;
#define I915_CONTEXT(ctx) ((i915ContextPtr)(ctx))
#define I915_STATECHANGE(i915, flag) \
do { \
if (0) fprintf(stderr, "I915_STATECHANGE %x in %s\n", flag, __FUNCTION__); \
INTEL_FIREVERTICES( &(i915)->intel ); \
(i915)->state.emitted &= ~(flag); \
} while (0)
#define I915_ACTIVESTATE(i915, flag, mode) \
do { \
if (0) fprintf(stderr, "I915_ACTIVESTATE %x %d in %s\n", \
flag, mode, __FUNCTION__); \
INTEL_FIREVERTICES( &(i915)->intel ); \
if (mode) \
(i915)->state.active |= (flag); \
@@ -259,7 +269,13 @@ do { \
/*======================================================================
* i915_vtbl.c
*/
extern void i915InitVtbl( i915ContextPtr i915 );
extern void i915InitVtbl(struct i915_context *i915);
extern void
i915_state_draw_region(struct intel_context *intel,
struct i915_hw_state *state,
struct intel_region *color_region,
struct intel_region *depth_region);
@@ -288,70 +304,64 @@ do { \
/*======================================================================
* i915_context.c
*/
extern GLboolean i915CreateContext( const __GLcontextModes *mesaVis,
__DRIcontextPrivate *driContextPriv,
void *sharedContextPrivate);
extern GLboolean i915CreateContext(const __GLcontextModes * mesaVis,
__DRIcontextPrivate * driContextPriv,
void *sharedContextPrivate);
/*======================================================================
* i915_texprog.c
*/
extern void i915ValidateTextureProgram( i915ContextPtr i915 );
extern void i915ValidateTextureProgram(struct i915_context *i915);
/*======================================================================
* i915_debug.c
*/
extern void i915_disassemble_program( const GLuint *program, GLuint sz );
extern void i915_print_ureg( const char *msg, GLuint ureg );
extern void i915_disassemble_program(const GLuint * program, GLuint sz);
extern void i915_print_ureg(const char *msg, GLuint ureg);
/*======================================================================
* i915_state.c
*/
extern void i915InitStateFunctions( struct dd_function_table *functions );
extern void i915InitState( i915ContextPtr i915 );
extern void i915_update_fog(GLcontext *ctxx);
extern void i915InitStateFunctions(struct dd_function_table *functions);
extern void i915InitState(struct i915_context *i915);
extern void i915_update_fog(GLcontext * ctx);
/*======================================================================
* i915_tex.c
*/
extern void i915UpdateTextureState( intelContextPtr intel );
extern void i915InitTextureFuncs( struct dd_function_table *functions );
extern intelTextureObjectPtr i915AllocTexObj( struct gl_texture_object *texObj );
extern void i915UpdateTextureState(struct intel_context *intel);
extern void i915InitTextureFuncs(struct dd_function_table *functions);
/*======================================================================
* i915_metaops.c
*/
extern GLboolean
i915TryTextureReadPixels( GLcontext *ctx,
GLint x, GLint y, GLsizei width, GLsizei height,
GLenum format, GLenum type,
const struct gl_pixelstore_attrib *pack,
GLvoid *pixels );
void i915InitMetaFuncs(struct i915_context *i915);
extern GLboolean
i915TryTextureDrawPixels( GLcontext *ctx,
GLint x, GLint y, GLsizei width, GLsizei height,
GLenum format, GLenum type,
const struct gl_pixelstore_attrib *unpack,
const GLvoid *pixels );
extern void
i915ClearWithTris( intelContextPtr intel, GLbitfield mask,
GLboolean all, GLint cx, GLint cy, GLint cw, GLint ch);
extern void
i915RotateWindow(intelContextPtr intel, __DRIdrawablePrivate *dPriv,
GLuint srcBuf);
/*======================================================================
* i915_fragprog.c
*/
extern void i915ValidateFragmentProgram( i915ContextPtr i915 );
extern void i915InitFragProgFuncs( struct dd_function_table *functions );
#endif
extern void i915ValidateFragmentProgram(struct i915_context *i915);
extern void i915InitFragProgFuncs(struct dd_function_table *functions);
/*======================================================================
* Inline conversion functions. These are better-typed than the
* macros used previously:
*/
static INLINE struct i915_context *
i915_context(GLcontext * ctx)
{
return (struct i915_context *) ctx;
}
#define I915_CONTEXT(ctx) i915_context(ctx)
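/* Usage sketch (hypothetical callback, illustrative only): driver entry
 * points that receive a GLcontext can now recover the driver context
 * with a typed call instead of a bare cast:
 *
 *    static void example_invalidate(GLcontext *ctx, GLuint new_state)
 *    {
 *       struct i915_context *i915 = i915_context(ctx);
 *       struct intel_context *intel = &i915->intel;
 *       (void) intel; (void) new_state;
 *    }
 */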
#endif


@@ -67,27 +67,27 @@ static const char *opcodes[0x20] = {
static const int args[0x20] = {
0, /* 0 nop */
2, /* 1 add */
1, /* 2 mov */
2, /* 3 mul */
3, /* 4 mad */
3, /* 5 dp2add */
2, /* 6 dp3 */
2, /* 7 dp4 */
1, /* 8 frc */
1, /* 9 rcp */
1, /* a rsq */
1, /* b exp */
1, /* c log */
3, /* d cmp */
2, /* e min */
2, /* f max */
1, /* 10 flr */
1, /* 11 mod */
1, /* 12 trc */
2, /* 13 sge */
2, /* 14 slt */
0, /* 0 nop */
2, /* 1 add */
1, /* 2 mov */
2, /* 3 mul */
3, /* 4 mad */
3, /* 5 dp2add */
2, /* 6 dp3 */
2, /* 7 dp4 */
1, /* 8 frc */
1, /* 9 rcp */
1, /* a rsq */
1, /* b exp */
1, /* c log */
3, /* d cmp */
2, /* e min */
2, /* f max */
1, /* 10 flr */
1, /* 11 mod */
1, /* 12 trc */
2, /* 13 sge */
2, /* 14 slt */
1,
1,
1,
@@ -113,26 +113,35 @@ static const char *regname[0x8] = {
"UNKNOWN",
};
static void print_reg_type_nr( GLuint type, GLuint nr )
static void
print_reg_type_nr(GLuint type, GLuint nr)
{
switch (type) {
case REG_TYPE_T:
switch (nr) {
case T_DIFFUSE: fprintf(stderr, "T_DIFFUSE"); return;
case T_SPECULAR: fprintf(stderr, "T_SPECULAR"); return;
case T_FOG_W: fprintf(stderr, "T_FOG_W"); return;
default: fprintf(stderr, "T_TEX%d", nr); return;
case T_DIFFUSE:
fprintf(stderr, "T_DIFFUSE");
return;
case T_SPECULAR:
fprintf(stderr, "T_SPECULAR");
return;
case T_FOG_W:
fprintf(stderr, "T_FOG_W");
return;
default:
fprintf(stderr, "T_TEX%d", nr);
return;
}
case REG_TYPE_OC:
if (nr == 0) {
fprintf(stderr, "oC");
return;
fprintf(stderr, "oC");
return;
}
break;
case REG_TYPE_OD:
if (nr == 0) {
fprintf(stderr, "oD");
return;
fprintf(stderr, "oD");
return;
}
break;
default:
@@ -151,7 +160,8 @@ static void print_reg_type_nr( GLuint type, GLuint nr )
(SRC_W << A2_SRC2_CHANNEL_W_SHIFT))
static void print_reg_neg_swizzle( GLuint reg )
static void
print_reg_neg_swizzle(GLuint reg)
{
int i;
@@ -161,50 +171,71 @@ static void print_reg_neg_swizzle( GLuint reg )
fprintf(stderr, ".");
for (i = 3 ; i >= 0; i--) {
if (reg & (1<<((i*4)+3)))
fprintf(stderr, "-");
switch ((reg>>(i*4)) & 0x7) {
case 0: fprintf(stderr, "x"); break;
case 1: fprintf(stderr, "y"); break;
case 2: fprintf(stderr, "z"); break;
case 3: fprintf(stderr, "w"); break;
case 4: fprintf(stderr, "0"); break;
case 5: fprintf(stderr, "1"); break;
default: fprintf(stderr, "?"); break;
for (i = 3; i >= 0; i--) {
if (reg & (1 << ((i * 4) + 3)))
fprintf(stderr, "-");
switch ((reg >> (i * 4)) & 0x7) {
case 0:
fprintf(stderr, "x");
break;
case 1:
fprintf(stderr, "y");
break;
case 2:
fprintf(stderr, "z");
break;
case 3:
fprintf(stderr, "w");
break;
case 4:
fprintf(stderr, "0");
break;
case 5:
fprintf(stderr, "1");
break;
default:
fprintf(stderr, "?");
break;
}
}
}
static void print_src_reg( GLuint dword )
static void
print_src_reg(GLuint dword)
{
GLuint nr = (dword >> A2_SRC2_NR_SHIFT) & REG_NR_MASK;
GLuint type = (dword >> A2_SRC2_TYPE_SHIFT) & REG_TYPE_MASK;
print_reg_type_nr( type, nr );
print_reg_neg_swizzle( dword );
print_reg_type_nr(type, nr);
print_reg_neg_swizzle(dword);
}
void i915_print_ureg( const char *msg, GLuint ureg )
void
i915_print_ureg(const char *msg, GLuint ureg)
{
fprintf(stderr, "%s: ", msg);
print_src_reg( ureg >> 8 );
print_src_reg(ureg >> 8);
fprintf(stderr, "\n");
}
static void print_dest_reg( GLuint dword )
static void
print_dest_reg(GLuint dword)
{
GLuint nr = (dword >> A0_DEST_NR_SHIFT) & REG_NR_MASK;
GLuint type = (dword >> A0_DEST_TYPE_SHIFT) & REG_TYPE_MASK;
print_reg_type_nr( type, nr );
print_reg_type_nr(type, nr);
if ((dword & A0_DEST_CHANNEL_ALL) == A0_DEST_CHANNEL_ALL)
return;
fprintf(stderr, ".");
if (dword & A0_DEST_CHANNEL_X) fprintf(stderr, "x");
if (dword & A0_DEST_CHANNEL_Y) fprintf(stderr, "y");
if (dword & A0_DEST_CHANNEL_Z) fprintf(stderr, "z");
if (dword & A0_DEST_CHANNEL_W) fprintf(stderr, "w");
if (dword & A0_DEST_CHANNEL_X)
fprintf(stderr, "x");
if (dword & A0_DEST_CHANNEL_Y)
fprintf(stderr, "y");
if (dword & A0_DEST_CHANNEL_Z)
fprintf(stderr, "z");
if (dword & A0_DEST_CHANNEL_W)
fprintf(stderr, "w");
}
@@ -213,14 +244,15 @@ static void print_dest_reg( GLuint dword )
#define GET_SRC2_REG(r) (r)
static void print_arith_op( GLuint opcode, const GLuint *program )
static void
print_arith_op(GLuint opcode, const GLuint * program)
{
if (opcode != A0_NOP) {
print_dest_reg(program[0]);
if (program[0] & A0_DEST_SATURATE)
fprintf(stderr, " = SATURATE ");
fprintf(stderr, " = SATURATE ");
else
fprintf(stderr, " = ");
fprintf(stderr, " = ");
}
fprintf(stderr, "%s ", opcodes[opcode]);
@@ -233,7 +265,7 @@ static void print_arith_op( GLuint opcode, const GLuint *program )
fprintf(stderr, ", ");
print_src_reg(GET_SRC1_REG(program[1], program[2]));
if (args[opcode] == 2) {
if (args[opcode] == 2) {
fprintf(stderr, "\n");
return;
}
@@ -245,22 +277,24 @@ static void print_arith_op( GLuint opcode, const GLuint *program )
}
static void print_tex_op( GLuint opcode, const GLuint *program )
static void
print_tex_op(GLuint opcode, const GLuint * program)
{
print_dest_reg(program[0] | A0_DEST_CHANNEL_ALL);
fprintf(stderr, " = ");
fprintf(stderr, "%s ", opcodes[opcode]);
fprintf(stderr, "S[%d],",
program[0] & T0_SAMPLER_NR_MASK);
fprintf(stderr, "S[%d],", program[0] & T0_SAMPLER_NR_MASK);
print_reg_type_nr( (program[1]>>T1_ADDRESS_REG_TYPE_SHIFT) & REG_TYPE_MASK,
(program[1]>>T1_ADDRESS_REG_NR_SHIFT) & REG_NR_MASK );
print_reg_type_nr((program[1] >> T1_ADDRESS_REG_TYPE_SHIFT) &
REG_TYPE_MASK,
(program[1] >> T1_ADDRESS_REG_NR_SHIFT) & REG_NR_MASK);
fprintf(stderr, "\n");
}
static void print_dcl_op( GLuint opcode, const GLuint *program )
static void
print_dcl_op(GLuint opcode, const GLuint * program)
{
fprintf(stderr, "%s ", opcodes[opcode]);
print_dest_reg(program[0] | A0_DEST_CHANNEL_ALL);
@@ -268,31 +302,32 @@ static void print_dcl_op( GLuint opcode, const GLuint *program )
}
void i915_disassemble_program( const GLuint *program, GLuint sz )
void
i915_disassemble_program(const GLuint * program, GLuint sz)
{
GLuint size = program[0] & 0x1ff;
GLint i;
fprintf(stderr, "BEGIN\n");
if (size+2 != sz) {
if (size + 2 != sz) {
fprintf(stderr, "%s: program size mismatch %d/%d\n", __FUNCTION__,
size+2, sz);
size + 2, sz);
exit(1);
}
program ++;
for (i = 1 ; i < sz ; i+=3, program+=3) {
GLuint opcode = program[0] & (0x1f<<24);
program++;
for (i = 1; i < sz; i += 3, program += 3) {
GLuint opcode = program[0] & (0x1f << 24);
if ((GLint) opcode >= A0_NOP && opcode <= A0_SLT)
print_arith_op(opcode >> 24, program);
print_arith_op(opcode >> 24, program);
else if (opcode >= T0_TEXLD && opcode <= T0_TEXKILL)
print_tex_op(opcode >> 24, program);
print_tex_op(opcode >> 24, program);
else if (opcode == D0_DCL)
print_dcl_op(opcode >> 24, program);
else
fprintf(stderr, "Unknown opcode 0x%x\n", opcode);
print_dcl_op(opcode >> 24, program);
else
fprintf(stderr, "Unknown opcode 0x%x\n", opcode);
}
fprintf(stderr, "END\n\n");

File diff suppressed because it is too large


@@ -34,127 +34,170 @@
#include "intel_screen.h"
#include "intel_batchbuffer.h"
#include "intel_ioctl.h"
#include "intel_regions.h"
#include "intel_rotate.h"
#include "i915_context.h"
#include "i915_reg.h"
/* A large amount of state doesn't need to be uploaded.
 */
/* We touch almost everything:
 */
#define ACTIVE (I915_UPLOAD_PROGRAM | \
I915_UPLOAD_STIPPLE | \
#define ACTIVE (I915_UPLOAD_INVARIENT | \
I915_UPLOAD_CTX | \
I915_UPLOAD_BUFFERS | \
I915_UPLOAD_TEX(0))
I915_UPLOAD_STIPPLE | \
I915_UPLOAD_PROGRAM | \
I915_UPLOAD_FOG | \
I915_UPLOAD_TEX(0))
#define SET_STATE( i915, STATE ) \
#define SET_STATE( i915, STATE ) \
do { \
i915->current->emitted &= ~ACTIVE; \
i915->current = &i915->STATE; \
i915->current = &i915->STATE; \
i915->current->emitted &= ~ACTIVE; \
} while (0)
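/* Illustrative sketch only (not part of this change): a meta operation
 * flips the emit path over to the scratch "meta" state, does its work,
 * and flips back; both transitions knock out the ACTIVE bits so each
 * state set is re-emitted in full.
 */
static void example_meta_op(struct i915_context *i915)
{
   SET_STATE(i915, meta);     /* subsequent emits come from i915->meta */
   /* ... program the meta state and draw ... */
   SET_STATE(i915, state);    /* back to the regular GL-derived state */
}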
/* Operations where the 3D engine is decoupled temporarily from the
* current GL state and used for other purposes than simply rendering
* incoming triangles.
*/
static void set_initial_state( i915ContextPtr i915 )
{
memcpy(&i915->meta, &i915->initial, sizeof(i915->meta) );
i915->meta.active = ACTIVE;
i915->meta.emitted = 0;
}
static void set_no_depth_stencil_write( i915ContextPtr i915 )
static void
meta_no_stencil_write(struct intel_context *intel)
{
struct i915_context *i915 = i915_context(&intel->ctx);
/* ctx->Driver.Enable( ctx, GL_STENCIL_TEST, GL_FALSE )
*/
i915->meta.Ctx[I915_CTXREG_LIS5] &= ~(S5_STENCIL_TEST_ENABLE |
S5_STENCIL_WRITE_ENABLE);
/* ctx->Driver.Enable( ctx, GL_DEPTH_TEST, GL_FALSE )
*/
i915->meta.Ctx[I915_CTXREG_LIS6] &= ~(S6_DEPTH_TEST_ENABLE |
S6_DEPTH_WRITE_ENABLE);
i915->meta.Ctx[I915_CTXREG_LIS5] &= ~(S5_STENCIL_TEST_ENABLE |
S5_STENCIL_WRITE_ENABLE);
i915->meta.emitted &= ~I915_UPLOAD_CTX;
}
static void
meta_no_depth_write(struct intel_context *intel)
{
struct i915_context *i915 = i915_context(&intel->ctx);
/* ctx->Driver.Enable( ctx, GL_DEPTH_TEST, GL_FALSE )
*/
i915->meta.Ctx[I915_CTXREG_LIS6] &= ~(S6_DEPTH_TEST_ENABLE |
S6_DEPTH_WRITE_ENABLE);
i915->meta.emitted &= ~I915_UPLOAD_CTX;
}
static void
meta_depth_replace(struct intel_context *intel)
{
struct i915_context *i915 = i915_context(&intel->ctx);
/* ctx->Driver.Enable( ctx, GL_DEPTH_TEST, GL_TRUE )
* ctx->Driver.DepthMask( ctx, GL_TRUE )
*/
i915->meta.Ctx[I915_CTXREG_LIS6] |= (S6_DEPTH_TEST_ENABLE |
S6_DEPTH_WRITE_ENABLE);
/* ctx->Driver.DepthFunc( ctx, GL_REPLACE )
*/
i915->meta.Ctx[I915_CTXREG_LIS6] &= ~S6_DEPTH_TEST_FUNC_MASK;
i915->meta.Ctx[I915_CTXREG_LIS6] |=
COMPAREFUNC_ALWAYS << S6_DEPTH_TEST_FUNC_SHIFT;
i915->meta.emitted &= ~I915_UPLOAD_CTX;
}
/* Set stencil unit to replace always with the reference value.
*/
static void set_stencil_replace( i915ContextPtr i915,
GLuint s_mask,
GLuint s_clear)
static void
meta_stencil_replace(struct intel_context *intel,
GLuint s_mask, GLuint s_clear)
{
struct i915_context *i915 = i915_context(&intel->ctx);
GLuint op = STENCILOP_REPLACE;
GLuint func = COMPAREFUNC_ALWAYS;
/* ctx->Driver.Enable( ctx, GL_STENCIL_TEST, GL_TRUE )
*/
i915->meta.Ctx[I915_CTXREG_LIS5] |= (S5_STENCIL_TEST_ENABLE |
S5_STENCIL_WRITE_ENABLE);
/* ctx->Driver.Enable( ctx, GL_DEPTH_TEST, GL_FALSE )
*/
i915->meta.Ctx[I915_CTXREG_LIS6] &= ~(S6_DEPTH_TEST_ENABLE |
S6_DEPTH_WRITE_ENABLE);
i915->meta.Ctx[I915_CTXREG_LIS5] |= (S5_STENCIL_TEST_ENABLE |
S5_STENCIL_WRITE_ENABLE);
/* ctx->Driver.StencilMask( ctx, s_mask )
*/
i915->meta.Ctx[I915_CTXREG_STATE4] &= ~MODE4_ENABLE_STENCIL_WRITE_MASK;
i915->meta.Ctx[I915_CTXREG_STATE4] |= (ENABLE_STENCIL_WRITE_MASK |
STENCIL_WRITE_MASK(s_mask));
STENCIL_WRITE_MASK(s_mask));
/* ctx->Driver.StencilOp( ctx, GL_REPLACE, GL_REPLACE, GL_REPLACE )
*/
i915->meta.Ctx[I915_CTXREG_LIS5] &= ~(S5_STENCIL_FAIL_MASK |
S5_STENCIL_PASS_Z_FAIL_MASK |
S5_STENCIL_PASS_Z_PASS_MASK);
S5_STENCIL_PASS_Z_FAIL_MASK |
S5_STENCIL_PASS_Z_PASS_MASK);
i915->meta.Ctx[I915_CTXREG_LIS5] |= ((op << S5_STENCIL_FAIL_SHIFT) |
(op << S5_STENCIL_PASS_Z_FAIL_SHIFT) |
(op << S5_STENCIL_PASS_Z_PASS_SHIFT));
(op << S5_STENCIL_PASS_Z_FAIL_SHIFT) |
(op << S5_STENCIL_PASS_Z_PASS_SHIFT));
/* ctx->Driver.StencilFunc( ctx, GL_ALWAYS, s_ref, ~0 )
*/
i915->meta.Ctx[I915_CTXREG_STATE4] &= ~MODE4_ENABLE_STENCIL_TEST_MASK;
i915->meta.Ctx[I915_CTXREG_STATE4] |= (ENABLE_STENCIL_TEST_MASK |
STENCIL_TEST_MASK(0xff));
STENCIL_TEST_MASK(0xff));
i915->meta.Ctx[I915_CTXREG_LIS5] &= ~(S5_STENCIL_REF_MASK |
S5_STENCIL_TEST_FUNC_MASK);
i915->meta.Ctx[I915_CTXREG_LIS5] |= ((s_clear << S5_STENCIL_REF_SHIFT) |
(func << S5_STENCIL_TEST_FUNC_SHIFT));
S5_STENCIL_TEST_FUNC_MASK);
i915->meta.Ctx[I915_CTXREG_LIS5] |= ((s_clear << S5_STENCIL_REF_SHIFT) |
(func << S5_STENCIL_TEST_FUNC_SHIFT));
i915->meta.emitted &= ~I915_UPLOAD_CTX;
}
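/* Illustrative sketch only (not part of this change): a stencil-only
 * clear would typically combine the helpers above before drawing a
 * quad over the clear rectangle.  'write_mask', 'clear_value' and the
 * quad drawing are hypothetical here.
 */
static void example_stencil_clear(struct intel_context *intel,
                                  GLuint write_mask, GLuint clear_value)
{
   meta_color_mask(intel, GL_FALSE);        /* no color writes */
   meta_no_depth_write(intel);              /* no depth writes */
   meta_stencil_replace(intel, write_mask, clear_value);
   /* ... draw the clear quad, then restore the regular state ... */
}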
static void set_color_mask( i915ContextPtr i915, GLboolean state )
static void
meta_color_mask(struct intel_context *intel, GLboolean state)
{
struct i915_context *i915 = i915_context(&intel->ctx);
const GLuint mask = (S5_WRITEDISABLE_RED |
S5_WRITEDISABLE_GREEN |
S5_WRITEDISABLE_BLUE |
S5_WRITEDISABLE_ALPHA);
S5_WRITEDISABLE_GREEN |
S5_WRITEDISABLE_BLUE | S5_WRITEDISABLE_ALPHA);
/* Copy colormask state from "regular" hw context.
*/
if (state) {
i915->meta.Ctx[I915_CTXREG_LIS5] &= ~mask;
i915->meta.Ctx[I915_CTXREG_LIS5] |=
(i915->state.Ctx[I915_CTXREG_LIS5] & mask);
i915->meta.Ctx[I915_CTXREG_LIS5] |=
(i915->state.Ctx[I915_CTXREG_LIS5] & mask);
}
else
else
i915->meta.Ctx[I915_CTXREG_LIS5] |= mask;
i915->meta.emitted &= ~I915_UPLOAD_CTX;
}
static void
meta_import_pixel_state(struct intel_context *intel)
{
struct i915_context *i915 = i915_context(&intel->ctx);
memcpy(i915->meta.Fog, i915->state.Fog, I915_FOG_SETUP_SIZE * 4);
i915->meta.Ctx[I915_CTXREG_LIS5] = i915->state.Ctx[I915_CTXREG_LIS5];
i915->meta.Ctx[I915_CTXREG_LIS6] = i915->state.Ctx[I915_CTXREG_LIS6];
i915->meta.Ctx[I915_CTXREG_STATE4] = i915->state.Ctx[I915_CTXREG_STATE4];
i915->meta.Ctx[I915_CTXREG_BLENDCOLOR1] =
i915->state.Ctx[I915_CTXREG_BLENDCOLOR1];
i915->meta.Ctx[I915_CTXREG_IAB] = i915->state.Ctx[I915_CTXREG_IAB];
i915->meta.Buffer[I915_DESTREG_SENABLE] =
i915->state.Buffer[I915_DESTREG_SENABLE];
i915->meta.Buffer[I915_DESTREG_SR1] = i915->state.Buffer[I915_DESTREG_SR1];
i915->meta.Buffer[I915_DESTREG_SR2] = i915->state.Buffer[I915_DESTREG_SR2];
i915->meta.emitted &= ~I915_UPLOAD_FOG;
i915->meta.emitted &= ~I915_UPLOAD_BUFFERS;
i915->meta.emitted &= ~I915_UPLOAD_CTX;
}
@@ -211,69 +254,64 @@ static void set_color_mask( i915ContextPtr i915, GLboolean state )
static void set_no_texture( i915ContextPtr i915 )
static void
meta_no_texture(struct intel_context *intel)
{
struct i915_context *i915 = i915_context(&intel->ctx);
static const GLuint prog[] = {
_3DSTATE_PIXEL_SHADER_PROGRAM,
/* Declare incoming diffuse color:
*/
(D0_DCL |
D0_DECL_REG( REG_T_DIFFUSE ) |
D0_CHANNEL_ALL),
(D0_DCL | D0_DECL_REG(REG_T_DIFFUSE) | D0_CHANNEL_ALL),
D1_MBZ,
D2_MBZ,
/* output-color = mov(t_diffuse)
*/
(A0_MOV |
A0_DEST_REG( REG_OC ) |
A0_DEST_CHANNEL_ALL |
A0_SRC0_REG( REG_T_DIFFUSE )),
A0_DEST_REG(REG_OC) |
A0_DEST_CHANNEL_ALL | A0_SRC0_REG(REG_T_DIFFUSE)),
(A1_SRC0_XYZW),
0,
};
memcpy( i915->meta.Program, prog, sizeof(prog) );
memcpy(i915->meta.Program, prog, sizeof(prog));
i915->meta.ProgramSize = sizeof(prog) / sizeof(*prog);
i915->meta.Program[0] |= i915->meta.ProgramSize - 2;
i915->meta.emitted &= ~I915_UPLOAD_PROGRAM;
}
static void enable_texture_blend_replace( i915ContextPtr i915 )
static void
meta_texture_blend_replace(struct intel_context *intel)
{
struct i915_context *i915 = i915_context(&intel->ctx);
static const GLuint prog[] = {
_3DSTATE_PIXEL_SHADER_PROGRAM,
/* Declare the sampler:
*/
(D0_DCL |
D0_DECL_REG( REG_S(0) ) |
D0_SAMPLE_TYPE_2D |
D0_CHANNEL_NONE),
(D0_DCL | D0_DECL_REG(REG_S(0)) | D0_SAMPLE_TYPE_2D | D0_CHANNEL_NONE),
D1_MBZ,
D2_MBZ,
/* Declare the interpolated texture coordinate:
*/
(D0_DCL |
D0_DECL_REG( REG_T_TEX(0) ) |
D0_CHANNEL_ALL),
(D0_DCL | D0_DECL_REG(REG_T_TEX(0)) | D0_CHANNEL_ALL),
D1_MBZ,
D2_MBZ,
/* output-color = texld(sample0, texcoord0)
*/
(T0_TEXLD |
T0_DEST_REG( REG_OC ) |
T0_SAMPLER( 0 )),
(T0_TEXLD | T0_DEST_REG(REG_OC) | T0_SAMPLER(0)),
T1_ADDRESS_REG(REG_TYPE_T, 0),
T2_MBZ
};
memcpy( i915->meta.Program, prog, sizeof(prog) );
memcpy(i915->meta.Program, prog, sizeof(prog));
i915->meta.ProgramSize = sizeof(prog) / sizeof(*prog);
i915->meta.Program[0] |= i915->meta.ProgramSize - 2;
i915->meta.emitted &= ~I915_UPLOAD_PROGRAM;
@@ -286,415 +324,186 @@ static void enable_texture_blend_replace( i915ContextPtr i915 )
/* Set up an arbitrary piece of memory as a rectangular texture
* (including the front or back buffer).
*/
static void set_tex_rect_source( i915ContextPtr i915,
GLuint offset,
GLuint width,
GLuint height,
GLuint pitch, /* in bytes! */
GLuint textureFormat )
static GLboolean
meta_tex_rect_source(struct intel_context *intel,
struct _DriBufferObject *buffer,
GLuint offset,
GLuint pitch, GLuint height, GLenum format, GLenum type)
{
struct i915_context *i915 = i915_context(&intel->ctx);
GLuint unit = 0;
GLint numLevels = 1;
GLuint *state = i915->meta.Tex[0];
GLuint textureFormat;
GLuint cpp;
#if 0
printf("TexRect source offset 0x%x pitch %d\n", offset, pitch);
#endif
/* A full implementation of this would do the upload through
* glTexImage2d, and get all the conversion operations at that
* point. We are restricted, but still at least have access to the
* fragment program swizzle.
*/
switch (format) {
case GL_BGRA:
switch (type) {
case GL_UNSIGNED_INT_8_8_8_8_REV:
case GL_UNSIGNED_BYTE:
textureFormat = (MAPSURF_32BIT | MT_32BIT_ARGB8888);
cpp = 4;
break;
default:
return GL_FALSE;
}
break;
case GL_RGBA:
switch (type) {
case GL_UNSIGNED_INT_8_8_8_8_REV:
case GL_UNSIGNED_BYTE:
textureFormat = (MAPSURF_32BIT | MT_32BIT_ABGR8888);
cpp = 4;
break;
default:
return GL_FALSE;
}
break;
case GL_BGR:
switch (type) {
case GL_UNSIGNED_SHORT_5_6_5_REV:
textureFormat = (MAPSURF_16BIT | MT_16BIT_RGB565);
cpp = 2;
break;
default:
return GL_FALSE;
}
break;
case GL_RGB:
switch (type) {
case GL_UNSIGNED_SHORT_5_6_5:
textureFormat = (MAPSURF_16BIT | MT_16BIT_RGB565);
cpp = 2;
break;
default:
return GL_FALSE;
}
break;
/* fprintf(stderr, "%s: offset: %x w: %d h: %d pitch %d format %x\n", */
/* __FUNCTION__, offset, width, height, pitch, textureFormat ); */
default:
return GL_FALSE;
}
if ((pitch * cpp) & 3) {
_mesa_printf("%s: texture is not dword pitch\n", __FUNCTION__);
return GL_FALSE;
}
/* intel_region_release(intel, &i915->meta.tex_region[0]); */
/* intel_region_reference(&i915->meta.tex_region[0], region); */
i915->meta.tex_buffer[0] = buffer;
i915->meta.tex_offset[0] = offset;
state[I915_TEXREG_MS2] = offset;
state[I915_TEXREG_MS3] = (((height - 1) << MS3_HEIGHT_SHIFT) |
((width - 1) << MS3_WIDTH_SHIFT) |
textureFormat |
MS3_USE_FENCE_REGS);
((pitch - 1) << MS3_WIDTH_SHIFT) |
textureFormat | MS3_USE_FENCE_REGS);
state[I915_TEXREG_MS4] = ((((pitch / 4) - 1) << MS4_PITCH_SHIFT) |
((((numLevels-1) * 4)) << MS4_MAX_LOD_SHIFT));
state[I915_TEXREG_MS4] = (((((pitch * cpp) / 4) - 1) << MS4_PITCH_SHIFT) |
MS4_CUBE_FACE_ENA_MASK |
((((numLevels - 1) * 4)) << MS4_MAX_LOD_SHIFT));
state[I915_TEXREG_SS2] = ((FILTER_NEAREST << SS2_MIN_FILTER_SHIFT) |
(MIPFILTER_NONE << SS2_MIP_FILTER_SHIFT) |
(FILTER_NEAREST << SS2_MAG_FILTER_SHIFT));
(MIPFILTER_NONE << SS2_MIP_FILTER_SHIFT) |
(FILTER_NEAREST << SS2_MAG_FILTER_SHIFT));
state[I915_TEXREG_SS3] = ((TEXCOORDMODE_WRAP << SS3_TCX_ADDR_MODE_SHIFT) |
(TEXCOORDMODE_WRAP << SS3_TCY_ADDR_MODE_SHIFT) |
(TEXCOORDMODE_WRAP << SS3_TCZ_ADDR_MODE_SHIFT) |
(unit<<SS3_TEXTUREMAP_INDEX_SHIFT));
(TEXCOORDMODE_WRAP << SS3_TCY_ADDR_MODE_SHIFT) |
(TEXCOORDMODE_WRAP << SS3_TCZ_ADDR_MODE_SHIFT) |
(unit << SS3_TEXTUREMAP_INDEX_SHIFT));
state[I915_TEXREG_SS4] = 0;
i915->meta.emitted &= ~I915_UPLOAD_TEX(0);
return GL_TRUE;
}
/* Select between front and back draw buffers.
/**
* Set the color and depth drawing region for meta ops.
*/
static void set_draw_region( i915ContextPtr i915, const intelRegion *region )
static void
meta_draw_region(struct intel_context *intel,
struct intel_region *color_region,
struct intel_region *depth_region)
{
#if 0
printf("Rotate into region: offset 0x%x pitch %d\n",
region->offset, region->pitch);
#endif
i915->meta.Buffer[I915_DESTREG_CBUFADDR1] =
(BUF_3D_ID_COLOR_BACK | BUF_3D_PITCH(region->pitch) | BUF_3D_USE_FENCE);
i915->meta.Buffer[I915_DESTREG_CBUFADDR2] = region->offset;
i915->meta.emitted &= ~I915_UPLOAD_BUFFERS;
struct i915_context *i915 = i915_context(&intel->ctx);
i915_state_draw_region(intel, &i915->meta, color_region, depth_region);
}
#if 0
/* Set up an arbitrary draw format, useful for targeting texture or AGP
* memory.
*/
static void set_draw_format( i915ContextPtr i915,
GLuint format,
GLuint depth_format)
static void
set_vertex_format(struct intel_context *intel)
{
i915->meta.Buffer[I915_DESTREG_DV1] = (DSTORG_HORT_BIAS(0x8) | /* .5 */
DSTORG_VERT_BIAS(0x8) | /* .5 */
format |
LOD_PRECLAMP_OGL |
TEX_DEFAULT_COLOR_OGL |
depth_format);
struct i915_context *i915 = i915_context(&intel->ctx);
i915->meta.emitted &= ~I915_UPLOAD_BUFFERS;
/* fprintf(stderr, "%s: DV1: %x\n", */
/* __FUNCTION__, i915->meta.Buffer[I915_DESTREG_DV1]); */
}
#endif
static void set_vertex_format( i915ContextPtr i915 )
{
i915->meta.Ctx[I915_CTXREG_LIS2] =
i915->meta.Ctx[I915_CTXREG_LIS2] =
(S2_TEXCOORD_FMT(0, TEXCOORDFMT_2D) |
S2_TEXCOORD_FMT(1, TEXCOORDFMT_NOT_PRESENT) |
S2_TEXCOORD_FMT(1, TEXCOORDFMT_NOT_PRESENT) |
S2_TEXCOORD_FMT(2, TEXCOORDFMT_NOT_PRESENT) |
S2_TEXCOORD_FMT(3, TEXCOORDFMT_NOT_PRESENT) |
S2_TEXCOORD_FMT(4, TEXCOORDFMT_NOT_PRESENT) |
S2_TEXCOORD_FMT(5, TEXCOORDFMT_NOT_PRESENT) |
S2_TEXCOORD_FMT(5, TEXCOORDFMT_NOT_PRESENT) |
S2_TEXCOORD_FMT(6, TEXCOORDFMT_NOT_PRESENT) |
S2_TEXCOORD_FMT(7, TEXCOORDFMT_NOT_PRESENT));
i915->meta.Ctx[I915_CTXREG_LIS4] &= ~S4_VFMT_MASK;
i915->meta.Ctx[I915_CTXREG_LIS4] |=
(S4_VFMT_COLOR |
S4_VFMT_SPEC_FOG |
S4_VFMT_XYZW);
i915->meta.Ctx[I915_CTXREG_LIS4] |= (S4_VFMT_COLOR | S4_VFMT_XYZ);
i915->meta.emitted &= ~I915_UPLOAD_CTX;
}
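/* With S4_VFMT_XYZ | S4_VFMT_COLOR and a single 2D texcoord, the meta
 * vertices are presumably six dwords each (x, y, z, packed ARGB color,
 * s, t), two fewer than the eight-dword XYZW + specular layout the old
 * draw_quad() below emitted.
 */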
static void draw_quad(i915ContextPtr i915,
GLfloat x0, GLfloat x1,
GLfloat y0, GLfloat y1,
GLubyte red, GLubyte green,
GLubyte blue, GLubyte alpha,
GLfloat s0, GLfloat s1,
GLfloat t0, GLfloat t1 )
{
GLuint vertex_size = 8;
GLuint *vb = intelEmitInlinePrimitiveLocked( &i915->intel,
PRIM3D_TRIFAN,
4 * vertex_size,
vertex_size );
intelVertex tmp;
int i;
if (0)
fprintf(stderr, "%s: %f,%f-%f,%f 0x%x%x%x%x %f,%f-%f,%f\n",
__FUNCTION__,
x0,y0,x1,y1,red,green,blue,alpha,s0,t0,s1,t1);
/* initial vertex, left bottom */
tmp.v.x = x0;
tmp.v.y = y0;
tmp.v.z = 1.0;
tmp.v.w = 1.0;
tmp.v.color.red = red;
tmp.v.color.green = green;
tmp.v.color.blue = blue;
tmp.v.color.alpha = alpha;
tmp.v.specular.red = 0;
tmp.v.specular.green = 0;
tmp.v.specular.blue = 0;
tmp.v.specular.alpha = 0;
tmp.v.u0 = s0;
tmp.v.v0 = t0;
for (i = 0 ; i < vertex_size ; i++)
vb[i] = tmp.ui[i];
/* right bottom */
vb += vertex_size;
tmp.v.x = x1;
tmp.v.u0 = s1;
for (i = 0 ; i < vertex_size ; i++)
vb[i] = tmp.ui[i];
/* right top */
vb += vertex_size;
tmp.v.y = y1;
tmp.v.v0 = t1;
for (i = 0 ; i < vertex_size ; i++)
vb[i] = tmp.ui[i];
/* left top */
vb += vertex_size;
tmp.v.x = x0;
tmp.v.u0 = s0;
for (i = 0 ; i < vertex_size ; i++)
vb[i] = tmp.ui[i];
}
static void draw_poly(i915ContextPtr i915,
GLubyte red, GLubyte green, GLubyte blue, GLubyte alpha,
GLuint numVerts,
/*const*/ GLfloat verts[][2],
/*const*/ GLfloat texcoords[][2])
{
GLuint vertex_size = 8;
GLuint *vb = intelEmitInlinePrimitiveLocked( &i915->intel,
PRIM3D_TRIFAN,
numVerts * vertex_size,
vertex_size );
intelVertex tmp;
int i, k;
/* initial constant vertex fields */
tmp.v.z = 1.0;
tmp.v.w = 1.0;
tmp.v.color.red = red;
tmp.v.color.green = green;
tmp.v.color.blue = blue;
tmp.v.color.alpha = alpha;
tmp.v.specular.red = 0;
tmp.v.specular.green = 0;
tmp.v.specular.blue = 0;
tmp.v.specular.alpha = 0;
for (k = 0; k < numVerts; k++) {
tmp.v.x = verts[k][0];
tmp.v.y = verts[k][1];
tmp.v.u0 = texcoords[k][0];
tmp.v.v0 = texcoords[k][1];
for (i = 0 ; i < vertex_size ; i++)
vb[i] = tmp.ui[i];
vb += vertex_size;
}
}
void
i915ClearWithTris(intelContextPtr intel, GLbitfield mask,
GLboolean all,
GLint cx, GLint cy, GLint cw, GLint ch)
{
i915ContextPtr i915 = I915_CONTEXT( intel );
__DRIdrawablePrivate *dPriv = intel->driDrawable;
intelScreenPrivate *screen = intel->intelScreen;
int x0, y0, x1, y1;
SET_STATE( i915, meta );
set_initial_state( i915 );
set_no_texture( i915 );
set_vertex_format( i915 );
LOCK_HARDWARE(intel);
if (!all) {
x0 = cx;
y0 = cy;
x1 = x0 + cw;
y1 = y0 + ch;
} else {
x0 = 0;
y0 = 0;
x1 = x0 + dPriv->w;
y1 = y0 + dPriv->h;
}
/* Don't do any clipping to screen - these are window coordinates.
* The active cliprects will be applied as for any other geometry.
*/
if (mask & BUFFER_BIT_FRONT_LEFT) {
set_no_depth_stencil_write( i915 );
set_color_mask( i915, GL_TRUE );
set_draw_region( i915, &screen->front );
draw_quad(i915, x0, x1, y0, y1,
intel->clear_red, intel->clear_green,
intel->clear_blue, intel->clear_alpha,
0, 0, 0, 0);
}
if (mask & BUFFER_BIT_BACK_LEFT) {
set_no_depth_stencil_write( i915 );
set_color_mask( i915, GL_TRUE );
set_draw_region( i915, &screen->back );
draw_quad(i915, x0, x1, y0, y1,
intel->clear_red, intel->clear_green,
intel->clear_blue, intel->clear_alpha,
0, 0, 0, 0);
}
if (mask & BUFFER_BIT_STENCIL) {
set_stencil_replace( i915,
intel->ctx.Stencil.WriteMask[0],
intel->ctx.Stencil.Clear);
set_color_mask( i915, GL_FALSE );
set_draw_region( i915, &screen->front ); /* could be either? */
draw_quad( i915, x0, x1, y0, y1, 0, 0, 0, 0, 0, 0, 0, 0 );
}
UNLOCK_HARDWARE(intel);
SET_STATE( i915, state );
}
/**
* Copy the window contents named by dPriv to the rotated (or reflected)
* color buffer.
* srcBuf is BUFFER_BIT_FRONT_LEFT or BUFFER_BIT_BACK_LEFT to indicate the source.
/* Operations where the 3D engine is decoupled temporarily from the
* current GL state and used for other purposes than simply rendering
* incoming triangles.
*/
void
i915RotateWindow(intelContextPtr intel, __DRIdrawablePrivate *dPriv,
GLuint srcBuf)
static void
install_meta_state(struct intel_context *intel)
{
i915ContextPtr i915 = I915_CONTEXT( intel );
intelScreenPrivate *screen = intel->intelScreen;
const GLuint cpp = screen->cpp;
drm_clip_rect_t fullRect;
GLuint textureFormat, srcOffset, srcPitch;
const drm_clip_rect_t *clipRects;
int numClipRects;
int i;
struct i915_context *i915 = i915_context(&intel->ctx);
memcpy(&i915->meta, &i915->initial, sizeof(i915->meta));
i915->meta.active = ACTIVE;
i915->meta.emitted = 0;
int xOrig, yOrig;
int origNumClipRects;
drm_clip_rect_t *origRects;
/*
* set up hardware state
*/
intelFlush( &intel->ctx );
SET_STATE( i915, meta );
set_initial_state( i915 );
set_no_texture( i915 );
set_vertex_format( i915 );
set_no_depth_stencil_write( i915 );
set_color_mask( i915, GL_TRUE );
LOCK_HARDWARE(intel);
/* save current drawing origin and cliprects (restored at end) */
xOrig = intel->drawX;
yOrig = intel->drawY;
origNumClipRects = intel->numClipRects;
origRects = intel->pClipRects;
if (!intel->numClipRects)
goto done;
/*
* set drawing origin, cliprects for full-screen access to rotated screen
*/
fullRect.x1 = 0;
fullRect.y1 = 0;
fullRect.x2 = screen->rotatedWidth;
fullRect.y2 = screen->rotatedHeight;
intel->drawX = 0;
intel->drawY = 0;
intel->numClipRects = 1;
intel->pClipRects = &fullRect;
set_draw_region( i915, &screen->rotated );
if (cpp == 4)
textureFormat = MAPSURF_32BIT | MT_32BIT_ARGB8888;
else
textureFormat = MAPSURF_16BIT | MT_16BIT_RGB565;
if (srcBuf == BUFFER_BIT_FRONT_LEFT) {
srcPitch = screen->front.pitch; /* in bytes */
srcOffset = screen->front.offset; /* bytes */
clipRects = dPriv->pClipRects;
numClipRects = dPriv->numClipRects;
}
else {
srcPitch = screen->back.pitch; /* in bytes */
srcOffset = screen->back.offset; /* bytes */
clipRects = dPriv->pBackClipRects;
numClipRects = dPriv->numBackClipRects;
}
/* set the whole screen up as a texture to avoid alignment issues */
set_tex_rect_source(i915,
srcOffset,
screen->width,
screen->height,
srcPitch,
textureFormat);
enable_texture_blend_replace(i915);
/*
* loop over the source window's cliprects
*/
for (i = 0; i < numClipRects; i++) {
int srcX0 = clipRects[i].x1;
int srcY0 = clipRects[i].y1;
int srcX1 = clipRects[i].x2;
int srcY1 = clipRects[i].y2;
GLfloat verts[4][2], tex[4][2];
int j;
/* build vertices for four corners of clip rect */
verts[0][0] = srcX0; verts[0][1] = srcY0;
verts[1][0] = srcX1; verts[1][1] = srcY0;
verts[2][0] = srcX1; verts[2][1] = srcY1;
verts[3][0] = srcX0; verts[3][1] = srcY1;
/* .. and texcoords */
tex[0][0] = srcX0; tex[0][1] = srcY0;
tex[1][0] = srcX1; tex[1][1] = srcY0;
tex[2][0] = srcX1; tex[2][1] = srcY1;
tex[3][0] = srcX0; tex[3][1] = srcY1;
/* transform coords to rotated screen coords */
for (j = 0; j < 4; j++) {
matrix23TransformCoordf(&screen->rotMatrix,
&verts[j][0], &verts[j][1]);
}
/* draw polygon to map source image to dest region */
draw_poly(i915, 255, 255, 255, 255, 4, verts, tex);
} /* cliprect loop */
intelFlushBatchLocked( intel, GL_FALSE, GL_FALSE, GL_FALSE );
done:
/* restore original drawing origin and cliprects */
intel->drawX = xOrig;
intel->drawY = yOrig;
intel->numClipRects = origNumClipRects;
intel->pClipRects = origRects;
UNLOCK_HARDWARE(intel);
SET_STATE( i915, state );
SET_STATE(i915, meta);
set_vertex_format(intel);
meta_no_texture(intel);
}
static void
leave_meta_state(struct intel_context *intel)
{
struct i915_context *i915 = i915_context(&intel->ctx);
intel_region_release(intel->intelScreen, &i915->meta.draw_region);
intel_region_release(intel->intelScreen, &i915->meta.depth_region);
/* intel_region_release(intel, &i915->meta.tex_region[0]); */
SET_STATE(i915, state);
}
void
i915InitMetaFuncs(struct i915_context *i915)
{
i915->intel.vtbl.install_meta_state = install_meta_state;
i915->intel.vtbl.leave_meta_state = leave_meta_state;
i915->intel.vtbl.meta_no_depth_write = meta_no_depth_write;
i915->intel.vtbl.meta_no_stencil_write = meta_no_stencil_write;
i915->intel.vtbl.meta_stencil_replace = meta_stencil_replace;
i915->intel.vtbl.meta_depth_replace = meta_depth_replace;
i915->intel.vtbl.meta_color_mask = meta_color_mask;
i915->intel.vtbl.meta_no_texture = meta_no_texture;
i915->intel.vtbl.meta_texture_blend_replace = meta_texture_blend_replace;
i915->intel.vtbl.meta_tex_rect_source = meta_tex_rect_source;
i915->intel.vtbl.meta_draw_region = meta_draw_region;
i915->intel.vtbl.meta_import_pixel_state = meta_import_pixel_state;
}

View File

@@ -72,58 +72,62 @@
#define I915_CONSTFLAG_PARAM 0x1f
GLuint i915_get_temp( struct i915_fragment_program *p )
GLuint
i915_get_temp(struct i915_fragment_program *p)
{
int bit = ffs( ~p->temp_flag );
int bit = ffs(~p->temp_flag);
if (!bit) {
fprintf(stderr, "%s: out of temporaries\n", __FILE__);
exit(1);
}
p->temp_flag |= 1<<(bit-1);
return UREG(REG_TYPE_R, (bit-1));
p->temp_flag |= 1 << (bit - 1);
return UREG(REG_TYPE_R, (bit - 1));
}
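/* temp_flag is a bitmask of allocated R registers; ffs() on its
 * complement finds the lowest free one.  For example, with temps 0..2 in
 * use (temp_flag == 0x7), ffs(~0x7) returns 4 and R3 is handed out.
 */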
GLuint i915_get_utemp( struct i915_fragment_program *p )
GLuint
i915_get_utemp(struct i915_fragment_program * p)
{
int bit = ffs( ~p->utemp_flag );
int bit = ffs(~p->utemp_flag);
if (!bit) {
fprintf(stderr, "%s: out of temporaries\n", __FILE__);
exit(1);
}
p->utemp_flag |= 1<<(bit-1);
return UREG(REG_TYPE_U, (bit-1));
p->utemp_flag |= 1 << (bit - 1);
return UREG(REG_TYPE_U, (bit - 1));
}
void i915_release_utemps( struct i915_fragment_program *p )
void
i915_release_utemps(struct i915_fragment_program *p)
{
p->utemp_flag = ~0x7;
}
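/* ~0x7 leaves only bits 0..2 clear, so i915_get_utemp() can hand out at
 * most three unpreserved U temporaries, matching the "U: 0..2" limit
 * noted in i915_reg.h.
 */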
GLuint i915_emit_decl( struct i915_fragment_program *p,
GLuint type, GLuint nr, GLuint d0_flags )
GLuint
i915_emit_decl(struct i915_fragment_program *p,
GLuint type, GLuint nr, GLuint d0_flags)
{
GLuint reg = UREG(type, nr);
if (type == REG_TYPE_T) {
if (p->decl_t & (1<<nr))
return reg;
if (p->decl_t & (1 << nr))
return reg;
p->decl_t |= (1<<nr);
p->decl_t |= (1 << nr);
}
else if (type == REG_TYPE_S) {
if (p->decl_s & (1<<nr))
return reg;
if (p->decl_s & (1 << nr))
return reg;
p->decl_s |= (1<<nr);
p->decl_s |= (1 << nr);
}
else
else
return reg;
*(p->decl++) = (D0_DCL | D0_DEST( reg ) | d0_flags);
*(p->decl++) = (D0_DCL | D0_DEST(reg) | d0_flags);
*(p->decl++) = D1_MBZ;
*(p->decl++) = D2_MBZ;
@@ -131,24 +135,26 @@ GLuint i915_emit_decl( struct i915_fragment_program *p,
return reg;
}
GLuint i915_emit_arith( struct i915_fragment_program *p,
GLuint op,
GLuint dest,
GLuint mask,
GLuint saturate,
GLuint src0,
GLuint src1,
GLuint src2 )
GLuint
i915_emit_arith(struct i915_fragment_program * p,
GLuint op,
GLuint dest,
GLuint mask,
GLuint saturate, GLuint src0, GLuint src1, GLuint src2)
{
GLuint c[3];
GLuint nr_const = 0;
assert(GET_UREG_TYPE(dest) != REG_TYPE_CONST);
assert(dest = UREG(GET_UREG_TYPE(dest), GET_UREG_NR(dest)));
dest = UREG(GET_UREG_TYPE(dest), GET_UREG_NR(dest));
assert(dest);
if (GET_UREG_TYPE(src0) == REG_TYPE_CONST) c[nr_const++] = 0;
if (GET_UREG_TYPE(src1) == REG_TYPE_CONST) c[nr_const++] = 1;
if (GET_UREG_TYPE(src2) == REG_TYPE_CONST) c[nr_const++] = 2;
if (GET_UREG_TYPE(src0) == REG_TYPE_CONST)
c[nr_const++] = 0;
if (GET_UREG_TYPE(src1) == REG_TYPE_CONST)
c[nr_const++] = 1;
if (GET_UREG_TYPE(src2) == REG_TYPE_CONST)
c[nr_const++] = 2;
/* Recursively call this function to MOV additional const values
* into temporary registers. Use utemp registers for this -
@@ -164,81 +170,97 @@ GLuint i915_emit_arith( struct i915_fragment_program *p,
old_utemp_flag = p->utemp_flag;
first = GET_UREG_NR(s[c[0]]);
for (i = 1 ; i < nr_const ; i++) {
if (GET_UREG_NR(s[c[i]]) != first) {
GLuint tmp = i915_get_utemp(p);
for (i = 1; i < nr_const; i++) {
if (GET_UREG_NR(s[c[i]]) != first) {
GLuint tmp = i915_get_utemp(p);
i915_emit_arith( p, A0_MOV, tmp, A0_DEST_CHANNEL_ALL, 0,
s[c[i]], 0, 0 );
s[c[i]] = tmp;
}
i915_emit_arith(p, A0_MOV, tmp, A0_DEST_CHANNEL_ALL, 0,
s[c[i]], 0, 0);
s[c[i]] = tmp;
}
}
src0 = s[0];
src1 = s[1];
src2 = s[2];
p->utemp_flag = old_utemp_flag; /* restore */
p->utemp_flag = old_utemp_flag; /* restore */
}
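/* The hardware allows only one distinct constant register per
 * instruction, so when e.g. src0 references C0 and src1 references C1,
 * the loop above first emits MOV utemp, C1 and substitutes the utemp,
 * leaving a single constant reference in the final instruction.
 */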
*(p->csr++) = (op |
A0_DEST( dest ) |
mask |
saturate |
A0_SRC0( src0 ));
*(p->csr++) = (A1_SRC0( src0 ) |
A1_SRC1( src1 ));
*(p->csr++) = (A2_SRC1( src1 ) |
A2_SRC2( src2 ));
*(p->csr++) = (op | A0_DEST(dest) | mask | saturate | A0_SRC0(src0));
*(p->csr++) = (A1_SRC0(src0) | A1_SRC1(src1));
*(p->csr++) = (A2_SRC1(src1) | A2_SRC2(src2));
p->nr_alu_insn++;
return dest;
}
GLuint i915_emit_texld( struct i915_fragment_program *p,
GLuint dest,
GLuint destmask,
GLuint sampler,
GLuint coord,
GLuint op )
GLuint dest,
GLuint destmask,
GLuint sampler,
GLuint coord,
GLuint op )
{
assert(GET_UREG_TYPE(dest) != REG_TYPE_CONST);
assert(dest = UREG(GET_UREG_TYPE(dest), GET_UREG_NR(dest)));
if (GET_UREG_TYPE(coord) != REG_TYPE_T) {
p->nr_tex_indirect++;
if (coord != UREG(GET_UREG_TYPE(coord), GET_UREG_NR(coord))) {
/* No real way to work around this in the general case - need to
* allocate and declare a new temporary register (a utemp won't
* do). Will fallback for now.
*/
i915_program_error(p, "Can't (yet) swizzle TEX arguments");
return 0;
}
*(p->csr++) = (op |
T0_DEST( dest ) |
destmask |
T0_SAMPLER( sampler ));
/* Don't worry about saturate as we only support
*/
if (destmask != A0_DEST_CHANNEL_ALL) {
GLuint tmp = i915_get_utemp(p);
i915_emit_texld( p, tmp, A0_DEST_CHANNEL_ALL, sampler, coord, op );
i915_emit_arith( p, A0_MOV, dest, destmask, 0, tmp, 0, 0 );
return dest;
}
else {
assert(GET_UREG_TYPE(dest) != REG_TYPE_CONST);
assert(dest = UREG(GET_UREG_TYPE(dest), GET_UREG_NR(dest)));
*(p->csr++) = T1_ADDRESS_REG( coord );
*(p->csr++) = T2_MBZ;
if (GET_UREG_TYPE(coord) != REG_TYPE_T) {
p->nr_tex_indirect++;
}
p->nr_tex_insn++;
return dest;
*(p->csr++) = (op |
T0_DEST( dest ) |
T0_SAMPLER( sampler ));
*(p->csr++) = T1_ADDRESS_REG( coord );
*(p->csr++) = T2_MBZ;
p->nr_tex_insn++;
return dest;
}
}
GLuint i915_emit_const1f( struct i915_fragment_program *p, GLfloat c0 )
GLuint
i915_emit_const1f(struct i915_fragment_program * p, GLfloat c0)
{
GLint reg, idx;
if (c0 == 0.0) return swizzle(UREG(REG_TYPE_R, 0), ZERO, ZERO, ZERO, ZERO);
if (c0 == 1.0) return swizzle(UREG(REG_TYPE_R, 0), ONE, ONE, ONE, ONE );
if (c0 == 0.0)
return swizzle(UREG(REG_TYPE_R, 0), ZERO, ZERO, ZERO, ZERO);
if (c0 == 1.0)
return swizzle(UREG(REG_TYPE_R, 0), ONE, ONE, ONE, ONE);
for (reg = 0; reg < I915_MAX_CONSTANT; reg++) {
if (p->constant_flags[reg] == I915_CONSTFLAG_PARAM)
continue;
continue;
for (idx = 0; idx < 4; idx++) {
if (!(p->constant_flags[reg] & (1<<idx)) ||
p->constant[reg][idx] == c0) {
p->constant[reg][idx] = c0;
p->constant_flags[reg] |= 1<<idx;
if (reg+1 > p->nr_constants) p->nr_constants = reg+1;
return swizzle(UREG(REG_TYPE_CONST, reg),idx,ZERO,ZERO,ONE);
}
if (!(p->constant_flags[reg] & (1 << idx)) ||
p->constant[reg][idx] == c0) {
p->constant[reg][idx] = c0;
p->constant_flags[reg] |= 1 << idx;
if (reg + 1 > p->nr_constants)
p->nr_constants = reg + 1;
return swizzle(UREG(REG_TYPE_CONST, reg), idx, ZERO, ZERO, ONE);
}
}
}
@@ -247,29 +269,35 @@ GLuint i915_emit_const1f( struct i915_fragment_program *p, GLfloat c0 )
return 0;
}
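/* Scalar constants are packed channel by channel: i915_emit_const1f(p, 0.5)
 * followed by i915_emit_const1f(p, 2.0) can share constant register 0,
 * returning swizzles that replicate the .x and .y channels respectively.
 * Values of 0.0 and 1.0 never consume a slot at all, since they come
 * straight from the ZERO/ONE swizzle selects.
 */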
GLuint i915_emit_const2f( struct i915_fragment_program *p,
GLfloat c0, GLfloat c1 )
GLuint
i915_emit_const2f(struct i915_fragment_program * p, GLfloat c0, GLfloat c1)
{
GLint reg, idx;
if (c0 == 0.0) return swizzle(i915_emit_const1f(p, c1), ZERO, X, Z, W);
if (c0 == 1.0) return swizzle(i915_emit_const1f(p, c1), ONE, X, Z, W);
if (c0 == 0.0)
return swizzle(i915_emit_const1f(p, c1), ZERO, X, Z, W);
if (c0 == 1.0)
return swizzle(i915_emit_const1f(p, c1), ONE, X, Z, W);
if (c1 == 0.0) return swizzle(i915_emit_const1f(p, c0), X, ZERO, Z, W);
if (c1 == 1.0) return swizzle(i915_emit_const1f(p, c0), X, ONE, Z, W);
if (c1 == 0.0)
return swizzle(i915_emit_const1f(p, c0), X, ZERO, Z, W);
if (c1 == 1.0)
return swizzle(i915_emit_const1f(p, c0), X, ONE, Z, W);
for (reg = 0; reg < I915_MAX_CONSTANT; reg++) {
if (p->constant_flags[reg] == 0xf ||
p->constant_flags[reg] == I915_CONSTFLAG_PARAM)
continue;
p->constant_flags[reg] == I915_CONSTFLAG_PARAM)
continue;
for (idx = 0; idx < 3; idx++) {
if (!(p->constant_flags[reg] & (3<<idx))) {
p->constant[reg][idx] = c0;
p->constant[reg][idx+1] = c1;
p->constant_flags[reg] |= 3<<idx;
if (reg+1 > p->nr_constants) p->nr_constants = reg+1;
return swizzle(UREG(REG_TYPE_CONST, reg),idx,idx+1,ZERO,ONE);
}
if (!(p->constant_flags[reg] & (3 << idx))) {
p->constant[reg][idx] = c0;
p->constant[reg][idx + 1] = c1;
p->constant_flags[reg] |= 3 << idx;
if (reg + 1 > p->nr_constants)
p->nr_constants = reg + 1;
return swizzle(UREG(REG_TYPE_CONST, reg), idx, idx + 1, ZERO,
ONE);
}
}
}
@@ -280,27 +308,28 @@ GLuint i915_emit_const2f( struct i915_fragment_program *p,
GLuint i915_emit_const4f( struct i915_fragment_program *p,
GLfloat c0, GLfloat c1, GLfloat c2, GLfloat c3 )
GLuint
i915_emit_const4f(struct i915_fragment_program * p,
GLfloat c0, GLfloat c1, GLfloat c2, GLfloat c3)
{
GLint reg;
for (reg = 0; reg < I915_MAX_CONSTANT; reg++) {
if (p->constant_flags[reg] == 0xf &&
p->constant[reg][0] == c0 &&
p->constant[reg][1] == c1 &&
p->constant[reg][2] == c2 &&
p->constant[reg][3] == c3) {
return UREG(REG_TYPE_CONST, reg);
p->constant[reg][0] == c0 &&
p->constant[reg][1] == c1 &&
p->constant[reg][2] == c2 && p->constant[reg][3] == c3) {
return UREG(REG_TYPE_CONST, reg);
}
else if (p->constant_flags[reg] == 0) {
p->constant[reg][0] = c0;
p->constant[reg][1] = c1;
p->constant[reg][2] = c2;
p->constant[reg][3] = c3;
p->constant_flags[reg] = 0xf;
if (reg+1 > p->nr_constants) p->nr_constants = reg+1;
return UREG(REG_TYPE_CONST, reg);
p->constant[reg][0] = c0;
p->constant[reg][1] = c1;
p->constant[reg][2] = c2;
p->constant[reg][3] = c3;
p->constant_flags[reg] = 0xf;
if (reg + 1 > p->nr_constants)
p->nr_constants = reg + 1;
return UREG(REG_TYPE_CONST, reg);
}
}
@@ -310,34 +339,36 @@ GLuint i915_emit_const4f( struct i915_fragment_program *p,
}
GLuint i915_emit_const4fv( struct i915_fragment_program *p, const GLfloat *c )
GLuint
i915_emit_const4fv(struct i915_fragment_program * p, const GLfloat * c)
{
return i915_emit_const4f( p, c[0], c[1], c[2], c[3] );
return i915_emit_const4f(p, c[0], c[1], c[2], c[3]);
}
GLuint i915_emit_param4fv( struct i915_fragment_program *p,
const GLfloat *values )
GLuint
i915_emit_param4fv(struct i915_fragment_program * p, const GLfloat * values)
{
GLint reg, i;
for (i = 0; i < p->nr_params; i++) {
if (p->param[i].values == values)
return UREG(REG_TYPE_CONST, p->param[i].reg);
return UREG(REG_TYPE_CONST, p->param[i].reg);
}
for (reg = 0; reg < I915_MAX_CONSTANT; reg++) {
if (p->constant_flags[reg] == 0) {
p->constant_flags[reg] = I915_CONSTFLAG_PARAM;
i = p->nr_params++;
p->constant_flags[reg] = I915_CONSTFLAG_PARAM;
i = p->nr_params++;
p->param[i].values = values;
p->param[i].reg = reg;
p->params_uptodate = 0;
p->param[i].values = values;
p->param[i].reg = reg;
p->params_uptodate = 0;
if (reg+1 > p->nr_constants) p->nr_constants = reg+1;
return UREG(REG_TYPE_CONST, reg);
if (reg + 1 > p->nr_constants)
p->nr_constants = reg + 1;
return UREG(REG_TYPE_CONST, reg);
}
}
@@ -349,7 +380,8 @@ GLuint i915_emit_param4fv( struct i915_fragment_program *p,
void i915_program_error( struct i915_fragment_program *p, const char *msg )
void
i915_program_error(struct i915_fragment_program *p, const char *msg)
{
/* XXX we shouldn't print anything to stdout, record GL error or
* call _mesa_problem()
@@ -358,23 +390,24 @@ void i915_program_error( struct i915_fragment_program *p, const char *msg )
p->error = 1;
}
void i915_init_program( i915ContextPtr i915, struct i915_fragment_program *p )
void
i915_init_program(struct i915_context *i915, struct i915_fragment_program *p)
{
GLcontext *ctx = &i915->intel.ctx;
TNLcontext *tnl = TNL_CONTEXT( ctx );
TNLcontext *tnl = TNL_CONTEXT(ctx);
p->translated = 0;
p->params_uptodate = 0;
p->on_hardware = 0;
p->error = 0;
p->nr_tex_indirect = 1; /* correct? */
p->nr_tex_indirect = 1; /* correct? */
p->nr_tex_insn = 0;
p->nr_alu_insn = 0;
p->nr_decl_insn = 0;
p->ctx = ctx;
memset( p->constant_flags, 0, sizeof(p->constant_flags) );
p->ctx = ctx;
memset(p->constant_flags, 0, sizeof(p->constant_flags));
p->nr_constants = 0;
p->csr = p->program;
@@ -396,12 +429,13 @@ void i915_init_program( i915ContextPtr i915, struct i915_fragment_program *p )
}
void i915_fini_program( struct i915_fragment_program *p )
void
i915_fini_program(struct i915_fragment_program *p)
{
GLuint program_size = p->csr - p->program;
GLuint decl_size = p->decl - p->declarations;
if (p->nr_tex_indirect > I915_MAX_TEX_INDIRECT)
if (p->nr_tex_indirect > I915_MAX_TEX_INDIRECT)
i915_program_error(p, "Exceeded max nr indirect texture lookups");
if (p->nr_tex_insn > I915_MAX_TEX_INSN)
@@ -431,22 +465,24 @@ void i915_fini_program( struct i915_fragment_program *p )
p->declarations[0] |= program_size + decl_size - 2;
}
void i915_upload_program( i915ContextPtr i915, struct i915_fragment_program *p )
void
i915_upload_program(struct i915_context *i915,
struct i915_fragment_program *p)
{
GLuint program_size = p->csr - p->program;
GLuint decl_size = p->decl - p->declarations;
FALLBACK( &i915->intel, I915_FALLBACK_PROGRAM, p->error );
FALLBACK(&i915->intel, I915_FALLBACK_PROGRAM, p->error);
/* Could just go straight to the batchbuffer from here:
*/
if (i915->state.ProgramSize != (program_size + decl_size) ||
memcmp(i915->state.Program + decl_size, p->program,
program_size*sizeof(int)) != 0) {
I915_STATECHANGE( i915, I915_UPLOAD_PROGRAM );
memcpy(i915->state.Program, p->declarations, decl_size*sizeof(int));
memcmp(i915->state.Program + decl_size, p->program,
program_size * sizeof(int)) != 0) {
I915_STATECHANGE(i915, I915_UPLOAD_PROGRAM);
memcpy(i915->state.Program, p->declarations, decl_size * sizeof(int));
memcpy(i915->state.Program + decl_size, p->program,
program_size*sizeof(int));
program_size * sizeof(int));
i915->state.ProgramSize = decl_size + program_size;
}
@@ -455,30 +491,28 @@ void i915_upload_program( i915ContextPtr i915, struct i915_fragment_program *p )
*/
if (p->nr_constants) {
GLuint nr = p->nr_constants;
I915_ACTIVESTATE( i915, I915_UPLOAD_CONSTANTS, 1 );
I915_STATECHANGE( i915, I915_UPLOAD_CONSTANTS );
I915_ACTIVESTATE(i915, I915_UPLOAD_CONSTANTS, 1);
I915_STATECHANGE(i915, I915_UPLOAD_CONSTANTS);
i915->state.Constant[0] = _3DSTATE_PIXEL_SHADER_CONSTANTS | ((nr) * 4);
i915->state.Constant[1] = (1<<(nr-1)) | ((1<<(nr-1))-1);
memcpy(&i915->state.Constant[2], p->constant, 4*sizeof(int)*(nr));
i915->state.Constant[1] = (1 << (nr - 1)) | ((1 << (nr - 1)) - 1);
memcpy(&i915->state.Constant[2], p->constant, 4 * sizeof(int) * (nr));
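/* (1 << (nr - 1)) | ((1 << (nr - 1)) - 1) is simply (1 << nr) - 1, i.e. a
 * mask enabling constant registers 0..nr-1.
 */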
i915->state.ConstantSize = 2 + (nr) * 4;
if (0) {
GLuint i;
for (i = 0; i < nr; i++) {
fprintf(stderr, "const[%d]: %f %f %f %f\n", i,
p->constant[i][0],
p->constant[i][1],
p->constant[i][2],
p->constant[i][3]);
}
GLuint i;
for (i = 0; i < nr; i++) {
fprintf(stderr, "const[%d]: %f %f %f %f\n", i,
p->constant[i][0],
p->constant[i][1], p->constant[i][2], p->constant[i][3]);
}
}
}
else {
I915_ACTIVESTATE( i915, I915_UPLOAD_CONSTANTS, 0 );
}
I915_ACTIVESTATE(i915, I915_UPLOAD_CONSTANTS, 0);
}
p->on_hardware = 1;
}

View File

@@ -48,11 +48,11 @@
#define UREG_CHANNEL_W_NEGATE_SHIFT 11
#define UREG_CHANNEL_W_SHIFT 8
#define UREG_CHANNEL_ZERO_NEGATE_MBZ 5
#define UREG_CHANNEL_ZERO_SHIFT 4
#define UREG_CHANNEL_ZERO_SHIFT 4
#define UREG_CHANNEL_ONE_NEGATE_MBZ 1
#define UREG_CHANNEL_ONE_SHIFT 0
#define UREG_CHANNEL_ONE_SHIFT 0
#define UREG_BAD 0xffffffff /* not a valid ureg */
#define UREG_BAD 0xffffffff /* not a valid ureg */
#define X SRC_X
#define Y SRC_Y
@@ -84,78 +84,75 @@
/* One neat thing about the UREG representation:
*/
static __inline int swizzle( int reg, int x, int y, int z, int w )
static INLINE int
swizzle(int reg, int x, int y, int z, int w)
{
return ((reg & ~UREG_XYZW_CHANNEL_MASK) |
CHANNEL_SRC( GET_CHANNEL_SRC( reg, x ), 0 ) |
CHANNEL_SRC( GET_CHANNEL_SRC( reg, y ), 1 ) |
CHANNEL_SRC( GET_CHANNEL_SRC( reg, z ), 2 ) |
CHANNEL_SRC( GET_CHANNEL_SRC( reg, w ), 3 ));
CHANNEL_SRC(GET_CHANNEL_SRC(reg, x), 0) |
CHANNEL_SRC(GET_CHANNEL_SRC(reg, y), 1) |
CHANNEL_SRC(GET_CHANNEL_SRC(reg, z), 2) |
CHANNEL_SRC(GET_CHANNEL_SRC(reg, w), 3));
}
/* Another neat thing about the UREG representation:
*/
static __inline int negate( int reg, int x, int y, int z, int w )
static INLINE int
negate(int reg, int x, int y, int z, int w)
{
return reg ^ (((x&1)<<UREG_CHANNEL_X_NEGATE_SHIFT)|
((y&1)<<UREG_CHANNEL_Y_NEGATE_SHIFT)|
((z&1)<<UREG_CHANNEL_Z_NEGATE_SHIFT)|
((w&1)<<UREG_CHANNEL_W_NEGATE_SHIFT));
return reg ^ (((x & 1) << UREG_CHANNEL_X_NEGATE_SHIFT) |
((y & 1) << UREG_CHANNEL_Y_NEGATE_SHIFT) |
((z & 1) << UREG_CHANNEL_Z_NEGATE_SHIFT) |
((w & 1) << UREG_CHANNEL_W_NEGATE_SHIFT));
}
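/* Usage examples, both taken from the texenv code later in this driver:
 * swizzle(src, W, W, W, W) broadcasts the alpha channel to all four
 * components, and negate(src, 1, 1, 1, 1) flips the sign of every channel.
 * Both are pure bit operations on the ureg word; no extra instructions are
 * emitted.
 */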
extern GLuint i915_get_temp( struct i915_fragment_program *p );
extern GLuint i915_get_utemp( struct i915_fragment_program *p );
extern void i915_release_utemps( struct i915_fragment_program *p );
extern GLuint i915_get_temp(struct i915_fragment_program *p);
extern GLuint i915_get_utemp(struct i915_fragment_program *p);
extern void i915_release_utemps(struct i915_fragment_program *p);
extern GLuint i915_emit_texld( struct i915_fragment_program *p,
GLuint dest,
GLuint destmask,
GLuint sampler,
GLuint coord,
GLuint op );
extern GLuint i915_emit_texld(struct i915_fragment_program *p,
GLuint dest,
GLuint destmask,
GLuint sampler, GLuint coord, GLuint op);
extern GLuint i915_emit_arith( struct i915_fragment_program *p,
GLuint op,
GLuint dest,
GLuint mask,
GLuint saturate,
GLuint src0,
GLuint src1,
GLuint src2 );
extern GLuint i915_emit_arith(struct i915_fragment_program *p,
GLuint op,
GLuint dest,
GLuint mask,
GLuint saturate,
GLuint src0, GLuint src1, GLuint src2);
extern GLuint i915_emit_decl( struct i915_fragment_program *p,
GLuint type, GLuint nr, GLuint d0_flags );
extern GLuint i915_emit_decl(struct i915_fragment_program *p,
GLuint type, GLuint nr, GLuint d0_flags);
extern GLuint i915_emit_const1f( struct i915_fragment_program *p,
GLfloat c0 );
extern GLuint i915_emit_const1f(struct i915_fragment_program *p, GLfloat c0);
extern GLuint i915_emit_const2f( struct i915_fragment_program *p,
GLfloat c0, GLfloat c1 );
extern GLuint i915_emit_const2f(struct i915_fragment_program *p,
GLfloat c0, GLfloat c1);
extern GLuint i915_emit_const4fv( struct i915_fragment_program *p,
const GLfloat *c );
extern GLuint i915_emit_const4fv(struct i915_fragment_program *p,
const GLfloat * c);
extern GLuint i915_emit_const4f( struct i915_fragment_program *p,
GLfloat c0, GLfloat c1,
GLfloat c2, GLfloat c3 );
extern GLuint i915_emit_const4f(struct i915_fragment_program *p,
GLfloat c0, GLfloat c1,
GLfloat c2, GLfloat c3);
extern GLuint i915_emit_param4fv( struct i915_fragment_program *p,
const GLfloat *values );
extern GLuint i915_emit_param4fv(struct i915_fragment_program *p,
const GLfloat * values);
extern void i915_program_error( struct i915_fragment_program *p,
const char *msg );
extern void i915_program_error(struct i915_fragment_program *p,
const char *msg);
extern void i915_init_program( i915ContextPtr i915,
struct i915_fragment_program *p );
extern void i915_init_program(struct i915_context *i915,
struct i915_fragment_program *p);
extern void i915_upload_program( i915ContextPtr i915,
struct i915_fragment_program *p );
extern void i915_upload_program(struct i915_context *i915,
struct i915_fragment_program *p);
extern void i915_fini_program( struct i915_fragment_program *p );
extern void i915_fini_program(struct i915_fragment_program *p);

View File

@@ -425,8 +425,10 @@
#define S7_DEPTH_OFFSET_CONST_MASK ~0
/* 3DSTATE_MAP_DEINTERLACER_PARAMETERS */
/* 3DSTATE_MAP_PALETTE_LOAD_32, p206 */
/* 3DSTATE_MAP_PALETTE_LOAD_32, p206 */
#define _3DSTATE_MAP_PALETTE_LOAD_32 (CMD_3D|(0x1d<<24)|(0x8f<<16))
/* subsequent dwords up to length (max 16) are ARGB8888 color values */
/* _3DSTATE_MODES_4, p218 */
#define _3DSTATE_MODES_4_CMD (CMD_3D|(0x0d<<24))
@@ -435,7 +437,7 @@
#define LOGICOP_MASK (0xf<<18)
#define MODE4_ENABLE_STENCIL_TEST_MASK ((1<<17)|(0xff00))
#define ENABLE_STENCIL_TEST_MASK (1<<17)
#define STENCIL_TEST_MASK(x) ((x)<<8)
#define STENCIL_TEST_MASK(x) (((x)&0xff)<<8)
#define MODE4_ENABLE_STENCIL_WRITE_MASK ((1<<16)|(0x00ff))
#define ENABLE_STENCIL_WRITE_MASK (1<<16)
#define STENCIL_WRITE_MASK(x) ((x)&0xff)
@@ -458,7 +460,7 @@
#define I915_MAX_TEX_INDIRECT 4
#define I915_MAX_TEX_INSN 32
#define I915_MAX_TEX_INSN 32
#define I915_MAX_ALU_INSN 64
#define I915_MAX_DECL_INSN 27
#define I915_MAX_TEMPORARY 16
@@ -470,33 +472,33 @@
*/
#define _3DSTATE_PIXEL_SHADER_PROGRAM (CMD_3D|(0x1d<<24)|(0x5<<16))
#define REG_TYPE_R 0 /* temporary regs, no need to
* dcl, must be written before
* read -- Preserved between
* phases.
*/
#define REG_TYPE_T 1 /* Interpolated values, must be
* dcl'ed before use.
*
* 0..7: texture coord,
* 8: diffuse spec,
* 9: specular color,
* 10: fog parameter in w.
*/
#define REG_TYPE_CONST 2 /* Restriction: only one const
* can be referenced per
* instruction, though it may be
* selected for multiple inputs.
* Constants not initialized
* default to zero.
*/
#define REG_TYPE_S 3 /* sampler */
#define REG_TYPE_OC 4 /* output color (rgba) */
#define REG_TYPE_OD 5 /* output depth (w), xyz are
* temporaries. If not written,
* interpolated depth is used?
*/
#define REG_TYPE_U 6 /* unpreserved temporaries */
#define REG_TYPE_R 0 /* temporary regs, no need to
* dcl, must be written before
* read -- Preserved between
* phases.
*/
#define REG_TYPE_T 1 /* Interpolated values, must be
* dcl'ed before use.
*
* 0..7: texture coord,
* 8: diffuse spec,
* 9: specular color,
* 10: fog parameter in w.
*/
#define REG_TYPE_CONST 2 /* Restriction: only one const
* can be referenced per
* instruction, though it may be
* selected for multiple inputs.
* Constants not initialized
* default to zero.
*/
#define REG_TYPE_S 3 /* sampler */
#define REG_TYPE_OC 4 /* output color (rgba) */
#define REG_TYPE_OD 5 /* output depth (w), xyz are
* temporaries. If not written,
* interpolated depth is used?
*/
#define REG_TYPE_U 6 /* unpreserved temporaries */
#define REG_TYPE_MASK 0x7
#define REG_NR_MASK 0xf
@@ -513,34 +515,34 @@
#define T_TEX7 7
#define T_DIFFUSE 8
#define T_SPECULAR 9
#define T_FOG_W 10 /* interpolated fog is in W coord */
#define T_FOG_W 10 /* interpolated fog is in W coord */
/* Arithmetic instructions */
/* .replicate_swizzle == selection and replication of a particular
* scalar channel, ie., .xxxx, .yyyy, .zzzz or .wwww
*/
#define A0_NOP (0x0<<24) /* no operation */
#define A0_ADD (0x1<<24) /* dst = src0 + src1 */
#define A0_MOV (0x2<<24) /* dst = src0 */
#define A0_MUL (0x3<<24) /* dst = src0 * src1 */
#define A0_MAD (0x4<<24) /* dst = src0 * src1 + src2 */
#define A0_DP2ADD (0x5<<24) /* dst.xyzw = src0.xy dot src1.xy + src2.replicate_swizzle */
#define A0_DP3 (0x6<<24) /* dst.xyzw = src0.xyz dot src1.xyz */
#define A0_DP4 (0x7<<24) /* dst.xyzw = src0.xyzw dot src1.xyzw */
#define A0_FRC (0x8<<24) /* dst = src0 - floor(src0) */
#define A0_RCP (0x9<<24) /* dst.xyzw = 1/(src0.replicate_swizzle) */
#define A0_RSQ (0xa<<24) /* dst.xyzw = 1/(sqrt(abs(src0.replicate_swizzle))) */
#define A0_EXP (0xb<<24) /* dst.xyzw = exp2(src0.replicate_swizzle) */
#define A0_LOG (0xc<<24) /* dst.xyzw = log2(abs(src0.replicate_swizzle)) */
#define A0_CMP (0xd<<24) /* dst = (src0 >= 0.0) ? src1 : src2 */
#define A0_MIN (0xe<<24) /* dst = (src0 < src1) ? src0 : src1 */
#define A0_MAX (0xf<<24) /* dst = (src0 >= src1) ? src0 : src1 */
#define A0_FLR (0x10<<24) /* dst = floor(src0) */
#define A0_MOD (0x11<<24) /* dst = src0 fmod 1.0 */
#define A0_TRC (0x12<<24) /* dst = int(src0) */
#define A0_SGE (0x13<<24) /* dst = src0 >= src1 ? 1.0 : 0.0 */
#define A0_SLT (0x14<<24) /* dst = src0 < src1 ? 1.0 : 0.0 */
#define A0_NOP (0x0<<24) /* no operation */
#define A0_ADD (0x1<<24) /* dst = src0 + src1 */
#define A0_MOV (0x2<<24) /* dst = src0 */
#define A0_MUL (0x3<<24) /* dst = src0 * src1 */
#define A0_MAD (0x4<<24) /* dst = src0 * src1 + src2 */
#define A0_DP2ADD (0x5<<24) /* dst.xyzw = src0.xy dot src1.xy + src2.replicate_swizzle */
#define A0_DP3 (0x6<<24) /* dst.xyzw = src0.xyz dot src1.xyz */
#define A0_DP4 (0x7<<24) /* dst.xyzw = src0.xyzw dot src1.xyzw */
#define A0_FRC (0x8<<24) /* dst = src0 - floor(src0) */
#define A0_RCP (0x9<<24) /* dst.xyzw = 1/(src0.replicate_swizzle) */
#define A0_RSQ (0xa<<24) /* dst.xyzw = 1/(sqrt(abs(src0.replicate_swizzle))) */
#define A0_EXP (0xb<<24) /* dst.xyzw = exp2(src0.replicate_swizzle) */
#define A0_LOG (0xc<<24) /* dst.xyzw = log2(abs(src0.replicate_swizzle)) */
#define A0_CMP (0xd<<24) /* dst = (src0 >= 0.0) ? src1 : src2 */
#define A0_MIN (0xe<<24) /* dst = (src0 < src1) ? src0 : src1 */
#define A0_MAX (0xf<<24) /* dst = (src0 >= src1) ? src0 : src1 */
#define A0_FLR (0x10<<24) /* dst = floor(src0) */
#define A0_MOD (0x11<<24) /* dst = src0 fmod 1.0 */
#define A0_TRC (0x12<<24) /* dst = int(src0) */
#define A0_SGE (0x13<<24) /* dst = src0 >= src1 ? 1.0 : 0.0 */
#define A0_SLT (0x14<<24) /* dst = src0 < src1 ? 1.0 : 0.0 */
#define A0_DEST_SATURATE (1<<22)
#define A0_DEST_TYPE_SHIFT 19
/* Allow: R, OC, OD, U */
@@ -599,23 +601,23 @@
/* Texture instructions */
#define T0_TEXLD (0x15<<24) /* Sample texture using predeclared
* sampler and address, and output
* filtered texel data to destination
* register */
#define T0_TEXLDP (0x16<<24) /* Same as texld but performs a
* perspective divide of the texture
* coordinate .xyz values by .w before
* sampling. */
#define T0_TEXLDB (0x17<<24) /* Same as texld but biases the
* computed LOD by w. Only S4.6 two's
* comp is used. This implies that a
* float to fixed conversion is
* done. */
#define T0_TEXKILL (0x18<<24) /* Does not perform a sampling
* operation. Simply kills the pixel
* if any channel of the address
* register is < 0.0. */
#define T0_TEXLD (0x15<<24) /* Sample texture using predeclared
* sampler and address, and output
* filtered texel data to destination
* register */
#define T0_TEXLDP (0x16<<24) /* Same as texld but performs a
* perspective divide of the texture
* coordinate .xyz values by .w before
* sampling. */
#define T0_TEXLDB (0x17<<24) /* Same as texld but biases the
* computed LOD by w. Only S4.6 two's
* comp is used. This implies that a
* float to fixed conversion is
* done. */
#define T0_TEXKILL (0x18<<24) /* Does not perform a sampling
* operation. Simply kills the pixel
* if any channel of the address
* register is < 0.0. */
#define T0_DEST_TYPE_SHIFT 19
/* Allow: R, OC, OD, U */
/* Note: U (unpreserved) regs do not retain their values between
@@ -627,18 +629,18 @@
*/
#define T0_DEST_NR_SHIFT 14
/* Allow R: 0..15, OC,OD: 0..0, U: 0..2 */
#define T0_SAMPLER_NR_SHIFT 0 /* This field ignored for TEXKILL */
#define T0_SAMPLER_NR_SHIFT 0 /* This field ignored for TEXKILL */
#define T0_SAMPLER_NR_MASK (0xf<<0)
#define T1_ADDRESS_REG_TYPE_SHIFT 24 /* Reg to use as texture coord */
#define T1_ADDRESS_REG_TYPE_SHIFT 24 /* Reg to use as texture coord */
/* Allow R, T, OC, OD -- R, OC, OD are 'dependent' reads, new program phase */
#define T1_ADDRESS_REG_NR_SHIFT 17
#define T2_MBZ 0
/* Declaration instructions */
#define D0_DCL (0x19<<24) /* Declare a t (interpolated attrib)
* register or an s (sampler)
* register. */
#define D0_DCL (0x19<<24) /* Declare a t (interpolated attrib)
* register or an s (sampler)
* register. */
#define D0_SAMPLE_TYPE_SHIFT 22
#define D0_SAMPLE_TYPE_2D (0x0<<22)
#define D0_SAMPLE_TYPE_CUBE (0x1<<22)
@@ -695,12 +697,12 @@
#define MAPSURF_4BIT_INDEXED (7<<7)
#define MS3_MT_FORMAT_MASK (0x7 << 3)
#define MS3_MT_FORMAT_SHIFT 3
#define MT_4BIT_IDX_ARGB8888 (7<<3) /* SURFACE_4BIT_INDEXED */
#define MT_8BIT_I8 (0<<3) /* SURFACE_8BIT */
#define MT_4BIT_IDX_ARGB8888 (7<<3) /* SURFACE_4BIT_INDEXED */
#define MT_8BIT_I8 (0<<3) /* SURFACE_8BIT */
#define MT_8BIT_L8 (1<<3)
#define MT_8BIT_A8 (4<<3)
#define MT_8BIT_MONO8 (5<<3)
#define MT_16BIT_RGB565 (0<<3) /* SURFACE_16BIT */
#define MT_16BIT_RGB565 (0<<3) /* SURFACE_16BIT */
#define MT_16BIT_ARGB1555 (1<<3)
#define MT_16BIT_ARGB4444 (2<<3)
#define MT_16BIT_AY88 (3<<3)
@@ -709,7 +711,7 @@
#define MT_16BIT_I16 (7<<3)
#define MT_16BIT_L16 (8<<3)
#define MT_16BIT_A16 (9<<3)
#define MT_32BIT_ARGB8888 (0<<3) /* SURFACE_32BIT */
#define MT_32BIT_ARGB8888 (0<<3) /* SURFACE_32BIT */
#define MT_32BIT_ABGR8888 (1<<3)
#define MT_32BIT_XRGB8888 (2<<3)
#define MT_32BIT_XBGR8888 (3<<3)
@@ -725,11 +727,11 @@
#define MT_32BIT_xI824 (0xD<<3)
#define MT_32BIT_xA824 (0xE<<3)
#define MT_32BIT_xL824 (0xF<<3)
#define MT_422_YCRCB_SWAPY (0<<3) /* SURFACE_422 */
#define MT_422_YCRCB_SWAPY (0<<3) /* SURFACE_422 */
#define MT_422_YCRCB_NORMAL (1<<3)
#define MT_422_YCRCB_SWAPUV (2<<3)
#define MT_422_YCRCB_SWAPUVY (3<<3)
#define MT_COMPRESS_DXT1 (0<<3) /* SURFACE_COMPRESSED */
#define MT_COMPRESS_DXT1 (0<<3) /* SURFACE_COMPRESSED */
#define MT_COMPRESS_DXT2_3 (1<<3)
#define MT_COMPRESS_DXT4_5 (2<<3)
#define MT_COMPRESS_FXT1 (3<<3)
@@ -751,7 +753,7 @@
#define MS4_MIP_LAYOUT_LEGACY (0<<8)
#define MS4_MIP_LAYOUT_BELOW_LPT (0<<8)
#define MS4_MIP_LAYOUT_RIGHT_LPT (1<<8)
#define MS4_VOLUME_DEPTH_SHIFT 0
#define MS4_VOLUME_DEPTH_SHIFT 0
#define MS4_VOLUME_DEPTH_MASK (0xff<<0)
/* p244 */
@@ -779,7 +781,7 @@
#define FILTER_4X4_1 3
#define FILTER_4X4_2 4
#define FILTER_4X4_FLAT 5
#define FILTER_6X5_MONO 6 /* XXX - check */
#define FILTER_6X5_MONO 6 /* XXX - check */
#define SS2_MIN_FILTER_SHIFT 14
#define SS2_MIN_FILTER_MASK (0x7<<14)
#define SS2_LOD_BIAS_SHIFT 5
@@ -826,10 +828,14 @@
#define ST1_ENABLE (1<<16)
#define ST1_MASK (0xffff)
#define _3DSTATE_DEFAULT_Z ((0x3<<29)|(0x1d<<24)|(0x98<<16))
#define _3DSTATE_DEFAULT_DIFFUSE ((0x3<<29)|(0x1d<<24)|(0x99<<16))
#define _3DSTATE_DEFAULT_SPECULAR ((0x3<<29)|(0x1d<<24)|(0x9a<<16))
#define MI_FLUSH ((0<<29)|(4<<23))
#define FLUSH_MAP_CACHE (1<<0)
#define FLUSH_RENDER_CACHE (1<<1)
#define MI_FLUSH ((0<<29)|(4<<23))
#define FLUSH_MAP_CACHE (1<<0)
#define INHIBIT_FLUSH_RENDER_CACHE (1<<2)
#endif

File diff suppressed because it is too large

View File

@@ -45,79 +45,14 @@
/**
* Allocate space for and load the mesa images into the texture memory block.
* This will happen before drawing with a new texture, or drawing with a
* texture after it was swapped out or teximaged again.
*/
intelTextureObjectPtr i915AllocTexObj( struct gl_texture_object *texObj )
static void
i915TexEnv(GLcontext * ctx, GLenum target,
GLenum pname, const GLfloat * param)
{
i915TextureObjectPtr t = CALLOC_STRUCT( i915_texture_object );
if ( !t )
return NULL;
texObj->DriverData = t;
t->intel.base.tObj = texObj;
t->intel.dirty = I915_UPLOAD_TEX_ALL;
make_empty_list( &t->intel.base );
return &t->intel;
}
static void i915TexParameter( GLcontext *ctx, GLenum target,
struct gl_texture_object *tObj,
GLenum pname, const GLfloat *params )
{
i915TextureObjectPtr t = (i915TextureObjectPtr) tObj->DriverData;
switch (pname) {
case GL_TEXTURE_MIN_FILTER:
case GL_TEXTURE_MAG_FILTER:
case GL_TEXTURE_MAX_ANISOTROPY_EXT:
case GL_TEXTURE_WRAP_S:
case GL_TEXTURE_WRAP_T:
case GL_TEXTURE_WRAP_R:
case GL_TEXTURE_BORDER_COLOR:
t->intel.dirty = I915_UPLOAD_TEX_ALL;
break;
case GL_TEXTURE_COMPARE_MODE:
t->intel.dirty = I915_UPLOAD_TEX_ALL;
break;
case GL_TEXTURE_COMPARE_FUNC:
t->intel.dirty = I915_UPLOAD_TEX_ALL;
break;
case GL_TEXTURE_BASE_LEVEL:
case GL_TEXTURE_MAX_LEVEL:
case GL_TEXTURE_MIN_LOD:
case GL_TEXTURE_MAX_LOD:
/* The i915 and its successors can do a lot of this without
* reloading the textures. A project for someone?
*/
intelFlush( ctx );
driSwapOutTextureObject( (driTextureObject *) t );
t->intel.dirty = I915_UPLOAD_TEX_ALL;
break;
default:
return;
}
}
static void i915TexEnv( GLcontext *ctx, GLenum target,
GLenum pname, const GLfloat *param )
{
i915ContextPtr i915 = I915_CONTEXT( ctx );
GLuint unit = ctx->Texture.CurrentUnit;
struct i915_context *i915 = I915_CONTEXT(ctx);
switch (pname) {
case GL_TEXTURE_ENV_COLOR: /* Should be a tracked param */
case GL_TEXTURE_ENV_COLOR: /* Should be a tracked param */
case GL_TEXTURE_ENV_MODE:
case GL_COMBINE_RGB:
case GL_COMBINE_ALPHA:
@@ -135,19 +70,21 @@ static void i915TexEnv( GLcontext *ctx, GLenum target,
case GL_OPERAND2_ALPHA:
case GL_RGB_SCALE:
case GL_ALPHA_SCALE:
i915->tex_program.translated = 0;
i915->tex_program.translated = 0;
break;
case GL_TEXTURE_LOD_BIAS: {
int b = (int) ((*param) * 16.0);
if (b > 255) b = 255;
if (b < -256) b = -256;
I915_STATECHANGE(i915, I915_UPLOAD_TEX(unit));
i915->state.Tex[unit][I915_TEXREG_SS2] &= ~SS2_LOD_BIAS_MASK;
i915->state.Tex[unit][I915_TEXREG_SS2] |=
((b << SS2_LOD_BIAS_SHIFT) & SS2_LOD_BIAS_MASK);
break;
}
case GL_TEXTURE_LOD_BIAS:{
GLuint unit = ctx->Texture.CurrentUnit;
GLint b = (int) ((*param) * 16.0);
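/* The LOD bias apparently lives in SS2 as a signed fixed-point field with
 * four fractional bits, hence the * 16.0 scale and the clamp below to
 * [-256, 255], i.e. roughly [-16.0, +16.0).
 */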
if (b > 255)
b = 255;
if (b < -256)
b = -256;
I915_STATECHANGE(i915, I915_UPLOAD_TEX(unit));
i915->lodbias_ss2[unit] =
((b << SS2_LOD_BIAS_SHIFT) & SS2_LOD_BIAS_MASK);
break;
}
default:
break;
@@ -155,33 +92,22 @@ static void i915TexEnv( GLcontext *ctx, GLenum target,
}
static void i915BindTexture( GLcontext *ctx, GLenum target,
struct gl_texture_object *texObj )
static void
i915BindTexture(GLcontext * ctx, GLenum target,
struct gl_texture_object *texobj)
{
i915TextureObjectPtr tex;
if (!texObj->DriverData)
i915AllocTexObj( texObj );
tex = (i915TextureObjectPtr)texObj->DriverData;
if (tex->lastTarget != texObj->Target) {
tex->intel.dirty = I915_UPLOAD_TEX_ALL;
tex->lastTarget = texObj->Target;
}
/* Need this if image format changes between bound textures.
* Could try to short-circuit by checking for differences in
* state between incoming and outgoing textures:
*/
I915_CONTEXT(ctx)->tex_program.translated = 0;
I915_CONTEXT(ctx)->tex_program.translated = 0;
}
void i915InitTextureFuncs( struct dd_function_table *functions )
void
i915InitTextureFuncs(struct dd_function_table *functions)
{
functions->BindTexture = i915BindTexture;
functions->TexEnv = i915TexEnv;
functions->TexParameter = i915TexParameter;
}

View File

@@ -0,0 +1,380 @@
/**************************************************************************
*
* Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
* IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
/* Code to lay out images in a mipmap tree for i915 and i945
* respectively.
*/
#include "intel_mipmap_tree.h"
#include "macros.h"
#include "intel_context.h"
#define FILE_DEBUG_FLAG DEBUG_TEXTURE
static GLint initial_offsets[6][2] = { {0, 0},
{0, 2},
{1, 0},
{1, 2},
{1, 1},
{1, 3}
};
static GLint step_offsets[6][2] = { {0, 2},
{0, 2},
{-1, 2},
{-1, 2},
{-1, 1},
{-1, 1}
};
static GLuint
minify(GLuint d)
{
return MAX2(1, d >> 1);
}
GLboolean
i915_miptree_layout(struct intel_mipmap_tree * mt)
{
GLint level;
switch (mt->target) {
case GL_TEXTURE_CUBE_MAP:{
const GLuint dim = mt->width0;
GLuint face;
/* double pitch for cube layouts */
mt->pitch = ((dim * mt->cpp * 2 + 3) & ~3) / mt->cpp;
mt->total_height = dim * 4;
for (level = mt->first_level; level <= mt->last_level; level++)
intel_miptree_set_level_info(mt, level, 6,
0, 0,
mt->pitch, mt->total_height, 1);
for (face = 0; face < 6; face++) {
GLuint x = initial_offsets[face][0] * dim;
GLuint y = initial_offsets[face][1] * dim;
GLuint d = dim;
for (level = mt->first_level; level <= mt->last_level; level++) {
intel_miptree_set_image_offset(mt, level, face, x, y);
if (d == 0)
_mesa_printf("cube mipmap %d/%d (%d..%d) is 0x0\n",
face, level, mt->first_level, mt->last_level);
d >>= 1;
x += step_offsets[face][0] * d;
y += step_offsets[face][1] * d;
}
}
break;
}
case GL_TEXTURE_3D:{
GLuint width = mt->width0;
GLuint height = mt->height0;
GLuint depth = mt->depth0;
GLuint stack_height = 0;
/* Calculate the size of a single slice.
*/
mt->pitch = ((mt->width0 * mt->cpp + 3) & ~3) / mt->cpp;
/* XXX: hardware expects/requires 9 levels at minimum.
*/
for (level = mt->first_level; level <= MAX2(8, mt->last_level);
level++) {
intel_miptree_set_level_info(mt, level, 1, 0, mt->total_height,
width, height, depth);
stack_height += MAX2(2, height);
width = minify(width);
height = minify(height);
depth = minify(depth);
}
/* Fixup depth image_offsets:
*/
depth = mt->depth0;
for (level = mt->first_level; level <= mt->last_level; level++) {
GLuint i;
for (i = 0; i < depth; i++)
intel_miptree_set_image_offset(mt, level, i,
0, i * stack_height);
depth = minify(depth);
}
/* Multiply slice size by texture depth for total size. It's
* remarkable how wasteful of memory the i915 texture layouts
* are. They are largely fixed in the i945.
*/
mt->total_height = stack_height * mt->depth0;
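/* Example of the cost (assuming first_level is 0): a 16x16x16 mipmapped
 * texture walks at least 9 levels, giving stack_height =
 * 16+8+4+2+2+2+2+2+2 = 40 rows, so total_height = 40 * 16 = 640 rows for
 * a single volume texture.
 */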
break;
}
default:{
GLuint width = mt->width0;
GLuint height = mt->height0;
GLuint img_height;
mt->pitch = ((mt->width0 * mt->cpp + 3) & ~3) / mt->cpp;
mt->total_height = 0;
for (level = mt->first_level; level <= mt->last_level; level++) {
intel_miptree_set_level_info(mt, level, 1,
0, mt->total_height,
width, height, 1);
if (mt->compressed)
img_height = MAX2(1, height / 4);
else
img_height = MAX2(2, height);
mt->total_height += img_height;
mt->total_height += 1;
mt->total_height &= ~1;
width = minify(width);
height = minify(height);
}
break;
}
}
DBG("%s: %dx%dx%d - sz 0x%x\n", __FUNCTION__,
mt->pitch,
mt->total_height, mt->cpp, mt->pitch * mt->total_height * mt->cpp);
return GL_TRUE;
}
GLboolean
i945_miptree_layout(struct intel_mipmap_tree * mt)
{
GLint level;
switch (mt->target) {
case GL_TEXTURE_CUBE_MAP:{
const GLuint dim = mt->width0;
GLuint face;
/* Depending on the size of the largest images, pitch can be
* determined either by the old-style packing of cubemap faces,
* or the final row of 4x4, 2x2 and 1x1 faces below this.
*/
if (dim > 32)
mt->pitch = ((dim * mt->cpp * 2 + 3) & ~3) / mt->cpp;
else
mt->pitch = 14 * 8;
mt->total_height = dim * 4 + 4;
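/* Worked example: for a 64x64 cube map, dim > 32 so the pitch is the
 * old-style 2 * 64 = 128 texels and total_height = 4 * 64 + 4 = 260 rows.
 * For dim <= 32 the fixed 14 * 8 = 112 texel pitch is, per the comment
 * above, what the bottom row of packed 4x4/2x2/1x1 faces needs.
 */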
/* Set all the levels to effectively occupy the whole rectangular region.
*/
for (level = mt->first_level; level <= mt->last_level; level++)
intel_miptree_set_level_info(mt, level, 6,
0, 0,
mt->pitch, mt->total_height, 1);
for (face = 0; face < 6; face++) {
GLuint x = initial_offsets[face][0] * dim;
GLuint y = initial_offsets[face][1] * dim;
GLuint d = dim;
if (dim == 4 && face >= 4) {
y = mt->total_height - 4;
x = (face - 4) * 8;
}
else if (dim < 4) {
y = mt->total_height - 4;
x = face * 8;
}
for (level = mt->first_level; level <= mt->last_level; level++) {
intel_miptree_set_image_offset(mt, level, face, x, y);
d >>= 1;
switch (d) {
case 4:
switch (face) {
case FACE_POS_X:
case FACE_NEG_X:
x += step_offsets[face][0] * d;
y += step_offsets[face][1] * d;
break;
case FACE_POS_Y:
case FACE_NEG_Y:
y += 12;
x -= 8;
break;
case FACE_POS_Z:
case FACE_NEG_Z:
y = mt->total_height - 4;
x = (face - 4) * 8;
break;
}
case 2:
y = mt->total_height - 4;
x = 16 + face * 8;
break;
case 1:
x += 48;
break;
default:
x += step_offsets[face][0] * d;
y += step_offsets[face][1] * d;
break;
}
}
}
break;
}
case GL_TEXTURE_3D:{
GLuint width = mt->width0;
GLuint height = mt->height0;
GLuint depth = mt->depth0;
GLuint pack_x_pitch, pack_x_nr;
GLuint pack_y_pitch;
GLuint level;
mt->pitch = ((mt->width0 * mt->cpp + 3) & ~3) / mt->cpp;
mt->total_height = 0;
pack_y_pitch = MAX2(mt->height0, 2);
pack_x_pitch = mt->pitch;
pack_x_nr = 1;
for (level = mt->first_level; level <= mt->last_level; level++) {
GLuint nr_images = mt->target == GL_TEXTURE_3D ? depth : 6;
GLint x = 0;
GLint y = 0;
GLint q, j;
intel_miptree_set_level_info(mt, level, nr_images,
0, mt->total_height,
width, height, depth);
for (q = 0; q < nr_images;) {
for (j = 0; j < pack_x_nr && q < nr_images; j++, q++) {
intel_miptree_set_image_offset(mt, level, q, x, y);
x += pack_x_pitch;
}
x = 0;
y += pack_y_pitch;
}
mt->total_height += y;
if (pack_x_pitch > 4) {
pack_x_pitch >>= 1;
pack_x_nr <<= 1;
assert(pack_x_pitch * pack_x_nr <= mt->pitch);
}
if (pack_y_pitch > 2) {
pack_y_pitch >>= 1;
}
width = minify(width);
height = minify(height);
depth = minify(depth);
}
break;
}
case GL_TEXTURE_1D:
case GL_TEXTURE_2D:
case GL_TEXTURE_RECTANGLE_ARB:{
GLuint x = 0;
GLuint y = 0;
GLuint width = mt->width0;
GLuint height = mt->height0;
GLint align_h = 2;
mt->pitch = ((mt->width0 * mt->cpp + 3) & ~3) / mt->cpp;
mt->total_height = 0;
for (level = mt->first_level; level <= mt->last_level; level++) {
GLuint img_height;
intel_miptree_set_level_info(mt, level, 1,
x, y,
width,
mt->compressed ? height/4 : height, 1);
if (mt->compressed)
img_height = MAX2(1, height / 4);
else
img_height = MAX2(align_h, height);
/* LPT change: step right after second mipmap.
*/
if (level == mt->first_level + 1) {
x += mt->pitch / 2;
x = (x + 3) & ~3;
}
else {
y += img_height;
y += align_h - 1;
y &= ~(align_h - 1);
}
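/* Net effect of the branch above: mip 0 sits at the top left, mip 1
 * directly below it, and mips 2 and smaller stack vertically in a second
 * column starting half a pitch to the right.
 */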
/* Because the images are packed better, the final offset
* might not be the maximal one:
*/
mt->total_height = MAX2(mt->total_height, y);
width = minify(width);
height = minify(height);
}
break;
}
default:
_mesa_problem(NULL, "Unexpected tex target in i945_miptree_layout()");
}
DBG("%s: %dx%dx%d - sz 0x%x\n", __FUNCTION__,
mt->pitch,
mt->total_height, mt->cpp, mt->pitch * mt->total_height * mt->cpp);
return GL_TRUE;
}

View File

@@ -1,671 +0,0 @@
/**************************************************************************
*
* Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
* IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#include <strings.h>
#include "glheader.h"
#include "macros.h"
#include "enums.h"
#include "tnl/t_context.h"
#include "intel_batchbuffer.h"
#include "i915_reg.h"
#include "i915_context.h"
#include "i915_program.h"
static GLuint translate_tex_src_bit( struct i915_fragment_program *p,
GLubyte bit )
{
switch (bit) {
case TEXTURE_1D_BIT: return D0_SAMPLE_TYPE_2D;
case TEXTURE_2D_BIT: return D0_SAMPLE_TYPE_2D;
case TEXTURE_RECT_BIT: return D0_SAMPLE_TYPE_2D;
case TEXTURE_3D_BIT: return D0_SAMPLE_TYPE_VOLUME;
case TEXTURE_CUBE_BIT: return D0_SAMPLE_TYPE_CUBE;
default: i915_program_error(p, "TexSrcBit"); return 0;
}
}
static GLuint get_source( struct i915_fragment_program *p,
GLenum src, GLuint unit )
{
switch (src) {
case GL_TEXTURE:
if (p->src_texture == UREG_BAD) {
/* TODO: Use D0_CHANNEL_XY where possible.
*/
GLuint dim = translate_tex_src_bit( p, p->ctx->Texture.Unit[unit]._ReallyEnabled);
GLuint sampler = i915_emit_decl(p, REG_TYPE_S, unit, dim);
GLuint texcoord = i915_emit_decl(p, REG_TYPE_T, unit, D0_CHANNEL_ALL);
GLuint tmp = i915_get_temp( p );
GLuint op = T0_TEXLD;
if (p->VB->TexCoordPtr[unit]->size == 4)
op = T0_TEXLDP;
p->src_texture = i915_emit_texld( p, tmp, A0_DEST_CHANNEL_ALL,
sampler, texcoord, op );
}
return p->src_texture;
/* Crossbar: */
case GL_TEXTURE0:
case GL_TEXTURE1:
case GL_TEXTURE2:
case GL_TEXTURE3:
case GL_TEXTURE4:
case GL_TEXTURE5:
case GL_TEXTURE6:
case GL_TEXTURE7: {
return UREG_BAD;
}
case GL_CONSTANT:
return i915_emit_const4fv( p, p->ctx->Texture.Unit[unit].EnvColor );
case GL_PRIMARY_COLOR:
return i915_emit_decl(p, REG_TYPE_T, T_DIFFUSE, D0_CHANNEL_ALL);
case GL_PREVIOUS:
default:
i915_emit_decl(p,
GET_UREG_TYPE(p->src_previous),
GET_UREG_NR(p->src_previous), D0_CHANNEL_ALL);
return p->src_previous;
}
}
static GLuint emit_combine_source( struct i915_fragment_program *p,
GLuint mask,
GLuint unit,
GLenum source,
GLenum operand )
{
GLuint arg, src;
src = get_source(p, source, unit);
switch (operand) {
case GL_ONE_MINUS_SRC_COLOR:
/* Get unused tmp,
* Emit tmp = 1.0 + arg.-x-y-z-w
*/
arg = i915_get_temp( p );
return i915_emit_arith( p, A0_ADD, arg, mask, 0,
swizzle(src, ONE, ONE, ONE, ONE ),
negate(src, 1,1,1,1), 0);
case GL_SRC_ALPHA:
if (mask == A0_DEST_CHANNEL_W)
return src;
else
return swizzle( src, W, W, W, W );
case GL_ONE_MINUS_SRC_ALPHA:
/* Get unused tmp,
* Emit tmp = 1.0 + arg.-w-w-w-w
*/
arg = i915_get_temp( p );
return i915_emit_arith( p, A0_ADD, arg, mask, 0,
swizzle(src, ONE, ONE, ONE, ONE ),
negate( swizzle(src,W,W,W,W), 1,1,1,1), 0);
case GL_SRC_COLOR:
default:
return src;
}
}
static int nr_args( GLenum mode )
{
switch (mode) {
case GL_REPLACE: return 1;
case GL_MODULATE: return 2;
case GL_ADD: return 2;
case GL_ADD_SIGNED: return 2;
case GL_INTERPOLATE: return 3;
case GL_SUBTRACT: return 2;
case GL_DOT3_RGB_EXT: return 2;
case GL_DOT3_RGBA_EXT: return 2;
case GL_DOT3_RGB: return 2;
case GL_DOT3_RGBA: return 2;
default: return 0;
}
}
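/* args_match() accepts a combiner setup only when each alpha argument
 * uses the same source as its RGB counterpart and an operand that is the
 * alpha form of the RGB operand; presumably this is what allows the RGB
 * and alpha stages to share the same instructions.
 */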
static GLboolean args_match( struct gl_texture_unit *texUnit )
{
int i, nr = nr_args(texUnit->Combine.ModeRGB);
for (i = 0 ; i < nr ; i++) {
if (texUnit->Combine.SourceA[i] != texUnit->Combine.SourceRGB[i])
return GL_FALSE;
switch(texUnit->Combine.OperandA[i]) {
case GL_SRC_ALPHA:
switch(texUnit->Combine.OperandRGB[i]) {
case GL_SRC_COLOR:
case GL_SRC_ALPHA:
break;
default:
return GL_FALSE;
}
break;
case GL_ONE_MINUS_SRC_ALPHA:
switch(texUnit->Combine.OperandRGB[i]) {
case GL_ONE_MINUS_SRC_COLOR:
case GL_ONE_MINUS_SRC_ALPHA:
break;
default:
return GL_FALSE;
}
break;
default:
return GL_FALSE; /* impossible */
}
}
return GL_TRUE;
}
static GLuint emit_combine( struct i915_fragment_program *p,
GLuint dest,
GLuint mask,
GLuint saturate,
GLuint unit,
GLenum mode,
const GLenum *source,
const GLenum *operand)
{
int tmp, src[3], nr = nr_args(mode);
int i;
for (i = 0; i < nr; i++)
src[i] = emit_combine_source( p, mask, unit, source[i], operand[i] );
switch (mode) {
case GL_REPLACE:
if (mask == A0_DEST_CHANNEL_ALL && !saturate)
return src[0];
else
return i915_emit_arith( p, A0_MOV, dest, mask, saturate, src[0], 0, 0 );
case GL_MODULATE:
return i915_emit_arith( p, A0_MUL, dest, mask, saturate,
src[0], src[1], 0 );
case GL_ADD:
return i915_emit_arith( p, A0_ADD, dest, mask, saturate,
src[0], src[1], 0 );
case GL_ADD_SIGNED:
/* tmp = arg0 + arg1
* result = tmp + -.5
*/
tmp = i915_emit_const1f(p, .5);
tmp = negate(swizzle(tmp,X,X,X,X),1,1,1,1);
i915_emit_arith( p, A0_ADD, dest, mask, 0, src[0], src[1], 0 );
i915_emit_arith( p, A0_ADD, dest, mask, saturate, dest, tmp, 0 );
return dest;
case GL_INTERPOLATE: /* TWO INSTRUCTIONS */
/* Arg0 * (Arg2) + Arg1 * (1-Arg2)
*
* Arg0*Arg2 + Arg1 - Arg1*Arg2
*
* tmp = Arg0*Arg2 + Arg1,
* result = (-Arg1)Arg2 + tmp
*/
tmp = i915_get_temp( p );
i915_emit_arith( p, A0_MAD, tmp, mask, 0, src[0], src[2], src[1] );
i915_emit_arith( p, A0_MAD, dest, mask, saturate,
negate(src[1], 1,1,1,1), src[2], tmp );
return dest;
case GL_SUBTRACT:
/* negate src[1] */
return i915_emit_arith( p, A0_ADD, dest, mask, saturate, src[0],
negate(src[1],1,1,1,1), 0 );
case GL_DOT3_RGBA:
case GL_DOT3_RGBA_EXT:
case GL_DOT3_RGB_EXT:
case GL_DOT3_RGB: {
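/* The DOT3 modes first expand each source from [0,1] to [-1,1] with a
 * single MAD (tmp = 2*src - 1) and then combine the two expanded vectors
 * with a DP3, as the instructions below show.
 */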
GLuint tmp0 = i915_get_temp( p );
GLuint tmp1 = i915_get_temp( p );
GLuint neg1 = negate(swizzle(i915_emit_const1f(p, 1),X,X,X,X), 1,1,1,1);
GLuint two = swizzle(i915_emit_const1f(p, 2),X,X,X,X);
i915_emit_arith( p, A0_MAD, tmp0, A0_DEST_CHANNEL_ALL, 0,
two, src[0], neg1);
if (src[0] == src[1])
tmp1 = tmp0;
else
i915_emit_arith( p, A0_MAD, tmp1, A0_DEST_CHANNEL_ALL, 0,
two, src[1], neg1);
i915_emit_arith( p, A0_DP3, dest, mask, saturate, tmp0, tmp1, 0);
return dest;
}
default:
return src[0];
}
}
static GLuint get_dest( struct i915_fragment_program *p, int unit )
{
if (p->ctx->_TriangleCaps & DD_SEPARATE_SPECULAR)
return i915_get_temp( p );
else if (unit != p->last_tex_stage)
return i915_get_temp( p );
else
return UREG(REG_TYPE_OC, 0);
}
static GLuint emit_texenv( struct i915_fragment_program *p, int unit )
{
struct gl_texture_unit *texUnit = &p->ctx->Texture.Unit[unit];
GLenum envMode = texUnit->EnvMode;
struct gl_texture_object *tObj = texUnit->_Current;
GLenum format = tObj->Image[0][tObj->BaseLevel]->_BaseFormat;
GLuint saturate = unit < p->last_tex_stage ? A0_DEST_SATURATE : 0;
switch(envMode) {
case GL_BLEND: {
const int cf = get_source(p, GL_PREVIOUS, unit);
const int cc = get_source(p, GL_CONSTANT, unit);
const int cs = get_source(p, GL_TEXTURE, unit);
const int out = get_dest(p, unit);
if (format == GL_INTENSITY) {
/* cv = cf(1 - cs) + cc.cs
* cv = cf - cf.cs + cc.cs
*/
/* u[2] = MAD( -cf * cs + cf )
* cv = MAD( cc * cs + u[2] )
*/
i915_emit_arith( p, A0_MAD, out, A0_DEST_CHANNEL_ALL, 0,
negate(cf,1,1,1,1), cs, cf );
i915_emit_arith( p, A0_MAD, out, A0_DEST_CHANNEL_ALL, saturate,
cc, cs, out );
return out;
} else {
/* cv = cf(1 - cs) + cc.cs
* cv = cf - cf.cs + cc.cs
* av = af.as
*/
/* u[2] = MAD( cf.-x-y-zw * cs.xyzw + cf.xyz0 )
* oC = MAD( cc.xyz0 * cs.xyz0 + u[2].xyzw )
*/
i915_emit_arith( p, A0_MAD, out, A0_DEST_CHANNEL_ALL, 0,
negate(cf,1,1,1,0),
cs,
swizzle(cf,X,Y,Z,ZERO) );
i915_emit_arith( p, A0_MAD, out, A0_DEST_CHANNEL_ALL, saturate,
swizzle(cc,X,Y,Z,ZERO),
swizzle(cs,X,Y,Z,ZERO),
out );
return out;
}
}
case GL_DECAL: {
if (format == GL_RGB ||
format == GL_RGBA) {
int cf = get_source( p, GL_PREVIOUS, unit );
int cs = get_source( p, GL_TEXTURE, unit );
int out = get_dest(p, unit);
/* cv = cf(1-as) + cs.as
* cv = cf.(-as) + cf + cs.as
* av = af
*/
/* u[2] = mad( cf.xyzw * cs.-w-w-w1 + cf.xyz0 )
* oc = mad( cs.xyz0 * cs.www0 + u[2].xyzw )
*/
i915_emit_arith( p, A0_MAD, out, A0_DEST_CHANNEL_ALL, 0,
cf,
negate(swizzle(cs,W,W,W,ONE),1,1,1,0),
swizzle(cf,X,Y,Z,ZERO) );
i915_emit_arith( p, A0_MAD, out, A0_DEST_CHANNEL_ALL, saturate,
swizzle(cs,X,Y,Z,ZERO),
swizzle(cs,W,W,W,ZERO),
out );
return out;
}
else {
return get_source( p, GL_PREVIOUS, unit );
}
}
case GL_REPLACE: {
const int cs = get_source( p, GL_TEXTURE, unit ); /* saturated */
switch (format) {
case GL_ALPHA: {
const int cf = get_source( p, GL_PREVIOUS, unit ); /* saturated */
i915_emit_arith( p, A0_MOV, cs, A0_DEST_CHANNEL_XYZ, 0, cf, 0, 0 );
return cs;
}
case GL_RGB:
case GL_LUMINANCE: {
const int cf = get_source( p, GL_PREVIOUS, unit ); /* saturated */
i915_emit_arith( p, A0_MOV, cs, A0_DEST_CHANNEL_W, 0, cf, 0, 0 );
return cs;
}
default:
return cs;
}
}
case GL_MODULATE: {
const int cf = get_source( p, GL_PREVIOUS, unit );
const int cs = get_source( p, GL_TEXTURE, unit );
const int out = get_dest(p, unit);
switch (format) {
case GL_ALPHA:
i915_emit_arith( p, A0_MUL, out, A0_DEST_CHANNEL_ALL, saturate,
swizzle(cs, ONE, ONE, ONE, W), cf, 0 );
break;
default:
i915_emit_arith( p, A0_MUL, out, A0_DEST_CHANNEL_ALL, saturate,
cs, cf, 0 );
break;
}
return out;
}
case GL_ADD: {
int cf = get_source( p, GL_PREVIOUS, unit );
int cs = get_source( p, GL_TEXTURE, unit );
const int out = get_dest( p, unit );
if (format == GL_INTENSITY) {
/* output-color.rgba = add( incoming, u[1] )
*/
i915_emit_arith( p, A0_ADD, out, A0_DEST_CHANNEL_ALL, saturate,
cs, cf, 0 );
return out;
}
else {
/* cv.xyz = cf.xyz + cs.xyz
* cv.w = cf.w * cs.w
*
* cv.xyzw = MAD( cf.111w * cs.xyzw + cf.xyz0 )
*/
i915_emit_arith( p, A0_MAD, out, A0_DEST_CHANNEL_ALL, saturate,
swizzle(cf,ONE,ONE,ONE,W),
cs,
swizzle(cf,X,Y,Z,ZERO) );
return out;
}
break;
}
case GL_COMBINE: {
GLuint rgb_shift, alpha_shift, out, shift;
GLuint dest = get_dest(p, unit);
/* The EXT version of the DOT3 extension does not support the
* scale factor, but the ARB version (and the version in OpenGL
* 1.3) does.
*/
switch (texUnit->Combine.ModeRGB) {
case GL_DOT3_RGB_EXT:
alpha_shift = texUnit->Combine.ScaleShiftA;
rgb_shift = 0;
break;
case GL_DOT3_RGBA_EXT:
alpha_shift = 0;
rgb_shift = 0;
break;
default:
rgb_shift = texUnit->Combine.ScaleShiftRGB;
alpha_shift = texUnit->Combine.ScaleShiftA;
break;
}
/* Emit the RGB and A combine ops
*/
if (texUnit->Combine.ModeRGB == texUnit->Combine.ModeA &&
args_match( texUnit )) {
out = emit_combine( p, dest, A0_DEST_CHANNEL_ALL, saturate,
unit,
texUnit->Combine.ModeRGB,
texUnit->Combine.SourceRGB,
texUnit->Combine.OperandRGB );
}
else if (texUnit->Combine.ModeRGB == GL_DOT3_RGBA_EXT ||
texUnit->Combine.ModeRGB == GL_DOT3_RGBA) {
out = emit_combine( p, dest, A0_DEST_CHANNEL_ALL, saturate,
unit,
texUnit->Combine.ModeRGB,
texUnit->Combine.SourceRGB,
texUnit->Combine.OperandRGB );
}
else {
/* Need to do something to stop from re-emitting identical
* argument calculations here:
*/
out = emit_combine( p, dest, A0_DEST_CHANNEL_XYZ, saturate,
unit,
texUnit->Combine.ModeRGB,
texUnit->Combine.SourceRGB,
texUnit->Combine.OperandRGB );
out = emit_combine( p, dest, A0_DEST_CHANNEL_W, saturate,
unit,
texUnit->Combine.ModeA,
texUnit->Combine.SourceA,
texUnit->Combine.OperandA );
}
/* Deal with the final shift:
*/
if (alpha_shift || rgb_shift) {
if (rgb_shift == alpha_shift) {
shift = i915_emit_const1f(p, 1<<rgb_shift);
shift = swizzle(shift,X,X,X,X);
}
else {
shift = i915_emit_const2f(p, 1<<rgb_shift, 1<<alpha_shift);
shift = swizzle(shift,X,X,X,Y);
}
return i915_emit_arith( p, A0_MUL, dest, A0_DEST_CHANNEL_ALL,
saturate, out, shift, 0 );
}
return out;
}
default:
return get_source(p, GL_PREVIOUS, 0);
}
}
static void emit_program_fini( struct i915_fragment_program *p )
{
int cf = get_source( p, GL_PREVIOUS, 0 );
int out = UREG( REG_TYPE_OC, 0 );
if (p->ctx->_TriangleCaps & DD_SEPARATE_SPECULAR) {
/* Emit specular add.
*/
GLuint s = i915_emit_decl(p, REG_TYPE_T, T_SPECULAR, D0_CHANNEL_ALL);
i915_emit_arith( p, A0_ADD, out, A0_DEST_CHANNEL_ALL, 0, cf,
swizzle(s, X,Y,Z,ZERO), 0 );
}
else if (cf != out) {
/* Will wind up in here if no texture enabled or a couple of
* other scenarios (GL_REPLACE for instance).
*/
i915_emit_arith( p, A0_MOV, out, A0_DEST_CHANNEL_ALL, 0, cf, 0, 0 );
}
}
static void i915EmitTextureProgram( i915ContextPtr i915 )
{
GLcontext *ctx = &i915->intel.ctx;
struct i915_fragment_program *p = &i915->tex_program;
GLuint unit;
if (0) fprintf(stderr, "%s\n", __FUNCTION__);
i915_init_program( i915, p );
if (ctx->Texture._EnabledUnits) {
for (unit = 0 ; unit < ctx->Const.MaxTextureUnits ; unit++)
if (ctx->Texture.Unit[unit]._ReallyEnabled) {
p->last_tex_stage = unit;
}
for (unit = 0 ; unit < ctx->Const.MaxTextureUnits; unit++)
if (ctx->Texture.Unit[unit]._ReallyEnabled) {
p->src_previous = emit_texenv( p, unit );
p->src_texture = UREG_BAD;
p->temp_flag = 0xffff000;
p->temp_flag |= 1 << GET_UREG_NR(p->src_previous);
}
}
emit_program_fini( p );
i915_fini_program( p );
i915_upload_program( i915, p );
p->translated = 1;
}
void i915ValidateTextureProgram( i915ContextPtr i915 )
{
intelContextPtr intel = &i915->intel;
GLcontext *ctx = &intel->ctx;
TNLcontext *tnl = TNL_CONTEXT(ctx);
struct vertex_buffer *VB = &tnl->vb;
DECLARE_RENDERINPUTS(index_bitset);
int i, offset;
GLuint s4 = i915->state.Ctx[I915_CTXREG_LIS4] & ~S4_VFMT_MASK;
GLuint s2 = S2_TEXCOORD_NONE;
RENDERINPUTS_COPY( index_bitset, tnl->render_inputs_bitset );
/* Important:
*/
VB->AttribPtr[VERT_ATTRIB_POS] = VB->NdcPtr;
intel->vertex_attr_count = 0;
intel->coloroffset = 0;
intel->specoffset = 0;
offset = 0;
if (i915->vertex_fog == I915_FOG_PIXEL) {
EMIT_ATTR( _TNL_ATTRIB_POS, EMIT_4F_VIEWPORT, S4_VFMT_XYZW, 16 );
RENDERINPUTS_CLEAR( index_bitset, _TNL_ATTRIB_FOG );
}
else if (RENDERINPUTS_TEST_RANGE( index_bitset, _TNL_FIRST_TEX, _TNL_LAST_TEX )) {
EMIT_ATTR( _TNL_ATTRIB_POS, EMIT_4F_VIEWPORT, S4_VFMT_XYZW, 16 );
}
else {
EMIT_ATTR( _TNL_ATTRIB_POS, EMIT_3F_VIEWPORT, S4_VFMT_XYZ, 12 );
}
/* How undefined is undefined? */
if (RENDERINPUTS_TEST( index_bitset, _TNL_ATTRIB_POINTSIZE )) {
EMIT_ATTR( _TNL_ATTRIB_POINTSIZE, EMIT_1F, S4_VFMT_POINT_WIDTH, 4 );
}
intel->coloroffset = offset / 4;
EMIT_ATTR( _TNL_ATTRIB_COLOR0, EMIT_4UB_4F_BGRA, S4_VFMT_COLOR, 4 );
if (RENDERINPUTS_TEST( index_bitset, _TNL_ATTRIB_COLOR1 ) ||
RENDERINPUTS_TEST( index_bitset, _TNL_ATTRIB_FOG )) {
if (RENDERINPUTS_TEST( index_bitset, _TNL_ATTRIB_COLOR1 )) {
intel->specoffset = offset / 4;
EMIT_ATTR( _TNL_ATTRIB_COLOR1, EMIT_3UB_3F_BGR, S4_VFMT_SPEC_FOG, 3 );
} else
EMIT_PAD( 3 );
if (RENDERINPUTS_TEST( index_bitset, _TNL_ATTRIB_FOG ))
EMIT_ATTR( _TNL_ATTRIB_FOG, EMIT_1UB_1F, S4_VFMT_SPEC_FOG, 1 );
else
EMIT_PAD( 1 );
}
if (RENDERINPUTS_TEST_RANGE( index_bitset, _TNL_FIRST_TEX, _TNL_LAST_TEX )) {
for (i = 0; i < 8; i++) {
if (RENDERINPUTS_TEST( index_bitset, _TNL_ATTRIB_TEX(i) )) {
int sz = VB->TexCoordPtr[i]->size;
s2 &= ~S2_TEXCOORD_FMT(i, S2_TEXCOORD_FMT0_MASK);
s2 |= S2_TEXCOORD_FMT(i, SZ_TO_HW(sz));
EMIT_ATTR( _TNL_ATTRIB_TEX0+i, EMIT_SZ(sz), 0, sz * 4 );
}
}
}
/* Only need to change the vertex emit code if there has been a
* statechange to a new hardware vertex format:
*/
if (s2 != i915->state.Ctx[I915_CTXREG_LIS2] ||
s4 != i915->state.Ctx[I915_CTXREG_LIS4]) {
I915_STATECHANGE( i915, I915_UPLOAD_CTX );
i915->tex_program.translated = 0;
/* Must do this *after* statechange, so as not to affect
* buffered vertices reliant on the old state:
*/
intel->vertex_size = _tnl_install_attrs( ctx,
intel->vertex_attrs,
intel->vertex_attr_count,
intel->ViewportMatrix.m, 0 );
intel->vertex_size >>= 2;
i915->state.Ctx[I915_CTXREG_LIS2] = s2;
i915->state.Ctx[I915_CTXREG_LIS4] = s4;
assert(intel->vtbl.check_vertex_size( intel, intel->vertex_size ));
}
if (!i915->tex_program.translated ||
i915->last_ReallyEnabled != ctx->Texture._EnabledUnits) {
i915EmitTextureProgram( i915 );
i915->last_ReallyEnabled = ctx->Texture._EnabledUnits;
}
}
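For reference, a minimal sketch (not part of the diff) of the fixed-function texture environment state that i915EmitTextureProgram()/emit_texenv() translate into i915 fragment instructions. The enums are the standard OpenGL 1.3 / ARB_texture_env_combine ones; the wrapper function name is purely illustrative.

#include <GL/gl.h>

/* Illustrative only: a GL_COMBINE environment that emit_texenv() above
 * would turn into a single MUL plus a final scale of 1 << ScaleShiftRGB.
 */
static void setup_combine_modulate_x2(void)
{
   glTexEnvi(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_COMBINE);
   glTexEnvi(GL_TEXTURE_ENV, GL_COMBINE_RGB, GL_MODULATE);
   glTexEnvi(GL_TEXTURE_ENV, GL_SOURCE0_RGB, GL_TEXTURE);
   glTexEnvi(GL_TEXTURE_ENV, GL_OPERAND0_RGB, GL_SRC_COLOR);
   glTexEnvi(GL_TEXTURE_ENV, GL_SOURCE1_RGB, GL_PREVIOUS);
   glTexEnvi(GL_TEXTURE_ENV, GL_OPERAND1_RGB, GL_SRC_COLOR);
   glTexEnvf(GL_TEXTURE_ENV, GL_RGB_SCALE, 2.0f);   /* Combine.ScaleShiftRGB == 1 */
}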

File diff suppressed because it is too large.


@@ -37,112 +37,138 @@
#include "tnl/t_vertex.h"
#include "intel_batchbuffer.h"
#include "intel_tex.h"
#include "intel_regions.h"
#include "i915_reg.h"
#include "i915_context.h"
static void i915_render_start( intelContextPtr intel )
static void
i915_render_start(struct intel_context *intel)
{
GLcontext *ctx = &intel->ctx;
i915ContextPtr i915 = I915_CONTEXT(intel);
struct i915_context *i915 = i915_context(&intel->ctx);
if (ctx->FragmentProgram._Active)
i915ValidateFragmentProgram( i915 );
else
i915ValidateTextureProgram( i915 );
i915ValidateFragmentProgram(i915);
}
static void i915_reduced_primitive_state( intelContextPtr intel,
GLenum rprim )
static void
i915_reduced_primitive_state(struct intel_context *intel, GLenum rprim)
{
i915ContextPtr i915 = I915_CONTEXT(intel);
GLuint st1 = i915->state.Stipple[I915_STPREG_ST1];
struct i915_context *i915 = i915_context(&intel->ctx);
GLuint st1 = i915->state.Stipple[I915_STPREG_ST1];
st1 &= ~ST1_ENABLE;
st1 &= ~ST1_ENABLE;
switch (rprim) {
case GL_TRIANGLES:
if (intel->ctx.Polygon.StippleFlag &&
intel->hw_stipple)
st1 |= ST1_ENABLE;
break;
case GL_LINES:
case GL_POINTS:
default:
break;
}
switch (rprim) {
case GL_TRIANGLES:
if (intel->ctx.Polygon.StippleFlag && intel->hw_stipple)
st1 |= ST1_ENABLE;
break;
case GL_LINES:
case GL_POINTS:
default:
break;
}
i915->intel.reduced_primitive = rprim;
i915->intel.reduced_primitive = rprim;
if (st1 != i915->state.Stipple[I915_STPREG_ST1]) {
I915_STATECHANGE(i915, I915_UPLOAD_STIPPLE);
i915->state.Stipple[I915_STPREG_ST1] = st1;
}
if (st1 != i915->state.Stipple[I915_STPREG_ST1]) {
INTEL_FIREVERTICES(intel);
I915_STATECHANGE(i915, I915_UPLOAD_STIPPLE);
i915->state.Stipple[I915_STPREG_ST1] = st1;
}
}
/* Pull apart the vertex format registers and figure out how large a
* vertex is supposed to be.
*/
static GLboolean i915_check_vertex_size( intelContextPtr intel,
GLuint expected )
static GLboolean
i915_check_vertex_size(struct intel_context *intel, GLuint expected)
{
i915ContextPtr i915 = I915_CONTEXT(intel);
struct i915_context *i915 = i915_context(&intel->ctx);
int lis2 = i915->current->Ctx[I915_CTXREG_LIS2];
int lis4 = i915->current->Ctx[I915_CTXREG_LIS4];
int i, sz = 0;
switch (lis4 & S4_VFMT_XYZW_MASK) {
case S4_VFMT_XY: sz = 2; break;
case S4_VFMT_XYZ: sz = 3; break;
case S4_VFMT_XYW: sz = 3; break;
case S4_VFMT_XYZW: sz = 4; break;
default:
case S4_VFMT_XY:
sz = 2;
break;
case S4_VFMT_XYZ:
sz = 3;
break;
case S4_VFMT_XYW:
sz = 3;
break;
case S4_VFMT_XYZW:
sz = 4;
break;
default:
fprintf(stderr, "no xyzw specified\n");
return 0;
}
if (lis4 & S4_VFMT_SPEC_FOG) sz++;
if (lis4 & S4_VFMT_COLOR) sz++;
if (lis4 & S4_VFMT_DEPTH_OFFSET) sz++;
if (lis4 & S4_VFMT_POINT_WIDTH) sz++;
if (lis4 & S4_VFMT_FOG_PARAM) sz++;
for (i = 0 ; i < 8 ; i++) {
if (lis4 & S4_VFMT_SPEC_FOG)
sz++;
if (lis4 & S4_VFMT_COLOR)
sz++;
if (lis4 & S4_VFMT_DEPTH_OFFSET)
sz++;
if (lis4 & S4_VFMT_POINT_WIDTH)
sz++;
if (lis4 & S4_VFMT_FOG_PARAM)
sz++;
for (i = 0; i < 8; i++) {
switch (lis2 & S2_TEXCOORD_FMT0_MASK) {
case TEXCOORDFMT_2D: sz += 2; break;
case TEXCOORDFMT_3D: sz += 3; break;
case TEXCOORDFMT_4D: sz += 4; break;
case TEXCOORDFMT_1D: sz += 1; break;
case TEXCOORDFMT_2D_16: sz += 1; break;
case TEXCOORDFMT_4D_16: sz += 2; break;
case TEXCOORDFMT_NOT_PRESENT: break;
case TEXCOORDFMT_2D:
sz += 2;
break;
case TEXCOORDFMT_3D:
sz += 3;
break;
case TEXCOORDFMT_4D:
sz += 4;
break;
case TEXCOORDFMT_1D:
sz += 1;
break;
case TEXCOORDFMT_2D_16:
sz += 1;
break;
case TEXCOORDFMT_4D_16:
sz += 2;
break;
case TEXCOORDFMT_NOT_PRESENT:
break;
default:
fprintf(stderr, "bad texcoord fmt %d\n", i);
return GL_FALSE;
fprintf(stderr, "bad texcoord fmt %d\n", i);
return GL_FALSE;
}
lis2 >>= S2_TEXCOORD_FMT1_SHIFT;
}
if (sz != expected)
if (sz != expected)
fprintf(stderr, "vertex size mismatch %d/%d\n", sz, expected);
return sz == expected;
}
static void i915_emit_invarient_state( intelContextPtr intel )
static void
i915_emit_invarient_state(struct intel_context *intel)
{
BATCH_LOCALS;
BEGIN_BATCH( 200 );
BEGIN_BATCH(200, 0);
OUT_BATCH(_3DSTATE_AA_CMD |
AA_LINE_ECAAR_WIDTH_ENABLE |
AA_LINE_ECAAR_WIDTH_1_0 |
AA_LINE_REGION_WIDTH_ENABLE |
AA_LINE_REGION_WIDTH_1_0);
AA_LINE_ECAAR_WIDTH_ENABLE |
AA_LINE_ECAAR_WIDTH_1_0 |
AA_LINE_REGION_WIDTH_ENABLE | AA_LINE_REGION_WIDTH_1_0);
OUT_BATCH(_3DSTATE_DFLT_DIFFUSE_CMD);
OUT_BATCH(0);
@@ -155,35 +181,27 @@ static void i915_emit_invarient_state( intelContextPtr intel )
/* Don't support texture crossbar yet */
OUT_BATCH(_3DSTATE_COORD_SET_BINDINGS |
CSB_TCB(0, 0) |
CSB_TCB(1, 1) |
CSB_TCB(2, 2) |
CSB_TCB(3, 3) |
CSB_TCB(4, 4) |
CSB_TCB(5, 5) |
CSB_TCB(6, 6) |
CSB_TCB(7, 7));
CSB_TCB(0, 0) |
CSB_TCB(1, 1) |
CSB_TCB(2, 2) |
CSB_TCB(3, 3) |
CSB_TCB(4, 4) | CSB_TCB(5, 5) | CSB_TCB(6, 6) | CSB_TCB(7, 7));
OUT_BATCH(_3DSTATE_RASTER_RULES_CMD |
ENABLE_POINT_RASTER_RULE |
OGL_POINT_RASTER_RULE |
ENABLE_LINE_STRIP_PROVOKE_VRTX |
ENABLE_TRI_FAN_PROVOKE_VRTX |
LINE_STRIP_PROVOKE_VRTX(1) |
TRI_FAN_PROVOKE_VRTX(2) |
ENABLE_TEXKILL_3D_4D |
TEXKILL_4D);
ENABLE_POINT_RASTER_RULE |
OGL_POINT_RASTER_RULE |
ENABLE_LINE_STRIP_PROVOKE_VRTX |
ENABLE_TRI_FAN_PROVOKE_VRTX |
LINE_STRIP_PROVOKE_VRTX(1) |
TRI_FAN_PROVOKE_VRTX(2) | ENABLE_TEXKILL_3D_4D | TEXKILL_4D);
/* Need to initialize this to zero.
*/
OUT_BATCH(_3DSTATE_LOAD_STATE_IMMEDIATE_1 |
I1_LOAD_S(3) |
(1));
OUT_BATCH(_3DSTATE_LOAD_STATE_IMMEDIATE_1 | I1_LOAD_S(3) | (1));
OUT_BATCH(0);
/* XXX: Use this */
OUT_BATCH(_3DSTATE_SCISSOR_ENABLE_CMD |
DISABLE_SCISSOR_RECT);
OUT_BATCH(_3DSTATE_SCISSOR_ENABLE_CMD | DISABLE_SCISSOR_RECT);
OUT_BATCH(_3DSTATE_SCISSOR_RECT_0_CMD);
OUT_BATCH(0);
@@ -191,29 +209,22 @@ static void i915_emit_invarient_state( intelContextPtr intel )
OUT_BATCH(_3DSTATE_DEPTH_SUBRECT_DISABLE);
OUT_BATCH(_3DSTATE_LOAD_INDIRECT | 0); /* disable indirect state */
OUT_BATCH(_3DSTATE_LOAD_INDIRECT | 0); /* disable indirect state */
OUT_BATCH(0);
/* Don't support twosided stencil yet */
OUT_BATCH(_3DSTATE_BACKFACE_STENCIL_OPS |
BFO_ENABLE_STENCIL_TWO_SIDE |
0 );
OUT_BATCH(_3DSTATE_BACKFACE_STENCIL_OPS | BFO_ENABLE_STENCIL_TWO_SIDE | 0);
ADVANCE_BATCH();
}
#define emit( intel, state, size ) \
do { \
int k; \
BEGIN_BATCH( (size) / sizeof(GLuint)); \
for (k = 0 ; k < (size) / sizeof(GLuint) ; k++) \
OUT_BATCH((state)[k]); \
ADVANCE_BATCH(); \
} while (0);
#define emit(intel, state, size ) \
intel_batchbuffer_data(intel->batch, state, size, 0 )
static GLuint get_dirty( struct i915_hw_state *state )
static GLuint
get_dirty(struct i915_hw_state *state)
{
GLuint dirty;
@@ -224,12 +235,12 @@ static GLuint get_dirty( struct i915_hw_state *state )
if (dirty & I915_UPLOAD_TEX_ALL)
state->emitted &= ~I915_UPLOAD_TEX_ALL;
dirty = state->active & ~state->emitted;
return dirty;
}
static GLuint get_state_size( struct i915_hw_state *state )
static GLuint
get_state_size(struct i915_hw_state *state)
{
GLuint dirty = get_dirty(state);
GLuint i;
@@ -238,28 +249,28 @@ static GLuint get_state_size( struct i915_hw_state *state )
if (dirty & I915_UPLOAD_CTX)
sz += sizeof(state->Ctx);
if (dirty & I915_UPLOAD_BUFFERS)
if (dirty & I915_UPLOAD_BUFFERS)
sz += sizeof(state->Buffer);
if (dirty & I915_UPLOAD_STIPPLE)
sz += sizeof(state->Stipple);
if (dirty & I915_UPLOAD_FOG)
if (dirty & I915_UPLOAD_FOG)
sz += sizeof(state->Fog);
if (dirty & I915_UPLOAD_TEX_ALL) {
int nr = 0;
for (i = 0; i < I915_TEX_UNITS; i++)
if (dirty & I915_UPLOAD_TEX(i))
nr++;
for (i = 0; i < I915_TEX_UNITS; i++)
if (dirty & I915_UPLOAD_TEX(i))
nr++;
sz += (2+nr*3) * sizeof(GLuint) * 2;
sz += (2 + nr * 3) * sizeof(GLuint) * 2;
}
if (dirty & I915_UPLOAD_CONSTANTS)
if (dirty & I915_UPLOAD_CONSTANTS)
sz += state->ConstantSize * sizeof(GLuint);
if (dirty & I915_UPLOAD_PROGRAM)
if (dirty & I915_UPLOAD_PROGRAM)
sz += state->ProgramSize * sizeof(GLuint);
return sz;
@@ -268,42 +279,83 @@ static GLuint get_state_size( struct i915_hw_state *state )
/* Push the state into the sarea and/or texture memory.
*/
static void i915_emit_state( intelContextPtr intel )
static void
i915_emit_state(struct intel_context *intel)
{
i915ContextPtr i915 = I915_CONTEXT(intel);
struct i915_context *i915 = i915_context(&intel->ctx);
struct i915_hw_state *state = i915->current;
int i;
GLuint dirty = get_dirty(state);
GLuint counter = intel->batch.counter;
GLuint dirty;
BATCH_LOCALS;
if (intel->batch.space < get_state_size(state)) {
intelFlushBatch(intel, GL_TRUE);
dirty = get_dirty(state);
counter = intel->batch.counter;
}
/* We don't hold the lock at this point, so we want to make sure that
* there won't be a buffer wrap.
*
* It might be better to talk about explicit places where
* scheduling is allowed, rather than assume that it is whenever a
* batchbuffer fills up.
*/
intel_batchbuffer_require_space(intel->batch, get_state_size(state), 0);
if (VERBOSE)
/* Do this here as we may have flushed the batchbuffer above,
* causing more state to be dirty!
*/
dirty = get_dirty(state);
if (INTEL_DEBUG & DEBUG_STATE)
fprintf(stderr, "%s dirty: %x\n", __FUNCTION__, dirty);
if (dirty & I915_UPLOAD_INVARIENT) {
if (INTEL_DEBUG & DEBUG_STATE)
fprintf(stderr, "I915_UPLOAD_INVARIENT:\n");
i915_emit_invarient_state(intel);
}
if (dirty & I915_UPLOAD_CTX) {
if (VERBOSE) fprintf(stderr, "I915_UPLOAD_CTX:\n");
emit( i915, state->Ctx, sizeof(state->Ctx) );
if (INTEL_DEBUG & DEBUG_STATE)
fprintf(stderr, "I915_UPLOAD_CTX:\n");
emit(intel, state->Ctx, sizeof(state->Ctx));
}
if (dirty & I915_UPLOAD_BUFFERS) {
if (VERBOSE) fprintf(stderr, "I915_UPLOAD_BUFFERS:\n");
emit( i915, state->Buffer, sizeof(state->Buffer) );
if (INTEL_DEBUG & DEBUG_STATE)
fprintf(stderr, "I915_UPLOAD_BUFFERS:\n");
BEGIN_BATCH(I915_DEST_SETUP_SIZE + 2, 0);
OUT_BATCH(state->Buffer[I915_DESTREG_CBUFADDR0]);
OUT_BATCH(state->Buffer[I915_DESTREG_CBUFADDR1]);
OUT_RELOC(state->draw_region->buffer,
DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_WRITE,
DRM_BO_MASK_MEM | DRM_BO_FLAG_WRITE,
state->draw_region->draw_offset);
if (state->depth_region) {
OUT_BATCH(state->Buffer[I915_DESTREG_DBUFADDR0]);
OUT_BATCH(state->Buffer[I915_DESTREG_DBUFADDR1]);
OUT_RELOC(state->depth_region->buffer,
DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_WRITE,
DRM_BO_MASK_MEM | DRM_BO_FLAG_WRITE,
state->depth_region->draw_offset);
}
OUT_BATCH(state->Buffer[I915_DESTREG_DV0]);
OUT_BATCH(state->Buffer[I915_DESTREG_DV1]);
OUT_BATCH(state->Buffer[I915_DESTREG_SENABLE]);
OUT_BATCH(state->Buffer[I915_DESTREG_SR0]);
OUT_BATCH(state->Buffer[I915_DESTREG_SR1]);
OUT_BATCH(state->Buffer[I915_DESTREG_SR2]);
ADVANCE_BATCH();
}
if (dirty & I915_UPLOAD_STIPPLE) {
if (VERBOSE) fprintf(stderr, "I915_UPLOAD_STIPPLE:\n");
emit( i915, state->Stipple, sizeof(state->Stipple) );
if (INTEL_DEBUG & DEBUG_STATE)
fprintf(stderr, "I915_UPLOAD_STIPPLE:\n");
emit(intel, state->Stipple, sizeof(state->Stipple));
}
if (dirty & I915_UPLOAD_FOG) {
if (VERBOSE) fprintf(stderr, "I915_UPLOAD_FOG:\n");
emit( i915, state->Fog, sizeof(state->Fog) );
if (INTEL_DEBUG & DEBUG_STATE)
fprintf(stderr, "I915_UPLOAD_FOG:\n");
emit(intel, state->Fog, sizeof(state->Fog));
}
/* Combine all the dirty texture state into a single command to
@@ -312,142 +364,186 @@ static void i915_emit_state( intelContextPtr intel )
if (dirty & I915_UPLOAD_TEX_ALL) {
int nr = 0;
for (i = 0; i < I915_TEX_UNITS; i++)
if (dirty & I915_UPLOAD_TEX(i))
nr++;
for (i = 0; i < I915_TEX_UNITS; i++)
if (dirty & I915_UPLOAD_TEX(i))
nr++;
BEGIN_BATCH(2+nr*3);
OUT_BATCH(_3DSTATE_MAP_STATE | (3*nr));
BEGIN_BATCH(2 + nr * 3, 0);
OUT_BATCH(_3DSTATE_MAP_STATE | (3 * nr));
OUT_BATCH((dirty & I915_UPLOAD_TEX_ALL) >> I915_UPLOAD_TEX_0_SHIFT);
for (i = 0 ; i < I915_TEX_UNITS ; i++)
if (dirty & I915_UPLOAD_TEX(i)) {
OUT_BATCH(state->Tex[i][I915_TEXREG_MS2]);
OUT_BATCH(state->Tex[i][I915_TEXREG_MS3]);
OUT_BATCH(state->Tex[i][I915_TEXREG_MS4]);
}
for (i = 0; i < I915_TEX_UNITS; i++)
if (dirty & I915_UPLOAD_TEX(i)) {
if (state->tex_buffer[i]) {
OUT_RELOC(state->tex_buffer[i],
DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_READ,
DRM_BO_MASK_MEM | DRM_BO_FLAG_READ,
state->tex_offset[i]);
}
else {
assert(i == 0);
assert(state == &i915->meta);
OUT_BATCH(0);
}
OUT_BATCH(state->Tex[i][I915_TEXREG_MS3]);
OUT_BATCH(state->Tex[i][I915_TEXREG_MS4]);
}
ADVANCE_BATCH();
BEGIN_BATCH(2+nr*3);
OUT_BATCH(_3DSTATE_SAMPLER_STATE | (3*nr));
BEGIN_BATCH(2 + nr * 3, 0);
OUT_BATCH(_3DSTATE_SAMPLER_STATE | (3 * nr));
OUT_BATCH((dirty & I915_UPLOAD_TEX_ALL) >> I915_UPLOAD_TEX_0_SHIFT);
for (i = 0 ; i < I915_TEX_UNITS ; i++)
if (dirty & I915_UPLOAD_TEX(i)) {
OUT_BATCH(state->Tex[i][I915_TEXREG_SS2]);
OUT_BATCH(state->Tex[i][I915_TEXREG_SS3]);
OUT_BATCH(state->Tex[i][I915_TEXREG_SS4]);
}
for (i = 0; i < I915_TEX_UNITS; i++)
if (dirty & I915_UPLOAD_TEX(i)) {
OUT_BATCH(state->Tex[i][I915_TEXREG_SS2]);
OUT_BATCH(state->Tex[i][I915_TEXREG_SS3]);
OUT_BATCH(state->Tex[i][I915_TEXREG_SS4]);
}
ADVANCE_BATCH();
}
if (dirty & I915_UPLOAD_CONSTANTS) {
if (VERBOSE) fprintf(stderr, "I915_UPLOAD_CONSTANTS:\n");
emit( i915, state->Constant, state->ConstantSize * sizeof(GLuint) );
if (INTEL_DEBUG & DEBUG_STATE)
fprintf(stderr, "I915_UPLOAD_CONSTANTS:\n");
emit(intel, state->Constant, state->ConstantSize * sizeof(GLuint));
}
if (dirty & I915_UPLOAD_PROGRAM) {
if (VERBOSE) fprintf(stderr, "I915_UPLOAD_PROGRAM:\n");
if (INTEL_DEBUG & DEBUG_STATE)
fprintf(stderr, "I915_UPLOAD_PROGRAM:\n");
assert((state->Program[0] & 0x1ff)+2 == state->ProgramSize);
emit( i915, state->Program, state->ProgramSize * sizeof(GLuint) );
if (VERBOSE)
i915_disassemble_program( state->Program, state->ProgramSize );
assert((state->Program[0] & 0x1ff) + 2 == state->ProgramSize);
emit(intel, state->Program, state->ProgramSize * sizeof(GLuint));
if (INTEL_DEBUG & DEBUG_STATE)
i915_disassemble_program(state->Program, state->ProgramSize);
}
state->emitted |= dirty;
intel->batch.last_emit_state = counter;
assert(counter == intel->batch.counter);
}
static void i915_destroy_context( intelContextPtr intel )
static void
i915_destroy_context(struct intel_context *intel)
{
_tnl_free_vertices(&intel->ctx);
}
/**
* Set the color buffer drawing region.
* Set the drawing regions for the color and depth/stencil buffers.
* This involves setting the pitch, cpp and buffer ID/location.
* Also set pixel format for color and Z rendering
* Used for setting both regular and meta state.
*/
void
i915_state_draw_region(struct intel_context *intel,
struct i915_hw_state *state,
struct intel_region *color_region,
struct intel_region *depth_region)
{
struct i915_context *i915 = i915_context(&intel->ctx);
GLuint value;
ASSERT(state == &i915->state || state == &i915->meta);
if (state->draw_region != color_region) {
intel_region_release(intel->intelScreen, &state->draw_region);
intel_region_reference(&state->draw_region, color_region);
}
if (state->depth_region != depth_region) {
intel_region_release(intel->intelScreen, &state->depth_region);
intel_region_reference(&state->depth_region, depth_region);
}
/*
* Set stride/cpp values
*/
if (color_region) {
state->Buffer[I915_DESTREG_CBUFADDR0] = _3DSTATE_BUF_INFO_CMD;
state->Buffer[I915_DESTREG_CBUFADDR1] =
(BUF_3D_ID_COLOR_BACK |
BUF_3D_PITCH(color_region->pitch * color_region->cpp) |
BUF_3D_USE_FENCE);
}
if (depth_region) {
state->Buffer[I915_DESTREG_DBUFADDR0] = _3DSTATE_BUF_INFO_CMD;
state->Buffer[I915_DESTREG_DBUFADDR1] =
(BUF_3D_ID_DEPTH |
BUF_3D_PITCH(depth_region->pitch * depth_region->cpp) |
BUF_3D_USE_FENCE);
}
/*
* Compute/set I915_DESTREG_DV1 value
*/
value = (DSTORG_HORT_BIAS(0x8) | /* .5 */
DSTORG_VERT_BIAS(0x8) | /* .5 */
LOD_PRECLAMP_OGL | TEX_DEFAULT_COLOR_OGL);
if (color_region && color_region->cpp == 4) {
value |= DV_PF_8888;
}
else {
value |= (DITHER_FULL_ALWAYS | DV_PF_565);
}
if (depth_region && depth_region->cpp == 4) {
value |= DEPTH_FRMT_24_FIXED_8_OTHER;
}
else {
value |= DEPTH_FRMT_16_FIXED;
}
state->Buffer[I915_DESTREG_DV1] = value;
I915_STATECHANGE(i915, I915_UPLOAD_BUFFERS);
}
static void
i915_set_color_region( intelContextPtr intel, const intelRegion *region)
i915_set_draw_region(struct intel_context *intel,
struct intel_region *color_region,
struct intel_region *depth_region)
{
i915ContextPtr i915 = I915_CONTEXT(intel);
I915_STATECHANGE( i915, I915_UPLOAD_BUFFERS );
i915->state.Buffer[I915_DESTREG_CBUFADDR1] =
(BUF_3D_ID_COLOR_BACK | BUF_3D_PITCH(region->pitch) | BUF_3D_USE_FENCE);
i915->state.Buffer[I915_DESTREG_CBUFADDR2] = region->offset;
struct i915_context *i915 = i915_context(&intel->ctx);
i915_state_draw_region(intel, &i915->state, color_region, depth_region);
}
/**
* specify the z-buffer/stencil region
*/
static void
i915_set_z_region( intelContextPtr intel, const intelRegion *region)
i915_lost_hardware(struct intel_context *intel)
{
i915ContextPtr i915 = I915_CONTEXT(intel);
I915_STATECHANGE( i915, I915_UPLOAD_BUFFERS );
i915->state.Buffer[I915_DESTREG_DBUFADDR1] =
(BUF_3D_ID_DEPTH | BUF_3D_PITCH(region->pitch) | BUF_3D_USE_FENCE);
i915->state.Buffer[I915_DESTREG_DBUFADDR2] = region->offset;
struct i915_context *i915 = i915_context(&intel->ctx);
i915->state.emitted = 0;
}
static GLuint
i915_flush_cmd(void)
{
return MI_FLUSH | FLUSH_MAP_CACHE;
}
static void
i915_assert_not_dirty( struct intel_context *intel )
{
struct i915_context *i915 = i915_context(&intel->ctx);
struct i915_hw_state *state = i915->current;
GLuint dirty = get_dirty(state);
assert(!dirty);
}
/**
* Set both the color and Z/stencil drawing regions.
* Similar to two previous functions, but don't use I915_STATECHANGE()
*/
static void
i915_update_color_z_regions(intelContextPtr intel,
const intelRegion *colorRegion,
const intelRegion *depthRegion)
void
i915InitVtbl(struct i915_context *i915)
{
i915ContextPtr i915 = I915_CONTEXT(intel);
i915->state.Buffer[I915_DESTREG_CBUFADDR1] =
(BUF_3D_ID_COLOR_BACK | BUF_3D_PITCH(colorRegion->pitch) | BUF_3D_USE_FENCE);
i915->state.Buffer[I915_DESTREG_CBUFADDR2] = colorRegion->offset;
i915->state.Buffer[I915_DESTREG_DBUFADDR1] =
(BUF_3D_ID_DEPTH |
BUF_3D_PITCH(depthRegion->pitch) | /* pitch in bytes */
BUF_3D_USE_FENCE);
i915->state.Buffer[I915_DESTREG_DBUFADDR2] = depthRegion->offset;
}
static void i915_lost_hardware( intelContextPtr intel )
{
I915_CONTEXT(intel)->state.emitted = 0;
}
static void i915_emit_flush( intelContextPtr intel )
{
BATCH_LOCALS;
BEGIN_BATCH(2);
OUT_BATCH( MI_FLUSH | FLUSH_MAP_CACHE | FLUSH_RENDER_CACHE );
OUT_BATCH( 0 );
ADVANCE_BATCH();
}
void i915InitVtbl( i915ContextPtr i915 )
{
i915->intel.vtbl.alloc_tex_obj = i915AllocTexObj;
i915->intel.vtbl.check_vertex_size = i915_check_vertex_size;
i915->intel.vtbl.clear_with_tris = i915ClearWithTris;
i915->intel.vtbl.rotate_window = i915RotateWindow;
i915->intel.vtbl.destroy = i915_destroy_context;
i915->intel.vtbl.emit_invarient_state = i915_emit_invarient_state;
i915->intel.vtbl.emit_state = i915_emit_state;
i915->intel.vtbl.lost_hardware = i915_lost_hardware;
i915->intel.vtbl.reduced_primitive_state = i915_reduced_primitive_state;
i915->intel.vtbl.render_start = i915_render_start;
i915->intel.vtbl.set_color_region = i915_set_color_region;
i915->intel.vtbl.set_z_region = i915_set_z_region;
i915->intel.vtbl.update_color_z_regions = i915_update_color_z_regions;
i915->intel.vtbl.set_draw_region = i915_set_draw_region;
i915->intel.vtbl.update_texture_state = i915UpdateTextureState;
i915->intel.vtbl.emit_flush = i915_emit_flush;
i915->intel.vtbl.flush_cmd = i915_flush_cmd;
i915->intel.vtbl.assert_not_dirty = i915_assert_not_dirty;
}
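As a usage note, a hypothetical sketch of how the generic intel_context code dispatches through this vtbl; the caller name and exact ordering are assumptions, since the real call sites are not part of the hunks shown here.

/* Hypothetical caller, for illustration only. */
static void example_emit_through_vtbl(struct intel_context *intel)
{
   /* Let the chip-specific driver validate its derived state ... */
   intel->vtbl.render_start(intel);

   /* ... emit whatever is still marked dirty into the batchbuffer ... */
   intel->vtbl.emit_state(intel);

   /* ... and verify that nothing was left dirty after the emit. */
   intel->vtbl.assert_not_dirty(intel);
}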

File diff suppressed because it is too large.


@@ -1,126 +1,123 @@
/**************************************************************************
*
* Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
* IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#ifndef INTEL_BATCHBUFFER_H
#define INTEL_BATCHBUFFER_H
#include "intel_context.h"
#include "intel_ioctl.h"
#include "mtypes.h"
#include "dri_bufmgr.h"
struct intel_context;
#define BATCH_LOCALS GLubyte *batch_ptr;
#define BATCH_SZ 4096
#define BATCH_RESERVED 16
/* #define VERBOSE 0 */
#ifndef VERBOSE
extern int VERBOSE;
#endif
#define MAX_RELOCS 100
#define INTEL_BATCH_NO_CLIPRECTS 0x1
#define INTEL_BATCH_CLIPRECTS 0x2
#define BEGIN_BATCH(n) \
do { \
if (VERBOSE) fprintf(stderr, \
"BEGIN_BATCH(%ld) in %s, %d dwords free\n", \
((unsigned long)n), __FUNCTION__, \
intel->batch.space/4); \
if (intel->batch.space < (n)*4) \
intelFlushBatch(intel, GL_TRUE); \
if (intel->batch.space == intel->batch.size) intel->batch.func = __FUNCTION__; \
batch_ptr = intel->batch.ptr; \
} while (0)
#define OUT_BATCH(n) \
do { \
*(GLuint *)batch_ptr = (n); \
if (VERBOSE) fprintf(stderr, " -- %08x at %s/%d\n", (n), __FILE__, __LINE__); \
batch_ptr += 4; \
} while (0)
#define ADVANCE_BATCH() \
do { \
if (VERBOSE) fprintf(stderr, "ADVANCE_BATCH()\n"); \
intel->batch.space -= (batch_ptr - intel->batch.ptr); \
intel->batch.ptr = batch_ptr; \
assert(intel->batch.space >= 0); \
} while(0)
extern void intelInitBatchBuffer( GLcontext *ctx );
extern void intelDestroyBatchBuffer( GLcontext *ctx );
extern void intelStartInlinePrimitive( intelContextPtr intel, GLuint prim );
extern void intelWrapInlinePrimitive( intelContextPtr intel );
extern void intelRestartInlinePrimitive( intelContextPtr intel );
extern GLuint *intelEmitInlinePrimitiveLocked(intelContextPtr intel,
int primitive, int dwords,
int vertex_size);
extern void intelCopyBuffer( const __DRIdrawablePrivate *dpriv,
const drm_clip_rect_t *rect);
extern void intelClearWithBlit(GLcontext *ctx, GLbitfield mask, GLboolean all,
GLint cx1, GLint cy1, GLint cw, GLint ch);
extern void intelEmitCopyBlitLocked( intelContextPtr intel,
GLuint cpp,
GLshort src_pitch,
GLuint src_offset,
GLshort dst_pitch,
GLuint dst_offset,
GLshort srcx, GLshort srcy,
GLshort dstx, GLshort dsty,
GLshort w, GLshort h );
extern void intelEmitFillBlitLocked( intelContextPtr intel,
GLuint cpp,
GLshort dst_pitch,
GLuint dst_offset,
GLshort x, GLshort y,
GLshort w, GLshort h,
GLuint color );
static __inline GLuint *intelExtendInlinePrimitive( intelContextPtr intel,
GLuint dwords )
struct buffer_reloc
{
GLuint sz = dwords * sizeof(GLuint);
GLuint *ptr;
struct _DriBufferObject *buf;
GLuint offset;
GLuint delta; /* not needed? */
};
if (intel->batch.space < sz) {
intelWrapInlinePrimitive( intel );
/* assert(intel->batch.space >= sz); */
}
struct intel_batchbuffer
{
struct bufmgr *bm;
struct intel_context *intel;
/* assert(intel->prim.primitive != ~0); */
ptr = (GLuint *)intel->batch.ptr;
intel->batch.ptr += sz;
intel->batch.space -= sz;
struct _DriBufferObject *buffer;
struct _DriFenceObject *last_fence;
GLuint flags;
return ptr;
drmBOList list;
GLuint list_count;
GLubyte *map;
GLubyte *ptr;
struct buffer_reloc reloc[MAX_RELOCS];
GLuint nr_relocs;
};
struct intel_batchbuffer *intel_batchbuffer_alloc(struct intel_context
*intel);
void intel_batchbuffer_free(struct intel_batchbuffer *batch);
void intel_batchbuffer_finish(struct intel_batchbuffer *batch);
struct _DriFenceObject *intel_batchbuffer_flush(struct intel_batchbuffer
*batch);
void intel_batchbuffer_reset(struct intel_batchbuffer *batch);
/* Unlike bmBufferData, this currently requires the buffer be mapped.
* Consider it a convenience function wrapping multiple
* intel_buffer_dword() calls.
*/
void intel_batchbuffer_data(struct intel_batchbuffer *batch,
const void *data, GLuint bytes, GLuint flags);
void intel_batchbuffer_release_space(struct intel_batchbuffer *batch,
GLuint bytes);
GLboolean intel_batchbuffer_emit_reloc(struct intel_batchbuffer *batch,
struct _DriBufferObject *buffer,
GLuint flags,
GLuint mask, GLuint offset);
/* Inline functions - might actually be better off with these
* non-inlined. Certainly better off switching all command packets to
* be passed as structs rather than dwords, but that's a little bit of
* work...
*/
static INLINE GLuint
intel_batchbuffer_space(struct intel_batchbuffer *batch)
{
return (BATCH_SZ - BATCH_RESERVED) - (batch->ptr - batch->map);
}
static INLINE void
intel_batchbuffer_emit_dword(struct intel_batchbuffer *batch, GLuint dword)
{
assert(batch->map);
assert(intel_batchbuffer_space(batch) >= 4);
*(GLuint *) (batch->ptr) = dword;
batch->ptr += 4;
}
static INLINE void
intel_batchbuffer_require_space(struct intel_batchbuffer *batch,
GLuint sz, GLuint flags)
{
assert(sz < BATCH_SZ - 8);
if (intel_batchbuffer_space(batch) < sz ||
(batch->flags != 0 && flags != 0 && batch->flags != flags))
intel_batchbuffer_flush(batch);
batch->flags |= flags;
}
/* Here are the crusty old macros, to be removed:
*/
#define BATCH_LOCALS
#define BEGIN_BATCH(n, flags) do { \
assert(!intel->prim.flush); \
intel_batchbuffer_require_space(intel->batch, (n)*4, flags); \
} while (0)
#define OUT_BATCH(d) intel_batchbuffer_emit_dword(intel->batch, d)
#define OUT_RELOC(buf,flags,mask,delta) do { \
assert((delta) >= 0); \
intel_batchbuffer_emit_reloc(intel->batch, buf, flags, mask, delta); \
} while (0)
#define ADVANCE_BATCH() do { } while(0)
#endif
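A minimal usage sketch of the replacement macros, modelled on i915_flush_cmd() and the old i915_emit_flush() removed in the i915_vtbl.c hunk above. MI_FLUSH and FLUSH_MAP_CACHE are assumed to come from intel_reg.h as elsewhere in this diff, and the function name is illustrative.

#include "intel_batchbuffer.h"
#include "intel_reg.h"

static void example_emit_flush(struct intel_context *intel)
{
   BATCH_LOCALS;

   /* Reserve two dwords; this may flush the current batch first. */
   BEGIN_BATCH(2, INTEL_BATCH_NO_CLIPRECTS);
   OUT_BATCH(MI_FLUSH | FLUSH_MAP_CACHE);
   OUT_BATCH(0);                     /* padding dword, as in the old emitter */
   ADVANCE_BATCH();
}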


@@ -0,0 +1,416 @@
/**************************************************************************
*
* Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
*
**************************************************************************/
/*
* Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
*/
#include <xf86drm.h>
#include <stdlib.h>
#include <errno.h>
#include "imports.h"
#include "glthread.h"
#include "dri_bufpool.h"
#include "dri_bufmgr.h"
#include "intel_screen.h"
typedef struct
{
drmMMListHead head;
struct _BPool *parent;
struct _DriFenceObject *fence;
unsigned long start;
int unfenced;
int mapped;
} BBuf;
typedef struct _BPool
{
_glthread_Mutex mutex;
unsigned long bufSize;
unsigned poolSize;
unsigned numFree;
unsigned numTot;
unsigned numDelayed;
unsigned checkDelayed;
drmMMListHead free;
drmMMListHead delayed;
drmMMListHead head;
drmBO kernelBO;
void *virtual;
BBuf *bufs;
} BPool;
static BPool *
createBPool(int fd, unsigned long bufSize, unsigned numBufs, unsigned flags,
unsigned checkDelayed)
{
BPool *p = (BPool *) malloc(sizeof(*p));
BBuf *buf;
int i;
if (!p)
return NULL;
p->bufs = (BBuf *) malloc(numBufs * sizeof(*p->bufs));
if (!p->bufs) {
free(p);
return NULL;
}
DRMINITLISTHEAD(&p->free);
DRMINITLISTHEAD(&p->head);
DRMINITLISTHEAD(&p->delayed);
p->numTot = numBufs;
p->numFree = numBufs;
p->bufSize = bufSize;
p->numDelayed = 0;
p->checkDelayed = checkDelayed;
_glthread_INIT_MUTEX(p->mutex);
if (drmBOCreate(fd, NULL, 0, numBufs * bufSize, NULL, drm_bo_type_dc,
flags, 0, &p->kernelBO)) {
free(p->bufs);
free(p);
return NULL;
}
if (drmBOMap(fd, &p->kernelBO, DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE, 0,
&p->virtual)) {
drmBODestroy(fd, &p->kernelBO);
free(p->bufs);
free(p);
return NULL;
}
/*
* We unmap the buffer so that we can validate it later. Note that this is
* just a synchronizing operation. The buffer will have a virtual mapping
* until it is destroyed.
*/
drmBOUnmap(fd, &p->kernelBO);
buf = p->bufs;
for (i = 0; i < numBufs; ++i) {
buf->parent = p;
buf->fence = NULL;
buf->start = i * bufSize;
buf->mapped = 0;
buf->unfenced = 0;
DRMLISTADDTAIL(&buf->head, &p->free);
buf++;
}
return p;
}
static void
pool_checkFree(BPool * p, int wait)
{
drmMMListHead *list, *prev;
BBuf *buf;
int signaled = 0;
int i;
list = p->delayed.next;
if (p->numDelayed > 3) {
for (i = 0; i < p->numDelayed; i += 3) {
list = list->next;
}
}
prev = list->prev;
for (; list != &p->delayed; list = prev, prev = list->prev) {
buf = DRMLISTENTRY(BBuf, list, head);
if (!signaled) {
if (wait) {
driFenceFinish(buf->fence, DRM_FENCE_TYPE_EXE, 1);
signaled = 1;
}
else {
signaled = driFenceSignaled(buf->fence, DRM_FENCE_TYPE_EXE);
}
}
if (!signaled)
break;
driFenceUnReference(buf->fence);
buf->fence = NULL;
DRMLISTDEL(list);
p->numDelayed--;
DRMLISTADD(list, &p->free);
p->numFree++;
}
}
static void *
pool_create(struct _DriBufferPool *pool,
unsigned long size, unsigned flags, unsigned hint,
unsigned alignment)
{
BPool *p = (BPool *) pool->data;
drmMMListHead *item;
if (alignment && (alignment != 4096))
return NULL;
_glthread_LOCK_MUTEX(p->mutex);
if (p->numFree == 0)
pool_checkFree(p, GL_TRUE);
if (p->numFree == 0) {
fprintf(stderr, "Out of fixed size buffer objects\n");
BM_CKFATAL(-ENOMEM);
}
item = p->free.next;
if (item == &p->free) {
fprintf(stderr, "Fixed size buffer pool corruption\n");
}
DRMLISTDEL(item);
--p->numFree;
_glthread_UNLOCK_MUTEX(p->mutex);
return (void *) DRMLISTENTRY(BBuf, item, head);
}
static int
pool_destroy(struct _DriBufferPool *pool, void *private)
{
BBuf *buf = (BBuf *) private;
BPool *p = buf->parent;
_glthread_LOCK_MUTEX(p->mutex);
if (buf->fence) {
DRMLISTADDTAIL(&buf->head, &p->delayed);
p->numDelayed++;
}
else {
buf->unfenced = 0;
DRMLISTADD(&buf->head, &p->free);
p->numFree++;
}
if ((p->numDelayed % p->checkDelayed) == 0)
pool_checkFree(p, 0);
_glthread_UNLOCK_MUTEX(p->mutex);
return 0;
}
static int
pool_map(struct _DriBufferPool *pool, void *private, unsigned flags,
int hint, void **virtual)
{
BBuf *buf = (BBuf *) private;
BPool *p = buf->parent;
_glthread_LOCK_MUTEX(p->mutex);
/*
* Currently Mesa doesn't have any condition variables to resolve this
* cleanly in a multithreading environment.
* We bail out instead.
*/
if (buf->mapped) {
fprintf(stderr, "Trying to map already mapped buffer object\n");
BM_CKFATAL(-EINVAL);
}
#if 0
if (buf->unfenced && !(hint & DRM_BO_HINT_ALLOW_UNFENCED_MAP)) {
fprintf(stderr, "Trying to map an unfenced buffer object 0x%08x"
" 0x%08x %d\n", hint, flags, buf->start);
BM_CKFATAL(-EINVAL);
}
#endif
if (buf->fence) {
_glthread_UNLOCK_MUTEX(p->mutex);
return -EBUSY;
}
buf->mapped = GL_TRUE;
*virtual = (unsigned char *) p->virtual + buf->start;
_glthread_UNLOCK_MUTEX(p->mutex);
return 0;
}
static int
pool_waitIdle(struct _DriBufferPool *pool, void *private, int lazy)
{
BBuf *buf = (BBuf *) private;
driFenceFinish(buf->fence, 0, lazy);
return 0;
}
static int
pool_unmap(struct _DriBufferPool *pool, void *private)
{
BBuf *buf = (BBuf *) private;
buf->mapped = 0;
return 0;
}
static unsigned long
pool_offset(struct _DriBufferPool *pool, void *private)
{
BBuf *buf = (BBuf *) private;
BPool *p = buf->parent;
return p->kernelBO.offset + buf->start;
}
static unsigned
pool_flags(struct _DriBufferPool *pool, void *private)
{
BPool *p = (BPool *) pool->data;
return p->kernelBO.flags;
}
static unsigned long
pool_size(struct _DriBufferPool *pool, void *private)
{
BPool *p = (BPool *) pool->data;
return p->bufSize;
}
static int
pool_fence(struct _DriBufferPool *pool, void *private,
struct _DriFenceObject *fence)
{
BBuf *buf = (BBuf *) private;
BPool *p = buf->parent;
_glthread_LOCK_MUTEX(p->mutex);
if (buf->fence) {
driFenceUnReference(buf->fence);
}
buf->fence = fence;
buf->unfenced = 0;
driFenceReference(buf->fence);
_glthread_UNLOCK_MUTEX(p->mutex);
return 0;
}
static drmBO *
pool_kernel(struct _DriBufferPool *pool, void *private)
{
BBuf *buf = (BBuf *) private;
BPool *p = buf->parent;
return &p->kernelBO;
}
static int
pool_validate(struct _DriBufferPool *pool, void *private)
{
BBuf *buf = (BBuf *) private;
BPool *p = buf->parent;
_glthread_LOCK_MUTEX(p->mutex);
buf->unfenced = GL_TRUE;
_glthread_UNLOCK_MUTEX(p->mutex);
return 0;
}
static void
pool_takedown(struct _DriBufferPool *pool)
{
BPool *p = (BPool *) pool->data;
/*
* Wait on outstanding fences.
*/
_glthread_LOCK_MUTEX(p->mutex);
while ((p->numFree < p->numTot) && p->numDelayed) {
_glthread_UNLOCK_MUTEX(p->mutex);
sched_yield();
pool_checkFree(p, GL_TRUE);
_glthread_LOCK_MUTEX(p->mutex);
}
drmBODestroy(pool->fd, &p->kernelBO);
free(p->bufs);
_glthread_UNLOCK_MUTEX(p->mutex);
free(p);
free(pool);
}
struct _DriBufferPool *
driBatchPoolInit(int fd, unsigned flags,
unsigned long bufSize,
unsigned numBufs, unsigned checkDelayed)
{
struct _DriBufferPool *pool;
pool = (struct _DriBufferPool *) malloc(sizeof(*pool));
if (!pool)
return NULL;
pool->data = createBPool(fd, bufSize, numBufs, flags, checkDelayed);
pool->fd = fd;
pool->map = &pool_map;
pool->unmap = &pool_unmap;
pool->destroy = &pool_destroy;
pool->offset = &pool_offset;
pool->flags = &pool_flags;
pool->size = &pool_size;
pool->create = &pool_create;
pool->fence = &pool_fence;
pool->kernel = &pool_kernel;
pool->validate = &pool_validate;
pool->waitIdle = &pool_waitIdle;
pool->setstatic = NULL;
pool->takeDown = &pool_takedown;
return pool;
}
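A minimal creation sketch with made-up pool dimensions (the real caller and its sizes are not part of the hunks shown here); the flag values are the ones this file already uses for its kernel buffer object.

#include <xf86drm.h>
#include "dri_bufpool.h"

static struct _DriBufferPool *example_create_batch_pool(int fd)
{
   /* 64 buffers of 4096 bytes in TT memory, scanning the delayed-free
    * list on every 5th delayed release.
    */
   return driBatchPoolInit(fd,
                           DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE |
                           DRM_BO_FLAG_MEM_TT,
                           4096, 64, 5);
}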


@@ -0,0 +1,516 @@
/**************************************************************************
*
* Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
* IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#include <stdio.h>
#include <errno.h>
#include "mtypes.h"
#include "context.h"
#include "enums.h"
#include "intel_batchbuffer.h"
#include "intel_blit.h"
#include "intel_buffers.h"
#include "intel_context.h"
#include "intel_fbo.h"
#include "intel_reg.h"
#include "intel_regions.h"
#include "vblank.h"
#define FILE_DEBUG_FLAG DEBUG_BLIT
/**
* Copy the back color buffer to the front color buffer.
* Used for SwapBuffers().
*/
void
intelCopyBuffer(const __DRIdrawablePrivate * dPriv,
const drm_clip_rect_t * rect)
{
struct intel_context *intel;
GLboolean missed_target;
int64_t ust;
DBG("%s\n", __FUNCTION__);
assert(dPriv);
intel = intelScreenContext(dPriv->driScreenPriv->private);
if (!intel)
return;
if (intel->last_swap_fence) {
driFenceFinish(intel->last_swap_fence, DRM_FENCE_TYPE_EXE, GL_TRUE);
driFenceUnReference(intel->last_swap_fence);
intel->last_swap_fence = NULL;
}
intel->last_swap_fence = intel->first_swap_fence;
intel->first_swap_fence = NULL;
if (!rect) {
driWaitForVBlank(dPriv, &intel->vbl_seq, intel->vblank_flags,
&missed_target);
}
/* The LOCK_HARDWARE is required for the cliprects. Buffer offsets
* should work regardless.
*/
LOCK_HARDWARE(intel);
if (intel->driDrawable && intel->driDrawable->numClipRects) {
const intelScreenPrivate *intelScreen = intel->intelScreen;
struct gl_framebuffer *fb
= (struct gl_framebuffer *) dPriv->driverPrivate;
const struct intel_region *frontRegion
= intel_get_rb_region(fb, BUFFER_FRONT_LEFT);
const struct intel_region *backRegion
= intel_get_rb_region(fb, BUFFER_BACK_LEFT);
const int nbox = dPriv->numClipRects;
const drm_clip_rect_t *pbox = dPriv->pClipRects;
const int pitch = frontRegion->pitch;
const int cpp = frontRegion->cpp;
int BR13, CMD;
int i;
ASSERT(fb);
ASSERT(fb->Name == 0); /* Not a user-created FBO */
ASSERT(frontRegion);
ASSERT(backRegion);
ASSERT(frontRegion->pitch == backRegion->pitch);
ASSERT(frontRegion->cpp == backRegion->cpp);
if (cpp == 2) {
BR13 = (pitch * cpp) | (0xCC << 16) | (1 << 24);
CMD = XY_SRC_COPY_BLT_CMD;
}
else {
BR13 = (pitch * cpp) | (0xCC << 16) | (1 << 24) | (1 << 25);
CMD = (XY_SRC_COPY_BLT_CMD | XY_SRC_COPY_BLT_WRITE_ALPHA |
XY_SRC_COPY_BLT_WRITE_RGB);
}
for (i = 0; i < nbox; i++, pbox++) {
drm_clip_rect_t box;
if (pbox->x1 > pbox->x2 ||
pbox->y1 > pbox->y2 ||
pbox->x2 > intelScreen->width || pbox->y2 > intelScreen->height)
continue;
box = *pbox;
if (rect) {
if (rect->x1 > box.x1)
box.x1 = rect->x1;
if (rect->y1 > box.y1)
box.y1 = rect->y1;
if (rect->x2 < box.x2)
box.x2 = rect->x2;
if (rect->y2 < box.y2)
box.y2 = rect->y2;
if (box.x1 > box.x2 || box.y1 > box.y2)
continue;
}
BEGIN_BATCH(8, INTEL_BATCH_NO_CLIPRECTS);
OUT_BATCH(CMD);
OUT_BATCH(BR13);
OUT_BATCH((pbox->y1 << 16) | pbox->x1);
OUT_BATCH((pbox->y2 << 16) | pbox->x2);
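/* Destination/source selection follows the page-flip state: with
 * pf_current_page == 0 the blit writes the front region and reads the
 * back one, otherwise the roles are swapped, so the copy lands in
 * whichever buffer is currently being displayed.
 */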
if (intel->sarea->pf_current_page == 0)
OUT_RELOC(frontRegion->buffer,
DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_WRITE,
DRM_BO_MASK_MEM | DRM_BO_FLAG_WRITE, 0);
else
OUT_RELOC(backRegion->buffer,
DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_WRITE,
DRM_BO_MASK_MEM | DRM_BO_FLAG_WRITE, 0);
OUT_BATCH((pbox->y1 << 16) | pbox->x1);
OUT_BATCH(BR13 & 0xffff);
if (intel->sarea->pf_current_page == 0)
OUT_RELOC(backRegion->buffer,
DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_READ,
DRM_BO_MASK_MEM | DRM_BO_FLAG_READ, 0);
else
OUT_RELOC(frontRegion->buffer,
DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_READ,
DRM_BO_MASK_MEM | DRM_BO_FLAG_READ, 0);
ADVANCE_BATCH();
}
if (intel->first_swap_fence)
driFenceUnReference(intel->first_swap_fence);
intel->first_swap_fence = intel_batchbuffer_flush(intel->batch);
driFenceReference(intel->first_swap_fence);
}
UNLOCK_HARDWARE(intel);
if (!rect) {
intel->swap_count++;
(*dri_interface->getUST) (&ust);
if (missed_target) {
intel->swap_missed_count++;
intel->swap_missed_ust = ust - intel->swap_ust;
}
intel->swap_ust = ust;
}
}
void
intelEmitFillBlit(struct intel_context *intel,
GLuint cpp,
GLshort dst_pitch,
struct _DriBufferObject *dst_buffer,
GLuint dst_offset,
GLshort x, GLshort y, GLshort w, GLshort h, GLuint color)
{
GLuint BR13, CMD;
BATCH_LOCALS;
dst_pitch *= cpp;
switch (cpp) {
case 1:
case 2:
case 3:
BR13 = dst_pitch | (0xF0 << 16) | (1 << 24);
CMD = XY_COLOR_BLT_CMD;
break;
case 4:
BR13 = dst_pitch | (0xF0 << 16) | (1 << 24) | (1 << 25);
CMD = (XY_COLOR_BLT_CMD | XY_COLOR_BLT_WRITE_ALPHA |
XY_COLOR_BLT_WRITE_RGB);
break;
default:
return;
}
DBG("%s dst:buf(%p)/%d+%d %d,%d sz:%dx%d\n",
__FUNCTION__, dst_buffer, dst_pitch, dst_offset, x, y, w, h);
BEGIN_BATCH(6, INTEL_BATCH_NO_CLIPRECTS);
OUT_BATCH(CMD);
OUT_BATCH(BR13);
OUT_BATCH((y << 16) | x);
OUT_BATCH(((y + h) << 16) | (x + w));
OUT_RELOC(dst_buffer, DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_WRITE,
DRM_BO_MASK_MEM | DRM_BO_FLAG_WRITE, dst_offset);
OUT_BATCH(color);
ADVANCE_BATCH();
}
/* Copy BitBlt
*/
void
intelEmitCopyBlit(struct intel_context *intel,
GLuint cpp,
GLshort src_pitch,
struct _DriBufferObject *src_buffer,
GLuint src_offset,
GLshort dst_pitch,
struct _DriBufferObject *dst_buffer,
GLuint dst_offset,
GLshort src_x, GLshort src_y,
GLshort dst_x, GLshort dst_y, GLshort w, GLshort h)
{
GLuint CMD, BR13;
int dst_y2 = dst_y + h;
int dst_x2 = dst_x + w;
BATCH_LOCALS;
DBG("%s src:buf(%p)/%d+%d %d,%d dst:buf(%p)/%d+%d %d,%d sz:%dx%d\n",
__FUNCTION__,
src_buffer, src_pitch, src_offset, src_x, src_y,
dst_buffer, dst_pitch, dst_offset, dst_x, dst_y, w, h);
src_pitch *= cpp;
dst_pitch *= cpp;
switch (cpp) {
case 1:
case 2:
case 3:
BR13 = (((GLint) dst_pitch) & 0xffff) | (0xCC << 16) | (1 << 24);
CMD = XY_SRC_COPY_BLT_CMD;
break;
case 4:
BR13 =
(((GLint) dst_pitch) & 0xffff) | (0xCC << 16) | (1 << 24) | (1 <<
25);
CMD =
(XY_SRC_COPY_BLT_CMD | XY_SRC_COPY_BLT_WRITE_ALPHA |
XY_SRC_COPY_BLT_WRITE_RGB);
break;
default:
return;
}
if (dst_y2 < dst_y || dst_x2 < dst_x) {
return;
}
/* Initial y values don't seem to work with negative pitches. If
* we adjust the offsets manually (below), it seems to work fine.
*
* On the other hand, if we always adjust, the hardware doesn't
* know which blit directions to use, so overlapping copypixels get
* the wrong result.
*/
if (dst_pitch > 0 && src_pitch > 0) {
BEGIN_BATCH(8, INTEL_BATCH_NO_CLIPRECTS);
OUT_BATCH(CMD);
OUT_BATCH(BR13);
OUT_BATCH((dst_y << 16) | dst_x);
OUT_BATCH((dst_y2 << 16) | dst_x2);
OUT_RELOC(dst_buffer, DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_WRITE,
DRM_BO_MASK_MEM | DRM_BO_FLAG_WRITE, dst_offset);
OUT_BATCH((src_y << 16) | src_x);
OUT_BATCH(((GLint) src_pitch & 0xffff));
OUT_RELOC(src_buffer, DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_READ,
DRM_BO_MASK_MEM | DRM_BO_FLAG_READ, src_offset);
ADVANCE_BATCH();
}
else {
BEGIN_BATCH(8, INTEL_BATCH_NO_CLIPRECTS);
OUT_BATCH(CMD);
OUT_BATCH(BR13);
OUT_BATCH((0 << 16) | dst_x);
OUT_BATCH((h << 16) | dst_x2);
OUT_RELOC(dst_buffer, DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_WRITE,
DRM_BO_MASK_MEM | DRM_BO_FLAG_WRITE,
dst_offset + dst_y * dst_pitch);
OUT_BATCH((0 << 16) | src_x);
OUT_BATCH(((GLint) src_pitch & 0xffff));
OUT_RELOC(src_buffer, DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_READ,
DRM_BO_MASK_MEM | DRM_BO_FLAG_READ,
src_offset + src_y * src_pitch);
ADVANCE_BATCH();
}
}
/**
* Use blitting to clear the renderbuffers named by 'flags'.
* Note: we can't use the ctx->DrawBuffer->_ColorDrawBufferMask field
* since that might include software renderbuffers or renderbuffers
* which we're clearing with triangles.
* \param mask bitmask of BUFFER_BIT_* values indicating buffers to clear
*/
void
intelClearWithBlit(GLcontext * ctx, GLbitfield mask, GLboolean all,
GLint cx, GLint cy, GLint cw, GLint ch)
{
struct intel_context *intel = intel_context(ctx);
GLuint clear_depth;
GLbitfield skipBuffers = 0;
BATCH_LOCALS;
DBG("%s %x\n", __FUNCTION__, mask);
/*
* Compute values for clearing the buffers.
*/
clear_depth = 0;
if (mask & BUFFER_BIT_DEPTH) {
clear_depth = (GLuint) (ctx->DrawBuffer->_DepthMax * ctx->Depth.Clear);
}
if (mask & BUFFER_BIT_STENCIL) {
clear_depth |= (ctx->Stencil.Clear & 0xff) << 24;
}
/* If clearing both depth and stencil, skip BUFFER_BIT_STENCIL in
* the loop below.
*/
if ((mask & BUFFER_BIT_DEPTH) && (mask & BUFFER_BIT_STENCIL)) {
skipBuffers = BUFFER_BIT_STENCIL;
}
/* XXX Move this flush/lock into the following conditional? */
intelFlush(&intel->ctx);
LOCK_HARDWARE(intel);
if (intel->numClipRects) {
drm_clip_rect_t clear;
int i;
/* Refresh the cx/y/w/h values as they may have been invalidated
* by a new window position or size picked up when we did
* LOCK_HARDWARE above. The values passed by mesa are not
* reliable.
*/
{
cx = ctx->DrawBuffer->_Xmin;
cy = ctx->DrawBuffer->_Ymin;
ch = ctx->DrawBuffer->_Ymax - ctx->DrawBuffer->_Ymin;
cw = ctx->DrawBuffer->_Xmax - ctx->DrawBuffer->_Xmin;
}
if (intel->ctx.DrawBuffer->Name == 0) {
/* clearing a window */
/* flip top to bottom */
clear.x1 = cx + intel->drawX;
clear.y1 = intel->driDrawable->y + intel->driDrawable->h - cy - ch;
clear.x2 = clear.x1 + cw;
clear.y2 = clear.y1 + ch;
/* adjust for page flipping */
if (intel->sarea->pf_current_page == 1) {
const GLuint tmp = mask;
mask &= ~(BUFFER_BIT_FRONT_LEFT | BUFFER_BIT_BACK_LEFT);
if (tmp & BUFFER_BIT_FRONT_LEFT)
mask |= BUFFER_BIT_BACK_LEFT;
if (tmp & BUFFER_BIT_BACK_LEFT)
mask |= BUFFER_BIT_FRONT_LEFT;
}
}
else {
/* clearing FBO */
ASSERT(intel->numClipRects == 1);
ASSERT(intel->pClipRects == &intel->fboRect);
clear.x1 = cx;
clear.y1 = intel->ctx.DrawBuffer->Height - cy - ch;
clear.x2 = clear.x1 + cw;
clear.y2 = clear.y1 + ch;
/* no change to mask */
}
for (i = 0; i < intel->numClipRects; i++) {
const drm_clip_rect_t *box = &intel->pClipRects[i];
drm_clip_rect_t b;
GLuint buf;
GLuint clearMask = mask; /* use copy, since we modify it below */
if (!all) {
intel_intersect_cliprects(&b, &clear, box);
}
else {
b = *box;
}
if (0)
_mesa_printf("clear %d,%d..%d,%d, mask %x\n",
b.x1, b.y1, b.x2, b.y2, mask);
/* Loop over all renderbuffers */
for (buf = 0; buf < BUFFER_COUNT && clearMask; buf++) {
const GLbitfield bufBit = 1 << buf;
if ((clearMask & bufBit) && !(bufBit & skipBuffers)) {
/* OK, clear this renderbuffer */
const struct intel_renderbuffer *irb
= intel_renderbuffer(ctx->DrawBuffer->
Attachment[buf].Renderbuffer);
struct _DriBufferObject *write_buffer =
intel_region_buffer(intel->intelScreen, irb->region,
all ? INTEL_WRITE_FULL :
INTEL_WRITE_PART);
GLuint clearVal;
GLint pitch, cpp;
GLuint BR13, CMD;
ASSERT(irb);
ASSERT(irb->region);
pitch = irb->region->pitch;
cpp = irb->region->cpp;
DBG("%s dst:buf(%p)/%d+%d %d,%d sz:%dx%d\n",
__FUNCTION__,
irb->region->buffer, (pitch * cpp),
irb->region->draw_offset,
b.x1, b.y1, b.x2 - b.x1, b.y2 - b.y1);
/* Setup the blit command */
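/* BR13 layout, as far as can be told from the values used here: the low 16
* bits carry the destination pitch in bytes, 0xF0 in bits 23:16 is the
* blitter raster op (pattern/solid fill), and bits 25:24 appear to select
* the color depth (both set for 32 bpp, bit 24 alone for 16 bpp).
*/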
if (cpp == 4) {
BR13 = (0xF0 << 16) | (pitch * cpp) | (1 << 24) | (1 << 25);
if (buf == BUFFER_DEPTH || buf == BUFFER_STENCIL) {
CMD = XY_COLOR_BLT_CMD;
if (clearMask & BUFFER_BIT_DEPTH)
CMD |= XY_COLOR_BLT_WRITE_RGB;
if (clearMask & BUFFER_BIT_STENCIL)
CMD |= XY_COLOR_BLT_WRITE_ALPHA;
}
else {
/* clearing RGBA */
CMD = (XY_COLOR_BLT_CMD |
XY_COLOR_BLT_WRITE_ALPHA |
XY_COLOR_BLT_WRITE_RGB);
}
}
else {
ASSERT(cpp == 2 || cpp == 0);
BR13 = (0xF0 << 16) | (pitch * cpp) | (1 << 24);
CMD = XY_COLOR_BLT_CMD;
}
if (buf == BUFFER_DEPTH || buf == BUFFER_STENCIL) {
clearVal = clear_depth;
}
else {
clearVal = (cpp == 4)
? intel->ClearColor8888 : intel->ClearColor565;
}
/*
_mesa_debug(ctx, "hardware blit clear buf %d rb id %d\n",
buf, irb->Base.Name);
*/
BEGIN_BATCH(6, INTEL_BATCH_NO_CLIPRECTS);
OUT_BATCH(CMD);
OUT_BATCH(BR13);
OUT_BATCH((b.y1 << 16) | b.x1);
OUT_BATCH((b.y2 << 16) | b.x2);
OUT_RELOC(write_buffer, DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_WRITE,
DRM_BO_MASK_MEM | DRM_BO_FLAG_WRITE,
irb->region->draw_offset);
OUT_BATCH(clearVal);
ADVANCE_BATCH();
clearMask &= ~bufBit; /* turn off bit, for faster loop exit */
}
}
}
intel_batchbuffer_flush(intel->batch);
}
UNLOCK_HARDWARE(intel);
}


@@ -0,0 +1,63 @@
/**************************************************************************
*
* Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
* IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#ifndef INTEL_BLIT_H
#define INTEL_BLIT_H
#include "intel_context.h"
#include "intel_ioctl.h"
#include "dri_bufmgr.h"
extern void intelCopyBuffer(const __DRIdrawablePrivate * dpriv,
const drm_clip_rect_t * rect);
extern void intelClearWithBlit(GLcontext * ctx, GLbitfield mask,
GLboolean all, GLint cx1, GLint cy1, GLint cw,
GLint ch);
extern void intelEmitCopyBlit(struct intel_context *intel,
GLuint cpp,
GLshort src_pitch,
struct _DriBufferObject *src_buffer,
GLuint src_offset,
GLshort dst_pitch,
struct _DriBufferObject *dst_buffer,
GLuint dst_offset,
GLshort srcx, GLshort srcy,
GLshort dstx, GLshort dsty,
GLshort w, GLshort h);
extern void intelEmitFillBlit(struct intel_context *intel,
GLuint cpp,
GLshort dst_pitch,
struct _DriBufferObject *dst_buffer,
GLuint dst_offset,
GLshort x, GLshort y,
GLshort w, GLshort h, GLuint color);
#endif


@@ -0,0 +1,250 @@
/**************************************************************************
*
* Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
* IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#include "imports.h"
#include "mtypes.h"
#include "bufferobj.h"
#include "intel_context.h"
#include "intel_buffer_objects.h"
#include "intel_regions.h"
#include "dri_bufmgr.h"
/**
* There is some duplication between mesa's bufferobjects and our
* bufmgr buffers. Both have an integer handle and a hashtable to
* lookup an opaque structure. It would be nice if the handles and
* internal structure were somehow shared.
*/
static struct gl_buffer_object *
intel_bufferobj_alloc(GLcontext * ctx, GLuint name, GLenum target)
{
struct intel_context *intel = intel_context(ctx);
struct intel_buffer_object *obj = CALLOC_STRUCT(intel_buffer_object);
_mesa_initialize_buffer_object(&obj->Base, name, target);
driGenBuffers(intel->intelScreen->regionPool,
"bufferobj", 1, &obj->buffer, 64, 0, 0);
return &obj->Base;
}
/* Break the COW tie to the region. The region gets to keep the data.
*/
void
intel_bufferobj_release_region(struct intel_context *intel,
struct intel_buffer_object *intel_obj)
{
assert(intel_obj->region->buffer == intel_obj->buffer);
intel_obj->region->pbo = NULL;
intel_obj->region = NULL;
driBOUnReference(intel_obj->buffer);
intel_obj->buffer = NULL;
/* This leads to a large number of buffer deletion/creation events.
* Currently the drm doesn't like that:
*/
driGenBuffers(intel->intelScreen->regionPool,
"buffer object", 1, &intel_obj->buffer, 64, 0, 0);
driBOData(intel_obj->buffer, intel_obj->Base.Size, NULL, 0);
}
/* Break the COW tie to the region. Both the pbo and the region end
* up with a copy of the data.
*/
void
intel_bufferobj_cow(struct intel_context *intel,
struct intel_buffer_object *intel_obj)
{
assert(intel_obj->region);
intel_region_cow(intel->intelScreen, intel_obj->region);
}
/**
* Deallocate/free a vertex/pixel buffer object.
* Called via glDeleteBuffersARB().
*/
static void
intel_bufferobj_free(GLcontext * ctx, struct gl_buffer_object *obj)
{
struct intel_context *intel = intel_context(ctx);
struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
assert(intel_obj);
if (intel_obj->region) {
intel_bufferobj_release_region(intel, intel_obj);
}
else if (intel_obj->buffer) {
driDeleteBuffers(1, &intel_obj->buffer);
}
_mesa_free(intel_obj);
}
/**
* Allocate space for and store data in a buffer object. Any data that was
* previously stored in the buffer object is lost. If data is NULL,
* memory will be allocated, but no copy will occur.
* Called via glBufferDataARB().
*/
static void
intel_bufferobj_data(GLcontext * ctx,
GLenum target,
GLsizeiptrARB size,
const GLvoid * data,
GLenum usage, struct gl_buffer_object *obj)
{
struct intel_context *intel = intel_context(ctx);
struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
intel_obj->Base.Size = size;
intel_obj->Base.Usage = usage;
if (intel_obj->region)
intel_bufferobj_release_region(intel, intel_obj);
driBOData(intel_obj->buffer, size, data, 0);
}
/**
* Replace data in a subrange of buffer object. If the data range
* specified by size + offset extends beyond the end of the buffer or
* if data is NULL, no copy is performed.
* Called via glBufferSubDataARB().
*/
static void
intel_bufferobj_subdata(GLcontext * ctx,
GLenum target,
GLintptrARB offset,
GLsizeiptrARB size,
const GLvoid * data, struct gl_buffer_object *obj)
{
struct intel_context *intel = intel_context(ctx);
struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
assert(intel_obj);
if (intel_obj->region)
intel_bufferobj_cow(intel, intel_obj);
driBOSubData(intel_obj->buffer, offset, size, data);
}
/**
* Called via glGetBufferSubDataARB().
*/
static void
intel_bufferobj_get_subdata(GLcontext * ctx,
GLenum target,
GLintptrARB offset,
GLsizeiptrARB size,
GLvoid * data, struct gl_buffer_object *obj)
{
struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
assert(intel_obj);
driBOGetSubData(intel_obj->buffer, offset, size, data);
}
/**
* Called via glMapBufferARB().
*/
static void *
intel_bufferobj_map(GLcontext * ctx,
GLenum target,
GLenum access, struct gl_buffer_object *obj)
{
struct intel_context *intel = intel_context(ctx);
struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
/* XXX: Translate access to flags arg below:
*/
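/* A minimal sketch of that translation (not wired in here); the flag
* mapping is an assumption:
*
*    GLuint flags = DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE;
*    if (access == GL_READ_ONLY_ARB)
*       flags = DRM_BO_FLAG_READ;
*    else if (access == GL_WRITE_ONLY_ARB)
*       flags = DRM_BO_FLAG_WRITE;
*/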
assert(intel_obj);
if (intel_obj->region)
intel_bufferobj_cow(intel, intel_obj);
obj->Pointer = driBOMap(intel_obj->buffer,
DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE, 0);
return obj->Pointer;
}
/**
* Called via glUnmapBufferARB().
*/
static GLboolean
intel_bufferobj_unmap(GLcontext * ctx,
GLenum target, struct gl_buffer_object *obj)
{
struct intel_buffer_object *intel_obj = intel_buffer_object(obj);
assert(intel_obj);
assert(obj->Pointer);
driBOUnmap(intel_obj->buffer);
obj->Pointer = NULL;
return GL_TRUE;
}
struct _DriBufferObject *
intel_bufferobj_buffer(struct intel_context *intel,
struct intel_buffer_object *intel_obj, GLuint flag)
{
if (intel_obj->region) {
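/* A partial write must keep the shared texels valid, so break the COW tie
* by copying; a full overwrite can simply drop the region and take fresh
* storage.
*/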
if (flag == INTEL_WRITE_PART)
intel_bufferobj_cow(intel, intel_obj);
else if (flag == INTEL_WRITE_FULL)
intel_bufferobj_release_region(intel, intel_obj);
}
return intel_obj->buffer;
}
void
intel_bufferobj_init(struct intel_context *intel)
{
GLcontext *ctx = &intel->ctx;
ctx->Driver.NewBufferObject = intel_bufferobj_alloc;
ctx->Driver.DeleteBuffer = intel_bufferobj_free;
ctx->Driver.BufferData = intel_bufferobj_data;
ctx->Driver.BufferSubData = intel_bufferobj_subdata;
ctx->Driver.GetBufferSubData = intel_bufferobj_get_subdata;
ctx->Driver.MapBuffer = intel_bufferobj_map;
ctx->Driver.UnmapBuffer = intel_bufferobj_unmap;
}


@@ -0,0 +1,86 @@
/**************************************************************************
*
* Copyright 2005 Tungsten Graphics, Inc., Cedar Park, Texas.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
* IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#ifndef INTEL_BUFFEROBJ_H
#define INTEL_BUFFEROBJ_H
#include "mtypes.h"
struct intel_context;
struct intel_region;
struct gl_buffer_object;
/**
* Intel vertex/pixel buffer object, derived from Mesa's gl_buffer_object.
*/
struct intel_buffer_object
{
struct gl_buffer_object Base;
struct _DriBufferObject *buffer; /* the low-level buffer manager's buffer handle */
struct intel_region *region; /* Is there a zero-copy texture
associated with this (pixel)
buffer object? */
};
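/* The zero-copy tie is presumably established when a texture image is
* specified directly from a pixel buffer object; 'region' then records the
* shared storage until a CPU write breaks it via intel_bufferobj_cow() or
* intel_bufferobj_release_region().
*/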
/* Get the bm buffer associated with a GL bufferobject:
*/
struct _DriBufferObject *intel_bufferobj_buffer(struct intel_context *intel,
struct intel_buffer_object
*obj, GLuint flag);
/* Hook the bufferobject implementation into mesa:
*/
void intel_bufferobj_init(struct intel_context *intel);
/* Are the obj->Name tests necessary? Unfortunately yes, mesa
* allocates a couple of gl_buffer_object structs statically, and
* the Name == 0 test is the only way to identify them and avoid
* casting them erroneously to our structs.
*/
static INLINE struct intel_buffer_object *
intel_buffer_object(struct gl_buffer_object *obj)
{
if (obj->Name)
return (struct intel_buffer_object *) obj;
else
return NULL;
}
/* Helpers for zerocopy image uploads. See also intel_regions.h:
*/
void intel_bufferobj_cow(struct intel_context *intel,
struct intel_buffer_object *intel_obj);
void intel_bufferobj_release_region(struct intel_context *intel,
struct intel_buffer_object *intel_obj);
#endif


@@ -0,0 +1,968 @@
/**************************************************************************
*
* Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
* IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#include "intel_screen.h"
#include "intel_context.h"
#include "intel_blit.h"
#include "intel_buffers.h"
#include "intel_depthstencil.h"
#include "intel_fbo.h"
#include "intel_tris.h"
#include "intel_regions.h"
#include "intel_batchbuffer.h"
#include "context.h"
#include "framebuffer.h"
#include "swrast/swrast.h"
/**
* XXX move this into a new dri/common/cliprects.c file.
*/
GLboolean
intel_intersect_cliprects(drm_clip_rect_t * dst,
const drm_clip_rect_t * a,
const drm_clip_rect_t * b)
{
GLint bx = b->x1;
GLint by = b->y1;
GLint bw = b->x2 - bx;
GLint bh = b->y2 - by;
if (bx < a->x1)
bw -= a->x1 - bx, bx = a->x1;
if (by < a->y1)
bh -= a->y1 - by, by = a->y1;
if (bx + bw > a->x2)
bw = a->x2 - bx;
if (by + bh > a->y2)
bh = a->y2 - by;
if (bw <= 0)
return GL_FALSE;
if (bh <= 0)
return GL_FALSE;
dst->x1 = bx;
dst->y1 = by;
dst->x2 = bx + bw;
dst->y2 = by + bh;
return GL_TRUE;
}
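/* Example: intersecting a = {0, 0, 100, 100} with b = {50, 50, 150, 150}
* stores {50, 50, 100, 100} in dst and returns GL_TRUE; disjoint rectangles
* return GL_FALSE and leave dst untouched.
*/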
/**
* Return pointer to current color drawing region, or NULL.
*/
struct intel_region *
intel_drawbuf_region(struct intel_context *intel)
{
struct intel_renderbuffer *irbColor =
intel_renderbuffer(intel->ctx.DrawBuffer->_ColorDrawBuffers[0][0]);
if (irbColor)
return irbColor->region;
else
return NULL;
}
/**
* Return pointer to current color reading region, or NULL.
*/
struct intel_region *
intel_readbuf_region(struct intel_context *intel)
{
struct intel_renderbuffer *irb
= intel_renderbuffer(intel->ctx.ReadBuffer->_ColorReadBuffer);
if (irb)
return irb->region;
else
return NULL;
}
static void
intelBufferSize(GLframebuffer * buffer, GLuint * width, GLuint * height)
{
GET_CURRENT_CONTEXT(ctx);
struct intel_context *intel = intel_context(ctx);
/* Need to lock to make sure the driDrawable is up to date. This
* information is used to resize Mesa's software buffers, so it has
* to be correct.
*/
/* XXX This isn't 100% correct, the given buffer might not be
* bound to the current context!
*/
LOCK_HARDWARE(intel);
if (intel->driDrawable) {
*width = intel->driDrawable->w;
*height = intel->driDrawable->h;
}
else {
*width = 0;
*height = 0;
}
UNLOCK_HARDWARE(intel);
}
/**
* Update the following fields for rendering to a user-created FBO:
* intel->numClipRects
* intel->pClipRects
* intel->drawX
* intel->drawY
*/
static void
intelSetRenderbufferClipRects(struct intel_context *intel)
{
assert(intel->ctx.DrawBuffer->Width > 0);
assert(intel->ctx.DrawBuffer->Height > 0);
intel->fboRect.x1 = 0;
intel->fboRect.y1 = 0;
intel->fboRect.x2 = intel->ctx.DrawBuffer->Width;
intel->fboRect.y2 = intel->ctx.DrawBuffer->Height;
intel->numClipRects = 1;
intel->pClipRects = &intel->fboRect;
intel->drawX = 0;
intel->drawY = 0;
}
/**
* As above, but for rendering to front buffer of a window.
* \sa intelSetRenderbufferClipRects
*/
static void
intelSetFrontClipRects(struct intel_context *intel)
{
__DRIdrawablePrivate *dPriv = intel->driDrawable;
if (!dPriv)
return;
intel->numClipRects = dPriv->numClipRects;
intel->pClipRects = dPriv->pClipRects;
intel->drawX = dPriv->x;
intel->drawY = dPriv->y;
}
/**
* As above, but for rendering to back buffer of a window.
*/
static void
intelSetBackClipRects(struct intel_context *intel)
{
__DRIdrawablePrivate *dPriv = intel->driDrawable;
if (!dPriv)
return;
if (intel->sarea->pf_enabled == 0 && dPriv->numBackClipRects == 0) {
/* use the front clip rects */
intel->numClipRects = dPriv->numClipRects;
intel->pClipRects = dPriv->pClipRects;
intel->drawX = dPriv->x;
intel->drawY = dPriv->y;
}
else {
/* use the back clip rects */
intel->numClipRects = dPriv->numBackClipRects;
intel->pClipRects = dPriv->pBackClipRects;
intel->drawX = dPriv->backX;
intel->drawY = dPriv->backY;
if (dPriv->numBackClipRects == 1 &&
dPriv->x == dPriv->backX && dPriv->y == dPriv->backY) {
/* Repeat the calculation of the back cliprect dimensions here
* as early versions of dri.a in the Xserver are incorrect. Try
* very hard not to restrict future versions of dri.a which
* might, e.g., allocate truly private back buffers.
*/
int x1, y1;
int x2, y2;
x1 = dPriv->x;
y1 = dPriv->y;
x2 = dPriv->x + dPriv->w;
y2 = dPriv->y + dPriv->h;
if (x1 < 0)
x1 = 0;
if (y1 < 0)
y1 = 0;
if (x2 > intel->intelScreen->width)
x2 = intel->intelScreen->width;
if (y2 > intel->intelScreen->height)
y2 = intel->intelScreen->height;
if (x1 == dPriv->pBackClipRects[0].x1 &&
y1 == dPriv->pBackClipRects[0].y1) {
dPriv->pBackClipRects[0].x2 = x2;
dPriv->pBackClipRects[0].y2 = y2;
}
}
}
}
/**
* This will be called whenever the currently bound window is moved/resized.
* XXX: actually, it seems to NOT be called when the window is only moved (BP).
*/
void
intelWindowMoved(struct intel_context *intel)
{
GLcontext *ctx = &intel->ctx;
if (!intel->ctx.DrawBuffer) {
/* when would this happen? -BP */
intelSetFrontClipRects(intel);
}
else if (intel->ctx.DrawBuffer->Name != 0) {
/* drawing to user-created FBO - do nothing */
/* Cliprects would be set from intelDrawBuffer() */
}
else {
/* drawing to a window */
switch (intel->ctx.DrawBuffer->_ColorDrawBufferMask[0]) {
case BUFFER_BIT_FRONT_LEFT:
intelSetFrontClipRects(intel);
break;
case BUFFER_BIT_BACK_LEFT:
intelSetBackClipRects(intel);
break;
default:
/* glDrawBuffer(GL_NONE or GL_FRONT_AND_BACK): software fallback */
intelSetFrontClipRects(intel);
}
}
/* this updates Mesa's notion of the window size */
if (ctx->WinSysDrawBuffer) {
_mesa_resize_framebuffer(ctx, ctx->WinSysDrawBuffer,
intel->driDrawable->w, intel->driDrawable->h);
}
/* Update hardware scissor */
ctx->Driver.Scissor(ctx, ctx->Scissor.X, ctx->Scissor.Y,
ctx->Scissor.Width, ctx->Scissor.Height);
}
/* A true meta version of this would be very simple and additionally
* machine independent. Maybe we'll get there one day.
*/
static void
intelClearWithTris(struct intel_context *intel,
GLbitfield mask,
GLboolean all, GLint cx, GLint cy, GLint cw, GLint ch)
{
GLcontext *ctx = &intel->ctx;
drm_clip_rect_t clear;
if (INTEL_DEBUG & DEBUG_BLIT)
_mesa_printf("%s 0x%x\n", __FUNCTION__, mask);
LOCK_HARDWARE(intel);
/* XXX FBO: was: intel->driDrawable->numClipRects */
if (intel->numClipRects) {
GLuint buf;
intel->vtbl.install_meta_state(intel);
/* Refresh the cx/y/w/h values as they may have been invalidated
* by a new window position or size picked up when we did
* LOCK_HARDWARE above. The values passed by mesa are not
* reliable.
*/
{
cx = ctx->DrawBuffer->_Xmin;
cy = ctx->DrawBuffer->_Ymin;
ch = ctx->DrawBuffer->_Ymax - ctx->DrawBuffer->_Ymin;
cw = ctx->DrawBuffer->_Xmax - ctx->DrawBuffer->_Xmin;
}
/* note: regardless of 'all', cx, cy, cw, ch are now correct */
clear.x1 = cx;
clear.y1 = cy;
clear.x2 = cx + cw;
clear.y2 = cy + ch;
/* Back and stencil cliprects are the same. Try and do both
* buffers at once:
*/
if (mask &
(BUFFER_BIT_BACK_LEFT | BUFFER_BIT_STENCIL | BUFFER_BIT_DEPTH)) {
struct intel_region *backRegion =
intel_get_rb_region(ctx->DrawBuffer, BUFFER_BACK_LEFT);
struct intel_region *depthRegion =
intel_get_rb_region(ctx->DrawBuffer, BUFFER_DEPTH);
const GLuint clearColor = (backRegion && backRegion->cpp == 4)
? intel->ClearColor8888 : intel->ClearColor565;
intel->vtbl.meta_draw_region(intel, backRegion, depthRegion);
if (mask & BUFFER_BIT_BACK_LEFT)
intel->vtbl.meta_color_mask(intel, GL_TRUE);
else
intel->vtbl.meta_color_mask(intel, GL_FALSE);
if (mask & BUFFER_BIT_STENCIL)
intel->vtbl.meta_stencil_replace(intel,
intel->ctx.Stencil.WriteMask[0],
intel->ctx.Stencil.Clear);
else
intel->vtbl.meta_no_stencil_write(intel);
if (mask & BUFFER_BIT_DEPTH)
intel->vtbl.meta_depth_replace(intel);
else
intel->vtbl.meta_no_depth_write(intel);
/* XXX: Using INTEL_BATCH_NO_CLIPRECTS here is dangerous as the
* drawing origin may not be correctly emitted.
*/
intel_meta_draw_quad(intel, clear.x1, clear.x2, clear.y1, clear.y2, intel->ctx.Depth.Clear, clearColor, 0, 0, 0, 0); /* texcoords */
mask &=
~(BUFFER_BIT_BACK_LEFT | BUFFER_BIT_STENCIL | BUFFER_BIT_DEPTH);
}
/* clear the remaining (color) renderbuffers */
for (buf = 0; buf < BUFFER_COUNT && mask; buf++) {
const GLuint bufBit = 1 << buf;
if (mask & bufBit) {
struct intel_renderbuffer *irbColor =
intel_renderbuffer(ctx->DrawBuffer->
Attachment[buf].Renderbuffer);
GLuint color;
ASSERT(irbColor);
color = (irbColor->region->cpp == 4)
? intel->ClearColor8888 : intel->ClearColor565;
intel->vtbl.meta_no_depth_write(intel);
intel->vtbl.meta_no_stencil_write(intel);
intel->vtbl.meta_color_mask(intel, GL_TRUE);
intel->vtbl.meta_draw_region(intel, irbColor->region, NULL);
/* XXX: Using INTEL_BATCH_NO_CLIPRECTS here is dangerous as the
* drawing origin may not be correctly emitted.
*/
intel_meta_draw_quad(intel, clear.x1, clear.x2, clear.y1, clear.y2, 0, /* depth clear val */
color, 0, 0, 0, 0); /* texcoords */
mask &= ~bufBit;
}
}
intel->vtbl.leave_meta_state(intel);
intel_batchbuffer_flush(intel->batch);
}
UNLOCK_HARDWARE(intel);
}
/**
* Copy the window contents named by dPriv to the rotated (or reflected)
* color buffer.
* srcBuf is BUFFER_BIT_FRONT_LEFT or BUFFER_BIT_BACK_LEFT to indicate the source.
*/
void
intelRotateWindow(struct intel_context *intel,
__DRIdrawablePrivate * dPriv, GLuint srcBuf)
{
intelScreenPrivate *screen = intel->intelScreen;
drm_clip_rect_t fullRect;
struct intel_region *src;
const drm_clip_rect_t *clipRects;
int numClipRects;
int i;
GLenum format, type;
int xOrig, yOrig;
int origNumClipRects;
drm_clip_rect_t *origRects;
/*
* set up hardware state
*/
intelFlush(&intel->ctx);
LOCK_HARDWARE(intel);
if (!intel->numClipRects) {
UNLOCK_HARDWARE(intel);
return;
}
intel->vtbl.install_meta_state(intel);
intel->vtbl.meta_no_depth_write(intel);
intel->vtbl.meta_no_stencil_write(intel);
intel->vtbl.meta_color_mask(intel, GL_FALSE);
/* save current drawing origin and cliprects (restored at end) */
xOrig = intel->drawX;
yOrig = intel->drawY;
origNumClipRects = intel->numClipRects;
origRects = intel->pClipRects;
/*
* set drawing origin, cliprects for full-screen access to rotated screen
*/
fullRect.x1 = 0;
fullRect.y1 = 0;
fullRect.x2 = screen->rotatedWidth;
fullRect.y2 = screen->rotatedHeight;
intel->drawX = 0;
intel->drawY = 0;
intel->numClipRects = 1;
intel->pClipRects = &fullRect;
intel->vtbl.meta_draw_region(intel, screen->rotated_region, NULL); /* ? */
if (srcBuf == BUFFER_BIT_FRONT_LEFT) {
src = intel->intelScreen->front_region;
clipRects = dPriv->pClipRects;
numClipRects = dPriv->numClipRects;
}
else {
src = intel->intelScreen->back_region;
clipRects = dPriv->pBackClipRects;
numClipRects = dPriv->numBackClipRects;
}
if (src->cpp == 4) {
format = GL_BGRA;
type = GL_UNSIGNED_BYTE;
}
else {
format = GL_BGR;
type = GL_UNSIGNED_SHORT_5_6_5_REV;
}
/* set the whole screen up as a texture to avoid alignment issues */
intel->vtbl.meta_tex_rect_source(intel,
src->buffer,
screen->width,
screen->height, src->pitch, format, type);
intel->vtbl.meta_texture_blend_replace(intel);
/*
* loop over the source window's cliprects
*/
for (i = 0; i < numClipRects; i++) {
int srcX0 = clipRects[i].x1;
int srcY0 = clipRects[i].y1;
int srcX1 = clipRects[i].x2;
int srcY1 = clipRects[i].y2;
GLfloat verts[4][2], tex[4][2];
int j;
/* build vertices for four corners of clip rect */
verts[0][0] = srcX0;
verts[0][1] = srcY0;
verts[1][0] = srcX1;
verts[1][1] = srcY0;
verts[2][0] = srcX1;
verts[2][1] = srcY1;
verts[3][0] = srcX0;
verts[3][1] = srcY1;
/* .. and texcoords */
tex[0][0] = srcX0;
tex[0][1] = srcY0;
tex[1][0] = srcX1;
tex[1][1] = srcY0;
tex[2][0] = srcX1;
tex[2][1] = srcY1;
tex[3][0] = srcX0;
tex[3][1] = srcY1;
/* transform coords to rotated screen coords */
for (j = 0; j < 4; j++) {
matrix23TransformCoordf(&screen->rotMatrix,
&verts[j][0], &verts[j][1]);
}
/* draw polygon to map source image to dest region */
intel_meta_draw_poly(intel, 4, verts, 0, 0, tex);
} /* cliprect loop */
intel->vtbl.leave_meta_state(intel);
intel_batchbuffer_flush(intel->batch);
/* restore original drawing origin and cliprects */
intel->drawX = xOrig;
intel->drawY = yOrig;
intel->numClipRects = origNumClipRects;
intel->pClipRects = origRects;
UNLOCK_HARDWARE(intel);
}
/**
* Called by ctx->Driver.Clear.
*/
static void
intelClear(GLcontext * ctx,
GLbitfield mask,
GLboolean all, GLint cx, GLint cy, GLint cw, GLint ch)
{
struct intel_context *intel = intel_context(ctx);
const GLuint colorMask = *((GLuint *) & ctx->Color.ColorMask);
GLbitfield tri_mask = 0;
GLbitfield blit_mask = 0;
GLbitfield swrast_mask = 0;
GLuint i;
if (0)
fprintf(stderr, "%s\n", __FUNCTION__);
/* HW color buffers (front, back, aux, generic FBO, etc) */
if (colorMask == ~0) {
/* clear all R,G,B,A */
/* XXX FBO: need to check if colorbuffers are software RBOs! */
blit_mask |= (mask & BUFFER_BITS_COLOR);
}
else {
/* glColorMask in effect */
tri_mask |= (mask & BUFFER_BITS_COLOR);
}
/* HW stencil */
if (mask & BUFFER_BIT_STENCIL) {
const struct intel_region *stencilRegion
= intel_get_rb_region(ctx->DrawBuffer, BUFFER_STENCIL);
if (stencilRegion) {
/* have hw stencil */
if ((ctx->Stencil.WriteMask[0] & 0xff) != 0xff) {
/* not clearing all stencil bits, so use triangle clearing */
tri_mask |= BUFFER_BIT_STENCIL;
}
else {
/* clearing all stencil bits, use blitting */
blit_mask |= BUFFER_BIT_STENCIL;
}
}
}
/* HW depth */
if (mask & BUFFER_BIT_DEPTH) {
/* clear depth with whatever method is used for stencil (see above) */
if (tri_mask & BUFFER_BIT_STENCIL)
tri_mask |= BUFFER_BIT_DEPTH;
else
blit_mask |= BUFFER_BIT_DEPTH;
}
/* SW fallback clearing */
swrast_mask = mask & ~tri_mask & ~blit_mask;
for (i = 0; i < BUFFER_COUNT; i++) {
GLuint bufBit = 1 << i;
if ((blit_mask | tri_mask) & bufBit) {
if (!ctx->DrawBuffer->Attachment[i].Renderbuffer->ClassID) {
blit_mask &= ~bufBit;
tri_mask &= ~bufBit;
swrast_mask |= bufBit;
}
}
}
intelFlush(ctx); /* XXX intelClearWithBlit also does this */
if (blit_mask)
intelClearWithBlit(ctx, blit_mask, all, cx, cy, cw, ch);
if (tri_mask)
intelClearWithTris(intel, tri_mask, all, cx, cy, cw, ch);
if (swrast_mask)
_swrast_Clear(ctx, swrast_mask, all, cx, cy, cw, ch);
}
/* Flip the front & back buffers
*/
static void
intelPageFlip(const __DRIdrawablePrivate * dPriv)
{
#if 0
struct intel_context *intel;
int tmp, ret;
if (INTEL_DEBUG & DEBUG_IOCTL)
fprintf(stderr, "%s\n", __FUNCTION__);
assert(dPriv);
assert(dPriv->driContextPriv);
assert(dPriv->driContextPriv->driverPrivate);
intel = (struct intel_context *) dPriv->driContextPriv->driverPrivate;
intelFlush(&intel->ctx);
LOCK_HARDWARE(intel);
if (dPriv->pClipRects) {
*(drm_clip_rect_t *) intel->sarea->boxes = dPriv->pClipRects[0];
intel->sarea->nbox = 1;
}
ret = drmCommandNone(intel->driFd, DRM_I830_FLIP);
if (ret) {
fprintf(stderr, "%s: %d\n", __FUNCTION__, ret);
UNLOCK_HARDWARE(intel);
exit(1);
}
tmp = intel->sarea->last_enqueue;
intelRefillBatchLocked(intel);
UNLOCK_HARDWARE(intel);
intelSetDrawBuffer(&intel->ctx, intel->ctx.Color.DriverDrawBuffer);
#endif
}
#if 0
void
intelSwapBuffers(__DRIdrawablePrivate * dPriv)
{
if (dPriv->driverPrivate) {
const struct gl_framebuffer *fb
= (struct gl_framebuffer *) dPriv->driverPrivate;
if (fb->Visual.doubleBufferMode) {
GET_CURRENT_CONTEXT(ctx);
if (ctx && ctx->DrawBuffer == fb) {
_mesa_notifySwapBuffers(ctx); /* flush pending rendering */
}
if (0 /*intel->doPageFlip */ ) { /* doPageFlip is never set !!! */
intelPageFlip(dPriv);
}
else {
intelCopyBuffer(dPriv);
}
}
}
else {
_mesa_problem(NULL,
"dPriv has no gl_framebuffer pointer in intelSwapBuffers");
}
}
#else
/* Trunk version:
*/
void
intelSwapBuffers(__DRIdrawablePrivate * dPriv)
{
if (dPriv->driContextPriv && dPriv->driContextPriv->driverPrivate) {
GET_CURRENT_CONTEXT(ctx);
struct intel_context *intel;
if (ctx == NULL)
return;
intel = intel_context(ctx);
if (ctx->Visual.doubleBufferMode) {
intelScreenPrivate *screen = intel->intelScreen;
_mesa_notifySwapBuffers(ctx); /* flush pending rendering commands */
if (0 /*intel->doPageFlip */ ) { /* doPageFlip is never set !!! */
intelPageFlip(dPriv);
}
else {
intelCopyBuffer(dPriv, NULL);
}
if (screen->current_rotation != 0) {
intelRotateWindow(intel, dPriv, BUFFER_BIT_FRONT_LEFT);
}
}
}
else {
/* XXX this shouldn't be an error but we can't handle it for now */
fprintf(stderr, "%s: drawable has no context!\n", __FUNCTION__);
}
}
#endif
void
intelCopySubBuffer(__DRIdrawablePrivate * dPriv, int x, int y, int w, int h)
{
if (dPriv->driContextPriv && dPriv->driContextPriv->driverPrivate) {
struct intel_context *intel =
(struct intel_context *) dPriv->driContextPriv->driverPrivate;
GLcontext *ctx = &intel->ctx;
if (ctx->Visual.doubleBufferMode) {
drm_clip_rect_t rect;
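/* Convert the GL subrectangle (bottom-left origin, drawable-relative) into
* a screen-space cliprect with a top-left origin, offset by the drawable
* position.
*/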
rect.x1 = x + dPriv->x;
rect.y1 = (dPriv->h - y - h) + dPriv->y;
rect.x2 = rect.x1 + w;
rect.y2 = rect.y1 + h;
_mesa_notifySwapBuffers(ctx); /* flush pending rendering commands */
intelCopyBuffer(dPriv, &rect);
}
}
else {
/* XXX this shouldn't be an error but we can't handle it for now */
fprintf(stderr, "%s: drawable has no context!\n", __FUNCTION__);
}
}
/**
* Update the hardware state for drawing into a window or framebuffer object.
*
* Called by glDrawBuffer, glBindFramebufferEXT, MakeCurrent, and other
* places within the driver.
*
* Basically, this needs to be called any time the current framebuffer
* changes, the renderbuffers change, or we need to draw into different
* color buffers.
*/
void
intel_draw_buffer(GLcontext * ctx, struct gl_framebuffer *fb)
{
struct intel_context *intel = intel_context(ctx);
struct intel_region *colorRegion, *depthRegion = NULL;
struct intel_renderbuffer *irbDepth = NULL, *irbStencil = NULL;
int front = 0; /* drawing to front color buffer? */
if (!fb) {
/* this can happen during the initial context initialization */
return;
}
/* Do this here, not in core Mesa, since this function is called from
* many places within the driver.
*/
if (ctx->NewState & (_NEW_BUFFERS | _NEW_COLOR | _NEW_PIXEL)) {
/* this updates the DrawBuffer->_NumColorDrawBuffers fields, etc */
_mesa_update_framebuffer(ctx);
/* this updates the DrawBuffer's Width/Height if it's a FBO */
_mesa_update_draw_buffer_bounds(ctx);
}
if (fb->_Status != GL_FRAMEBUFFER_COMPLETE_EXT) {
/* this may occur when we're called by glBindFrameBuffer() during
* the process of someone setting up renderbuffers, etc.
*/
/*_mesa_debug(ctx, "DrawBuffer: incomplete user FBO\n");*/
return;
}
if (fb->Name)
intel_validate_paired_depth_stencil(ctx, fb);
/*
* How many color buffers are we drawing into?
*/
if (fb->_NumColorDrawBuffers[0] != 1
#if 0
/* XXX FBO temporary - always use software rendering */
|| 1
#endif
) {
/* writing to 0 or 2 or 4 color buffers */
/*_mesa_debug(ctx, "Software rendering\n");*/
FALLBACK(intel, INTEL_FALLBACK_DRAW_BUFFER, GL_TRUE);
front = 1; /* might not have back color buffer */
}
else {
/* draw to exactly one color buffer */
/*_mesa_debug(ctx, "Hardware rendering\n");*/
FALLBACK(intel, INTEL_FALLBACK_DRAW_BUFFER, GL_FALSE);
if (fb->_ColorDrawBufferMask[0] == BUFFER_BIT_FRONT_LEFT) {
front = 1;
}
}
/*
* Get the intel_renderbuffer for the colorbuffer we're drawing into.
* And set up cliprects.
*/
if (fb->Name == 0) {
/* drawing to window system buffer */
if (intel->sarea->pf_current_page == 1) {
/* page flipped back/front */
front ^= 1;
}
if (front) {
intelSetFrontClipRects(intel);
colorRegion = intel_get_rb_region(fb, BUFFER_FRONT_LEFT);
}
else {
intelSetBackClipRects(intel);
colorRegion = intel_get_rb_region(fb, BUFFER_BACK_LEFT);
}
}
else {
/* drawing to user-created FBO */
struct intel_renderbuffer *irb;
intelSetRenderbufferClipRects(intel);
irb = intel_renderbuffer(fb->_ColorDrawBuffers[0][0]);
colorRegion = (irb && irb->region) ? irb->region : NULL;
}
/* Update culling direction which changes depending on the
* orientation of the buffer:
*/
if (ctx->Driver.FrontFace)
ctx->Driver.FrontFace(ctx, ctx->Polygon.FrontFace);
else
ctx->NewState |= _NEW_POLYGON;
if (!colorRegion) {
FALLBACK(intel, INTEL_FALLBACK_DRAW_BUFFER, GL_TRUE);
}
else {
FALLBACK(intel, INTEL_FALLBACK_DRAW_BUFFER, GL_FALSE);
}
/***
*** Get depth buffer region and check if we need a software fallback.
*** Note that the depth buffer is usually a DEPTH_STENCIL buffer.
***/
if (fb->_DepthBuffer && fb->_DepthBuffer->Wrapped) {
irbDepth = intel_renderbuffer(fb->_DepthBuffer->Wrapped);
if (irbDepth->region) {
FALLBACK(intel, INTEL_FALLBACK_DEPTH_BUFFER, GL_FALSE);
depthRegion = irbDepth->region;
}
else {
FALLBACK(intel, INTEL_FALLBACK_DEPTH_BUFFER, GL_TRUE);
depthRegion = NULL;
}
}
else {
/* not using depth buffer */
FALLBACK(intel, INTEL_FALLBACK_DEPTH_BUFFER, GL_FALSE);
depthRegion = NULL;
}
/***
*** Stencil buffer
*** This can only be hardware accelerated if we're using a
*** combined DEPTH_STENCIL buffer (for now anyway).
***/
if (fb->_StencilBuffer && fb->_StencilBuffer->Wrapped) {
irbStencil = intel_renderbuffer(fb->_StencilBuffer->Wrapped);
if (irbStencil && irbStencil->region) {
ASSERT(irbStencil->Base._ActualFormat == GL_DEPTH24_STENCIL8_EXT);
FALLBACK(intel, INTEL_FALLBACK_STENCIL_BUFFER, GL_FALSE);
/* need to re-compute stencil hw state */
ctx->Driver.Enable(ctx, GL_STENCIL_TEST, ctx->Stencil.Enabled);
if (!depthRegion)
depthRegion = irbStencil->region;
}
else {
FALLBACK(intel, INTEL_FALLBACK_STENCIL_BUFFER, GL_TRUE);
}
}
else {
/* XXX FBO: instead of FALSE, pass ctx->Stencil.Enabled ??? */
FALLBACK(intel, INTEL_FALLBACK_STENCIL_BUFFER, GL_FALSE);
/* need to re-compute stencil hw state */
ctx->Driver.Enable(ctx, GL_STENCIL_TEST, ctx->Stencil.Enabled);
}
/**
** Release old regions, reference new regions
**/
#if 0 /* XXX FBO: this seems to be redundant with i915_state_draw_region() */
if (intel->draw_region != colorRegion) {
intel_region_release(intel, &intel->draw_region);
intel_region_reference(&intel->draw_region, colorRegion);
}
if (intel->intelScreen->depth_region != depthRegion) {
intel_region_release(intel, &intel->intelScreen->depth_region);
intel_region_reference(&intel->intelScreen->depth_region, depthRegion);
}
#endif
intel->vtbl.set_draw_region(intel, colorRegion, depthRegion);
/* update viewport since it depends on window size */
ctx->Driver.Viewport(ctx, ctx->Viewport.X, ctx->Viewport.Y,
ctx->Viewport.Width, ctx->Viewport.Height);
/* Update hardware scissor */
ctx->Driver.Scissor(ctx, ctx->Scissor.X, ctx->Scissor.Y,
ctx->Scissor.Width, ctx->Scissor.Height);
}
static void
intelDrawBuffer(GLcontext * ctx, GLenum mode)
{
intel_draw_buffer(ctx, ctx->DrawBuffer);
}
static void
intelReadBuffer(GLcontext * ctx, GLenum mode)
{
if (ctx->ReadBuffer == ctx->DrawBuffer) {
/* This will update FBO completeness status.
* A framebuffer will be incomplete if the GL_READ_BUFFER setting
* refers to a missing renderbuffer. Calling glReadBuffer can set
* that straight and can make the drawing buffer complete.
*/
intel_draw_buffer(ctx, ctx->DrawBuffer);
}
/* Generally, functions which read pixels (glReadPixels, glCopyPixels, etc)
* reference ctx->ReadBuffer and do appropriate state checks.
*/
}
void
intelInitBufferFuncs(struct dd_function_table *functions)
{
functions->Clear = intelClear;
functions->GetBufferSize = intelBufferSize;
functions->ResizeBuffers = _mesa_resize_framebuffer;
functions->DrawBuffer = intelDrawBuffer;
functions->ReadBuffer = intelReadBuffer;
}


@@ -0,0 +1,56 @@
/**************************************************************************
*
* Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
* IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#ifndef INTEL_BUFFERS_H
#define INTEL_BUFFERS_H
struct intel_context;
extern GLboolean
intel_intersect_cliprects(drm_clip_rect_t * dest,
const drm_clip_rect_t * a,
const drm_clip_rect_t * b);
extern struct intel_region *intel_readbuf_region(struct intel_context *intel);
extern struct intel_region *intel_drawbuf_region(struct intel_context *intel);
extern void intelSwapBuffers(__DRIdrawablePrivate * dPriv);
extern void intelWindowMoved(struct intel_context *intel);
extern void intel_draw_buffer(GLcontext * ctx, struct gl_framebuffer *fb);
extern void intelInitBufferFuncs(struct dd_function_table *functions);
extern void
intelRotateWindow(struct intel_context *intel,
__DRIdrawablePrivate * dPriv, GLuint srcBuf);
#endif /* INTEL_BUFFERS_H */

File diff suppressed because it is too large


@@ -48,157 +48,178 @@
#define DV_PF_565 (2<<8)
#define DV_PF_8888 (3<<8)
#define INTEL_CONTEXT(ctx) ((intelContextPtr)(ctx))
struct intel_region;
struct intel_context;
struct _DriBufferObject;
typedef struct intel_context intelContext;
typedef struct intel_context *intelContextPtr;
typedef struct intel_texture_object *intelTextureObjectPtr;
typedef void (*intel_tri_func)(intelContextPtr, intelVertex *, intelVertex *,
intelVertex *);
typedef void (*intel_line_func)(intelContextPtr, intelVertex *, intelVertex *);
typedef void (*intel_point_func)(intelContextPtr, intelVertex *);
typedef void (*intel_tri_func) (struct intel_context *, intelVertex *,
intelVertex *, intelVertex *);
typedef void (*intel_line_func) (struct intel_context *, intelVertex *,
intelVertex *);
typedef void (*intel_point_func) (struct intel_context *, intelVertex *);
#define INTEL_FALLBACK_DRAW_BUFFER 0x1
#define INTEL_FALLBACK_READ_BUFFER 0x2
#define INTEL_FALLBACK_USER 0x4
#define INTEL_FALLBACK_NO_BATCHBUFFER 0x8
#define INTEL_FALLBACK_NO_TEXMEM 0x10
#define INTEL_FALLBACK_DEPTH_BUFFER 0x4
#define INTEL_FALLBACK_STENCIL_BUFFER 0x8
#define INTEL_FALLBACK_USER 0x10
#define INTEL_FALLBACK_RENDERMODE 0x20
extern void intelFallback( intelContextPtr intel, GLuint bit, GLboolean mode );
extern void intelFallback(struct intel_context *intel, GLuint bit,
GLboolean mode);
#define FALLBACK( intel, bit, mode ) intelFallback( intel, bit, mode )
#define INTEL_TEX_MAXLEVELS 10
#define INTEL_WRITE_PART 0x1
#define INTEL_WRITE_FULL 0x2
#define INTEL_READ 0x4
struct intel_texture_object
{
driTextureObject base; /* the parent class */
struct gl_texture_object base; /* The "parent" object */
GLuint texelBytes;
GLuint age;
GLuint Pitch;
GLuint Height;
GLuint TextureOffset;
GLubyte *BufAddr;
/* The mipmap tree must include at least these levels once
* validated:
*/
GLuint firstLevel;
GLuint lastLevel;
GLuint min_level;
GLuint max_level;
GLuint depth_pitch;
/* Offset for firstLevel image:
*/
GLuint textureOffset;
struct {
const struct gl_texture_image *image;
GLuint offset; /* into BufAddr */
GLuint height;
GLuint internalFormat;
} image[6][INTEL_TEX_MAXLEVELS];
GLuint dirty;
GLuint firstLevel,lastLevel;
/* On validation any active images held in main memory or in other
* regions will be copied to this region and the old storage freed.
*/
struct intel_mipmap_tree *mt;
};
struct intel_texture_image
{
struct gl_texture_image base;
/* These aren't stored in gl_texture_image
*/
GLuint level;
GLuint face;
/* If intelImage->mt != NULL, image data is stored here.
* Else if intelImage->base.Data != NULL, image is stored there.
* Else there is no image data.
*/
struct intel_mipmap_tree *mt;
};
#define INTEL_MAX_FIXUP 64
struct intel_context
{
GLcontext ctx; /* the parent class */
GLcontext ctx; /* the parent class */
struct {
void (*destroy)( intelContextPtr intel );
void (*emit_state)( intelContextPtr intel );
void (*emit_invarient_state)( intelContextPtr intel );
void (*lost_hardware)( intelContextPtr intel );
void (*update_texture_state)( intelContextPtr intel );
struct
{
void (*destroy) (struct intel_context * intel);
void (*emit_state) (struct intel_context * intel);
void (*lost_hardware) (struct intel_context * intel);
void (*update_texture_state) (struct intel_context * intel);
void (*render_start)( intelContextPtr intel );
void (*set_color_region)( intelContextPtr intel, const intelRegion *reg );
void (*set_z_region)( intelContextPtr intel, const intelRegion *reg );
void (*update_color_z_regions)(intelContextPtr intel,
const intelRegion *colorRegion,
const intelRegion *depthRegion);
void (*emit_flush)( intelContextPtr intel );
void (*reduced_primitive_state)( intelContextPtr intel, GLenum rprim );
void (*render_start) (struct intel_context * intel);
void (*set_draw_region) (struct intel_context * intel,
struct intel_region * draw_region,
struct intel_region * depth_region);
GLboolean (*check_vertex_size)( intelContextPtr intel, GLuint expected );
GLuint(*flush_cmd) (void);
void (*clear_with_tris)( intelContextPtr intel, GLbitfield mask,
GLboolean all,
GLint cx, GLint cy, GLint cw, GLint ch);
void (*reduced_primitive_state) (struct intel_context * intel,
GLenum rprim);
void (*rotate_window)( intelContextPtr intel,
__DRIdrawablePrivate *dPriv, GLuint srcBuf);
GLboolean(*check_vertex_size) (struct intel_context * intel,
GLuint expected);
intelTextureObjectPtr (*alloc_tex_obj)( struct gl_texture_object *tObj );
/* Metaops:
*/
void (*install_meta_state) (struct intel_context * intel);
void (*leave_meta_state) (struct intel_context * intel);
void (*meta_draw_region) (struct intel_context * intel,
struct intel_region * draw_region,
struct intel_region * depth_region);
void (*meta_color_mask) (struct intel_context * intel, GLboolean);
void (*meta_stencil_replace) (struct intel_context * intel,
GLuint mask, GLuint clear);
void (*meta_depth_replace) (struct intel_context * intel);
void (*meta_texture_blend_replace) (struct intel_context * intel);
void (*meta_no_stencil_write) (struct intel_context * intel);
void (*meta_no_depth_write) (struct intel_context * intel);
void (*meta_no_texture) (struct intel_context * intel);
void (*meta_import_pixel_state) (struct intel_context * intel);
GLboolean(*meta_tex_rect_source) (struct intel_context * intel,
struct _DriBufferObject * buffer,
GLuint offset,
GLuint pitch,
GLuint height,
GLenum format, GLenum type);
void (*rotate_window) (struct intel_context * intel,
__DRIdrawablePrivate * dPriv, GLuint srcBuf);
void (*assert_not_dirty) (struct intel_context *intel);
} vtbl;
GLint refcount;
GLint refcount;
GLuint Fallback;
GLuint NewGLState;
struct {
GLuint start_offset;
GLint size;
GLint space;
GLubyte *ptr;
GLuint counter;
GLuint last_emit_state;
GLboolean contains_geometry;
const char *func;
GLuint last_swap;
} batch;
struct {
void *ptr;
GLint size;
GLuint offset;
GLuint active_buf;
GLuint irq_emitted;
} alloc;
struct {
struct _DriFenceObject *last_swap_fence;
struct _DriFenceObject *first_swap_fence;
struct intel_batchbuffer *batch;
struct
{
GLuint id;
GLuint primitive;
GLubyte *start_ptr;
void (*flush)( GLcontext * );
GLubyte *start_ptr;
void (*flush) (struct intel_context *);
} prim;
GLboolean locked;
char *prevLockFile;
int prevLockLine;
GLubyte clear_red;
GLubyte clear_green;
GLubyte clear_blue;
GLubyte clear_alpha;
GLuint ClearColor;
GLuint ClearDepth;
GLuint ClearColor565;
GLuint ClearColor8888;
/* Offsets of fields within the current vertex:
*/
GLuint coloroffset;
GLuint specoffset;
/* Support for duplicating XYZW as WPOS parameter (crutch for I915).
*/
GLuint wpos_offset;
GLuint wpos_size;
struct tnl_attr_map vertex_attrs[VERT_ATTRIB_MAX];
GLuint vertex_attr_count;
GLfloat depth_scale;
GLfloat polygon_offset_scale; /* dependent on depth_scale, bpp */
GLuint depth_clear_mask;
GLuint stencil_clear_mask;
GLfloat polygon_offset_scale; /* dependent on depth_scale, bpp */
GLboolean hw_stencil;
GLboolean hw_stipple;
/* Texture object bookkeeping
*/
GLuint nr_heaps;
driTexHeap * texture_heaps[1];
driTextureObject swapped;
GLuint lastStamp;
GLboolean strict_conformance;
/* AGP memory buffer manager:
*/
struct bufmgr *bm;
struct intel_texture_object *CurrentTexObj[MAX_TEXTURE_UNITS];
/* State for intelvb.c and inteltris.c.
*/
@@ -207,8 +228,15 @@ struct intel_context
GLenum render_primitive;
GLenum reduced_primitive;
GLuint vertex_size;
unsigned char *verts; /* points to tnl->clipspace.vertex_buf */
GLubyte *verts; /* points to tnl->clipspace.vertex_buf */
#if 0
struct intel_region *front_region; /* XXX FBO: obsolete */
struct intel_region *rotated_region; /* XXX FBO: obsolete */
struct intel_region *back_region; /* XXX FBO: obsolete */
struct intel_region *draw_region; /* XXX FBO: rename to color_region */
struct intel_region *depth_region; /**< currently bound depth/Z region */
#endif
/* Fallback rasterization functions
*/
@@ -216,17 +244,13 @@ struct intel_context
intel_line_func draw_line;
intel_tri_func draw_tri;
/* Drawing buffer state
/* These refer to the current drawing buffer:
*/
intelRegion *drawRegion; /* current drawing buffer */
intelRegion *readRegion; /* current reading buffer */
int drawX; /* origin of drawable in draw buffer */
int drawY;
GLuint numClipRects; /* cliprects for that buffer */
int drawX, drawY; /**< origin of drawing area within region */
GLuint numClipRects; /**< cliprects for drawing */
drm_clip_rect_t *pClipRects;
drm_clip_rect_t fboRect; /**< cliprect for FBO rendering */
int dirtyAge;
int perf_boxes;
GLuint do_usleeps;
@@ -234,18 +258,16 @@ struct intel_context
GLuint irqsEmitted;
drm_i915_irq_wait_t iw;
GLboolean scissor;
drm_clip_rect_t draw_rect;
drm_clip_rect_t scissor_rect;
drm_context_t hHWContext;
drmLock *driHwLock;
int driFd;
__DRIdrawablePrivate *driDrawable;
__DRIscreenPrivate *driScreen;
intelScreenPrivate *intelScreen;
drmI830Sarea *sarea;
intelScreenPrivate *intelScreen;
drmI830Sarea *sarea;
GLuint lastStamp;
/**
* Configuration cache
@@ -262,87 +284,15 @@ struct intel_context
GLuint swap_count;
GLuint swap_missed_count;
};
#define DEBUG_LOCKING 1
#if DEBUG_LOCKING
extern char *prevLockFile;
extern int prevLockLine;
#define DEBUG_LOCK() \
do { \
prevLockFile = (__FILE__); \
prevLockLine = (__LINE__); \
} while (0)
#define DEBUG_RESET() \
do { \
prevLockFile = 0; \
prevLockLine = 0; \
} while (0)
/* Slightly less broken way of detecting recursive locking in a
* threaded environment. The right way to do this would be to make
* prevLockFile, prevLockLine thread-local.
*
* This technique instead checks to see if the same context is
* requesting the lock twice -- this will not catch application
* breakages where the same context is active in two different threads
* at once, but it will catch driver breakages (recursive locking) in
* threaded apps.
*/
/* These are functions now:
*/
#define DEBUG_CHECK_LOCK() \
do { \
if ( *((volatile int *)intel->driHwLock) == \
(DRM_LOCK_HELD | intel->hHWContext) ) { \
fprintf( stderr, \
"LOCK SET!\n\tPrevious %s:%d\n\tCurrent: %s:%d\n", \
prevLockFile, prevLockLine, __FILE__, __LINE__ ); \
abort(); \
} \
} while (0)
void LOCK_HARDWARE( struct intel_context *intel );
void UNLOCK_HARDWARE( struct intel_context *intel );
#else
#define DEBUG_LOCK()
#define DEBUG_RESET()
#define DEBUG_CHECK_LOCK()
#endif
/* Lock the hardware and validate our state.
*/
#define LOCK_HARDWARE( intel ) \
do { \
char __ret=0; \
DEBUG_CHECK_LOCK(); \
assert(!(intel)->locked); \
DRM_CAS((intel)->driHwLock, (intel)->hHWContext, \
(DRM_LOCK_HELD|(intel)->hHWContext), __ret); \
if (__ret) \
intelGetLock( (intel), 0 ); \
DEBUG_LOCK(); \
(intel)->locked = 1; \
}while (0)
/* Unlock the hardware using the global current context
*/
#define UNLOCK_HARDWARE(intel) \
do { \
intel->locked = 0; \
if (0) { \
intel->perf_boxes |= intel->sarea->perf_boxes; \
intel->sarea->perf_boxes = 0; \
} \
DRM_UNLOCK((intel)->driFd, (intel)->driHwLock, (intel)->hHWContext); \
DEBUG_RESET(); \
} while (0)
extern char *__progname;
#define SUBPIXEL_X 0.125
@@ -351,7 +301,7 @@ do { \
#define INTEL_FIREVERTICES(intel) \
do { \
if ((intel)->prim.flush) \
(intel)->prim.flush(&(intel)->ctx); \
(intel)->prim.flush(intel); \
} while (0)
/* ================================================================
@@ -372,34 +322,26 @@ do { \
((a<<24) | (r<<16) | (g<<8) | b)
#define INTEL_PACKCOLOR(format, r, g, b, a) \
(format == DV_PF_555 ? INTEL_PACKCOLOR1555(r,g,b,a) : \
(format == DV_PF_565 ? INTEL_PACKCOLOR565(r,g,b) : \
(format == DV_PF_8888 ? INTEL_PACKCOLOR8888(r,g,b,a) : \
0)))
/* ================================================================
* From linux kernel i386 header files, copes with odd sizes better
* than COPY_DWORDS would:
* XXX Put this in src/mesa/main/imports.h ???
*/
#if defined(i386) || defined(__i386__)
static __inline__ void * __memcpy(void * to, const void * from, size_t n)
static INLINE void *
__memcpy(void *to, const void *from, size_t n)
{
int d0, d1, d2;
__asm__ __volatile__(
"rep ; movsl\n\t"
"testb $2,%b4\n\t"
"je 1f\n\t"
"movsw\n"
"1:\ttestb $1,%b4\n\t"
"je 2f\n\t"
"movsb\n"
"2:"
: "=&c" (d0), "=&D" (d1), "=&S" (d2)
:"0" (n/4), "q" (n),"1" ((long) to),"2" ((long) from)
: "memory");
__asm__ __volatile__("rep ; movsl\n\t"
"testb $2,%b4\n\t"
"je 1f\n\t"
"movsw\n"
"1:\ttestb $1,%b4\n\t"
"je 2f\n\t"
"movsb\n" "2:":"=&c"(d0), "=&D"(d1), "=&S"(d2)
:"0"(n / 4), "q"(n), "1"((long) to), "2"((long) from)
:"memory");
return (to);
}
#else
@@ -421,16 +363,18 @@ extern int INTEL_DEBUG;
#define DEBUG_TEXTURE 0x1
#define DEBUG_STATE 0x2
#define DEBUG_IOCTL 0x4
#define DEBUG_PRIMS 0x8
#define DEBUG_VERTS 0x10
#define DEBUG_BLIT 0x8
#define DEBUG_MIPTREE 0x10
#define DEBUG_FALLBACKS 0x20
#define DEBUG_VERBOSE 0x40
#define DEBUG_DRI 0x80
#define DEBUG_DMA 0x100
#define DEBUG_SANITY 0x200
#define DEBUG_SYNC 0x400
#define DEBUG_SLEEP 0x800
#define DEBUG_PIXEL 0x1000
#define DEBUG_BATCH 0x80
#define DEBUG_PIXEL 0x100
#define DEBUG_BUFMGR 0x200
#define DEBUG_REGION 0x400
#define DEBUG_FBO 0x800
#define DEBUG_LOCK 0x1000
#define DBG(...) do { if (INTEL_DEBUG & FILE_DEBUG_FLAG) _mesa_printf(__VA_ARGS__); } while(0)
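/* Each source file is expected to define FILE_DEBUG_FLAG to one of the
* DEBUG_* bits above before using DBG(), e.g. #define FILE_DEBUG_FLAG
* DEBUG_BLIT (assumed convention).
*/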
#define PCI_CHIP_845_G 0x2562
@@ -447,27 +391,25 @@ extern int INTEL_DEBUG;
* intel_context.c:
*/
extern void intelInitDriverFunctions( struct dd_function_table *functions );
extern GLboolean intelInitContext(struct intel_context *intel,
const __GLcontextModes * mesaVis,
__DRIcontextPrivate * driContextPriv,
void *sharedContextPrivate,
struct dd_function_table *functions);
extern GLboolean intelInitContext( intelContextPtr intel,
const __GLcontextModes *mesaVis,
__DRIcontextPrivate *driContextPriv,
void *sharedContextPrivate,
struct dd_function_table *functions );
extern void intelGetLock(struct intel_context *intel, GLuint flags);
extern void intelGetLock(intelContextPtr intel, GLuint flags);
extern void intelSetBackClipRects(intelContextPtr intel);
extern void intelSetFrontClipRects(intelContextPtr intel);
extern void intelWindowMoved( intelContextPtr intel );
extern void intelInitState(GLcontext * ctx);
extern void intelFinish(GLcontext * ctx);
extern void intelFlush(GLcontext * ctx);
extern void intelInitState( GLcontext *ctx );
extern const GLubyte *intelGetString( GLcontext *ctx, GLenum name );
extern void intelInitDriverFunctions(struct dd_function_table *functions);
/* ================================================================
* intel_state.c:
*/
extern void intelInitStateFuncs( struct dd_function_table *functions );
extern void intelInitStateFuncs(struct dd_function_table *functions);
#define COMPAREFUNC_ALWAYS 0
#define COMPAREFUNC_NEVER 0x1
@@ -521,27 +463,39 @@ extern void intelInitStateFuncs( struct dd_function_table *functions );
#define BLENDFACT_INV_CONST_ALPHA 0x0f
#define BLENDFACT_MASK 0x0f
extern int intel_translate_compare_func( GLenum func );
extern int intel_translate_stencil_op( GLenum op );
extern int intel_translate_blend_factor( GLenum factor );
extern int intel_translate_logic_op( GLenum opcode );
#define MI_BATCH_BUFFER_END (0xA<<23)
/* ================================================================
* intel_ioctl.c:
extern int intel_translate_compare_func(GLenum func);
extern int intel_translate_stencil_op(GLenum op);
extern int intel_translate_blend_factor(GLenum factor);
extern int intel_translate_logic_op(GLenum opcode);
/*======================================================================
* Inline conversion functions.
* These are better-typed than the macros used previously:
*/
extern void intel_dump_batchbuffer( long offset,
int *ptr,
int count );
static INLINE struct intel_context *
intel_context(GLcontext * ctx)
{
return (struct intel_context *) ctx;
}
static INLINE struct intel_texture_object *
intel_texture_object(struct gl_texture_object *obj)
{
return (struct intel_texture_object *) obj;
}
/* ================================================================
* intel_pixel.c:
*/
extern void intelInitPixelFuncs( struct dd_function_table *functions );
static INLINE struct intel_texture_image *
intel_texture_image(struct gl_texture_image *img)
{
return (struct intel_texture_image *) img;
}
extern struct intel_renderbuffer *intel_renderbuffer(struct gl_renderbuffer
*rb);
#endif

View File

@@ -0,0 +1,282 @@
/**************************************************************************
*
* Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
* IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#include "glheader.h"
#include "imports.h"
#include "context.h"
#include "depthstencil.h"
#include "fbobject.h"
#include "framebuffer.h"
#include "hash.h"
#include "mtypes.h"
#include "renderbuffer.h"
#include "intel_context.h"
#include "intel_fbo.h"
#include "intel_depthstencil.h"
#include "intel_regions.h"
/**
* The GL_EXT_framebuffer_object extension allows the user to create their own
* framebuffer objects consisting of color renderbuffers (0 or more),
* depth renderbuffers (0 or 1) and stencil renderbuffers (0 or 1).
*
* The spec considers depth and stencil renderbuffers to be totally independent
* buffers. In reality, most graphics hardware today uses a combined
* depth+stencil buffer (one 32-bit pixel = 24 bits of Z + 8 bits of stencil).
*
* This causes difficulty because the user may create some number of depth
* renderbuffers and some number of stencil renderbuffers and bind them
* together in framebuffers in any combination.
*
* This code manages all that.
*
* 1. Depth renderbuffers are always allocated in hardware as 32bpp
* GL_DEPTH24_STENCIL8 buffers.
*
* 2. Stencil renderbuffers are initially allocated in software as 8bpp
* GL_STENCIL_INDEX8 buffers.
*
* 3. Depth and Stencil renderbuffers use the PairedStencil and PairedDepth
* fields (respectively) to indicate if the buffer's currently paired
* with another stencil or depth buffer (respectively).
*
* 4. When a depth and stencil buffer are initially both attached to the
* current framebuffer, we merge the stencil buffer values into the
* depth buffer (really a depth+stencil buffer). The hardware then uses
* the combined buffer.
*
* 5. Whenever a depth or stencil buffer is reallocated (with
* glRenderbufferStorage) we undo the pairing and copy the stencil values
* from the combined depth/stencil buffer back to the stencil-only buffer.
*
* 6. We also undo the pairing when we find a change in buffer bindings.
*
* 7. If a framebuffer is only using a depth renderbuffer (no stencil), we
* just use the combined depth/stencil buffer and ignore the stencil values.
*
* 8. If a framebuffer is only using a stencil renderbuffer (no depth) we have
* to promote the 8bpp software stencil buffer to a 32bpp hardware
* depth+stencil buffer.
*
*/
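/* A minimal sketch of the packed texel layout assumed above
* (GL_UNSIGNED_INT_24_8_EXT): depth lives in the upper 24 bits and stencil
* in the low 8 bits of each 32-bit word. Variable names are illustrative:
*
*   GLuint packed = (z24 << 8) | s8;
*   GLuint z24    = packed >> 8;
*   GLubyte s8    = packed & 0xff;
*/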
static void
map_regions(GLcontext * ctx,
struct intel_renderbuffer *depthRb,
struct intel_renderbuffer *stencilRb)
{
struct intel_context *intel = intel_context(ctx);
if (depthRb && depthRb->region) {
intel_region_map(intel->intelScreen, depthRb->region);
depthRb->pfMap = depthRb->region->map;
depthRb->pfPitch = depthRb->region->pitch;
}
if (stencilRb && stencilRb->region) {
intel_region_map(intel->intelScreen, stencilRb->region);
stencilRb->pfMap = stencilRb->region->map;
stencilRb->pfPitch = stencilRb->region->pitch;
}
}
static void
unmap_regions(GLcontext * ctx,
struct intel_renderbuffer *depthRb,
struct intel_renderbuffer *stencilRb)
{
struct intel_context *intel = intel_context(ctx);
if (depthRb && depthRb->region) {
intel_region_unmap(intel->intelScreen, depthRb->region);
depthRb->pfMap = NULL;
depthRb->pfPitch = 0;
}
if (stencilRb && stencilRb->region) {
intel_region_unmap(intel->intelScreen, stencilRb->region);
stencilRb->pfMap = NULL;
stencilRb->pfPitch = 0;
}
}
/**
* Undo the pairing/interleaving between depth and stencil buffers.
* irb should be a depth/stencil or stencil renderbuffer.
*/
void
intel_unpair_depth_stencil(GLcontext * ctx, struct intel_renderbuffer *irb)
{
if (irb->PairedStencil) {
/* irb is a depth/stencil buffer */
struct gl_renderbuffer *stencilRb;
struct intel_renderbuffer *stencilIrb;
ASSERT(irb->Base._ActualFormat == GL_DEPTH24_STENCIL8_EXT);
stencilRb = _mesa_lookup_renderbuffer(ctx, irb->PairedStencil);
stencilIrb = intel_renderbuffer(stencilRb);
if (stencilIrb) {
/* need to extract stencil values from the depth buffer */
ASSERT(stencilIrb->PairedDepth == irb->Base.Name);
map_regions(ctx, irb, stencilIrb);
_mesa_extract_stencil(ctx, &irb->Base, &stencilIrb->Base);
unmap_regions(ctx, irb, stencilIrb);
stencilIrb->PairedDepth = 0;
}
irb->PairedStencil = 0;
}
else if (irb->PairedDepth) {
/* irb is a stencil buffer */
struct gl_renderbuffer *depthRb;
struct intel_renderbuffer *depthIrb;
ASSERT(irb->Base._ActualFormat == GL_STENCIL_INDEX8_EXT ||
irb->Base._ActualFormat == GL_DEPTH24_STENCIL8_EXT);
depthRb = _mesa_lookup_renderbuffer(ctx, irb->PairedDepth);
depthIrb = intel_renderbuffer(depthRb);
if (depthIrb) {
/* need to extract stencil values from the depth buffer */
ASSERT(depthIrb->PairedStencil == irb->Base.Name);
map_regions(ctx, depthIrb, irb);
_mesa_extract_stencil(ctx, &depthIrb->Base, &irb->Base);
unmap_regions(ctx, depthIrb, irb);
depthIrb->PairedStencil = 0;
}
irb->PairedDepth = 0;
}
else {
_mesa_problem(ctx, "Problem in undo_depth_stencil_pairing");
}
ASSERT(irb->PairedStencil == 0);
ASSERT(irb->PairedDepth == 0);
}
/**
* Examine the depth and stencil renderbuffers which are attached to the
* framebuffer. If both depth and stencil are attached, make sure that the
* renderbuffers are 'paired' (combined). If only depth or only stencil is
* attached, undo any previous pairing.
*
* Must be called if NewState & _NEW_BUFFER (when renderbuffer attachments
* change, for example).
*/
void
intel_validate_paired_depth_stencil(GLcontext * ctx,
struct gl_framebuffer *fb)
{
struct intel_renderbuffer *depthRb, *stencilRb;
depthRb = intel_get_renderbuffer(fb, BUFFER_DEPTH);
stencilRb = intel_get_renderbuffer(fb, BUFFER_STENCIL);
if (depthRb && stencilRb) {
if (depthRb == stencilRb) {
/* Using a user-created combined depth/stencil buffer.
* Nothing to do.
*/
ASSERT(depthRb->Base._BaseFormat == GL_DEPTH_STENCIL_EXT);
ASSERT(depthRb->Base._ActualFormat == GL_DEPTH24_STENCIL8_EXT);
}
else {
/* Separate depth/stencil buffers, need to interleave now */
ASSERT(depthRb->Base._BaseFormat == GL_DEPTH_COMPONENT);
ASSERT(stencilRb->Base._BaseFormat == GL_STENCIL_INDEX);
/* may need to interleave depth/stencil now */
if (depthRb->PairedStencil == stencilRb->Base.Name) {
/* OK, the depth and stencil buffers are already interleaved */
ASSERT(stencilRb->PairedDepth == depthRb->Base.Name);
}
else {
/* need to setup new pairing/interleaving */
if (depthRb->PairedStencil) {
intel_unpair_depth_stencil(ctx, depthRb);
}
if (stencilRb->PairedDepth) {
intel_unpair_depth_stencil(ctx, stencilRb);
}
ASSERT(depthRb->Base._ActualFormat == GL_DEPTH24_STENCIL8_EXT);
ASSERT(stencilRb->Base._ActualFormat == GL_STENCIL_INDEX8_EXT ||
stencilRb->Base._ActualFormat == GL_DEPTH24_STENCIL8_EXT);
/* establish new pairing: interleave stencil into depth buffer */
map_regions(ctx, depthRb, stencilRb);
_mesa_insert_stencil(ctx, &depthRb->Base, &stencilRb->Base);
unmap_regions(ctx, depthRb, stencilRb);
depthRb->PairedStencil = stencilRb->Base.Name;
stencilRb->PairedDepth = depthRb->Base.Name;
}
}
}
else if (depthRb) {
/* Depth buffer but no stencil buffer.
* We'll use a GL_DEPTH24_STENCIL8 buffer and ignore the stencil bits.
*/
/* can't assert this until storage is allocated:
ASSERT(depthRb->Base._ActualFormat == GL_DEPTH24_STENCIL8_EXT);
*/
/* undo any previous pairing */
if (depthRb->PairedStencil) {
intel_unpair_depth_stencil(ctx, depthRb);
}
}
else if (stencilRb) {
/* Stencil buffer but no depth buffer.
* Since h/w doesn't typically support just 8bpp stencil w/out Z,
* we'll use a GL_DEPTH24_STENCIL8 buffer and ignore the depth bits.
*/
/* undo any previous pairing */
if (stencilRb->PairedDepth) {
intel_unpair_depth_stencil(ctx, stencilRb);
}
if (stencilRb->Base._ActualFormat == GL_STENCIL_INDEX8_EXT) {
/* promote buffer to GL_DEPTH24_STENCIL8 for hw rendering */
_mesa_promote_stencil(ctx, &stencilRb->Base);
ASSERT(stencilRb->Base._ActualFormat == GL_DEPTH24_STENCIL8_EXT);
}
}
/* Finally, update the fb->_DepthBuffer and fb->_StencilBuffer fields */
_mesa_update_depth_buffer(ctx, fb, BUFFER_DEPTH);
if (depthRb && depthRb->PairedStencil)
_mesa_update_stencil_buffer(ctx, fb, BUFFER_DEPTH);
else
_mesa_update_stencil_buffer(ctx, fb, BUFFER_STENCIL);
/* The hardware should use fb->Attachment[BUFFER_DEPTH].Renderbuffer
* first, if present, then fb->Attachment[BUFFER_STENCIL].Renderbuffer
* if present.
*/
}

View File

@@ -0,0 +1,14 @@
#ifndef INTEL_DEPTH_STENCIL_H
#define INTEL_DEPTH_STENCIL_H
extern void
intel_unpair_depth_stencil(GLcontext * ctx, struct intel_renderbuffer *irb);
extern void
intel_validate_paired_depth_stencil(GLcontext * ctx,
struct gl_framebuffer *fb);
#endif /* INTEL_DEPTH_STENCIL_H */

View File

@@ -0,0 +1,621 @@
/**************************************************************************
*
* Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
* IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#include "imports.h"
#include "mtypes.h"
#include "fbobject.h"
#include "framebuffer.h"
#include "renderbuffer.h"
#include "context.h"
#include "texformat.h"
#include "texrender.h"
#include "intel_context.h"
#include "intel_buffers.h"
#include "intel_depthstencil.h"
#include "intel_fbo.h"
#include "intel_mipmap_tree.h"
#include "intel_regions.h"
#include "intel_span.h"
#define FILE_DEBUG_FLAG DEBUG_FBO
#define INTEL_RB_CLASS 0x12345678
/* XXX FBO: move this to intel_context.h (inlined) */
/**
* Return a gl_renderbuffer ptr cast to intel_renderbuffer.
* NULL will be returned if the rb isn't really an intel_renderbuffer.
* This is determined by checking the ClassID.
*/
struct intel_renderbuffer *
intel_renderbuffer(struct gl_renderbuffer *rb)
{
struct intel_renderbuffer *irb = (struct intel_renderbuffer *) rb;
if (irb && irb->Base.ClassID == INTEL_RB_CLASS) {
/*_mesa_warning(NULL, "Returning non-intel Rb\n");*/
return irb;
}
else
return NULL;
}
struct intel_renderbuffer *
intel_get_renderbuffer(struct gl_framebuffer *fb, GLuint attIndex)
{
return intel_renderbuffer(fb->Attachment[attIndex].Renderbuffer);
}
struct intel_region *
intel_get_rb_region(struct gl_framebuffer *fb, GLuint attIndex)
{
struct intel_renderbuffer *irb
= intel_renderbuffer(fb->Attachment[attIndex].Renderbuffer);
if (irb)
return irb->region;
else
return NULL;
}
/**
* Create a new framebuffer object.
*/
static struct gl_framebuffer *
intel_new_framebuffer(GLcontext * ctx, GLuint name)
{
/* there's no intel_framebuffer at this time, just use Mesa's class */
return _mesa_new_framebuffer(ctx, name);
}
static void
intel_delete_renderbuffer(struct gl_renderbuffer *rb)
{
GET_CURRENT_CONTEXT(ctx);
struct intel_context *intel = intel_context(ctx);
struct intel_renderbuffer *irb = intel_renderbuffer(rb);
ASSERT(irb);
if (irb->PairedStencil || irb->PairedDepth) {
intel_unpair_depth_stencil(ctx, irb);
}
if (intel && irb->region) {
intel_region_release(intel->intelScreen, &irb->region);
}
_mesa_free(irb);
}
/**
* Return a pointer to a specific pixel in a renderbuffer.
*/
static void *
intel_get_pointer(GLcontext * ctx, struct gl_renderbuffer *rb,
GLint x, GLint y)
{
/* By returning NULL we force all software rendering to go through
* the span routines.
*/
return NULL;
}
/**
* Called via glRenderbufferStorageEXT() to set the format and allocate
* storage for a user-created renderbuffer.
*/
static GLboolean
intel_alloc_renderbuffer_storage(GLcontext * ctx, struct gl_renderbuffer *rb,
GLenum internalFormat,
GLuint width, GLuint height)
{
struct intel_context *intel = intel_context(ctx);
struct intel_renderbuffer *irb = intel_renderbuffer(rb);
GLboolean softwareBuffer = GL_FALSE;
int cpp;
ASSERT(rb->Name != 0);
switch (internalFormat) {
case GL_R3_G3_B2:
case GL_RGB4:
case GL_RGB5:
rb->_ActualFormat = GL_RGB5;
rb->DataType = GL_UNSIGNED_BYTE;
rb->RedBits = 5;
rb->GreenBits = 6;
rb->BlueBits = 5;
cpp = 2;
break;
case GL_RGB:
case GL_RGB8:
case GL_RGB10:
case GL_RGB12:
case GL_RGB16:
case GL_RGBA:
case GL_RGBA2:
case GL_RGBA4:
case GL_RGB5_A1:
case GL_RGBA8:
case GL_RGB10_A2:
case GL_RGBA12:
case GL_RGBA16:
rb->_ActualFormat = GL_RGBA8;
rb->DataType = GL_UNSIGNED_BYTE;
rb->RedBits = 8;
rb->GreenBits = 8;
rb->BlueBits = 8;
rb->AlphaBits = 8;
cpp = 4;
break;
case GL_STENCIL_INDEX:
case GL_STENCIL_INDEX1_EXT:
case GL_STENCIL_INDEX4_EXT:
case GL_STENCIL_INDEX8_EXT:
case GL_STENCIL_INDEX16_EXT:
/* alloc a depth+stencil buffer */
rb->_ActualFormat = GL_DEPTH24_STENCIL8_EXT;
rb->DataType = GL_UNSIGNED_INT_24_8_EXT;
rb->StencilBits = 8;
cpp = 4;
break;
case GL_DEPTH_COMPONENT16:
rb->_ActualFormat = GL_DEPTH_COMPONENT16;
rb->DataType = GL_UNSIGNED_SHORT;
rb->DepthBits = 16;
cpp = 2;
break;
case GL_DEPTH_COMPONENT:
case GL_DEPTH_COMPONENT24:
case GL_DEPTH_COMPONENT32:
rb->_ActualFormat = GL_DEPTH24_STENCIL8_EXT;
rb->DataType = GL_UNSIGNED_INT_24_8_EXT;
rb->DepthBits = 24;
cpp = 4;
break;
case GL_DEPTH_STENCIL_EXT:
case GL_DEPTH24_STENCIL8_EXT:
rb->_ActualFormat = GL_DEPTH24_STENCIL8_EXT;
rb->DataType = GL_UNSIGNED_INT_24_8_EXT;
rb->DepthBits = 24;
rb->StencilBits = 8;
cpp = 4;
break;
default:
_mesa_problem(ctx,
"Unexpected format in intel_alloc_renderbuffer_storage");
return GL_FALSE;
}
intelFlush(ctx);
/* free old region */
if (irb->region) {
/*LOCK_HARDWARE(intel); */
intel_region_release(intel->intelScreen, &irb->region);
/*UNLOCK_HARDWARE(intel); */
}
/* allocate new memory region/renderbuffer */
if (softwareBuffer) {
return _mesa_soft_renderbuffer_storage(ctx, rb, internalFormat,
width, height);
}
else {
/* Choose a pitch to match hardware requirements:
*/
GLuint pitch = ((cpp * width + 63) & ~63) / cpp;
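/* e.g. cpp = 4, width = 100: (400 + 63) & ~63 = 448 bytes per row,
* giving a pitch of 112 pixels (rows rounded up to 64 bytes).
*/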
/* alloc hardware renderbuffer */
DBG("Allocating %d x %d Intel RBO (pitch %d)\n", width,
height, pitch);
irb->region = intel_region_alloc(intel->intelScreen, cpp, pitch, height);
if (!irb->region)
return GL_FALSE; /* out of memory? */
ASSERT(irb->region->buffer);
rb->Width = width;
rb->Height = height;
/* This sets the Get/PutRow/Value functions */
intel_set_span_functions(&irb->Base);
return GL_TRUE;
}
}
/**
* Called for each hardware renderbuffer when a _window_ is resized.
* Just update fields.
* Not used for user-created renderbuffers!
*/
static GLboolean
intel_alloc_window_storage(GLcontext * ctx, struct gl_renderbuffer *rb,
GLenum internalFormat, GLuint width, GLuint height)
{
ASSERT(rb->Name == 0);
rb->Width = width;
rb->Height = height;
rb->_ActualFormat = internalFormat;
return GL_TRUE;
}
static GLboolean
intel_nop_alloc_storage(GLcontext * ctx, struct gl_renderbuffer *rb,
GLenum internalFormat, GLuint width, GLuint height)
{
_mesa_problem(ctx, "intel_op_alloc_storage should never be called.");
return GL_FALSE;
}
/**
* Create a new intel_renderbuffer which corresponds to an on-screen window,
* not a user-created renderbuffer.
* \param width the screen width
* \param height the screen height
*/
struct intel_renderbuffer *
intel_create_renderbuffer(GLenum intFormat, GLsizei width, GLsizei height,
int offset, int pitch, int cpp, void *map)
{
GET_CURRENT_CONTEXT(ctx);
struct intel_renderbuffer *irb;
const GLuint name = 0;
irb = CALLOC_STRUCT(intel_renderbuffer);
if (!irb) {
_mesa_error(ctx, GL_OUT_OF_MEMORY, "creating renderbuffer");
return NULL;
}
_mesa_init_renderbuffer(&irb->Base, name);
irb->Base.ClassID = INTEL_RB_CLASS;
switch (intFormat) {
case GL_RGB5:
irb->Base._ActualFormat = GL_RGB5;
irb->Base._BaseFormat = GL_RGBA;
irb->Base.RedBits = 5;
irb->Base.GreenBits = 6;
irb->Base.BlueBits = 5;
irb->Base.DataType = GL_UNSIGNED_BYTE;
cpp = 2;
break;
case GL_RGBA8:
irb->Base._ActualFormat = GL_RGBA8;
irb->Base._BaseFormat = GL_RGBA;
irb->Base.RedBits = 8;
irb->Base.GreenBits = 8;
irb->Base.BlueBits = 8;
irb->Base.AlphaBits = 8;
irb->Base.DataType = GL_UNSIGNED_BYTE;
cpp = 4;
break;
case GL_STENCIL_INDEX8_EXT:
irb->Base._ActualFormat = GL_STENCIL_INDEX8_EXT;
irb->Base._BaseFormat = GL_STENCIL_INDEX;
irb->Base.StencilBits = 8;
irb->Base.DataType = GL_UNSIGNED_BYTE;
cpp = 1;
break;
case GL_DEPTH_COMPONENT16:
irb->Base._ActualFormat = GL_DEPTH_COMPONENT16;
irb->Base._BaseFormat = GL_DEPTH_COMPONENT;
irb->Base.DepthBits = 16;
irb->Base.DataType = GL_UNSIGNED_SHORT;
cpp = 2;
break;
case GL_DEPTH_COMPONENT24:
irb->Base._ActualFormat = GL_DEPTH24_STENCIL8_EXT;
irb->Base._BaseFormat = GL_DEPTH_COMPONENT;
irb->Base.DepthBits = 24;
irb->Base.DataType = GL_UNSIGNED_INT;
cpp = 4;
break;
case GL_DEPTH24_STENCIL8_EXT:
irb->Base._ActualFormat = GL_DEPTH24_STENCIL8_EXT;
irb->Base._BaseFormat = GL_DEPTH_STENCIL_EXT;
irb->Base.DepthBits = 24;
irb->Base.StencilBits = 8;
irb->Base.DataType = GL_UNSIGNED_INT_24_8_EXT;
cpp = 4;
break;
default:
_mesa_problem(NULL,
"Unexpected intFormat in intel_create_renderbuffer");
return NULL;
}
irb->Base.InternalFormat = intFormat;
/* intel-specific methods */
irb->Base.Delete = intel_delete_renderbuffer;
irb->Base.AllocStorage = intel_alloc_window_storage;
irb->Base.GetPointer = intel_get_pointer;
/* This sets the Get/PutRow/Value functions */
intel_set_span_functions(&irb->Base);
irb->pfMap = map;
irb->pfPitch = pitch / cpp; /* in pixels */
#if 00
irb->region = intel_region_create_static(intel,
DRM_MM_TT,
offset, map, cpp, width, height);
#endif
return irb;
}
/**
* Create a new renderbuffer object.
* Typically called via glBindRenderbufferEXT().
*/
static struct gl_renderbuffer *
intel_new_renderbuffer(GLcontext * ctx, GLuint name)
{
/*struct intel_context *intel = intel_context(ctx); */
struct intel_renderbuffer *irb;
irb = CALLOC_STRUCT(intel_renderbuffer);
if (!irb) {
_mesa_error(ctx, GL_OUT_OF_MEMORY, "creating renderbuffer");
return NULL;
}
_mesa_init_renderbuffer(&irb->Base, name);
irb->Base.ClassID = INTEL_RB_CLASS;
/* intel-specific methods */
irb->Base.Delete = intel_delete_renderbuffer;
irb->Base.AllocStorage = intel_alloc_renderbuffer_storage;
irb->Base.GetPointer = intel_get_pointer;
/* span routines set in alloc_storage function */
return &irb->Base;
}
/**
* Called via glBindFramebufferEXT().
*/
static void
intel_bind_framebuffer(GLcontext * ctx, GLenum target,
struct gl_framebuffer *fb)
{
if (target == GL_FRAMEBUFFER_EXT || target == GL_DRAW_FRAMEBUFFER_EXT) {
intel_draw_buffer(ctx, fb);
/* Integer depth range depends on depth buffer bits */
ctx->Driver.DepthRange(ctx, ctx->Viewport.Near, ctx->Viewport.Far);
}
else {
/* don't need to do anything if target == GL_READ_FRAMEBUFFER_EXT */
}
}
/**
* Called via glFramebufferRenderbufferEXT().
*/
static void
intel_framebuffer_renderbuffer(GLcontext * ctx,
struct gl_framebuffer *fb,
GLenum attachment, struct gl_renderbuffer *rb)
{
DBG("Intel FramebufferRenderbuffer %u %u\n", fb->Name, rb ? rb->Name : 0);
intelFlush(ctx);
_mesa_framebuffer_renderbuffer(ctx, fb, attachment, rb);
intel_draw_buffer(ctx, fb);
}
/**
* When glFramebufferTexture[123]D is called, this function sets up the
* gl_renderbuffer wrapper around the texture image.
* This will have the region info needed for hardware rendering.
*/
static struct intel_renderbuffer *
intel_wrap_texture(GLcontext * ctx, struct gl_texture_image *texImage)
{
const GLuint name = ~0; /* not significant, but distinct for debugging */
struct intel_renderbuffer *irb;
/* make an intel_renderbuffer to wrap the texture image */
irb = CALLOC_STRUCT(intel_renderbuffer);
if (!irb) {
_mesa_error(ctx, GL_OUT_OF_MEMORY, "glFramebufferTexture");
return NULL;
}
_mesa_init_renderbuffer(&irb->Base, name);
irb->Base.ClassID = INTEL_RB_CLASS;
if (texImage->TexFormat == &_mesa_texformat_argb8888) {
irb->Base._ActualFormat = GL_RGBA8;
irb->Base._BaseFormat = GL_RGBA;
DBG("Render to RGBA8 texture OK\n");
}
else if (texImage->TexFormat == &_mesa_texformat_rgb565) {
irb->Base._ActualFormat = GL_RGB5;
irb->Base._BaseFormat = GL_RGB;
DBG("Render to RGB5 texture OK\n");
}
else if (texImage->TexFormat == &_mesa_texformat_z16) {
irb->Base._ActualFormat = GL_DEPTH_COMPONENT16;
irb->Base._BaseFormat = GL_DEPTH_COMPONENT;
DBG("Render to DEPTH16 texture OK\n");
}
else {
DBG("Render to texture BAD FORMAT %d\n",
texImage->TexFormat->MesaFormat);
_mesa_free(irb);
return NULL;
}
irb->Base.InternalFormat = irb->Base._ActualFormat;
irb->Base.Width = texImage->Width;
irb->Base.Height = texImage->Height;
irb->Base.DataType = GL_UNSIGNED_BYTE; /* FBO XXX fix */
irb->Base.RedBits = texImage->TexFormat->RedBits;
irb->Base.GreenBits = texImage->TexFormat->GreenBits;
irb->Base.BlueBits = texImage->TexFormat->BlueBits;
irb->Base.AlphaBits = texImage->TexFormat->AlphaBits;
irb->Base.DepthBits = texImage->TexFormat->DepthBits;
irb->Base.Delete = intel_delete_renderbuffer;
irb->Base.AllocStorage = intel_nop_alloc_storage;
intel_set_span_functions(&irb->Base);
irb->RenderToTexture = GL_TRUE;
return irb;
}
/**
* Called by glFramebufferTexture[123]DEXT() (and other places) to
* prepare for rendering into texture memory. This might be called
* many times to choose different texture levels, cube faces, etc
* before intel_finish_render_texture() is ever called.
*/
static void
intel_render_texture(GLcontext * ctx,
struct gl_framebuffer *fb,
struct gl_renderbuffer_attachment *att)
{
struct gl_texture_image *newImage
= att->Texture->Image[att->CubeMapFace][att->TextureLevel];
struct intel_renderbuffer *irb = intel_renderbuffer(att->Renderbuffer);
struct intel_texture_image *intel_image;
GLuint imageOffset;
(void) fb;
ASSERT(newImage);
if (!irb) {
irb = intel_wrap_texture(ctx, newImage);
if (irb) {
/* bind the wrapper to the attachment point */
att->Renderbuffer = &irb->Base;
}
else {
/* fallback to software rendering */
_mesa_render_texture(ctx, fb, att);
return;
}
}
DBG("Begin render texture tex=%u w=%d h=%d refcount=%d\n",
att->Texture->Name, newImage->Width, newImage->Height,
irb->Base.RefCount);
/* point the renderbuffer's region to the texture image region */
intel_image = intel_texture_image(newImage);
if (irb->region != intel_image->mt->region)
intel_region_reference(&irb->region, intel_image->mt->region);
/* compute offset of the particular 2D image within the texture region */
imageOffset = intel_miptree_image_offset(intel_image->mt,
att->CubeMapFace,
att->TextureLevel);
if (att->Texture->Target == GL_TEXTURE_3D) {
const GLuint *offsets = intel_miptree_depth_offsets(intel_image->mt,
att->TextureLevel);
imageOffset += offsets[att->Zoffset];
}
/* store that offset in the region */
intel_image->mt->region->draw_offset = imageOffset;
/* update drawing region, etc */
intel_draw_buffer(ctx, fb);
}
/**
* Called by Mesa when rendering to a texture is done.
*/
static void
intel_finish_render_texture(GLcontext * ctx,
struct gl_renderbuffer_attachment *att)
{
struct intel_context *intel = intel_context(ctx);
struct intel_renderbuffer *irb = intel_renderbuffer(att->Renderbuffer);
DBG("End render texture (tid %u) tex %u\n", _glthread_GetID(), att->Texture->Name);
if (irb) {
/* just release the region */
intel_region_release(intel->intelScreen, &irb->region);
}
else if (att->Renderbuffer) {
/* software fallback */
_mesa_finish_render_texture(ctx, att);
/* XXX FBO: Need to unmap the buffer (or in intelSpanRenderStart???) */
}
}
/**
* Do one-time context initializations related to GL_EXT_framebuffer_object.
* Hook in device driver functions.
*/
void
intel_fbo_init(struct intel_context *intel)
{
intel->ctx.Driver.NewFramebuffer = intel_new_framebuffer;
intel->ctx.Driver.NewRenderbuffer = intel_new_renderbuffer;
intel->ctx.Driver.BindFramebuffer = intel_bind_framebuffer;
intel->ctx.Driver.FramebufferRenderbuffer = intel_framebuffer_renderbuffer;
intel->ctx.Driver.RenderTexture = intel_render_texture;
intel->ctx.Driver.FinishRenderTexture = intel_finish_render_texture;
}

View File

@@ -0,0 +1,80 @@
/**************************************************************************
*
* Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
* IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#ifndef INTEL_FBO_H
#define INTEL_FBO_H
struct intel_context;
struct intel_region;
/**
* Intel renderbuffer, derived from gl_renderbuffer.
* Note: The PairedDepth and PairedStencil fields use renderbuffer IDs,
* not pointers because in some circumstances a deleted renderbuffer could
* result in a dangling pointer here.
*/
struct intel_renderbuffer
{
struct gl_renderbuffer Base;
struct intel_region *region;
void *pfMap; /* possibly page-flipped map pointer */
GLuint pfPitch; /* possibly page-flipped pitch, in pixels */
GLboolean RenderToTexture; /* RTT? */
GLuint PairedDepth; /**< only used if this is a depth renderbuffer */
GLuint PairedStencil; /**< only used if this is a stencil renderbuffer */
};
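/* Lookup sketch (see intel_unpair_depth_stencil()): the paired buffer is
* re-found by ID each time, so a deleted renderbuffer simply fails the
* lookup instead of leaving a dangling pointer:
*
*   struct gl_renderbuffer *rb =
*      _mesa_lookup_renderbuffer(ctx, irb->PairedStencil);
*   struct intel_renderbuffer *paired = intel_renderbuffer(rb);
*/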
extern struct intel_renderbuffer *intel_create_renderbuffer(GLenum intFormat,
GLsizei width,
GLsizei height,
int offset,
int pitch,
int cpp,
void *map);
extern void intel_fbo_init(struct intel_context *intel);
/* XXX make inline or macro */
extern struct intel_renderbuffer *intel_get_renderbuffer(struct gl_framebuffer
*fb,
GLuint attIndex);
/* XXX make inline or macro */
extern struct intel_region *intel_get_rb_region(struct gl_framebuffer *fb,
GLuint attIndex);
#endif /* INTEL_FBO_H */

View File

@@ -38,623 +38,101 @@
#include "intel_context.h"
#include "intel_ioctl.h"
#include "intel_batchbuffer.h"
#include "intel_blit.h"
#include "intel_regions.h"
#include "drm.h"
u_int32_t intelGetLastFrame (intelContextPtr intel)
{
int ret;
u_int32_t frame;
drm_i915_getparam_t gp;
gp.param = I915_PARAM_LAST_DISPATCH;
gp.value = (int *)&frame;
ret = drmCommandWriteRead( intel->driFd, DRM_I915_GETPARAM,
&gp, sizeof(gp) );
return frame;
}
#define FILE_DEBUG_FLAG DEBUG_IOCTL
int intelEmitIrqLocked( intelContextPtr intel )
int
intelEmitIrqLocked(struct intel_context *intel)
{
drmI830IrqEmit ie;
int ret, seq;
assert(((*(int *)intel->driHwLock) & ~DRM_LOCK_CONT) ==
(DRM_LOCK_HELD|intel->hHWContext));
assert(((*(int *) intel->driHwLock) & ~DRM_LOCK_CONT) ==
(DRM_LOCK_HELD | intel->hHWContext));
ie.irq_seq = &seq;
ret = drmCommandWriteRead( intel->driFd, DRM_I830_IRQ_EMIT,
&ie, sizeof(ie) );
if ( ret ) {
fprintf( stderr, "%s: drmI830IrqEmit: %d\n", __FUNCTION__, ret );
ret = drmCommandWriteRead(intel->driFd, DRM_I830_IRQ_EMIT,
&ie, sizeof(ie));
if (ret) {
fprintf(stderr, "%s: drmI830IrqEmit: %d\n", __FUNCTION__, ret);
exit(1);
}
if (0)
fprintf(stderr, "%s --> %d\n", __FUNCTION__, seq );
DBG("%s --> %d\n", __FUNCTION__, seq);
return seq;
}
void intelWaitIrq( intelContextPtr intel, int seq )
void
intelWaitIrq(struct intel_context *intel, int seq)
{
int ret;
if (0)
fprintf(stderr, "%s %d\n", __FUNCTION__, seq );
DBG("%s %d\n", __FUNCTION__, seq);
intel->iw.irq_seq = seq;
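/* The wait ioctl may return early when interrupted by a signal;
* retry until it completes or fails hard.
*/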
do {
ret = drmCommandWrite( intel->driFd, DRM_I830_IRQ_WAIT, &intel->iw, sizeof(intel->iw) );
ret =
drmCommandWrite(intel->driFd, DRM_I830_IRQ_WAIT, &intel->iw,
sizeof(intel->iw));
} while (ret == -EAGAIN || ret == -EINTR);
if ( ret ) {
fprintf( stderr, "%s: drmI830IrqWait: %d\n", __FUNCTION__, ret );
if (0)
intel_dump_batchbuffer( intel->alloc.offset,
intel->alloc.ptr,
intel->alloc.size );
if (ret) {
fprintf(stderr, "%s: drmI830IrqWait: %d\n", __FUNCTION__, ret);
exit(1);
}
}
static void age_intel( intelContextPtr intel, int age )
{
GLuint i;
for (i = 0 ; i < MAX_TEXTURE_UNITS ; i++)
if (intel->CurrentTexObj[i])
intel->CurrentTexObj[i]->age = age;
}
void intel_dump_batchbuffer( long offset,
int *ptr,
int count )
{
int i;
fprintf(stderr, "\n\n\nSTART BATCH (%d dwords):\n", count);
for (i = 0; i < count/4; i += 4)
fprintf(stderr, "\t0x%x: 0x%08x 0x%08x 0x%08x 0x%08x\n",
(unsigned int)offset + i*4, ptr[i], ptr[i+1], ptr[i+2], ptr[i+3]);
fprintf(stderr, "END BATCH\n\n\n");
}
void intelRefillBatchLocked( intelContextPtr intel, GLboolean allow_unlock )
{
GLuint last_irq = intel->alloc.irq_emitted;
GLuint half = intel->alloc.size / 2;
GLuint buf = (intel->alloc.active_buf ^= 1);
intel->alloc.irq_emitted = intelEmitIrqLocked( intel );
if (last_irq) {
if (allow_unlock) UNLOCK_HARDWARE( intel );
intelWaitIrq( intel, last_irq );
if (allow_unlock) LOCK_HARDWARE( intel );
}
if (0)
fprintf(stderr, "%s: now using half %d\n", __FUNCTION__, buf);
intel->batch.start_offset = intel->alloc.offset + buf * half;
intel->batch.ptr = (unsigned char *)intel->alloc.ptr + buf * half;
intel->batch.size = half - 8;
intel->batch.space = half - 8;
assert(intel->batch.space >= 0);
}
#define MI_BATCH_BUFFER_END (0xA<<23)
void intelFlushBatchLocked( intelContextPtr intel,
GLboolean ignore_cliprects,
GLboolean refill,
GLboolean allow_unlock)
void
intel_batch_ioctl(struct intel_context *intel,
GLuint start_offset,
GLuint used,
GLboolean ignore_cliprects, GLboolean allow_unlock)
{
drmI830BatchBuffer batch;
assert(intel->locked);
assert(used);
if (0)
fprintf(stderr, "%s used %d of %d offset %x..%x refill %d (started in %s)\n",
__FUNCTION__,
(intel->batch.size - intel->batch.space),
intel->batch.size,
intel->batch.start_offset,
intel->batch.start_offset +
(intel->batch.size - intel->batch.space),
refill,
intel->batch.func);
DBG("%s used %d offset %x..%x ignore_cliprects %d\n",
__FUNCTION__,
used, start_offset, start_offset + used, ignore_cliprects);
/* Throw away non-effective packets. Won't work once we have
* hardware contexts which would preserve state changes beyond a
* single buffer.
*/
if (intel->numClipRects == 0 && !ignore_cliprects) {
/* Without this yield, an application with no cliprects can hog
* the hardware. Without unlocking, the effect is much worse -
* effectively a lock-out of other contexts.
*/
if (allow_unlock) {
UNLOCK_HARDWARE( intel );
sched_yield();
LOCK_HARDWARE( intel );
}
/* Note that any state thought to have been emitted actually
* hasn't:
*/
intel->batch.ptr -= (intel->batch.size - intel->batch.space);
intel->batch.space = intel->batch.size;
intel->vtbl.lost_hardware( intel );
}
if (intel->batch.space != intel->batch.size) {
if (intel->sarea->ctxOwner != intel->hHWContext) {
intel->perf_boxes |= I830_BOX_LOST_CONTEXT;
intel->sarea->ctxOwner = intel->hHWContext;
}
batch.start = intel->batch.start_offset;
batch.used = intel->batch.size - intel->batch.space;
batch.cliprects = intel->pClipRects;
batch.num_cliprects = ignore_cliprects ? 0 : intel->numClipRects;
batch.DR1 = 0;
batch.DR4 = ((((GLuint)intel->drawX) & 0xffff) |
(((GLuint)intel->drawY) << 16));
if (intel->alloc.offset) {
if ((batch.used & 0x4) == 0) {
((int *)intel->batch.ptr)[0] = 0;
((int *)intel->batch.ptr)[1] = MI_BATCH_BUFFER_END;
batch.used += 0x8;
intel->batch.ptr += 0x8;
}
else {
((int *)intel->batch.ptr)[0] = MI_BATCH_BUFFER_END;
batch.used += 0x4;
intel->batch.ptr += 0x4;
}
}
if (0)
intel_dump_batchbuffer( batch.start,
(int *)(intel->batch.ptr - batch.used),
batch.used );
intel->batch.start_offset += batch.used;
intel->batch.size -= batch.used;
if (intel->batch.size < 8) {
refill = GL_TRUE;
intel->batch.space = intel->batch.size = 0;
}
else {
intel->batch.size -= 8;
intel->batch.space = intel->batch.size;
}
assert(intel->batch.space >= 0);
assert(batch.start >= intel->alloc.offset);
assert(batch.start < intel->alloc.offset + intel->alloc.size);
assert(batch.start + batch.used > intel->alloc.offset);
assert(batch.start + batch.used <=
intel->alloc.offset + intel->alloc.size);
batch.start = start_offset;
batch.used = used;
batch.cliprects = intel->pClipRects;
batch.num_cliprects = ignore_cliprects ? 0 : intel->numClipRects;
batch.DR1 = 0;
batch.DR4 = ((((GLuint) intel->drawX) & 0xffff) |
(((GLuint) intel->drawY) << 16));
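/* DR4 packs the drawable origin: x in the low 16 bits, y in the high 16 */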
if (intel->alloc.offset) {
if (drmCommandWrite (intel->driFd, DRM_I830_BATCHBUFFER, &batch,
sizeof(batch))) {
fprintf(stderr, "DRM_I830_BATCHBUFFER: %d\n", -errno);
UNLOCK_HARDWARE(intel);
exit(1);
}
} else {
drmI830CmdBuffer cmd;
cmd.buf = (char *)intel->alloc.ptr + batch.start;
cmd.sz = batch.used;
cmd.DR1 = batch.DR1;
cmd.DR4 = batch.DR4;
cmd.num_cliprects = batch.num_cliprects;
cmd.cliprects = batch.cliprects;
if (drmCommandWrite (intel->driFd, DRM_I830_CMDBUFFER, &cmd,
sizeof(cmd))) {
fprintf(stderr, "DRM_I830_CMDBUFFER: %d\n", -errno);
UNLOCK_HARDWARE(intel);
exit(1);
}
}
DBG("%s: 0x%x..0x%x DR4: %x cliprects: %d\n",
__FUNCTION__,
batch.start,
batch.start + batch.used * 4, batch.DR4, batch.num_cliprects);
age_intel(intel, intel->sarea->last_enqueue);
/* FIXME: use hardware contexts to avoid 'losing' hardware after
* each buffer flush.
*/
if (intel->batch.contains_geometry)
assert(intel->batch.last_emit_state == intel->batch.counter);
intel->batch.counter++;
intel->batch.contains_geometry = 0;
intel->batch.func = 0;
intel->vtbl.lost_hardware( intel );
}
if (refill)
intelRefillBatchLocked( intel, allow_unlock );
}
void intelFlushBatch( intelContextPtr intel, GLboolean refill )
{
if (intel->locked) {
intelFlushBatchLocked( intel, GL_FALSE, refill, GL_FALSE );
}
else {
LOCK_HARDWARE(intel);
intelFlushBatchLocked( intel, GL_FALSE, refill, GL_TRUE );
if (drmCommandWrite(intel->driFd, DRM_I830_BATCHBUFFER, &batch,
sizeof(batch))) {
fprintf(stderr, "DRM_I830_BATCHBUFFER: %d\n", -errno);
UNLOCK_HARDWARE(intel);
}
}
void intelWaitForIdle( intelContextPtr intel )
{
if (0)
fprintf(stderr, "%s\n", __FUNCTION__);
intel->vtbl.emit_flush( intel );
intelFlushBatch( intel, GL_TRUE );
/* Use an irq to wait for dma idle -- Need to track lost contexts
* to short-circuit consecutive calls to this function:
*/
intelWaitIrq( intel, intel->alloc.irq_emitted );
intel->alloc.irq_emitted = 0;
}
/**
* Check if we need to rotate/warp the front color buffer to the
* rotated screen. We generally need to do this when we get a glFlush
* or glFinish after drawing to the front color buffer.
*/
static void
intelCheckFrontRotate(GLcontext *ctx)
{
intelContextPtr intel = INTEL_CONTEXT( ctx );
if (intel->ctx.DrawBuffer->_ColorDrawBufferMask[0] == BUFFER_BIT_FRONT_LEFT) {
intelScreenPrivate *screen = intel->intelScreen;
if (screen->current_rotation != 0) {
__DRIdrawablePrivate *dPriv = intel->driDrawable;
intelRotateWindow(intel, dPriv, BUFFER_BIT_FRONT_LEFT);
}
}
}
/**
* NOT directly called via glFlush.
*/
void intelFlush( GLcontext *ctx )
{
intelContextPtr intel = INTEL_CONTEXT( ctx );
if (intel->Fallback)
_swrast_flush( ctx );
INTEL_FIREVERTICES( intel );
if (intel->batch.size != intel->batch.space)
intelFlushBatch( intel, GL_FALSE );
}
/**
* Called via glFlush.
*/
void intelglFlush( GLcontext *ctx )
{
intelFlush(ctx);
intelCheckFrontRotate(ctx);
}
void intelFinish( GLcontext *ctx )
{
intelContextPtr intel = INTEL_CONTEXT( ctx );
intelFlush( ctx );
intelWaitForIdle( intel );
intelCheckFrontRotate(ctx);
}
void intelClear(GLcontext *ctx, GLbitfield mask, GLboolean all,
GLint cx, GLint cy, GLint cw, GLint ch)
{
intelContextPtr intel = INTEL_CONTEXT( ctx );
const GLuint colorMask = *((GLuint *) &ctx->Color.ColorMask);
GLbitfield tri_mask = 0;
GLbitfield blit_mask = 0;
GLbitfield swrast_mask = 0;
if (0)
fprintf(stderr, "%s\n", __FUNCTION__);
/* Take care of cliprects, which are handled differently for
* clears, etc.
*/
intelFlush( &intel->ctx );
if (mask & BUFFER_BIT_FRONT_LEFT) {
if (colorMask == ~0) {
blit_mask |= BUFFER_BIT_FRONT_LEFT;
}
else {
tri_mask |= BUFFER_BIT_FRONT_LEFT;
}
}
if (mask & BUFFER_BIT_BACK_LEFT) {
if (colorMask == ~0) {
blit_mask |= BUFFER_BIT_BACK_LEFT;
}
else {
tri_mask |= BUFFER_BIT_BACK_LEFT;
}
}
if (mask & BUFFER_BIT_DEPTH) {
blit_mask |= BUFFER_BIT_DEPTH;
}
if (mask & BUFFER_BIT_STENCIL) {
if (!intel->hw_stencil) {
swrast_mask |= BUFFER_BIT_STENCIL;
}
else if ((ctx->Stencil.WriteMask[0] & 0xff) != 0xff) {
tri_mask |= BUFFER_BIT_STENCIL;
}
else {
blit_mask |= BUFFER_BIT_STENCIL;
}
}
swrast_mask |= (mask & BUFFER_BIT_ACCUM);
if (blit_mask)
intelClearWithBlit( ctx, blit_mask, all, cx, cy, cw, ch );
if (tri_mask)
intel->vtbl.clear_with_tris( intel, tri_mask, all, cx, cy, cw, ch);
if (swrast_mask)
_swrast_Clear( ctx, swrast_mask, all, cx, cy, cw, ch );
}
void
intelRotateWindow(intelContextPtr intel, __DRIdrawablePrivate *dPriv,
GLuint srcBuffer)
{
if (intel->vtbl.rotate_window) {
intel->vtbl.rotate_window(intel, dPriv, srcBuffer);
}
}
void *intelAllocateAGP( intelContextPtr intel, GLsizei size )
{
int region_offset;
drmI830MemAlloc alloc;
int ret;
if (0)
fprintf(stderr, "%s: %d bytes\n", __FUNCTION__, size);
alloc.region = I830_MEM_REGION_AGP;
alloc.alignment = 0;
alloc.size = size;
alloc.region_offset = &region_offset;
LOCK_HARDWARE(intel);
/* Make sure the global heap is initialized
*/
if (intel->texture_heaps[0])
driAgeTextures( intel->texture_heaps[0] );
ret = drmCommandWriteRead( intel->driFd,
DRM_I830_ALLOC,
&alloc, sizeof(alloc));
if (ret) {
fprintf(stderr, "%s: DRM_I830_ALLOC ret %d\n", __FUNCTION__, ret);
UNLOCK_HARDWARE(intel);
return NULL;
}
if (0)
fprintf(stderr, "%s: allocated %d bytes\n", __FUNCTION__, size);
/* Need to propagate this information (agp memory in use) to our
* local texture lru. The kernel has already updated the global
* lru. An alternative would have been to allocate memory the
* usual way and then notify the kernel to pin the allocation.
*/
if (intel->texture_heaps[0])
driAgeTextures( intel->texture_heaps[0] );
UNLOCK_HARDWARE(intel);
return (void *)((char *)intel->intelScreen->tex.map + region_offset);
}
void intelFreeAGP( intelContextPtr intel, void *pointer )
{
int region_offset;
drmI830MemFree memfree;
int ret;
region_offset = (char *)pointer - (char *)intel->intelScreen->tex.map;
if (region_offset < 0 ||
region_offset > intel->intelScreen->tex.size) {
fprintf(stderr, "offset %d outside range 0..%d\n", region_offset,
intel->intelScreen->tex.size);
return;
}
memfree.region = I830_MEM_REGION_AGP;
memfree.region_offset = region_offset;
ret = drmCommandWrite( intel->driFd,
DRM_I830_FREE,
&memfree, sizeof(memfree));
if (ret)
fprintf(stderr, "%s: DRM_I830_FREE ret %d\n", __FUNCTION__, ret);
}
/* This version of AllocateMemoryMESA allocates only agp memory, and
* only does so after the point at which the driver has been
* initialized.
*
* Theoretically a valid context isn't required. However, in this
* implementation, it is, as I'm using the hardware lock to protect
* the kernel data structures, and the current context to get the
* device fd.
*/
void *intelAllocateMemoryMESA(__DRInativeDisplay *dpy, int scrn,
GLsizei size, GLfloat readfreq,
GLfloat writefreq, GLfloat priority)
{
GET_CURRENT_CONTEXT(ctx);
if (INTEL_DEBUG & DEBUG_IOCTL)
fprintf(stderr, "%s sz %d %f/%f/%f\n", __FUNCTION__, size, readfreq,
writefreq, priority);
if (getenv("INTEL_NO_ALLOC"))
return NULL;
if (!ctx || INTEL_CONTEXT(ctx) == 0)
return NULL;
return intelAllocateAGP( INTEL_CONTEXT(ctx), size );
}
/* Called via glXFreeMemoryMESA() */
void intelFreeMemoryMESA(__DRInativeDisplay *dpy, int scrn, GLvoid *pointer)
{
GET_CURRENT_CONTEXT(ctx);
if (INTEL_DEBUG & DEBUG_IOCTL)
fprintf(stderr, "%s %p\n", __FUNCTION__, pointer);
if (!ctx || INTEL_CONTEXT(ctx) == 0) {
fprintf(stderr, "%s: no context\n", __FUNCTION__);
return;
}
intelFreeAGP( INTEL_CONTEXT(ctx), pointer );
}
/* Called via glXGetMemoryOffsetMESA()
*
* Returns offset of pointer from the start of agp aperture.
*/
GLuint intelGetMemoryOffsetMESA(__DRInativeDisplay *dpy, int scrn,
const GLvoid *pointer)
{
GET_CURRENT_CONTEXT(ctx);
intelContextPtr intel;
if (!ctx || !(intel = INTEL_CONTEXT(ctx)) ) {
fprintf(stderr, "%s: no context\n", __FUNCTION__);
return ~0;
}
if (!intelIsAgpMemory( intel, pointer, 0 ))
return ~0;
return intelAgpOffsetFromVirtual( intel, pointer );
}
GLboolean intelIsAgpMemory( intelContextPtr intel, const GLvoid *pointer,
GLint size )
{
int offset = (char *)pointer - (char *)intel->intelScreen->tex.map;
int valid = (size >= 0 &&
offset >= 0 &&
offset + size < intel->intelScreen->tex.size);
if (INTEL_DEBUG & DEBUG_IOCTL)
fprintf(stderr, "intelIsAgpMemory( %p ) : %d\n", pointer, valid );
return valid;
}
GLuint intelAgpOffsetFromVirtual( intelContextPtr intel, const GLvoid *pointer )
{
int offset = (char *)pointer - (char *)intel->intelScreen->tex.map;
if (offset < 0 || offset > intel->intelScreen->tex.size)
return ~0;
else
return intel->intelScreen->tex.offset + offset;
}
/* Flip the front & back buffers
*/
void intelPageFlip( const __DRIdrawablePrivate *dPriv )
{
#if 0
intelContextPtr intel;
int tmp, ret;
if (INTEL_DEBUG & DEBUG_IOCTL)
fprintf(stderr, "%s\n", __FUNCTION__);
assert(dPriv);
assert(dPriv->driContextPriv);
assert(dPriv->driContextPriv->driverPrivate);
intel = (intelContextPtr) dPriv->driContextPriv->driverPrivate;
intelFlush( &intel->ctx );
LOCK_HARDWARE( intel );
if (dPriv->pClipRects) {
*(drm_clip_rect_t *)intel->sarea->boxes = dPriv->pClipRects[0];
intel->sarea->nbox = 1;
}
ret = drmCommandNone(intel->driFd, DRM_I830_FLIP);
if (ret) {
fprintf(stderr, "%s: %d\n", __FUNCTION__, ret);
UNLOCK_HARDWARE( intel );
exit(1);
}
tmp = intel->sarea->last_enqueue;
intelRefillBatchLocked( intel );
UNLOCK_HARDWARE( intel );
intelSetDrawBuffer( &intel->ctx, intel->ctx.Color.DriverDrawBuffer );
#endif
/* FIXME: use hardware contexts to avoid 'losing' hardware after
* each buffer flush.
*/
intel->vtbl.lost_hardware(intel);
}

View File

@@ -30,44 +30,11 @@
#include "intel_context.h"
extern void intelWaitAgeLocked( intelContextPtr intel, int age, GLboolean unlock );
void intelWaitIrq(struct intel_context *intel, int seq);
int intelEmitIrqLocked(struct intel_context *intel);
extern void intelClear(GLcontext *ctx, GLbitfield mask, GLboolean all,
GLint cx, GLint cy, GLint cw, GLint ch);
extern void intelPageFlip( const __DRIdrawablePrivate *dpriv );
extern void intelRotateWindow(intelContextPtr intel,
__DRIdrawablePrivate *dPriv, GLuint srcBuffer);
extern void intelWaitForIdle( intelContextPtr intel );
extern void intelFlushBatch( intelContextPtr intel, GLboolean refill );
extern void intelFlushBatchLocked( intelContextPtr intel,
GLboolean ignore_cliprects,
GLboolean refill,
GLboolean allow_unlock);
extern void intelRefillBatchLocked( intelContextPtr intel, GLboolean allow_unlock );
extern void intelFinish( GLcontext *ctx );
extern void intelFlush( GLcontext *ctx );
extern void intelglFlush( GLcontext *ctx );
extern void *intelAllocateAGP( intelContextPtr intel, GLsizei size );
extern void intelFreeAGP( intelContextPtr intel, void *pointer );
extern void *intelAllocateMemoryMESA( __DRInativeDisplay *dpy, int scrn,
GLsizei size, GLfloat readfreq,
GLfloat writefreq, GLfloat priority );
extern void intelFreeMemoryMESA( __DRInativeDisplay *dpy, int scrn,
GLvoid *pointer );
extern GLuint intelGetMemoryOffsetMESA( __DRInativeDisplay *dpy, int scrn, const GLvoid *pointer );
extern GLboolean intelIsAgpMemory( intelContextPtr intel, const GLvoid *pointer,
GLint size );
extern GLuint intelAgpOffsetFromVirtual( intelContextPtr intel, const GLvoid *p );
extern void intelWaitIrq( intelContextPtr intel, int seq );
extern u_int32_t intelGetLastFrame (intelContextPtr intel);
extern int intelEmitIrqLocked( intelContextPtr intel );
void intel_batch_ioctl(struct intel_context *intel,
GLuint start_offset,
GLuint used,
GLboolean ignore_cliprects, GLboolean allow_unlock);
#endif

View File

@@ -0,0 +1,341 @@
/**************************************************************************
*
* Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
* IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#include "intel_context.h"
#include "intel_mipmap_tree.h"
#include "intel_regions.h"
#include "enums.h"
#define FILE_DEBUG_FLAG DEBUG_MIPTREE
static GLenum
target_to_target(GLenum target)
{
switch (target) {
case GL_TEXTURE_CUBE_MAP_POSITIVE_X_ARB:
case GL_TEXTURE_CUBE_MAP_NEGATIVE_X_ARB:
case GL_TEXTURE_CUBE_MAP_POSITIVE_Y_ARB:
case GL_TEXTURE_CUBE_MAP_NEGATIVE_Y_ARB:
case GL_TEXTURE_CUBE_MAP_POSITIVE_Z_ARB:
case GL_TEXTURE_CUBE_MAP_NEGATIVE_Z_ARB:
return GL_TEXTURE_CUBE_MAP_ARB;
default:
return target;
}
}
struct intel_mipmap_tree *
intel_miptree_create(struct intel_context *intel,
GLenum target,
GLenum internal_format,
GLuint first_level,
GLuint last_level,
GLuint width0,
GLuint height0,
GLuint depth0, GLuint cpp, GLboolean compressed)
{
GLboolean ok;
struct intel_mipmap_tree *mt = calloc(sizeof(*mt), 1);
DBG("%s target %s format %s level %d..%d\n", __FUNCTION__,
_mesa_lookup_enum_by_nr(target),
_mesa_lookup_enum_by_nr(internal_format), first_level, last_level);
mt->target = target_to_target(target);
mt->internal_format = internal_format;
mt->first_level = first_level;
mt->last_level = last_level;
mt->width0 = width0;
mt->height0 = height0;
mt->depth0 = depth0;
mt->cpp = compressed ? 2 : cpp;
mt->compressed = compressed;
mt->refcount = 0; /* Allow for unused miptrees */
switch (intel->intelScreen->deviceID) {
case PCI_CHIP_I945_G:
case PCI_CHIP_I945_GM:
ok = i945_miptree_layout(mt);
break;
case PCI_CHIP_I915_G:
case PCI_CHIP_I915_GM:
case PCI_CHIP_I830_M:
case PCI_CHIP_I855_GM:
case PCI_CHIP_I865_G:
default:
/* All the i830 chips and the i915 use this layout:
*/
ok = i915_miptree_layout(mt);
break;
}
if (ok)
mt->region = intel_region_alloc(intel->intelScreen,
mt->cpp, mt->pitch, mt->total_height);
if (!mt->region) {
free(mt);
return NULL;
}
return mt;
}
void
intel_miptree_reference(struct intel_mipmap_tree **dst,
struct intel_mipmap_tree *src)
{
src->refcount++;
*dst = src;
DBG("%s %p refcount now %d\n", __FUNCTION__, src, src->refcount);
}
void
intel_miptree_release(struct intel_context *intel,
struct intel_mipmap_tree **mt)
{
if (!*mt)
return;
DBG("%s %p refcount will be %d\n", __FUNCTION__, *mt, (*mt)->refcount - 1);
if (--(*mt)->refcount <= 0) {
GLuint i;
DBG("%s deleting %p\n", __FUNCTION__, *mt);
intel_region_release(intel->intelScreen, &((*mt)->region));
for (i = 0; i < MAX_TEXTURE_LEVELS; i++)
if ((*mt)->level[i].image_offset)
free((*mt)->level[i].image_offset);
free(*mt);
}
*mt = NULL;
}
/* Can the image be pulled into a unified mipmap tree? This mirrors
* the completeness test in a lot of ways.
*
* Not sure whether I want to pass gl_texture_image here.
*/
GLboolean
intel_miptree_match_image(struct intel_mipmap_tree *mt,
struct gl_texture_image *image,
GLuint face, GLuint level)
{
/* Images with borders are never pulled into mipmap trees.
*/
if (image->Border)
return GL_FALSE;
if (image->InternalFormat != mt->internal_format ||
image->IsCompressed != mt->compressed)
return GL_FALSE;
/* Test image dimensions against the base level image adjusted for
* minification. This will also catch images not present in the
* tree, changed targets, etc.
*/
if (image->Width != mt->level[level].width ||
image->Height != mt->level[level].height ||
image->Depth != mt->level[level].depth)
return GL_FALSE;
return GL_TRUE;
}
void
intel_miptree_set_level_info(struct intel_mipmap_tree *mt,
GLuint level,
GLuint nr_images,
GLuint x, GLuint y, GLuint w, GLuint h, GLuint d)
{
mt->level[level].width = w;
mt->level[level].height = h;
mt->level[level].depth = d;
mt->level[level].level_offset = (x + y * mt->pitch) * mt->cpp;
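/* x,y give the level origin in pixels; level_offset stores it as a
* byte offset from the start of the region. */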
mt->level[level].nr_images = nr_images;
DBG("%s level %d size: %d,%d,%d offset %d,%d (0x%x)\n", __FUNCTION__,
level, w, h, d, x, y, mt->level[level].level_offset);
/* Not sure when this would happen, but anyway:
*/
if (mt->level[level].image_offset) {
free(mt->level[level].image_offset);
mt->level[level].image_offset = NULL;
}
assert(nr_images);
mt->level[level].image_offset = malloc(nr_images * sizeof(GLuint));
mt->level[level].image_offset[0] = 0;
}
void
intel_miptree_set_image_offset(struct intel_mipmap_tree *mt,
GLuint level, GLuint img, GLuint x, GLuint y)
{
if (img == 0 && level == 0)
assert(x == 0 && y == 0);
assert(img < mt->level[level].nr_images);
mt->level[level].image_offset[img] = (x + y * mt->pitch);
DBG("%s level %d img %d pos %d,%d image_offset %x\n",
__FUNCTION__, level, img, x, y, mt->level[level].image_offset[img]);
}
/* Although we use the image_offset[] array to store relative offsets
* to cube faces, Mesa doesn't know anything about this and expects
* each cube face to be treated as a separate image.
*
* These functions present that view to Mesa:
*/
const GLuint *
intel_miptree_depth_offsets(struct intel_mipmap_tree *mt, GLuint level)
{
static const GLuint zero = 0;
if (mt->target != GL_TEXTURE_3D || mt->level[level].nr_images == 1)
return &zero;
else
return mt->level[level].image_offset;
}
GLuint
intel_miptree_image_offset(struct intel_mipmap_tree * mt,
GLuint face, GLuint level)
{
if (mt->target == GL_TEXTURE_CUBE_MAP_ARB)
return (mt->level[level].level_offset +
mt->level[level].image_offset[face] * mt->cpp);
else
return mt->level[level].level_offset;
}
/**
* Map a teximage in a mipmap tree.
* \param row_stride returns row stride in bytes
* \param image_offsets returns the offset of each 2D slice within the image, in pixels (for 3D textures).
* \return address of mapping
*/
GLubyte *
intel_miptree_image_map(struct intel_context * intel,
struct intel_mipmap_tree * mt,
GLuint face,
GLuint level,
GLuint * row_stride, GLuint * image_offsets)
{
DBG("%s \n", __FUNCTION__);
if (row_stride)
*row_stride = mt->pitch * mt->cpp;
if (image_offsets)
memcpy(image_offsets, mt->level[level].image_offset,
mt->level[level].depth * sizeof(GLuint));
return (intel_region_map(intel->intelScreen, mt->region) +
intel_miptree_image_offset(mt, face, level));
}
void
intel_miptree_image_unmap(struct intel_context *intel,
struct intel_mipmap_tree *mt)
{
DBG("%s\n", __FUNCTION__);
intel_region_unmap(intel->intelScreen, mt->region);
}
/* Upload data for a particular image.
*/
void
intel_miptree_image_data(struct intel_context *intel,
struct intel_mipmap_tree *dst,
GLuint face,
GLuint level,
void *src,
GLuint src_row_pitch, GLuint src_image_pitch)
{
GLuint depth = dst->level[level].depth;
GLuint dst_offset = intel_miptree_image_offset(dst, face, level);
const GLuint *dst_depth_offset = intel_miptree_depth_offsets(dst, level);
GLuint i;
DBG("%s\n", __FUNCTION__);
for (i = 0; i < depth; i++) {
intel_region_data(intel->intelScreen, dst->region, dst_offset + dst_depth_offset[i], 0, 0, src, src_row_pitch, 0, 0, /* source x,y */
dst->level[level].width, dst->level[level].height);
src += src_image_pitch;
}
}
/* Copy mipmap image between trees
*/
void
intel_miptree_image_copy(struct intel_context *intel,
struct intel_mipmap_tree *dst,
GLuint face, GLuint level,
struct intel_mipmap_tree *src)
{
GLuint width = src->level[level].width;
GLuint height = src->level[level].height;
GLuint depth = src->level[level].depth;
GLuint dst_offset = intel_miptree_image_offset(dst, face, level);
GLuint src_offset = intel_miptree_image_offset(src, face, level);
const GLuint *dst_depth_offset = intel_miptree_depth_offsets(dst, level);
const GLuint *src_depth_offset = intel_miptree_depth_offsets(src, level);
GLuint i;
for (i = 0; i < depth; i++) {
intel_region_copy(intel->intelScreen,
dst->region, dst_offset + dst_depth_offset[i],
0,
0,
src->region, src_offset + src_depth_offset[i],
0, 0, width, height);
}
}

View File

@@ -0,0 +1,198 @@
/**************************************************************************
*
* Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
* IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#ifndef INTEL_MIPMAP_TREE_H
#define INTEL_MIPMAP_TREE_H
#include "intel_regions.h"
/* A layer on top of the intel_regions code which adds:
*
* - Code to size and layout a region to hold a set of mipmaps.
* - Query to determine if a new image fits in an existing tree.
* - More refcounting
* - maybe able to remove refcounting from intel_region?
* - ?
*
* The fixed mipmap layout of intel hardware, where one offset
* specifies the position of all images in a mipmap hierarchy,
* complicates the implementation of GL texture image commands,
* compared to hardware where each image is specified with an
* independent offset.
*
* In an ideal world, each texture object would be associated with a
* single bufmgr buffer or 2d intel_region, and all the images within
* the texture object would slot into the tree as they arrive. The
* reality can be a little messier, as images can arrive from the user
* with sizes that don't fit in the existing tree, or in an order
* where the tree layout cannot be guessed immediately.
*
* This structure encodes an idealized mipmap tree. The GL image
* commands build these where possible, otherwise store the images in
* temporary system buffers.
*/
/**
* Describes the location of each texture image within a texture region.
*/
struct intel_mipmap_level
{
GLuint level_offset;
GLuint width;
GLuint height;
GLuint depth;
GLuint nr_images;
/* Explicitly store the offset of each image for each cube face or
* depth value. Pretty much have to accept that hardware formats
* are going to be so diverse that there is no unified way to
* compute the offsets of depth/cube images within a mipmap level,
* so have to store them as a lookup table:
*/
GLuint *image_offset;
};
struct intel_mipmap_tree
{
/* Effectively the key:
*/
GLenum target;
GLenum internal_format;
GLuint first_level;
GLuint last_level;
GLuint width0, height0, depth0; /**< Level zero image dimensions */
GLuint cpp;
GLboolean compressed;
/* Derived from the above:
*/
GLuint pitch;
GLuint depth_pitch; /* per-image on i945? */
GLuint total_height;
/* Includes image offset tables:
*/
struct intel_mipmap_level level[MAX_TEXTURE_LEVELS];
/* The data is held here:
*/
struct intel_region *region;
/* These are also refcounted:
*/
GLuint refcount;
};
struct intel_mipmap_tree *intel_miptree_create(struct intel_context *intel,
GLenum target,
GLenum internal_format,
GLuint first_level,
GLuint last_level,
GLuint width0,
GLuint height0,
GLuint depth0,
GLuint cpp,
GLboolean compressed);
void intel_miptree_reference(struct intel_mipmap_tree **dst,
struct intel_mipmap_tree *src);
void intel_miptree_release(struct intel_context *intel,
struct intel_mipmap_tree **mt);
/* Check if an image fits an existing mipmap tree layout
*/
GLboolean intel_miptree_match_image(struct intel_mipmap_tree *mt,
struct gl_texture_image *image,
GLuint face, GLuint level);
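
A hedged sketch of the typical caller of this query -- the texture-upload code is not shown in this header, and mt, texImage, face and level are assumed to come from it:

/* If the incoming image no longer fits the layout guessed so far, drop the
 * tree; a later image (or a validation pass) will size a new one, and the
 * image is kept in a temporary system buffer until then.
 */
if (mt && !intel_miptree_match_image(mt, texImage, face, level)) {
   intel_miptree_release(intel, &mt);
}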
/* Return a pointer to an image within a tree. Return image stride as
* well.
*/
GLubyte *intel_miptree_image_map(struct intel_context *intel,
struct intel_mipmap_tree *mt,
GLuint face,
GLuint level,
GLuint * row_stride, GLuint * image_stride);
void intel_miptree_image_unmap(struct intel_context *intel,
struct intel_mipmap_tree *mt);
/* Return the linear offset of an image relative to the start of the
* tree:
*/
GLuint intel_miptree_image_offset(struct intel_mipmap_tree *mt,
GLuint face, GLuint level);
/* Return pointers to each 2d slice within an image. Indexed by depth
* value.
*/
const GLuint *intel_miptree_depth_offsets(struct intel_mipmap_tree *mt,
GLuint level);
void intel_miptree_set_level_info(struct intel_mipmap_tree *mt,
GLuint level,
GLuint nr_images,
GLuint x, GLuint y,
GLuint w, GLuint h, GLuint d);
void intel_miptree_set_image_offset(struct intel_mipmap_tree *mt,
GLuint level,
GLuint img, GLuint x, GLuint y);
/* Upload an image into a tree
*/
void intel_miptree_image_data(struct intel_context *intel,
struct intel_mipmap_tree *dst,
GLuint face,
GLuint level,
void *src,
GLuint src_row_pitch, GLuint src_image_pitch);
/* Copy an image between two trees
*/
void intel_miptree_image_copy(struct intel_context *intel,
struct intel_mipmap_tree *dst,
GLuint face, GLuint level,
struct intel_mipmap_tree *src);
/* i915_mipmap_tree.c:
*/
GLboolean i915_miptree_layout(struct intel_mipmap_tree *mt);
GLboolean i945_miptree_layout(struct intel_mipmap_tree *mt);
#endif
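
A usage sketch for the API above: size a tree for a plain 2D RGBA texture and upload its base level. The 256x256 dimensions, the texels pointer and the bare intel context pointer are illustrative assumptions; the real callers live in the texture image code.

static struct intel_mipmap_tree *
build_rgba_tree(struct intel_context *intel, const void *texels)
{
   /* Full mip chain for a 256x256, 4-byte-per-pixel 2D texture.  The pitch,
    * per-level offsets and total_height are computed by the chip-specific
    * i915_miptree_layout()/i945_miptree_layout() declared above.
    */
   struct intel_mipmap_tree *mt =
      intel_miptree_create(intel, GL_TEXTURE_2D, GL_RGBA,
                           0,              /* first_level */
                           8,              /* last_level == log2(256) */
                           256, 256, 1,    /* width0, height0, depth0 */
                           4,              /* cpp */
                           GL_FALSE);      /* not compressed */
   if (!mt)
      return NULL;

   /* src_row_pitch is in pixels; a 2D image has no image-to-image pitch. */
   intel_miptree_image_data(intel, mt, 0 /* face */, 0 /* level */,
                            (void *) texels, 256, 0);

   return mt;   /* the caller owns the single reference */
}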

@@ -1,6 +1,6 @@
/**************************************************************************
*
* Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
* Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
@@ -12,7 +12,7 @@
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* next paragraph) shall be included in all copies or substantial portionsalloc
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
@@ -25,487 +25,95 @@
*
**************************************************************************/
#include "glheader.h"
#include "enums.h"
#include "mtypes.h"
#include "macros.h"
#include "state.h"
#include "swrast/swrast.h"
#include "intel_screen.h"
#include "intel_context.h"
#include "intel_ioctl.h"
#include "intel_batchbuffer.h"
static GLboolean
check_color( const GLcontext *ctx, GLenum type, GLenum format,
const struct gl_pixelstore_attrib *packing,
const void *pixels, GLint sz, GLint pitch )
{
intelContextPtr intel = INTEL_CONTEXT(ctx);
GLuint cpp = intel->intelScreen->cpp;
if (INTEL_DEBUG & DEBUG_PIXEL)
fprintf(stderr, "%s\n", __FUNCTION__);
if ( (pitch & 63) ||
ctx->_ImageTransferState ||
packing->SwapBytes ||
packing->LsbFirst) {
if (INTEL_DEBUG & DEBUG_PIXEL)
fprintf(stderr, "%s: failed 1\n", __FUNCTION__);
return GL_FALSE;
}
if ( type == GL_UNSIGNED_INT_8_8_8_8_REV &&
cpp == 4 &&
format == GL_BGRA ) {
if (INTEL_DEBUG & DEBUG_PIXEL)
fprintf(stderr, "%s: passed 2\n", __FUNCTION__);
return GL_TRUE;
}
if (INTEL_DEBUG & DEBUG_PIXEL)
fprintf(stderr, "%s: failed\n", __FUNCTION__);
return GL_FALSE;
}
static GLboolean
check_color_per_fragment_ops( const GLcontext *ctx )
{
int result;
result = (!( ctx->Color.AlphaEnabled ||
ctx->Depth.Test ||
ctx->Fog.Enabled ||
ctx->Scissor.Enabled ||
ctx->Stencil.Enabled ||
!ctx->Color.ColorMask[0] ||
!ctx->Color.ColorMask[1] ||
!ctx->Color.ColorMask[2] ||
!ctx->Color.ColorMask[3] ||
ctx->Color.ColorLogicOpEnabled ||
ctx->Texture._EnabledUnits
) &&
ctx->Current.RasterPosValid);
return result;
}
#include "intel_pixel.h"
#include "intel_regions.h"
/**
* Clip the given rectangle against the buffer's bounds (including scissor).
* \param size returns the
* \return GL_TRUE if any pixels remain, GL_FALSE if totally clipped.
* Check if any fragment operations are in effect which might affect
* glDraw/CopyPixels.
*/
GLboolean
intel_check_blit_fragment_ops(GLcontext * ctx)
{
if (ctx->NewState)
_mesa_update_state(ctx);
/* XXX Note: Scissor could be done with the blitter:
*/
return !(ctx->_ImageTransferState ||
ctx->Color.AlphaEnabled ||
ctx->Depth.Test ||
ctx->Fog.Enabled ||
ctx->Scissor.Enabled ||
ctx->Stencil.Enabled ||
!ctx->Color.ColorMask[0] ||
!ctx->Color.ColorMask[1] ||
!ctx->Color.ColorMask[2] ||
!ctx->Color.ColorMask[3] ||
ctx->Color.ColorLogicOpEnabled ||
ctx->Texture._EnabledUnits || ctx->FragmentProgram._Enabled);
}
GLboolean
intel_check_meta_tex_fragment_ops(GLcontext * ctx)
{
if (ctx->NewState)
_mesa_update_state(ctx);
/* Some of _ImageTransferState (scale, bias) could be done with
* fragment programs on i915.
*/
return !(ctx->_ImageTransferState || ctx->Fog.Enabled || /* not done yet */
ctx->Texture._EnabledUnits || ctx->FragmentProgram._Enabled);
}
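
Seen from the application, the checks above mean glDraw/Copy/ReadPixels only stays on the blitter when rasterization is a plain copy. A hedged GL-side illustration of state that keeps intel_check_blit_fragment_ops() returning true:

#include <GL/gl.h>
#include <GL/glext.h>

/* Sketch: disable every per-fragment operation and pixel-transfer mode the
 * blit path refuses to handle.  Any one of these being left on forces the
 * swrast fallback instead of the blitter.
 */
static void
prepare_for_blit_path(void)
{
   glDisable(GL_ALPHA_TEST);
   glDisable(GL_DEPTH_TEST);
   glDisable(GL_FOG);
   glDisable(GL_SCISSOR_TEST);
   glDisable(GL_STENCIL_TEST);
   glDisable(GL_COLOR_LOGIC_OP);
   glDisable(GL_TEXTURE_2D);
   glDisable(GL_FRAGMENT_PROGRAM_ARB);     /* when ARB_fragment_program is in use */
   glColorMask(GL_TRUE, GL_TRUE, GL_TRUE, GL_TRUE);
   glPixelTransferf(GL_RED_SCALE, 1.0f);   /* keep _ImageTransferState clear */
   glPixelTransferf(GL_RED_BIAS, 0.0f);
}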
/* The intel_region struct doesn't really do enough to capture the
* format of the pixels in the region. For now this code assumes that
* the region is a display surface and hence is either ARGB8888 or
* RGB565.
* XXX FBO: If we'd pass in the intel_renderbuffer instead of region, we'd
* know the buffer's pixel format.
*
* XXX Replace this with _mesa_clip_drawpixels() and _mesa_clip_readpixels()
* from Mesa 6.4. We shouldn't apply scissor for ReadPixels.
* \param format as given to glDraw/ReadPixels
* \param type as given to glDraw/ReadPixels
*/
static GLboolean
clip_pixelrect( const GLcontext *ctx,
const GLframebuffer *buffer,
GLint *x, GLint *y,
GLsizei *width, GLsizei *height)
GLboolean
intel_check_blit_format(struct intel_region * region,
GLenum format, GLenum type)
{
/* left clipping */
if (*x < buffer->_Xmin) {
*width -= (buffer->_Xmin - *x);
*x = buffer->_Xmin;
}
/* right clipping */
if (*x + *width > buffer->_Xmax)
*width -= (*x + *width - buffer->_Xmax - 1);
if (*width <= 0)
return GL_FALSE;
/* bottom clipping */
if (*y < buffer->_Ymin) {
*height -= (buffer->_Ymin - *y);
*y = buffer->_Ymin;
}
/* top clipping */
if (*y + *height > buffer->_Ymax)
*height -= (*y + *height - buffer->_Ymax - 1);
if (*height <= 0)
return GL_FALSE;
return GL_TRUE;
}
/**
* Compute intersection of a clipping rectangle and pixel rectangle,
* returning results in x/y/w/hOut vars.
* \return GL_TRUE if there's intersection, GL_FALSE if disjoint.
*/
static INLINE GLboolean
intersect_region(const drm_clip_rect_t *box,
GLint x, GLint y, GLsizei width, GLsizei height,
GLint *xOut, GLint *yOut, GLint *wOut, GLint *hOut)
{
GLint bx = box->x1;
GLint by = box->y1;
GLint bw = box->x2 - bx;
GLint bh = box->y2 - by;
if (bx < x) bw -= x - bx, bx = x;
if (by < y) bh -= y - by, by = y;
if (bx + bw > x + width) bw = x + width - bx;
if (by + bh > y + height) bh = y + height - by;
*xOut = bx;
*yOut = by;
*wOut = bw;
*hOut = bh;
if (bw <= 0) return GL_FALSE;
if (bh <= 0) return GL_FALSE;
return GL_TRUE;
}
static GLboolean
intelTryReadPixels( GLcontext *ctx,
GLint x, GLint y, GLsizei width, GLsizei height,
GLenum format, GLenum type,
const struct gl_pixelstore_attrib *pack,
GLvoid *pixels )
{
intelContextPtr intel = INTEL_CONTEXT(ctx);
GLint size = 0; /* not really used */
GLint pitch = pack->RowLength ? pack->RowLength : width;
if (INTEL_DEBUG & DEBUG_PIXEL)
fprintf(stderr, "%s\n", __FUNCTION__);
/* Only accelerate reading to agp buffers.
*/
if ( !intelIsAgpMemory(intel, pixels,
pitch * height * intel->intelScreen->cpp ) ) {
if (INTEL_DEBUG & DEBUG_PIXEL)
fprintf(stderr, "%s: dest not agp\n", __FUNCTION__);
return GL_FALSE;
}
/* Need GL_PACK_INVERT_MESA to cope with upside-down results from
* blitter:
*/
if (!pack->Invert) {
if (INTEL_DEBUG & DEBUG_PIXEL)
fprintf(stderr, "%s: MESA_PACK_INVERT not set\n", __FUNCTION__);
return GL_FALSE;
}
if (!check_color(ctx, type, format, pack, pixels, size, pitch))
return GL_FALSE;
switch ( intel->intelScreen->cpp ) {
case 4:
break;
default:
return GL_FALSE;
}
/* Although the blits go on the command buffer, need to do this and
* fire with lock held to guarantee cliprects and drawing offset are
* correct.
*
* This is an unusual situation however, as the code which flushes
* a full command buffer expects to be called unlocked. As a
* workaround, immediately flush the buffer on acquiring the lock.
*/
intelFlush( &intel->ctx );
LOCK_HARDWARE( intel );
{
__DRIdrawablePrivate *dPriv = intel->driDrawable;
int nbox = dPriv->numClipRects;
int src_offset = intel->readRegion->offset;
int src_pitch = intel->intelScreen->front.pitch;
int dst_offset = intelAgpOffsetFromVirtual( intel, pixels);
drm_clip_rect_t *box = dPriv->pClipRects;
int i;
assert(dst_offset != ~0); /* should have been caught above */
if (!clip_pixelrect(ctx, ctx->ReadBuffer, &x, &y, &width, &height)) {
UNLOCK_HARDWARE( intel );
if (INTEL_DEBUG & DEBUG_PIXEL)
fprintf(stderr, "%s totally clipped -- nothing to do\n",
__FUNCTION__);
return GL_TRUE;
}
/* convert to screen coords (y=0=top) */
y = dPriv->h - y - height;
x += dPriv->x;
y += dPriv->y;
if (INTEL_DEBUG & DEBUG_PIXEL)
fprintf(stderr, "readpixel blit src_pitch %d dst_pitch %d\n",
src_pitch, pitch);
/* We don't really have to do window clipping for readpixels.
* The OpenGL spec says that pixels read from outside the
* visible window region (pixel ownership) have undefined value.
*/
for (i = 0 ; i < nbox ; i++)
{
GLint bx, by, bw, bh;
if (intersect_region(box+i, x, y, width, height,
&bx, &by, &bw, &bh)) {
intelEmitCopyBlitLocked( intel,
intel->intelScreen->cpp,
src_pitch, src_offset,
pitch, dst_offset,
bx, by,
bx - x, by - y,
bw, bh );
}
}
}
UNLOCK_HARDWARE( intel );
intelFinish( &intel->ctx );
return GL_TRUE;
}
static void
intelReadPixels( GLcontext *ctx,
GLint x, GLint y, GLsizei width, GLsizei height,
GLenum format, GLenum type,
const struct gl_pixelstore_attrib *pack,
GLvoid *pixels )
{
if (INTEL_DEBUG & DEBUG_PIXEL)
fprintf(stderr, "%s\n", __FUNCTION__);
if (!intelTryReadPixels( ctx, x, y, width, height, format, type, pack,
pixels))
_swrast_ReadPixels( ctx, x, y, width, height, format, type, pack,
pixels);
}
static void do_draw_pix( GLcontext *ctx,
GLint x, GLint y, GLsizei width, GLsizei height,
GLint pitch,
const void *pixels,
GLuint dest )
{
intelContextPtr intel = INTEL_CONTEXT(ctx);
__DRIdrawablePrivate *dPriv = intel->driDrawable;
drm_clip_rect_t *box = dPriv->pClipRects;
int nbox = dPriv->numClipRects;
int i;
int src_offset = intelAgpOffsetFromVirtual( intel, pixels);
int src_pitch = pitch;
assert(src_offset != ~0); /* should be caught earlier */
if (INTEL_DEBUG & DEBUG_PIXEL)
fprintf(stderr, "%s\n", __FUNCTION__);
intelFlush( &intel->ctx );
LOCK_HARDWARE( intel );
if (ctx->DrawBuffer)
{
y -= height; /* cope with pixel zoom */
if (!clip_pixelrect(ctx, ctx->DrawBuffer,
&x, &y, &width, &height)) {
UNLOCK_HARDWARE( intel );
return;
}
y = dPriv->h - y - height; /* convert from gl to hardware coords */
x += dPriv->x;
y += dPriv->y;
for (i = 0 ; i < nbox ; i++ )
{
GLint bx, by, bw, bh;
if (intersect_region(box + i, x, y, width, height,
&bx, &by, &bw, &bh)) {
intelEmitCopyBlitLocked( intel,
intel->intelScreen->cpp,
src_pitch, src_offset,
intel->intelScreen->front.pitch,
intel->drawRegion->offset,
bx - x, by - y,
bx, by,
bw, bh );
}
}
}
UNLOCK_HARDWARE( intel );
intelFinish( &intel->ctx );
}
static GLboolean
intelTryDrawPixels( GLcontext *ctx,
GLint x, GLint y, GLsizei width, GLsizei height,
GLenum format, GLenum type,
const struct gl_pixelstore_attrib *unpack,
const GLvoid *pixels )
{
intelContextPtr intel = INTEL_CONTEXT(ctx);
GLint pitch = unpack->RowLength ? unpack->RowLength : width;
GLuint dest;
GLuint cpp = intel->intelScreen->cpp;
GLint size = width * pitch * cpp;
if (INTEL_DEBUG & DEBUG_PIXEL)
fprintf(stderr, "%s\n", __FUNCTION__);
switch (format) {
case GL_RGB:
case GL_RGBA:
case GL_BGRA:
dest = intel->drawRegion->offset;
/* Planemask doesn't have full support in blits.
*/
if (!ctx->Color.ColorMask[RCOMP] ||
!ctx->Color.ColorMask[GCOMP] ||
!ctx->Color.ColorMask[BCOMP] ||
!ctx->Color.ColorMask[ACOMP]) {
if (INTEL_DEBUG & DEBUG_PIXEL)
fprintf(stderr, "%s: planemask\n", __FUNCTION__);
return GL_FALSE;
}
/* Can't do conversions on agp reads/draws.
*/
if ( !intelIsAgpMemory( intel, pixels, size ) ) {
if (INTEL_DEBUG & DEBUG_PIXEL)
fprintf(stderr, "%s: not agp memory\n", __FUNCTION__);
return GL_FALSE;
}
if (!check_color(ctx, type, format, unpack, pixels, size, pitch)) {
return GL_FALSE;
}
if (!check_color_per_fragment_ops(ctx)) {
return GL_FALSE;
}
if (ctx->Pixel.ZoomX != 1.0F ||
ctx->Pixel.ZoomY != -1.0F)
return GL_FALSE;
break;
default:
return GL_FALSE;
}
if ( intelIsAgpMemory(intel, pixels, size) )
{
do_draw_pix( ctx, x, y, width, height, pitch, pixels, dest );
if (region->cpp == 4 &&
(type == GL_UNSIGNED_INT_8_8_8_8_REV ||
type == GL_UNSIGNED_BYTE) && format == GL_BGRA) {
return GL_TRUE;
}
else if (0)
{
/* Pixels is in regular memory -- get dma buffers and perform
* upload through them. No point doing this for regular uploads
* but once we remove some of the restrictions above (colormask,
* pixelformat conversion, zoom?, etc), this could be a win.
*/
if (region->cpp == 2 &&
type == GL_UNSIGNED_SHORT_5_6_5_REV && format == GL_BGR) {
return GL_TRUE;
}
else
return GL_FALSE;
if (INTEL_DEBUG & DEBUG_PIXEL)
fprintf(stderr, "%s: bad format for blit (cpp %d, type %s format %s)\n",
__FUNCTION__, region->cpp,
_mesa_lookup_enum_by_nr(type), _mesa_lookup_enum_by_nr(format));
return GL_FALSE;
}
static void
intelDrawPixels( GLcontext *ctx,
GLint x, GLint y, GLsizei width, GLsizei height,
GLenum format, GLenum type,
const struct gl_pixelstore_attrib *unpack,
const GLvoid *pixels )
void
intelInitPixelFuncs(struct dd_function_table *functions)
{
if (INTEL_DEBUG & DEBUG_PIXEL)
fprintf(stderr, "%s\n", __FUNCTION__);
if (!intelTryDrawPixels( ctx, x, y, width, height, format, type,
unpack, pixels ))
_swrast_DrawPixels( ctx, x, y, width, height, format, type,
unpack, pixels );
}
/**
* Implement glCopyPixels for the front color buffer (or back buffer Pixmap).
* Don't support zooming, pixel transfer, etc.
* We do support copying from one window to another, ala glXMakeCurrentRead.
*/
static void
intelCopyPixels( GLcontext *ctx,
GLint srcx, GLint srcy, GLsizei width, GLsizei height,
GLint destx, GLint desty, GLenum type )
{
#if 0
const XMesaContext xmesa = XMESA_CONTEXT(ctx);
const SWcontext *swrast = SWRAST_CONTEXT( ctx );
XMesaDisplay *dpy = xmesa->xm_visual->display;
const XMesaDrawable drawBuffer = xmesa->xm_draw_buffer->buffer;
const XMesaDrawable readBuffer = xmesa->xm_read_buffer->buffer;
const XMesaGC gc = xmesa->xm_draw_buffer->gc;
ASSERT(dpy);
ASSERT(gc);
if (drawBuffer && /* buffer != 0 means it's a Window or Pixmap */
readBuffer &&
type == GL_COLOR &&
(swrast->_RasterMask & ~CLIP_BIT) == 0 && /* no blend, z-test, etc */
ctx->_ImageTransferState == 0 && /* no color tables, scale/bias, etc */
ctx->Pixel.ZoomX == 1.0 && /* no zooming */
ctx->Pixel.ZoomY == 1.0) {
/* Note: we don't do any special clipping work here. We could,
* but X will do it for us.
*/
srcy = FLIP(xmesa->xm_read_buffer, srcy) - height + 1;
desty = FLIP(xmesa->xm_draw_buffer, desty) - height + 1;
XCopyArea(dpy, readBuffer, drawBuffer, gc,
srcx, srcy, width, height, destx, desty);
}
#else
_swrast_CopyPixels(ctx, srcx, srcy, width, height, destx, desty, type );
#endif
}
void intelInitPixelFuncs( struct dd_function_table *functions )
{
/* Pixel path fallbacks.
*/
functions->Accum = _swrast_Accum;
functions->Bitmap = _swrast_Bitmap;
functions->CopyPixels = intelCopyPixels;
if (!getenv("INTEL_NO_BLITS")) {
functions->ReadPixels = intelReadPixels;
functions->DrawPixels = intelDrawPixels;
}
else {
functions->ReadPixels = _swrast_ReadPixels;
functions->DrawPixels = _swrast_DrawPixels;
}
functions->ReadPixels = intelReadPixels;
functions->DrawPixels = intelDrawPixels;
}

@@ -0,0 +1,63 @@
/**************************************************************************
*
* Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
* IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#ifndef INTEL_PIXEL_H
#define INTEL_PIXEL_H
#include "mtypes.h"
void intelInitPixelFuncs(struct dd_function_table *functions);
GLboolean intel_check_blit_fragment_ops(GLcontext * ctx);
GLboolean intel_check_meta_tex_fragment_ops(GLcontext * ctx);
GLboolean intel_check_blit_format(struct intel_region *region,
GLenum format, GLenum type);
void intelReadPixels(GLcontext * ctx,
GLint x, GLint y,
GLsizei width, GLsizei height,
GLenum format, GLenum type,
const struct gl_pixelstore_attrib *pack,
GLvoid * pixels);
void intelDrawPixels(GLcontext * ctx,
GLint x, GLint y,
GLsizei width, GLsizei height,
GLenum format,
GLenum type,
const struct gl_pixelstore_attrib *unpack,
const GLvoid * pixels);
void intelCopyPixels(GLcontext * ctx,
GLint srcx, GLint srcy,
GLsizei width, GLsizei height,
GLint destx, GLint desty, GLenum type);
#endif

@@ -0,0 +1,380 @@
/**************************************************************************
*
* Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
* IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#include "glheader.h"
#include "enums.h"
#include "image.h"
#include "state.h"
#include "mtypes.h"
#include "macros.h"
#include "swrast/swrast.h"
#include "intel_screen.h"
#include "intel_context.h"
#include "intel_ioctl.h"
#include "intel_batchbuffer.h"
#include "intel_buffers.h"
#include "intel_blit.h"
#include "intel_regions.h"
#include "intel_tris.h"
#include "intel_pixel.h"
#define FILE_DEBUG_FLAG DEBUG_PIXEL
static struct intel_region *
copypix_src_region(struct intel_context *intel, GLenum type)
{
switch (type) {
case GL_COLOR:
return intel_readbuf_region(intel);
case GL_DEPTH:
/* Don't think this is really possible except at 16bpp, when we have no stencil.
*/
if (intel->intelScreen->depth_region &&
intel->intelScreen->depth_region->cpp == 2)
return intel->intelScreen->depth_region;
case GL_STENCIL:
/* Don't think this is really possible.
*/
break;
case GL_DEPTH_STENCIL_EXT:
/* Does it matter whether it is stencil/depth or depth/stencil?
*/
return intel->intelScreen->depth_region;
default:
break;
}
return NULL;
}
/**
* Check if any fragment operations are in effect which might affect
* glCopyPixels. Differs from intel_check_blit_fragment_ops in that
* we allow Scissor.
*/
static GLboolean
intel_check_copypixel_blit_fragment_ops(GLcontext * ctx)
{
if (ctx->NewState)
_mesa_update_state(ctx);
/* Could do logicop with the blitter:
*/
return !(ctx->_ImageTransferState ||
ctx->Color.AlphaEnabled ||
ctx->Depth.Test ||
ctx->Fog.Enabled ||
ctx->Stencil.Enabled ||
!ctx->Color.ColorMask[0] ||
!ctx->Color.ColorMask[1] ||
!ctx->Color.ColorMask[2] ||
!ctx->Color.ColorMask[3] ||
ctx->Color.ColorLogicOpEnabled ||
ctx->Texture._EnabledUnits ||
ctx->FragmentProgram._Enabled);
}
/* Doesn't work for overlapping regions. Could do a double copy or
* just fallback.
*/
static GLboolean
do_texture_copypixels(GLcontext * ctx,
GLint srcx, GLint srcy,
GLsizei width, GLsizei height,
GLint dstx, GLint dsty, GLenum type)
{
struct intel_context *intel = intel_context(ctx);
struct intel_region *dst = intel_drawbuf_region(intel);
struct intel_region *src = copypix_src_region(intel, type);
GLenum src_format;
GLenum src_type;
DBG("%s %d,%d %dx%d --> %d,%d\n", __FUNCTION__,
srcx, srcy, width, height, dstx, dsty);
if (!src || !dst || type != GL_COLOR)
return GL_FALSE;
/* Can't handle overlapping regions. Don't have sufficient control
* over rasterization to pull it off in-place. Punt on these for
* now.
*
* XXX: do a copy to a temporary.
*/
if (src->buffer == dst->buffer) {
drm_clip_rect_t srcbox;
drm_clip_rect_t dstbox;
drm_clip_rect_t tmp;
srcbox.x1 = srcx;
srcbox.y1 = srcy;
srcbox.x2 = srcx + width;
srcbox.y2 = srcy + height;
dstbox.x1 = dstx;
dstbox.y1 = dsty;
dstbox.x2 = dstx + width * ctx->Pixel.ZoomX;
dstbox.y2 = dsty + height * ctx->Pixel.ZoomY;
DBG("src %d,%d %d,%d\n", srcbox.x1, srcbox.y1, srcbox.x2, srcbox.y2);
DBG("dst %d,%d %d,%d (%dx%d) (%f,%f)\n", dstbox.x1, dstbox.y1, dstbox.x2, dstbox.y2,
width, height, ctx->Pixel.ZoomX, ctx->Pixel.ZoomY);
if (intel_intersect_cliprects(&tmp, &srcbox, &dstbox)) {
DBG("%s: regions overlap\n", __FUNCTION__);
return GL_FALSE;
}
}
intelFlush(&intel->ctx);
intel->vtbl.install_meta_state(intel);
/* Is this true? Also will need to turn depth testing on according
* to state:
*/
intel->vtbl.meta_no_stencil_write(intel);
intel->vtbl.meta_no_depth_write(intel);
/* Set the 3d engine to draw into the destination region:
*/
intel->vtbl.meta_draw_region(intel, dst, intel->intelScreen->depth_region);
intel->vtbl.meta_import_pixel_state(intel);
if (src->cpp == 2) {
src_format = GL_RGB;
src_type = GL_UNSIGNED_SHORT_5_6_5;
}
else {
src_format = GL_BGRA;
src_type = GL_UNSIGNED_BYTE;
}
/* Set the frontbuffer up as a large rectangular texture.
*/
if (!intel->vtbl.meta_tex_rect_source(intel, src->buffer, 0,
src->pitch,
src->height, src_format, src_type)) {
intel->vtbl.leave_meta_state(intel);
return GL_FALSE;
}
intel->vtbl.meta_texture_blend_replace(intel);
LOCK_HARDWARE(intel);
if (intel->driDrawable->numClipRects) {
__DRIdrawablePrivate *dPriv = intel->driDrawable;
srcy = dPriv->h - srcy - height; /* convert from gl to hardware coords */
srcx += dPriv->x;
srcy += dPriv->y;
/* Clip against the source region. This is the only source
* clipping we do. XXX: Just set the texcoord wrap mode to clamp
* or similar.
*
*/
if (0) {
GLint orig_x = srcx;
GLint orig_y = srcy;
if (!_mesa_clip_to_region(0, 0, src->pitch, src->height,
&srcx, &srcy, &width, &height))
goto out;
dstx += srcx - orig_x;
dsty += (srcy - orig_y) * ctx->Pixel.ZoomY;
}
/* Just use the regular cliprect mechanism... Does this need to
* even hold the lock???
*/
intel_meta_draw_quad(intel,
dstx,
dstx + width * ctx->Pixel.ZoomX,
dPriv->h - (dsty + height * ctx->Pixel.ZoomY),
dPriv->h - (dsty), 0, /* XXX: what z value? */
0x00ff00ff,
srcx, srcx + width, srcy, srcy + height);
out:
intel->vtbl.leave_meta_state(intel);
intel_batchbuffer_flush(intel->batch);
}
UNLOCK_HARDWARE(intel);
DBG("%s: success\n", __FUNCTION__);
return GL_TRUE;
}
/**
* CopyPixels with the blitter. Don't support zooming, pixel transfer, etc.
*/
static GLboolean
do_blit_copypixels(GLcontext * ctx,
GLint srcx, GLint srcy,
GLsizei width, GLsizei height,
GLint dstx, GLint dsty, GLenum type)
{
struct intel_context *intel = intel_context(ctx);
struct intel_region *dst = intel_drawbuf_region(intel);
struct intel_region *src = copypix_src_region(intel, type);
/* Copypixels can be more than a straight copy. Ensure all the
* extra operations are disabled:
*/
if (!intel_check_copypixel_blit_fragment_ops(ctx) ||
ctx->Pixel.ZoomX != 1.0F || ctx->Pixel.ZoomY != 1.0F)
return GL_FALSE;
if (!src || !dst)
return GL_FALSE;
intelFlush(&intel->ctx);
LOCK_HARDWARE(intel);
if (intel->driDrawable->numClipRects) {
__DRIdrawablePrivate *dPriv = intel->driDrawable;
drm_clip_rect_t *box = dPriv->pClipRects;
drm_clip_rect_t dest_rect;
GLint nbox = dPriv->numClipRects;
GLint delta_x = 0;
GLint delta_y = 0;
GLuint i;
/* Do scissoring in GL coordinates:
*/
if (ctx->Scissor.Enabled)
{
GLint x = ctx->Scissor.X;
GLint y = ctx->Scissor.Y;
GLuint w = ctx->Scissor.Width;
GLuint h = ctx->Scissor.Height;
GLint dx = dstx - srcx;
GLint dy = dsty - srcy;
if (!_mesa_clip_to_region(x, y, x+w, y+h, &dstx, &dsty, &width, &height))
goto out;
srcx = dstx - dx;
srcy = dsty - dy;
}
/* Convert from GL to hardware coordinates:
*/
dsty = dPriv->h - dsty - height;
srcy = dPriv->h - srcy - height;
dstx += dPriv->x;
dsty += dPriv->y;
srcx += dPriv->x;
srcy += dPriv->y;
/* Clip against the source region. This is the only source
* clipping we do. Dst is clipped with cliprects below.
*/
{
delta_x = srcx - dstx;
delta_y = srcy - dsty;
if (!_mesa_clip_to_region(0, 0, src->pitch, src->height,
&srcx, &srcy, &width, &height))
goto out;
dstx = srcx - delta_x;
dsty = srcy - delta_y;
}
dest_rect.x1 = dstx;
dest_rect.y1 = dsty;
dest_rect.x2 = dstx + width;
dest_rect.y2 = dsty + height;
/* Could do slightly more clipping: Eg, take the intersection of
* the existing set of cliprects and those cliprects translated
* by delta_x, delta_y:
*
* This code will not overwrite other windows, but will
* introduce garbage when copying from obscured window regions.
*/
for (i = 0; i < nbox; i++) {
drm_clip_rect_t rect;
if (!intel_intersect_cliprects(&rect, &dest_rect, &box[i]))
continue;
intelEmitCopyBlit(intel, dst->cpp,
src->pitch, src->buffer, 0,
dst->pitch, dst->buffer, 0,
rect.x1 + delta_x, rect.y1 + delta_y, /* srcx, srcy */
rect.x1, rect.y1, /* dstx, dsty */
rect.x2 - rect.x1, rect.y2 - rect.y1);
}
out:
intel_batchbuffer_flush(intel->batch);
}
UNLOCK_HARDWARE(intel);
DBG("%s: success\n", __FUNCTION__);
return GL_TRUE;
}
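
The comment before the blit loop above notes that copying from obscured window regions introduces garbage, because only the destination is clipped against the cliprects. A hedged sketch of the extra clipping it suggests -- also intersecting with the cliprects translated by (delta_x, delta_y) -- written against the locals of do_blit_copypixels(); this is not part of the driver:

/* Drop-in variant of the blit loop above: a pixel is copied only if both its
 * destination and its source (a cliprect shifted into destination
 * coordinates) are visible.
 */
for (i = 0; i < nbox; i++) {
   drm_clip_rect_t rect;
   GLuint j;

   if (!intel_intersect_cliprects(&rect, &dest_rect, &box[i]))
      continue;

   for (j = 0; j < nbox; j++) {
      drm_clip_rect_t src_vis, final;

      /* box[j] expressed in destination coordinates (src = dst + delta). */
      src_vis.x1 = box[j].x1 - delta_x;
      src_vis.y1 = box[j].y1 - delta_y;
      src_vis.x2 = box[j].x2 - delta_x;
      src_vis.y2 = box[j].y2 - delta_y;

      if (!intel_intersect_cliprects(&final, &rect, &src_vis))
         continue;

      intelEmitCopyBlit(intel, dst->cpp,
                        src->pitch, src->buffer, 0,
                        dst->pitch, dst->buffer, 0,
                        final.x1 + delta_x, final.y1 + delta_y, /* srcx, srcy */
                        final.x1, final.y1,                     /* dstx, dsty */
                        final.x2 - final.x1, final.y2 - final.y1);
   }
}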
void
intelCopyPixels(GLcontext * ctx,
GLint srcx, GLint srcy,
GLsizei width, GLsizei height,
GLint destx, GLint desty, GLenum type)
{
if (INTEL_DEBUG & DEBUG_PIXEL)
fprintf(stderr, "%s\n", __FUNCTION__);
if (do_blit_copypixels(ctx, srcx, srcy, width, height, destx, desty, type))
return;
if (do_texture_copypixels(ctx, srcx, srcy, width, height, destx, desty, type))
return;
DBG("fallback to _swrast_CopyPixels\n");
_swrast_CopyPixels(ctx, srcx, srcy, width, height, destx, desty, type);
}

@@ -0,0 +1,365 @@
/**************************************************************************
*
* Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portionsalloc
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
* IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#include "glheader.h"
#include "enums.h"
#include "image.h"
#include "mtypes.h"
#include "macros.h"
#include "bufferobj.h"
#include "swrast/swrast.h"
#include "intel_screen.h"
#include "intel_context.h"
#include "intel_ioctl.h"
#include "intel_batchbuffer.h"
#include "intel_blit.h"
#include "intel_buffers.h"
#include "intel_regions.h"
#include "intel_pixel.h"
#include "intel_buffer_objects.h"
#include "intel_tris.h"
static GLboolean
do_texture_drawpixels(GLcontext * ctx,
GLint x, GLint y,
GLsizei width, GLsizei height,
GLenum format, GLenum type,
const struct gl_pixelstore_attrib *unpack,
const GLvoid * pixels)
{
struct intel_context *intel = intel_context(ctx);
struct intel_region *dst = intel_drawbuf_region(intel);
struct intel_buffer_object *src = intel_buffer_object(unpack->BufferObj);
GLuint rowLength = unpack->RowLength ? unpack->RowLength : width;
GLuint src_offset;
if (INTEL_DEBUG & DEBUG_PIXEL)
fprintf(stderr, "%s\n", __FUNCTION__);
intelFlush(&intel->ctx);
intel->vtbl.render_start(intel);
intel->vtbl.emit_state(intel);
if (!dst)
return GL_FALSE;
if (src) {
if (!_mesa_validate_pbo_access(2, unpack, width, height, 1,
format, type, pixels)) {
_mesa_error(ctx, GL_INVALID_OPERATION, "glDrawPixels");
return GL_TRUE;
}
}
else {
/* PBO only for now:
*/
/* _mesa_printf("%s - not PBO\n", __FUNCTION__); */
return GL_FALSE;
}
/* There are a couple of things we can't do yet, one of which is
* set the correct state for pixel operations when GL texturing is
* enabled. That's a pretty rare state and probably not worth the
* effort. A completely device-independent version of this may do
* more.
*
* Similarly, we make no attempt to merge metaops processing with
* an enabled fragment program, though it would certainly be
* possible.
*/
if (!intel_check_meta_tex_fragment_ops(ctx)) {
if (INTEL_DEBUG & DEBUG_PIXEL)
_mesa_printf("%s - bad GL fragment state for metaops texture\n",
__FUNCTION__);
return GL_FALSE;
}
intel->vtbl.install_meta_state(intel);
/* Is this true? Also will need to turn depth testing on according
* to state:
*/
intel->vtbl.meta_no_stencil_write(intel);
intel->vtbl.meta_no_depth_write(intel);
/* Set the 3d engine to draw into the destination region:
*/
intel->vtbl.meta_draw_region(intel, dst, intel->intelScreen->depth_region);
intel->vtbl.meta_import_pixel_state(intel);
src_offset = (GLuint) _mesa_image_address(2, unpack, pixels, width, height,
format, type, 0, 0, 0);
/* Setup the pbo up as a rectangular texture, if possible.
*
* TODO: This is almost always possible if the i915 fragment
* program is adjusted to correctly swizzle the sampled colors.
* The major exception is any 24bit texture, like RGB888, for which
* there is no hardware support.
*/
if (!intel->vtbl.meta_tex_rect_source(intel, src->buffer, src_offset,
rowLength, height, format, type)) {
intel->vtbl.leave_meta_state(intel);
return GL_FALSE;
}
intel->vtbl.meta_texture_blend_replace(intel);
LOCK_HARDWARE(intel);
if (intel->driDrawable->numClipRects) {
__DRIdrawablePrivate *dPriv = intel->driDrawable;
GLint srcx, srcy;
GLint dstx, dsty;
dstx = x;
dsty = dPriv->h - (y + height);
srcx = 0; /* skiprows/pixels already done */
srcy = 0;
if (0) {
const GLint orig_x = dstx;
const GLint orig_y = dsty;
if (!_mesa_clip_to_region(0, 0, dst->pitch, dst->height,
&dstx, &dsty, &width, &height))
goto out;
srcx += dstx - orig_x;
srcy += dsty - orig_y;
}
if (INTEL_DEBUG & DEBUG_PIXEL)
_mesa_printf("draw %d,%d %dx%d\n", dstx, dsty, width, height);
/* Must use the regular cliprect mechanism in order to get the
* drawing origin set correctly. Otherwise scissor state is in
* incorrect coordinate space. Does this even need to hold the
* lock???
*/
intel_meta_draw_quad(intel,
dstx, dstx + width * ctx->Pixel.ZoomX,
dPriv->h - (y + height * ctx->Pixel.ZoomY),
dPriv->h - (y),
-ctx->Current.RasterPos[2] * .5,
0x00ff00ff,
srcx, srcx + width, srcy + height, srcy);
out:
intel->vtbl.leave_meta_state(intel);
intel_batchbuffer_flush(intel->batch);
}
UNLOCK_HARDWARE(intel);
return GL_TRUE;
}
/* Pros:
* - no waiting for idle before updating framebuffer.
*
* Cons:
* - if upload is by memcpy, this may actually be slower than fallback path.
* - uploads the whole image even if destination is clipped
*
* Need to benchmark.
*
* Given the questions about performance, implement for pbo's only.
* This path is definitely a win if the pbo is already in agp. If it
* turns out otherwise, we can add the code necessary to upload client
* data to agp space before performing the blit. (Though it may turn
* out to be better/simpler just to use the texture engine).
*/
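
The PBO-only case this path targets looks like the following from the GL side; the buffer id, image pointer and BGRA sizing are illustrative, and the ARB entry points are assumed to have been resolved (e.g. via glXGetProcAddress):

#include <GL/gl.h>
#include <GL/glext.h>

/* Sketch: glDrawPixels sourced from a pixel unpack buffer.  With the data in
 * a buffer object (ideally already resident in AGP), do_blit_drawpixels()
 * below can blit straight from it instead of copying client memory.
 */
static void
draw_from_pbo(GLuint pbo, const void *image, GLsizei w, GLsizei h)
{
   glBindBufferARB(GL_PIXEL_UNPACK_BUFFER_ARB, pbo);
   glBufferDataARB(GL_PIXEL_UNPACK_BUFFER_ARB, w * h * 4, image,
                   GL_STREAM_DRAW_ARB);

   /* A zero pointer means offset 0 into the bound PBO; BGRA/8888 is one of
    * the combinations intel_check_blit_format() accepts for a 32bpp target.
    */
   glDrawPixels(w, h, GL_BGRA, GL_UNSIGNED_INT_8_8_8_8_REV, (const GLvoid *) 0);

   glBindBufferARB(GL_PIXEL_UNPACK_BUFFER_ARB, 0);
}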
static GLboolean
do_blit_drawpixels(GLcontext * ctx,
GLint x, GLint y,
GLsizei width, GLsizei height,
GLenum format, GLenum type,
const struct gl_pixelstore_attrib *unpack,
const GLvoid * pixels)
{
struct intel_context *intel = intel_context(ctx);
struct intel_region *dest = intel_drawbuf_region(intel);
struct intel_buffer_object *src = intel_buffer_object(unpack->BufferObj);
GLuint src_offset;
GLuint rowLength;
struct _DriFenceObject *fence = NULL;
if (INTEL_DEBUG & DEBUG_PIXEL)
_mesa_printf("%s\n", __FUNCTION__);
if (!dest) {
if (INTEL_DEBUG & DEBUG_PIXEL)
_mesa_printf("%s - no dest\n", __FUNCTION__);
return GL_FALSE;
}
if (src) {
/* This validation should be done by core mesa:
*/
if (!_mesa_validate_pbo_access(2, unpack, width, height, 1,
format, type, pixels)) {
_mesa_error(ctx, GL_INVALID_OPERATION, "glDrawPixels");
return GL_TRUE;
}
}
else {
/* PBO only for now:
*/
if (INTEL_DEBUG & DEBUG_PIXEL)
_mesa_printf("%s - not PBO\n", __FUNCTION__);
return GL_FALSE;
}
if (!intel_check_blit_format(dest, format, type)) {
if (INTEL_DEBUG & DEBUG_PIXEL)
_mesa_printf("%s - bad format for blit\n", __FUNCTION__);
return GL_FALSE;
}
if (!intel_check_meta_tex_fragment_ops(ctx)) {
if (INTEL_DEBUG & DEBUG_PIXEL)
_mesa_printf("%s - bad GL fragment state for meta tex\n",
__FUNCTION__);
return GL_FALSE;
}
if (ctx->Pixel.ZoomX != 1.0F) {
if (INTEL_DEBUG & DEBUG_PIXEL)
_mesa_printf("%s - bad PixelZoomX for blit\n", __FUNCTION__);
return GL_FALSE;
}
if (unpack->RowLength > 0)
rowLength = unpack->RowLength;
else
rowLength = width;
if (ctx->Pixel.ZoomY == -1.0F) {
if (INTEL_DEBUG & DEBUG_PIXEL)
_mesa_printf("%s - bad PixelZoomY for blit\n", __FUNCTION__);
return GL_FALSE; /* later */
y -= height;
}
else if (ctx->Pixel.ZoomY == 1.0F) {
rowLength = -rowLength;
}
else {
if (INTEL_DEBUG & DEBUG_PIXEL)
_mesa_printf("%s - bad PixelZoomY for blit\n", __FUNCTION__);
return GL_FALSE;
}
src_offset = (GLuint) _mesa_image_address(2, unpack, pixels, width, height,
format, type, 0, 0, 0);
intelFlush(&intel->ctx);
LOCK_HARDWARE(intel);
if (intel->driDrawable->numClipRects) {
__DRIdrawablePrivate *dPriv = intel->driDrawable;
int nbox = dPriv->numClipRects;
drm_clip_rect_t *box = dPriv->pClipRects;
drm_clip_rect_t rect;
drm_clip_rect_t dest_rect;
struct _DriBufferObject *src_buffer =
intel_bufferobj_buffer(intel, src, INTEL_READ);
int i;
dest_rect.x1 = dPriv->x + x;
dest_rect.y1 = dPriv->y + dPriv->h - (y + height);
dest_rect.x2 = dest_rect.x1 + width;
dest_rect.y2 = dest_rect.y1 + height;
for (i = 0; i < nbox; i++) {
if (!intel_intersect_cliprects(&rect, &dest_rect, &box[i]))
continue;
intelEmitCopyBlit(intel,
dest->cpp,
rowLength,
src_buffer, src_offset,
dest->pitch,
dest->buffer, 0,
rect.x1 - dest_rect.x1,
rect.y2 - dest_rect.y2,
rect.x1,
rect.y1, rect.x2 - rect.x1, rect.y2 - rect.y1);
}
fence = intel_batchbuffer_flush(intel->batch);
driFenceReference(fence);
}
UNLOCK_HARDWARE(intel);
if (intel->driDrawable->numClipRects)
driFenceFinish(fence, DRM_FENCE_TYPE_EXE | DRM_I915_FENCE_TYPE_RW, GL_FALSE);
driFenceUnReference(fence);
if (INTEL_DEBUG & DEBUG_PIXEL)
_mesa_printf("%s - DONE\n", __FUNCTION__);
return GL_TRUE;
}
void
intelDrawPixels(GLcontext * ctx,
GLint x, GLint y,
GLsizei width, GLsizei height,
GLenum format,
GLenum type,
const struct gl_pixelstore_attrib *unpack,
const GLvoid * pixels)
{
if (do_blit_drawpixels(ctx, x, y, width, height, format, type,
unpack, pixels))
return;
if (do_texture_drawpixels(ctx, x, y, width, height, format, type,
unpack, pixels))
return;
if (INTEL_DEBUG & DEBUG_PIXEL)
_mesa_printf("%s: fallback to swrast\n", __FUNCTION__);
_swrast_DrawPixels(ctx, x, y, width, height, format, type, unpack, pixels);
}

@@ -0,0 +1,317 @@
/**************************************************************************
*
* Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
* IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#include "glheader.h"
#include "enums.h"
#include "mtypes.h"
#include "macros.h"
#include "image.h"
#include "bufferobj.h"
#include "swrast/swrast.h"
#include "intel_screen.h"
#include "intel_context.h"
#include "intel_ioctl.h"
#include "intel_batchbuffer.h"
#include "intel_blit.h"
#include "intel_buffers.h"
#include "intel_regions.h"
#include "intel_pixel.h"
#include "intel_buffer_objects.h"
/* For many applications, the new ability to pull the source buffers
* back out of the GTT and then do the packing/conversion operations
* in software will be as much of an improvement as trying to get the
* blitter and/or texture engine to do the work.
*
* This step is gated on private backbuffers.
*
* Obviously the frontbuffer can't be pulled back, so that is either
* an argument for blit/texture readpixels, or for blitting to a
* temporary and then pulling that back.
*
* When the destination is a pbo, however, it's not clear if it is
* ever going to be pulled to main memory (though the access param
* will be a good hint). So it sounds like we do want to be able to
* choose between blit/texture implementation on the gpu and pullback
* and cpu-based copying.
*
* Unless you can magically turn client memory into a PBO for the
* duration of this call, there will be a cpu-based copying step in
* any case.
*/
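
For reference, the case do_blit_readpixels() below accelerates is a read into a pixel pack buffer. A hedged GL-side sketch (ARB entry points assumed resolved, BGRA sizing illustrative):

#include <GL/gl.h>
#include <GL/glext.h>

/* Sketch: glReadPixels into a pixel pack buffer.  Because the destination is
 * a buffer object, the copy can be a GPU blit plus a fence wait instead of a
 * stall and a CPU copy.
 */
static void
read_into_pbo(GLuint pbo, GLsizei w, GLsizei h)
{
   glBindBufferARB(GL_PIXEL_PACK_BUFFER_ARB, pbo);
   glBufferDataARB(GL_PIXEL_PACK_BUFFER_ARB, w * h * 4, NULL,
                   GL_STREAM_READ_ARB);

   glPixelStorei(GL_PACK_ALIGNMENT, 1);    /* pack->Alignment != 1 is rejected */
   glReadPixels(0, 0, w, h, GL_BGRA, GL_UNSIGNED_INT_8_8_8_8_REV,
                (GLvoid *) 0);             /* offset 0 into the bound PBO */

   glBindBufferARB(GL_PIXEL_PACK_BUFFER_ARB, 0);
}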
static GLboolean
do_texture_readpixels(GLcontext * ctx,
GLint x, GLint y, GLsizei width, GLsizei height,
GLenum format, GLenum type,
const struct gl_pixelstore_attrib *pack,
struct intel_region *dest_region)
{
#if 0
struct intel_context *intel = intel_context(ctx);
intelScreenPrivate *screen = intel->intelScreen;
GLint pitch = pack->RowLength ? pack->RowLength : width;
__DRIdrawablePrivate *dPriv = intel->driDrawable;
int textureFormat;
GLenum glTextureFormat;
int destFormat, depthFormat, destPitch;
drm_clip_rect_t tmp;
if (INTEL_DEBUG & DEBUG_PIXEL)
fprintf(stderr, "%s\n", __FUNCTION__);
if (ctx->_ImageTransferState ||
pack->SwapBytes || pack->LsbFirst || !pack->Invert) {
if (INTEL_DEBUG & DEBUG_PIXEL)
fprintf(stderr, "%s: check_color failed\n", __FUNCTION__);
return GL_FALSE;
}
intel->vtbl.meta_texrect_source(intel, intel_readbuf_region(intel));
if (!intel->vtbl.meta_render_dest(intel, dest_region, type, format)) {
if (INTEL_DEBUG & DEBUG_PIXEL)
fprintf(stderr, "%s: couldn't set dest %s/%s\n",
__FUNCTION__,
_mesa_lookup_enum_by_nr(type),
_mesa_lookup_enum_by_nr(format));
return GL_FALSE;
}
LOCK_HARDWARE(intel);
if (intel->driDrawable->numClipRects) {
intel->vtbl.install_meta_state(intel);
intel->vtbl.meta_no_depth_write(intel);
intel->vtbl.meta_no_stencil_write(intel);
if (!driClipRectToFramebuffer(ctx->ReadBuffer, &x, &y, &width, &height)) {
UNLOCK_HARDWARE(intel);
SET_STATE(i830, state);
if (INTEL_DEBUG & DEBUG_PIXEL)
fprintf(stderr, "%s: cliprect failed\n", __FUNCTION__);
return GL_TRUE;
}
y = dPriv->h - y - height;
x += dPriv->x;
y += dPriv->y;
/* Set the frontbuffer up as a large rectangular texture.
*/
intel->vtbl.meta_tex_rect_source(intel, src_region, textureFormat);
intel->vtbl.meta_texture_blend_replace(i830, glTextureFormat);
/* Set the 3d engine to draw into the destination region:
*/
intel->vtbl.meta_draw_region(intel, dest_region);
intel->vtbl.meta_draw_format(intel, destFormat, depthFormat); /* ?? */
/* Draw a single quad, no cliprects:
*/
intel->vtbl.meta_disable_cliprects(intel);
intel->vtbl.draw_quad(intel,
0, width, 0, height,
0x00ff00ff, x, x + width, y, y + height);
intel->vtbl.leave_meta_state(intel);
}
UNLOCK_HARDWARE(intel);
intel_region_wait_fence(ctx, dest_region); /* required by GL */
return GL_TRUE;
#endif
return GL_FALSE;
}
static GLboolean
do_blit_readpixels(GLcontext * ctx,
GLint x, GLint y, GLsizei width, GLsizei height,
GLenum format, GLenum type,
const struct gl_pixelstore_attrib *pack, GLvoid * pixels)
{
struct intel_context *intel = intel_context(ctx);
struct intel_region *src = intel_readbuf_region(intel);
struct intel_buffer_object *dst = intel_buffer_object(pack->BufferObj);
GLuint dst_offset;
GLuint rowLength;
struct _DriFenceObject *fence = NULL;
if (INTEL_DEBUG & DEBUG_PIXEL)
_mesa_printf("%s\n", __FUNCTION__);
if (!src)
return GL_FALSE;
if (dst) {
/* XXX This validation should be done by core mesa:
*/
if (!_mesa_validate_pbo_access(2, pack, width, height, 1,
format, type, pixels)) {
_mesa_error(ctx, GL_INVALID_OPERATION, "glDrawPixels");
return GL_TRUE;
}
}
else {
/* PBO only for now:
*/
if (INTEL_DEBUG & DEBUG_PIXEL)
_mesa_printf("%s - not PBO\n", __FUNCTION__);
return GL_FALSE;
}
if (ctx->_ImageTransferState ||
!intel_check_blit_format(src, format, type)) {
if (INTEL_DEBUG & DEBUG_PIXEL)
_mesa_printf("%s - bad format for blit\n", __FUNCTION__);
return GL_FALSE;
}
if (pack->Alignment != 1 || pack->SwapBytes || pack->LsbFirst) {
if (INTEL_DEBUG & DEBUG_PIXEL)
_mesa_printf("%s: bad packing params\n", __FUNCTION__);
return GL_FALSE;
}
if (pack->RowLength > 0)
rowLength = pack->RowLength;
else
rowLength = width;
if (pack->Invert) {
if (INTEL_DEBUG & DEBUG_PIXEL)
_mesa_printf("%s: MESA_PACK_INVERT not done yet\n", __FUNCTION__);
return GL_FALSE;
}
else {
rowLength = -rowLength;
}
/* XXX 64-bit cast? */
dst_offset = (GLuint) _mesa_image_address(2, pack, pixels, width, height,
format, type, 0, 0, 0);
/* Although the blits go on the command buffer, need to do this and
* fire with lock held to guarantee cliprects are correct.
*/
intelFlush(&intel->ctx);
LOCK_HARDWARE(intel);
if (intel->driDrawable->numClipRects) {
GLboolean all = (width * height * src->cpp == dst->Base.Size &&
x == 0 && dst_offset == 0);
struct _DriBufferObject *dst_buffer =
intel_bufferobj_buffer(intel, dst, all ? INTEL_WRITE_FULL :
INTEL_WRITE_PART);
__DRIdrawablePrivate *dPriv = intel->driDrawable;
int nbox = dPriv->numClipRects;
drm_clip_rect_t *box = dPriv->pClipRects;
drm_clip_rect_t rect;
drm_clip_rect_t src_rect;
int i;
src_rect.x1 = dPriv->x + x;
src_rect.y1 = dPriv->y + dPriv->h - (y + height);
src_rect.x2 = src_rect.x1 + width;
src_rect.y2 = src_rect.y1 + height;
for (i = 0; i < nbox; i++) {
if (!intel_intersect_cliprects(&rect, &src_rect, &box[i]))
continue;
intelEmitCopyBlit(intel,
src->cpp,
src->pitch, src->buffer, 0,
rowLength,
dst_buffer, dst_offset,
rect.x1,
rect.y1,
rect.x1 - src_rect.x1,
rect.y2 - src_rect.y2,
rect.x2 - rect.x1, rect.y2 - rect.y1);
}
fence = intel_batchbuffer_flush(intel->batch);
driFenceReference(fence);
}
UNLOCK_HARDWARE(intel);
if (intel->driDrawable->numClipRects)
driFenceFinish(fence, DRM_FENCE_TYPE_EXE | DRM_I915_FENCE_TYPE_RW,
GL_FALSE);
driFenceUnReference(fence);
if (INTEL_DEBUG & DEBUG_PIXEL)
_mesa_printf("%s - DONE\n", __FUNCTION__);
return GL_TRUE;
}
void
intelReadPixels(GLcontext * ctx,
GLint x, GLint y, GLsizei width, GLsizei height,
GLenum format, GLenum type,
const struct gl_pixelstore_attrib *pack, GLvoid * pixels)
{
if (INTEL_DEBUG & DEBUG_PIXEL)
fprintf(stderr, "%s\n", __FUNCTION__);
intelFlush(ctx);
if (do_blit_readpixels
(ctx, x, y, width, height, format, type, pack, pixels))
return;
if (do_texture_readpixels
(ctx, x, y, width, height, format, type, pack, pixels))
return;
if (INTEL_DEBUG & DEBUG_PIXEL)
_mesa_printf("%s: fallback to swrast\n", __FUNCTION__);
_swrast_ReadPixels(ctx, x, y, width, height, format, type, pack, pixels);
}

@@ -0,0 +1,458 @@
/**************************************************************************
*
* Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
* IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
/* Provide additional functionality on top of bufmgr buffers:
* - 2d semantics and blit operations
* - refcounting of buffers for multiple images in a buffer.
* - refcounting of buffer mappings.
* - some logic for moving the buffers to the best memory pools for
* given operations.
*
* Most of this is to make it easier to implement the fixed-layout
* mipmap tree required by intel hardware in the face of GL's
* programming interface where each image can be specified in random
* order and it isn't clear what layout the tree should have until the
* last moment.
*/
#include "intel_context.h"
#include "intel_regions.h"
#include "intel_blit.h"
#include "intel_buffer_objects.h"
#include "dri_bufmgr.h"
#include "intel_batchbuffer.h"
#define FILE_DEBUG_FLAG DEBUG_REGION
void
intel_region_idle(intelScreenPrivate *intelScreen, struct intel_region *region)
{
DBG("%s\n", __FUNCTION__);
if (region && region->buffer)
driBOWaitIdle(region->buffer, GL_FALSE);
}
/* XXX: Thread safety?
*/
GLubyte *
intel_region_map(intelScreenPrivate *intelScreen, struct intel_region *region)
{
DBG("%s\n", __FUNCTION__);
if (!region->map_refcount++) {
if (region->pbo)
intel_region_cow(intelScreen, region);
region->map = driBOMap(region->buffer,
DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE, 0);
}
return region->map;
}
void
intel_region_unmap(intelScreenPrivate *intelScreen, struct intel_region *region)
{
DBG("%s\n", __FUNCTION__);
if (!--region->map_refcount) {
driBOUnmap(region->buffer);
region->map = NULL;
}
}
struct intel_region *
intel_region_alloc(intelScreenPrivate *intelScreen,
GLuint cpp, GLuint pitch, GLuint height)
{
struct intel_region *region = calloc(sizeof(*region), 1);
DBG("%s\n", __FUNCTION__);
region->cpp = cpp;
region->pitch = pitch;
region->height = height; /* needed? */
region->refcount = 1;
driGenBuffers(intelScreen->regionPool,
"region", 1, &region->buffer, 64, 0, 0);
driBOData(region->buffer, pitch * cpp * height, NULL, 0);
return region;
}
void
intel_region_reference(struct intel_region **dst, struct intel_region *src)
{
assert(*dst == NULL);
if (src) {
src->refcount++;
*dst = src;
}
}
void
intel_region_release(intelScreenPrivate *intelScreen,
struct intel_region **region)
{
if (!*region)
return;
DBG("%s %d\n", __FUNCTION__, (*region)->refcount - 1);
ASSERT((*region)->refcount > 0);
(*region)->refcount--;
if ((*region)->refcount == 0) {
assert((*region)->map_refcount == 0);
if ((*region)->pbo)
(*region)->pbo->region = NULL;
(*region)->pbo = NULL;
driBOUnReference((*region)->buffer);
free(*region);
}
*region = NULL;
}
struct intel_region *
intel_region_create_static(intelScreenPrivate *intelScreen,
GLuint mem_type,
GLuint offset,
void *virtual,
GLuint cpp, GLuint pitch, GLuint height)
{
struct intel_region *region = calloc(sizeof(*region), 1);
DBG("%s\n", __FUNCTION__);
region->cpp = cpp;
region->pitch = pitch;
region->height = height; /* needed? */
region->refcount = 1;
/*
* We use a "shared" buffer type to indicate buffers created and
* shared by others.
*/
driGenBuffers(intelScreen->staticPool, "static region", 1,
&region->buffer, 64,
DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_NO_EVICT |
DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE, 0);
driBOSetStatic(region->buffer, offset, pitch * cpp * height, virtual, 0);
return region;
}
void
intel_region_update_static(intelScreenPrivate *intelScreen,
struct intel_region *region,
GLuint mem_type,
GLuint offset,
void *virtual,
GLuint cpp, GLuint pitch, GLuint height)
{
DBG("%s\n", __FUNCTION__);
region->cpp = cpp;
region->pitch = pitch;
region->height = height; /* needed? */
/*
* We use a "shared" buffer type to indicate buffers created and
* shared by others.
*/
driDeleteBuffers(1, &region->buffer);
driGenBuffers(intelScreen->staticPool, "static region", 1,
&region->buffer, 64,
DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_NO_EVICT |
DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE, 0);
driBOSetStatic(region->buffer, offset, pitch * cpp * height, virtual, 0);
}
/*
* XXX Move this into core Mesa?
*/
static void
_mesa_copy_rect(GLubyte * dst,
GLuint cpp,
GLuint dst_pitch,
GLuint dst_x,
GLuint dst_y,
GLuint width,
GLuint height,
GLubyte * src, GLuint src_pitch, GLuint src_x, GLuint src_y)
{
GLuint i;
dst_pitch *= cpp;
src_pitch *= cpp;
dst += dst_x * cpp;
src += src_x * cpp;
dst += dst_y * dst_pitch;
src += src_y * dst_pitch;
width *= cpp;
if (width == dst_pitch && width == src_pitch)
memcpy(dst, src, height * width);
else {
for (i = 0; i < height; i++) {
memcpy(dst, src, width);
dst += dst_pitch;
src += src_pitch;
}
}
}
/* Upload data to a rectangular sub-region. Lots of choices how to do this:
*
* - memcpy by span to current destination
* - upload data as new buffer and blit
*
* Currently always memcpy.
*/
void
intel_region_data(intelScreenPrivate *intelScreen,
struct intel_region *dst,
GLuint dst_offset,
GLuint dstx, GLuint dsty,
void *src, GLuint src_pitch,
GLuint srcx, GLuint srcy, GLuint width, GLuint height)
{
struct intel_context *intel = intelScreenContext(intelScreen);
DBG("%s\n", __FUNCTION__);
if (intel == NULL)
return;
if (dst->pbo) {
if (dstx == 0 &&
dsty == 0 && width == dst->pitch && height == dst->height)
intel_region_release_pbo(intelScreen, dst);
else
intel_region_cow(intelScreen, dst);
}
LOCK_HARDWARE(intel);
_mesa_copy_rect(intel_region_map(intelScreen, dst) + dst_offset,
dst->cpp,
dst->pitch,
dstx, dsty, width, height, src, src_pitch, srcx, srcy);
intel_region_unmap(intelScreen, dst);
UNLOCK_HARDWARE(intel);
}
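
A hedged usage sketch of the helpers above -- allocate a region, upload a rectangle into it, and drop the reference. The 640x480, 4 bytes-per-pixel figures and the bare intelScreen pointer are illustrative:

static void
scratch_upload(intelScreenPrivate *intelScreen,
               const GLubyte *pixels, GLuint src_pitch)
{
   struct intel_region *region =
      intel_region_alloc(intelScreen, 4 /* cpp */, 640 /* pitch */, 480);

   /* Copy a 640x480 rectangle into the top-left corner.  Pitches are in
    * pixels; intel_region_data() maps, memcpy's and unmaps the region.
    */
   intel_region_data(intelScreen, region, 0 /* dst_offset */,
                     0, 0,                    /* dstx, dsty */
                     (void *) pixels, src_pitch,
                     0, 0,                    /* srcx, srcy */
                     640, 480);

   intel_region_release(intelScreen, &region);
}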
/* Copy rectangular sub-regions. Need better logic about when to
* push buffers into AGP - will currently do so whenever possible.
*/
void
intel_region_copy(intelScreenPrivate *intelScreen,
struct intel_region *dst,
GLuint dst_offset,
GLuint dstx, GLuint dsty,
struct intel_region *src,
GLuint src_offset,
GLuint srcx, GLuint srcy, GLuint width, GLuint height)
{
struct intel_context *intel = intelScreenContext(intelScreen);
DBG("%s\n", __FUNCTION__);
if (intel == NULL)
return;
if (dst->pbo) {
if (dstx == 0 &&
dsty == 0 && width == dst->pitch && height == dst->height)
intel_region_release_pbo(intelScreen, dst);
else
intel_region_cow(intelScreen, dst);
}
assert(src->cpp == dst->cpp);
intelEmitCopyBlit(intel,
dst->cpp,
src->pitch, src->buffer, src_offset,
dst->pitch, dst->buffer, dst_offset,
srcx, srcy, dstx, dsty, width, height);
}
/* Fill a rectangular sub-region. Need better logic about when to
* push buffers into AGP - will currently do so whenever possible.
*/
void
intel_region_fill(intelScreenPrivate *intelScreen,
struct intel_region *dst,
GLuint dst_offset,
GLuint dstx, GLuint dsty,
GLuint width, GLuint height, GLuint color)
{
struct intel_context *intel = intelScreenContext(intelScreen);
DBG("%s\n", __FUNCTION__);
if (intel == NULL)
return;
if (dst->pbo) {
if (dstx == 0 &&
dsty == 0 && width == dst->pitch && height == dst->height)
intel_region_release_pbo(intelScreen, dst);
else
intel_region_cow(intelScreen, dst);
}
intelEmitFillBlit(intel,
dst->cpp,
dst->pitch, dst->buffer, dst_offset,
dstx, dsty, width, height, color);
}
/* Attach to a pbo, discarding our data. Effectively zero-copy upload
* the pbo's data.
*/
void
intel_region_attach_pbo(intelScreenPrivate *intelScreen,
struct intel_region *region,
struct intel_buffer_object *pbo)
{
if (region->pbo == pbo)
return;
/* If there is already a pbo attached, break the cow tie now.
* Don't call intel_region_release_pbo() as that would
* unnecessarily allocate a new buffer we would have to immediately
* discard.
*/
if (region->pbo) {
region->pbo->region = NULL;
region->pbo = NULL;
}
if (region->buffer) {
driDeleteBuffers(1, &region->buffer);
region->buffer = NULL;
}
region->pbo = pbo;
region->pbo->region = region;
region->buffer = driBOReference(pbo->buffer);
}
/* Break the COW tie to the pbo. The pbo gets to keep the data.
*/
void
intel_region_release_pbo(intelScreenPrivate *intelScreen,
struct intel_region *region)
{
assert(region->buffer == region->pbo->buffer);
region->pbo->region = NULL;
region->pbo = NULL;
driBOUnReference(region->buffer);
region->buffer = NULL;
driGenBuffers(intelScreen->regionPool,
"region", 1, &region->buffer, 64, 0, 0);
driBOData(region->buffer,
region->cpp * region->pitch * region->height, NULL, 0);
}
/* Break the COW tie to the pbo. Both the pbo and the region end up
* with a copy of the data.
*/
void
intel_region_cow(intelScreenPrivate *intelScreen, struct intel_region *region)
{
struct intel_context *intel = intelScreenContext(intelScreen);
struct intel_buffer_object *pbo = region->pbo;
if (intel == NULL)
return;
intel_region_release_pbo(intelScreen, region);
assert(region->cpp * region->pitch * region->height == pbo->Base.Size);
DBG("%s (%d bytes)\n", __FUNCTION__, pbo->Base.Size);
/* Now blit from the texture buffer to the new buffer:
*/
intel_batchbuffer_flush(intel->batch);
if (!intel->locked) {
LOCK_HARDWARE(intel);
intelEmitCopyBlit(intel,
region->cpp,
region->pitch,
region->buffer, 0,
region->pitch,
pbo->buffer, 0,
0, 0, 0, 0, region->pitch, region->height);
intel_batchbuffer_flush(intel->batch);
UNLOCK_HARDWARE(intel);
}
else {
intelEmitCopyBlit(intel,
region->cpp,
region->pitch,
region->buffer, 0,
region->pitch,
pbo->buffer, 0,
0, 0, 0, 0, region->pitch, region->height);
intel_batchbuffer_flush(intel->batch);
}
}
struct _DriBufferObject *
intel_region_buffer(intelScreenPrivate *intelScreen,
struct intel_region *region, GLuint flag)
{
if (region->pbo) {
if (flag == INTEL_WRITE_PART)
intel_region_cow(intelScreen, region);
else if (flag == INTEL_WRITE_FULL)
intel_region_release_pbo(intelScreen, region);
}
return region->buffer;
}


@@ -0,0 +1,142 @@
/**************************************************************************
*
* Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
* IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#ifndef INTEL_REGIONS_H
#define INTEL_REGIONS_H
#include "mtypes.h"
#include "intel_screen.h"
struct intel_context;
struct intel_buffer_object;
/**
* A layer on top of the bufmgr buffers that adds a few useful things:
*
* - Refcounting for local buffer references.
* - Refcounting for buffer maps
* - Buffer dimensions - pitch and height.
* - Blitter commands for copying 2D regions between buffers. (really???)
*/
struct intel_region
{
struct _DriBufferObject *buffer; /**< buffer manager's buffer ID */
GLuint refcount; /**< Reference count for region */
GLuint cpp; /**< bytes per pixel */
GLuint pitch; /**< in pixels */
GLuint height; /**< in pixels */
GLubyte *map; /**< only non-NULL when region is actually mapped */
GLuint map_refcount; /**< Reference count for mapping */
GLuint draw_offset; /**< Offset of drawing address within the region */
struct intel_buffer_object *pbo; /* zero-copy uploads */
};
/* Allocate a refcounted region. Pointers to regions should only be
* copied by calling intel_region_reference().
*/
struct intel_region *intel_region_alloc(intelScreenPrivate *intelScreen,
GLuint cpp,
GLuint pitch, GLuint height);
void intel_region_reference(struct intel_region **dst,
struct intel_region *src);
void intel_region_release(intelScreenPrivate *intelScreen,
struct intel_region **ib);
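A minimal sketch, assuming a hypothetical single-slot cache, of how the refcounting declared above is meant to be used: copies of region pointers go through intel_region_reference(), and every holder eventually releases its reference.
static struct intel_region *saved_region;   /* hypothetical cache slot */
static void
cache_region(intelScreenPrivate *screen, struct intel_region *r)
{
   if (saved_region)
      intel_region_release(screen, &saved_region);  /* drop the old ref */
   intel_region_reference(&saved_region, r);         /* bump r's refcount */
}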
extern struct intel_region
*intel_region_create_static(intelScreenPrivate *intelScreen,
GLuint mem_type,
GLuint offset,
void *virtual,
GLuint cpp,
GLuint pitch, GLuint height);
extern void
intel_region_update_static(intelScreenPrivate *intelScreen,
struct intel_region *region,
GLuint mem_type,
GLuint offset,
void *virtual,
GLuint cpp, GLuint pitch, GLuint height);
void intel_region_idle(intelScreenPrivate *intelScreen,
struct intel_region *ib);
/* Map/unmap regions. This is refcounted also:
*/
GLubyte *intel_region_map(intelScreenPrivate *intelScreen,
struct intel_region *ib);
void intel_region_unmap(intelScreenPrivate *intelScreen, struct intel_region *ib);
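Because the map calls are refcounted, CPU access is expected to be bracketed by a matching map/unmap pair. A hedged sketch follows; the helper name and the single-row clear are assumptions, only the map/unmap signatures come from this header.
static void
clear_first_row(intelScreenPrivate *screen, struct intel_region *region)
{
   GLuint x;
   GLubyte *ptr = intel_region_map(screen, region);   /* bumps map_refcount */
   for (x = 0; x < region->pitch * region->cpp; x++)
      ptr[x] = 0;                                     /* clear one row of bytes */
   intel_region_unmap(screen, region);                /* drops map_refcount */
}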
/* Upload data to a rectangular sub-region
*/
void intel_region_data(intelScreenPrivate *intelScreen,
struct intel_region *dest,
GLuint dest_offset,
GLuint destx, GLuint desty,
void *src, GLuint src_stride,
GLuint srcx, GLuint srcy, GLuint width, GLuint height);
/* Copy rectangular sub-regions
*/
void intel_region_copy(intelScreenPrivate *intelScreen,
struct intel_region *dest,
GLuint dest_offset,
GLuint destx, GLuint desty,
struct intel_region *src,
GLuint src_offset,
GLuint srcx, GLuint srcy, GLuint width, GLuint height);
/* Fill a rectangular sub-region
*/
void intel_region_fill(intelScreenPrivate *intelScreen,
struct intel_region *dest,
GLuint dest_offset,
GLuint destx, GLuint desty,
GLuint width, GLuint height, GLuint color);
/* Helpers for zerocopy uploads, particularly texture image uploads:
*/
void intel_region_attach_pbo(intelScreenPrivate *intelScreen,
struct intel_region *region,
struct intel_buffer_object *pbo);
void intel_region_release_pbo(intelScreenPrivate *intelScreen,
struct intel_region *region);
void intel_region_cow(intelScreenPrivate *intelScreen,
struct intel_region *region);
struct _DriBufferObject *intel_region_buffer(intelScreenPrivate *intelScreen,
struct intel_region *region,
GLuint flag);
#endif
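To tie the zero-copy helpers together, here is a hedged sketch of the intended flow; the wrapper name and its parameters are hypothetical, while intel_region_attach_pbo(), intel_region_buffer(), and the INTEL_WRITE_FULL/INTEL_WRITE_PART flags are taken from this patch.
static struct _DriBufferObject *
use_pbo_then_write(intelScreenPrivate *screen,
                   struct intel_region *region,
                   struct intel_buffer_object *pbo,
                   GLboolean whole_surface)
{
   /* Alias the pbo's storage instead of copying it (zero-copy upload). */
   intel_region_attach_pbo(screen, region, pbo);
   /* When the region is later written, the COW tie is broken: a full
    * overwrite can drop the shared data (release), a partial write must
    * first copy it (cow).  intel_region_buffer() performs that dispatch.
    */
   return intel_region_buffer(screen, region,
                              whole_surface ? INTEL_WRITE_FULL
                                            : INTEL_WRITE_PART);
}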


@@ -51,14 +51,14 @@
* dma buffers. Use strip/fan hardware primitives where possible.
* Try to simulate missing primitives with indexed vertices.
*/
#define HAVE_POINTS 0 /* Has it, but can't use because subpixel has to
* be adjusted for points on the INTEL/I845G
*/
#define HAVE_POINTS 0 /* Has it, but can't use because subpixel has to
* be adjusted for points on the INTEL/I845G
*/
#define HAVE_LINES 1
#define HAVE_LINE_STRIPS 1
#define HAVE_TRIANGLES 1
#define HAVE_TRI_STRIPS 1
#define HAVE_TRI_STRIP_1 0 /* has it, template can't use it yet */
#define HAVE_TRI_STRIP_1 0 /* has it, template can't use it yet */
#define HAVE_TRI_FANS 1
#define HAVE_POLYGONS 1
#define HAVE_QUADS 0
@@ -66,7 +66,7 @@
#define HAVE_ELTS 0
static GLuint hw_prim[GL_POLYGON+1] = {
static GLuint hw_prim[GL_POLYGON + 1] = {
0,
PRIM3D_LINELIST,
PRIM3D_LINESTRIP,
@@ -79,7 +79,7 @@ static GLuint hw_prim[GL_POLYGON+1] = {
PRIM3D_POLY
};
static const GLenum reduced_prim[GL_POLYGON+1] = {
static const GLenum reduced_prim[GL_POLYGON + 1] = {
GL_POINTS,
GL_LINES,
GL_LINES,
@@ -92,58 +92,61 @@ static const GLenum reduced_prim[GL_POLYGON+1] = {
GL_TRIANGLES
};
static const int scale_prim[GL_POLYGON+1] = {
0, /* fallback case */
static const int scale_prim[GL_POLYGON + 1] = {
0, /* fallback case */
1,
2,
2,
1,
3,
3,
0, /* fallback case */
0, /* fallback case */
0, /* fallback case */
0, /* fallback case */
3
};
static void intelDmaPrimitive( intelContextPtr intel, GLenum prim )
static void
intelDmaPrimitive(struct intel_context *intel, GLenum prim)
{
if (0) fprintf(stderr, "%s %s\n", __FUNCTION__, _mesa_lookup_enum_by_nr(prim));
if (0)
fprintf(stderr, "%s %s\n", __FUNCTION__, _mesa_lookup_enum_by_nr(prim));
INTEL_FIREVERTICES(intel);
intel->vtbl.reduced_primitive_state( intel, reduced_prim[prim] );
intelStartInlinePrimitive( intel, hw_prim[prim] );
intel->vtbl.reduced_primitive_state(intel, reduced_prim[prim]);
intelStartInlinePrimitive(intel, hw_prim[prim], INTEL_BATCH_CLIPRECTS);
}
#define LOCAL_VARS intelContextPtr intel = INTEL_CONTEXT(ctx)
#define LOCAL_VARS struct intel_context *intel = intel_context(ctx)
#define INIT( prim ) \
do { \
intelDmaPrimitive( intel, prim ); \
} while (0)
#define FLUSH() INTEL_FIREVERTICES( intel )
#define FLUSH() INTEL_FIREVERTICES(intel)
#define GET_SUBSEQUENT_VB_MAX_VERTS() \
(((intel->alloc.size / 2) - 1500) / (intel->vertex_size*4))
((BATCH_SZ - 1500) / (intel->vertex_size*4))
#define GET_CURRENT_VB_MAX_VERTS() GET_SUBSEQUENT_VB_MAX_VERTS()
#define ALLOC_VERTS( nr ) \
intelExtendInlinePrimitive( intel, (nr) * intel->vertex_size )
#define EMIT_VERTS( ctx, j, nr, buf ) \
_tnl_emit_vertices_to_buffer(ctx, j, (j)+(nr), buf )
_tnl_emit_vertices_to_buffer(ctx, j, (j)+(nr), buf )
#define TAG(x) intel_##x
#include "tnl_dd/t_dd_dmatmp.h"
/**********************************************************************/
/* Render pipeline stage */
/**********************************************************************/
/* Heuristic to choose between the two render paths:
*/
static GLboolean choose_render( intelContextPtr intel,
struct vertex_buffer *VB )
static GLboolean
choose_render(struct intel_context *intel, struct vertex_buffer *VB)
{
int vertsz = intel->vertex_size;
int cost_render = 0;
@@ -153,20 +156,20 @@ static GLboolean choose_render( intelContextPtr intel,
int nr_rverts = 0;
int rprim = intel->reduced_primitive;
int i = 0;
for (i = 0 ; i < VB->PrimitiveCount ; i++) {
for (i = 0; i < VB->PrimitiveCount; i++) {
GLuint prim = VB->Primitive[i].mode;
GLuint length = VB->Primitive[i].count;
if (!length)
continue;
continue;
nr_prims++;
nr_rverts += length * scale_prim[prim & PRIM_MODE_MASK];
if (reduced_prim[prim & PRIM_MODE_MASK] != rprim) {
nr_rprims++;
rprim = reduced_prim[prim & PRIM_MODE_MASK];
nr_rprims++;
rprim = reduced_prim[prim & PRIM_MODE_MASK];
}
}
@@ -177,64 +180,63 @@ static GLboolean choose_render( intelContextPtr intel,
/* One point for every 1024 dwords (4k) of dma:
*/
cost_render += (vertsz * i) / 1024;
cost_fallback += (vertsz * nr_rverts) / 1024;
cost_render += (vertsz * i) / 1024;
cost_fallback += (vertsz * nr_rverts) / 1024;
if (0)
fprintf(stderr, "cost render: %d fallback: %d\n",
cost_render, cost_fallback);
cost_render, cost_fallback);
if (cost_render > cost_fallback)
if (cost_render > cost_fallback)
return GL_FALSE;
return GL_TRUE;
}
static GLboolean intel_run_render( GLcontext *ctx,
struct tnl_pipeline_stage *stage )
static GLboolean
intel_run_render(GLcontext * ctx, struct tnl_pipeline_stage *stage)
{
intelContextPtr intel = INTEL_CONTEXT(ctx);
struct intel_context *intel = intel_context(ctx);
TNLcontext *tnl = TNL_CONTEXT(ctx);
struct vertex_buffer *VB = &tnl->vb;
GLuint i;
/* Don't handle clipping or indexed vertices.
*/
if (intel->RenderIndex != 0 ||
!intel_validate_render( ctx, VB ) ||
!choose_render( intel, VB )) {
if (intel->RenderIndex != 0 ||
!intel_validate_render(ctx, VB) || !choose_render(intel, VB)) {
return GL_TRUE;
}
tnl->clipspace.new_inputs |= VERT_BIT_POS;
tnl->Driver.Render.Start( ctx );
for (i = 0 ; i < VB->PrimitiveCount ; i++)
{
tnl->Driver.Render.Start(ctx);
for (i = 0; i < VB->PrimitiveCount; i++) {
GLuint prim = VB->Primitive[i].mode;
GLuint start = VB->Primitive[i].start;
GLuint length = VB->Primitive[i].count;
if (!length)
continue;
continue;
intel_render_tab_verts[prim & PRIM_MODE_MASK]( ctx, start, start + length,
prim );
intel_render_tab_verts[prim & PRIM_MODE_MASK] (ctx, start,
start + length, prim);
}
tnl->Driver.Render.Finish( ctx );
return GL_FALSE; /* finished the pipe */
tnl->Driver.Render.Finish(ctx);
INTEL_FIREVERTICES(intel);
return GL_FALSE; /* finished the pipe */
}
const struct tnl_pipeline_stage _intel_render_stage =
{
const struct tnl_pipeline_stage _intel_render_stage = {
"intel render",
NULL,
NULL,
NULL,
NULL,
intel_run_render /* run */
intel_run_render /* run */
};


@@ -15,11 +15,14 @@
void
matrix23Set(struct matrix23 *m,
int m00, int m01, int m02,
int m10, int m11, int m12)
int m00, int m01, int m02, int m10, int m11, int m12)
{
m->m00 = m00; m->m01 = m01; m->m02 = m02;
m->m10 = m10; m->m11 = m11; m->m12 = m12;
m->m00 = m00;
m->m01 = m01;
m->m02 = m02;
m->m10 = m10;
m->m11 = m11;
m->m12 = m12;
}
@@ -66,9 +69,9 @@ matrix23TransformDistance(const struct matrix23 *m, int *xDist, int *yDist)
*yDist = (y1 - y0) + (y2 - y0);
if (*xDist < 0)
*xDist = -*xDist;
*xDist = -*xDist;
if (*yDist < 0)
*yDist = -*yDist;
*yDist = -*yDist;
}
@@ -76,7 +79,8 @@ matrix23TransformDistance(const struct matrix23 *m, int *xDist, int *yDist)
* Transform the rect defined by (x, y, w, h) by m.
*/
void
matrix23TransformRect(const struct matrix23 *m, int *x, int *y, int *w, int *h)
matrix23TransformRect(const struct matrix23 *m, int *x, int *y, int *w,
int *h)
{
int x0 = *x, y0 = *y;
int x1 = *x + *w, y1 = *y;
@@ -108,16 +112,16 @@ matrix23Rotate(struct matrix23 *m, int width, int height, int angle)
matrix23Set(m, 1, 0, 0, 0, 1, 0);
break;
case 90:
matrix23Set(m, 0, 1, 0, -1, 0, width);
matrix23Set(m, 0, 1, 0, -1, 0, width);
break;
case 180:
matrix23Set(m, -1, 0, width, 0, -1, height);
matrix23Set(m, -1, 0, width, 0, -1, height);
break;
case 270:
matrix23Set(m, 0, -1, height, 1, 0, 0);
matrix23Set(m, 0, -1, height, 1, 0, 0);
break;
default:
/*abort()*/;
/*abort() */ ;
}
}
@@ -129,16 +133,24 @@ void
matrix23Flip(struct matrix23 *m, int width, int height, int xflip, int yflip)
{
if (xflip) {
m->m00 = -1; m->m01 = 0; m->m02 = width - 1;
m->m00 = -1;
m->m01 = 0;
m->m02 = width - 1;
}
else {
m->m00 = 1; m->m01 = 0; m->m02 = 0;
m->m00 = 1;
m->m01 = 0;
m->m02 = 0;
}
if (yflip) {
m->m10 = 0; m->m11 = -1; m->m12 = height - 1;
m->m10 = 0;
m->m11 = -1;
m->m12 = height - 1;
}
else {
m->m10 = 0; m->m11 = 1; m->m12 = 0;
m->m10 = 0;
m->m11 = 1;
m->m12 = 0;
}
}
@@ -169,14 +181,18 @@ main(int argc, char *argv[])
{
int width = 500, height = 400;
int rot;
int fx = 0, fy = 0; /* flip x and/or y ? */
int fx = 0, fy = 0; /* flip x and/or y ? */
int coords[4][2];
/* four corner coords to test with */
coords[0][0] = 0; coords[0][1] = 0;
coords[1][0] = width-1; coords[1][1] = 0;
coords[2][0] = width-1; coords[2][1] = height-1;
coords[3][0] = 0; coords[3][1] = height-1;
coords[0][0] = 0;
coords[0][1] = 0;
coords[1][0] = width - 1;
coords[1][1] = 0;
coords[2][0] = width - 1;
coords[2][1] = height - 1;
coords[3][0] = 0;
coords[3][1] = height - 1;
for (rot = 0; rot < 360; rot += 90) {


@@ -11,11 +11,9 @@ struct matrix23
extern void
matrix23Set(struct matrix23 *m,
int m00, int m01, int m02,
int m10, int m11, int m12);
int m00, int m01, int m02, int m10, int m11, int m12);
extern void
matrix23TransformCoordi(const struct matrix23 *m, int *x, int *y);
extern void matrix23TransformCoordi(const struct matrix23 *m, int *x, int *y);
extern void
matrix23TransformCoordf(const struct matrix23 *m, float *x, float *y);


@@ -38,46 +38,47 @@
#include "intel_screen.h"
#include "intel_buffers.h"
#include "intel_tex.h"
#include "intel_span.h"
#include "intel_tris.h"
#include "intel_ioctl.h"
#include "intel_fbo.h"
#include "i830_dri.h"
#include "dri_bufpool.h"
#include "intel_regions.h"
PUBLIC const char __driConfigOptions[] =
DRI_CONF_BEGIN
DRI_CONF_SECTION_PERFORMANCE
DRI_CONF_FTHROTTLE_MODE(DRI_CONF_FTHROTTLE_IRQS)
DRI_CONF_VBLANK_MODE(DRI_CONF_VBLANK_DEF_INTERVAL_0)
DRI_CONF_SECTION_END
DRI_CONF_SECTION_QUALITY
DRI_CONF_FORCE_S3TC_ENABLE(false)
DRI_CONF_ALLOW_LARGE_TEXTURES(1)
DRI_CONF_SECTION_END
DRI_CONF_END;
const GLuint __driNConfigOptions = 4;
DRI_CONF_BEGIN DRI_CONF_SECTION_PERFORMANCE
DRI_CONF_FTHROTTLE_MODE(DRI_CONF_FTHROTTLE_IRQS)
DRI_CONF_VBLANK_MODE(DRI_CONF_VBLANK_DEF_INTERVAL_0)
DRI_CONF_SECTION_END DRI_CONF_SECTION_QUALITY
DRI_CONF_FORCE_S3TC_ENABLE(false)
DRI_CONF_ALLOW_LARGE_TEXTURES(1)
DRI_CONF_SECTION_END DRI_CONF_END;
const GLuint __driNConfigOptions = 4;
#ifdef USE_NEW_INTERFACE
static PFNGLXCREATECONTEXTMODES create_context_modes = NULL;
#endif /*USE_NEW_INTERFACE*/
static PFNGLXCREATECONTEXTMODES create_context_modes = NULL;
#endif /*USE_NEW_INTERFACE */
extern const struct dri_extension card_extensions[];
extern const struct dri_extension card_extensions[];
/**
* Map all the memory regions described by the screen.
* \return GL_TRUE if success, GL_FALSE if error.
*/
GLboolean
intelMapScreenRegions(__DRIscreenPrivate *sPriv)
intelMapScreenRegions(__DRIscreenPrivate * sPriv)
{
intelScreenPrivate *intelScreen = (intelScreenPrivate *)sPriv->private;
intelScreenPrivate *intelScreen = (intelScreenPrivate *) sPriv->private;
if (intelScreen->front.handle) {
if (drmMap(sPriv->fd,
intelScreen->front.handle,
intelScreen->front.size,
(drmAddress *)&intelScreen->front.map) != 0) {
(drmAddress *) & intelScreen->front.map) != 0) {
_mesa_problem(NULL, "drmMap(frontbuffer) failed!");
return GL_FALSE;
}
@@ -86,42 +87,140 @@ intelMapScreenRegions(__DRIscreenPrivate *sPriv)
_mesa_warning(NULL, "no front buffer handle in intelMapScreenRegions!");
}
_mesa_printf("Back 0x%08x ", intelScreen->back.handle);
if (drmMap(sPriv->fd,
intelScreen->back.handle,
intelScreen->back.size,
(drmAddress *)&intelScreen->back.map) != 0) {
(drmAddress *) & intelScreen->back.map) != 0) {
intelUnmapScreenRegions(intelScreen);
return GL_FALSE;
}
_mesa_printf("Depth 0x%08x ", intelScreen->depth.handle);
if (drmMap(sPriv->fd,
intelScreen->depth.handle,
intelScreen->depth.size,
(drmAddress *)&intelScreen->depth.map) != 0) {
(drmAddress *) & intelScreen->depth.map) != 0) {
intelUnmapScreenRegions(intelScreen);
return GL_FALSE;
}
#if 0
_mesa_printf("TEX 0x%08x ", intelScreen->tex.handle);
if (drmMap(sPriv->fd,
intelScreen->tex.handle,
intelScreen->tex.size,
(drmAddress *)&intelScreen->tex.map) != 0) {
(drmAddress *) & intelScreen->tex.map) != 0) {
intelUnmapScreenRegions(intelScreen);
return GL_FALSE;
}
#endif
if (0)
printf("Mappings: front: %p back: %p depth: %p tex: %p\n",
intelScreen->front.map,
intelScreen->back.map,
intelScreen->depth.map,
intelScreen->tex.map);
intelScreen->front.map,
intelScreen->back.map,
intelScreen->depth.map, intelScreen->tex.map);
return GL_TRUE;
}
static struct intel_region *
intel_recreate_static(intelScreenPrivate *intelScreen,
struct intel_region *region,
GLuint mem_type,
GLuint offset,
void *virtual,
GLuint cpp, GLuint pitch, GLuint height)
{
if (region) {
intel_region_update_static(intelScreen, region, mem_type, offset,
virtual, cpp, pitch, height);
} else {
region = intel_region_create_static(intelScreen, mem_type, offset,
virtual, cpp, pitch, height);
}
return region;
}
/* Create intel_region structs to describe the static front,back,depth
* buffers created by the xserver.
*
* Although FBOs mean we no longer use these as render targets in
* all circumstances, they won't go away until the back and depth
* buffers become private, and the front and rotated buffers will
* remain even then.
*
* Note that these don't allocate video memory, just describe
* allocations already made by the X server.
*/
static void
intel_recreate_static_regions(intelScreenPrivate *intelScreen)
{
intelScreen->front_region =
intel_recreate_static(intelScreen,
intelScreen->front_region,
DRM_BO_FLAG_MEM_TT,
intelScreen->front.offset,
intelScreen->front.map,
intelScreen->cpp,
intelScreen->front.pitch / intelScreen->cpp,
intelScreen->height);
intelScreen->rotated_region =
intel_recreate_static(intelScreen,
intelScreen->rotated_region,
DRM_BO_FLAG_MEM_TT,
intelScreen->rotated.offset,
intelScreen->rotated.map,
intelScreen->cpp,
intelScreen->rotated.pitch /
intelScreen->cpp, intelScreen->height);
intelScreen->back_region =
intel_recreate_static(intelScreen,
intelScreen->back_region,
DRM_BO_FLAG_MEM_TT,
intelScreen->back.offset,
intelScreen->back.map,
intelScreen->cpp,
intelScreen->back.pitch / intelScreen->cpp,
intelScreen->height);
/* Still assuming front.cpp == depth.cpp
*/
intelScreen->depth_region =
intel_recreate_static(intelScreen,
intelScreen->depth_region,
DRM_BO_FLAG_MEM_TT,
intelScreen->depth.offset,
intelScreen->depth.map,
intelScreen->cpp,
intelScreen->depth.pitch / intelScreen->cpp,
intelScreen->height);
}
/**
* Use the information in the sarea to update the screen parameters
* related to screen rotation. Needs to be called locked.
*/
void
intelUnmapScreenRegions(intelScreenPrivate *intelScreen)
intelUpdateScreenRotation(__DRIscreenPrivate * sPriv, drmI830Sarea * sarea)
{
intelScreenPrivate *intelScreen = (intelScreenPrivate *) sPriv->private;
intelUnmapScreenRegions(intelScreen);
intelUpdateScreenFromSAREA(intelScreen, sarea);
if (!intelMapScreenRegions(sPriv)) {
fprintf(stderr, "ERROR Remapping screen regions!!!\n");
}
intel_recreate_static_regions(intelScreen);
}
void
intelUnmapScreenRegions(intelScreenPrivate * intelScreen)
{
#define REALLY_UNMAP 1
if (intelScreen->front.map) {
@@ -154,9 +253,8 @@ intelUnmapScreenRegions(intelScreenPrivate *intelScreen)
static void
intelPrintDRIInfo(intelScreenPrivate *intelScreen,
__DRIscreenPrivate *sPriv,
I830DRIPtr gDRIPriv)
intelPrintDRIInfo(intelScreenPrivate * intelScreen,
__DRIscreenPrivate * sPriv, I830DRIPtr gDRIPriv)
{
fprintf(stderr, "*** Front size: 0x%x offset: 0x%x pitch: %d\n",
intelScreen->front.size, intelScreen->front.offset,
@@ -177,9 +275,10 @@ intelPrintDRIInfo(intelScreenPrivate *intelScreen,
static void
intelPrintSAREA(const drmI830Sarea *sarea)
intelPrintSAREA(const drmI830Sarea * sarea)
{
fprintf(stderr, "SAREA: sarea width %d height %d\n", sarea->width, sarea->height);
fprintf(stderr, "SAREA: sarea width %d height %d\n", sarea->width,
sarea->height);
fprintf(stderr, "SAREA: pitch: %d\n", sarea->pitch);
fprintf(stderr,
"SAREA: front offset: 0x%08x size: 0x%x handle: 0x%x\n",
@@ -193,8 +292,7 @@ intelPrintSAREA(const drmI830Sarea *sarea)
sarea->depth_offset, sarea->depth_size,
(unsigned) sarea->depth_handle);
fprintf(stderr, "SAREA: tex offset: 0x%08x size: 0x%x handle: 0x%x\n",
sarea->tex_offset, sarea->tex_size,
(unsigned) sarea->tex_handle);
sarea->tex_offset, sarea->tex_size, (unsigned) sarea->tex_handle);
fprintf(stderr, "SAREA: rotation: %d\n", sarea->rotation);
fprintf(stderr,
"SAREA: rotated offset: 0x%08x size: 0x%x\n",
@@ -208,8 +306,8 @@ intelPrintSAREA(const drmI830Sarea *sarea)
* information in the SAREA. This function updates those parameters.
*/
void
intelUpdateScreenFromSAREA(intelScreenPrivate *intelScreen,
drmI830Sarea *sarea)
intelUpdateScreenFromSAREA(intelScreenPrivate * intelScreen,
drmI830Sarea * sarea)
{
intelScreen->width = sarea->width;
intelScreen->height = sarea->height;
@@ -223,7 +321,7 @@ intelUpdateScreenFromSAREA(intelScreenPrivate *intelScreen,
intelScreen->back.pitch = sarea->pitch * intelScreen->cpp;
intelScreen->back.handle = sarea->back_handle;
intelScreen->back.size = sarea->back_size;
intelScreen->depth.offset = sarea->depth_offset;
intelScreen->depth.pitch = sarea->pitch * intelScreen->cpp;
intelScreen->depth.handle = sarea->depth_handle;
@@ -248,58 +346,87 @@ intelUpdateScreenFromSAREA(intelScreenPrivate *intelScreen,
}
static GLboolean intelInitDriver(__DRIscreenPrivate *sPriv)
static GLboolean
intelInitDriver(__DRIscreenPrivate * sPriv)
{
intelScreenPrivate *intelScreen;
I830DRIPtr gDRIPriv = (I830DRIPtr)sPriv->pDevPriv;
I830DRIPtr gDRIPriv = (I830DRIPtr) sPriv->pDevPriv;
drmI830Sarea *sarea;
PFNGLXSCRENABLEEXTENSIONPROC glx_enable_extension =
(PFNGLXSCRENABLEEXTENSIONPROC) (*dri_interface->getProcAddress("glxEnableExtension"));
void * const psc = sPriv->psc->screenConfigs;
(PFNGLXSCRENABLEEXTENSIONPROC) (*dri_interface->
getProcAddress("glxEnableExtension"));
void *const psc = sPriv->psc->screenConfigs;
if (sPriv->devPrivSize != sizeof(I830DRIRec)) {
fprintf(stderr,"\nERROR! sizeof(I830DRIRec) does not match passed size from device driver\n");
fprintf(stderr,
"\nERROR! sizeof(I830DRIRec) does not match passed size from device driver\n");
return GL_FALSE;
}
/* Allocate the private area */
intelScreen = (intelScreenPrivate *)CALLOC(sizeof(intelScreenPrivate));
intelScreen = (intelScreenPrivate *) CALLOC(sizeof(intelScreenPrivate));
if (!intelScreen) {
fprintf(stderr,"\nERROR! Allocating private area failed\n");
fprintf(stderr, "\nERROR! Allocating private area failed\n");
return GL_FALSE;
}
/* parse information in __driConfigOptions */
driParseOptionInfo (&intelScreen->optionCache,
__driConfigOptions, __driNConfigOptions);
driParseOptionInfo(&intelScreen->optionCache,
__driConfigOptions, __driNConfigOptions);
intelScreen->driScrnPriv = sPriv;
sPriv->private = (void *)intelScreen;
sPriv->private = (void *) intelScreen;
intelScreen->sarea_priv_offset = gDRIPriv->sarea_priv_offset;
sarea = (drmI830Sarea *)
(((GLubyte *)sPriv->pSAREA)+intelScreen->sarea_priv_offset);
(((GLubyte *) sPriv->pSAREA) + intelScreen->sarea_priv_offset);
intelScreen->deviceID = gDRIPriv->deviceID;
intelScreen->mem = gDRIPriv->mem;
intelScreen->cpp = gDRIPriv->cpp;
switch (gDRIPriv->bitsPerPixel) {
case 15: intelScreen->fbFormat = DV_PF_555; break;
case 16: intelScreen->fbFormat = DV_PF_565; break;
case 32: intelScreen->fbFormat = DV_PF_8888; break;
case 16:
intelScreen->fbFormat = DV_PF_565;
break;
case 32:
intelScreen->fbFormat = DV_PF_8888;
break;
default:
exit(1);
break;
}
intelUpdateScreenFromSAREA(intelScreen, sarea);
if (0)
intelPrintDRIInfo(intelScreen, sPriv, gDRIPriv);
if (!intelMapScreenRegions(sPriv)) {
fprintf(stderr,"\nERROR! mapping regions\n");
fprintf(stderr, "\nERROR! mapping regions\n");
_mesa_free(intelScreen);
sPriv->private = NULL;
return GL_FALSE;
}
#if 0
/*
* FIXME: Remove this code and its references.
*/
intelScreen->tex.offset = gDRIPriv->textureOffset;
intelScreen->logTextureGranularity = gDRIPriv->logTextureGranularity;
intelScreen->tex.handle = gDRIPriv->textures;
intelScreen->tex.size = gDRIPriv->textureSize;
#else
intelScreen->tex.offset = 0;
intelScreen->logTextureGranularity = 0;
intelScreen->tex.handle = 0;
intelScreen->tex.size = 0;
#endif
intelScreen->sarea_priv_offset = gDRIPriv->sarea_priv_offset;
if (1)
intelPrintDRIInfo(intelScreen, sPriv, gDRIPriv);
intelScreen->drmMinor = sPriv->drmMinor;
/* Determine if IRQs are active? */
@@ -310,11 +437,11 @@ static GLboolean intelInitDriver(__DRIscreenPrivate *sPriv)
gp.param = I830_PARAM_IRQ_ACTIVE;
gp.value = &intelScreen->irq_active;
ret = drmCommandWriteRead( sPriv->fd, DRM_I830_GETPARAM,
&gp, sizeof(gp));
ret = drmCommandWriteRead(sPriv->fd, DRM_I830_GETPARAM,
&gp, sizeof(gp));
if (ret) {
fprintf(stderr, "drmI830GetParam: %d\n", ret);
return GL_FALSE;
fprintf(stderr, "drmI830GetParam: %d\n", ret);
return GL_FALSE;
}
}
@@ -326,125 +453,141 @@ static GLboolean intelInitDriver(__DRIscreenPrivate *sPriv)
gp.param = I830_PARAM_ALLOW_BATCHBUFFER;
gp.value = &intelScreen->allow_batchbuffer;
ret = drmCommandWriteRead( sPriv->fd, DRM_I830_GETPARAM,
&gp, sizeof(gp));
ret = drmCommandWriteRead(sPriv->fd, DRM_I830_GETPARAM,
&gp, sizeof(gp));
if (ret) {
fprintf(stderr, "drmI830GetParam: (%d) %d\n", gp.param, ret);
return GL_FALSE;
fprintf(stderr, "drmI830GetParam: (%d) %d\n", gp.param, ret);
return GL_FALSE;
}
}
if (glx_enable_extension != NULL) {
(*glx_enable_extension)( psc, "GLX_SGI_swap_control" );
(*glx_enable_extension)( psc, "GLX_SGI_video_sync" );
(*glx_enable_extension)( psc, "GLX_MESA_swap_control" );
(*glx_enable_extension)( psc, "GLX_MESA_swap_frame_usage" );
(*glx_enable_extension)( psc, "GLX_SGI_make_current_read" );
(*glx_enable_extension)( psc, "GLX_MESA_allocate_memory" );
(*glx_enable_extension)( psc, "GLX_MESA_copy_sub_buffer" );
(*glx_enable_extension) (psc, "GLX_SGI_swap_control");
(*glx_enable_extension) (psc, "GLX_SGI_video_sync");
(*glx_enable_extension) (psc, "GLX_MESA_swap_control");
(*glx_enable_extension) (psc, "GLX_MESA_swap_frame_usage");
(*glx_enable_extension) (psc, "GLX_SGI_make_current_read");
}
sPriv->psc->allocateMemory = (void *) intelAllocateMemoryMESA;
sPriv->psc->freeMemory = (void *) intelFreeMemoryMESA;
sPriv->psc->memoryOffset = (void *) intelGetMemoryOffsetMESA;
intelScreen->regionPool = driDRMPoolInit(sPriv->fd);
if (!intelScreen->regionPool)
return GL_FALSE;
intelScreen->staticPool = driDRMStaticPoolInit(sPriv->fd);
if (!intelScreen->staticPool)
return GL_FALSE;
intelScreen->texPool = intelScreen->regionPool;
intelScreen->batchPool = driBatchPoolInit(sPriv->fd,
DRM_BO_FLAG_EXE |
DRM_BO_FLAG_MEM_TT |
DRM_BO_FLAG_MEM_LOCAL,
4096, 100, 5);
intel_recreate_static_regions(intelScreen);
return GL_TRUE;
}
static void intelDestroyScreen(__DRIscreenPrivate *sPriv)
static void
intelDestroyScreen(__DRIscreenPrivate * sPriv)
{
intelScreenPrivate *intelScreen = (intelScreenPrivate *)sPriv->private;
intelScreenPrivate *intelScreen = (intelScreenPrivate *) sPriv->private;
intelUnmapScreenRegions(intelScreen);
driPoolTakeDown(intelScreen->regionPool);
driPoolTakeDown(intelScreen->staticPool);
driPoolTakeDown(intelScreen->batchPool);
FREE(intelScreen);
sPriv->private = NULL;
}
static GLboolean intelCreateBuffer( __DRIscreenPrivate *driScrnPriv,
__DRIdrawablePrivate *driDrawPriv,
const __GLcontextModes *mesaVis,
GLboolean isPixmap )
/**
* This is called when we need to set up GL rendering to a new X window.
*/
static GLboolean
intelCreateBuffer(__DRIscreenPrivate * driScrnPriv,
__DRIdrawablePrivate * driDrawPriv,
const __GLcontextModes * mesaVis, GLboolean isPixmap)
{
intelScreenPrivate *screen = (intelScreenPrivate *) driScrnPriv->private;
if (isPixmap) {
return GL_FALSE; /* not implemented */
} else {
GLboolean swStencil = (mesaVis->stencilBits > 0 &&
mesaVis->depthBits != 24);
return GL_FALSE; /* not implemented */
}
else {
GLboolean swStencil = (mesaVis->stencilBits > 0 &&
mesaVis->depthBits != 24);
GLenum rgbFormat = (mesaVis->redBits == 5 ? GL_RGB5 : GL_RGBA8);
struct gl_framebuffer *fb = _mesa_create_framebuffer(mesaVis);
/* setup the hardware-based renderbuffers */
{
driRenderbuffer *frontRb
= driNewRenderbuffer(GL_RGBA,
screen->front.map,
screen->cpp,
screen->front.offset, screen->front.pitch,
driDrawPriv);
intelSetSpanFunctions(frontRb, mesaVis);
struct intel_renderbuffer *frontRb
= intel_create_renderbuffer(rgbFormat,
screen->width, screen->height,
screen->front.offset,
screen->front.pitch,
screen->cpp,
screen->front.map);
intel_set_span_functions(&frontRb->Base);
_mesa_add_renderbuffer(fb, BUFFER_FRONT_LEFT, &frontRb->Base);
}
if (mesaVis->doubleBufferMode) {
driRenderbuffer *backRb
= driNewRenderbuffer(GL_RGBA,
screen->back.map,
screen->cpp,
screen->back.offset, screen->back.pitch,
driDrawPriv);
intelSetSpanFunctions(backRb, mesaVis);
struct intel_renderbuffer *backRb
= intel_create_renderbuffer(rgbFormat,
screen->width, screen->height,
screen->back.offset,
screen->back.pitch,
screen->cpp,
screen->back.map);
intel_set_span_functions(&backRb->Base);
_mesa_add_renderbuffer(fb, BUFFER_BACK_LEFT, &backRb->Base);
}
if (mesaVis->depthBits == 16) {
driRenderbuffer *depthRb
= driNewRenderbuffer(GL_DEPTH_COMPONENT16,
screen->depth.map,
screen->cpp,
screen->depth.offset, screen->depth.pitch,
driDrawPriv);
intelSetSpanFunctions(depthRb, mesaVis);
_mesa_add_renderbuffer(fb, BUFFER_DEPTH, &depthRb->Base);
if (mesaVis->depthBits == 24 && mesaVis->stencilBits == 8) {
/* combined depth/stencil buffer */
struct intel_renderbuffer *depthStencilRb
= intel_create_renderbuffer(GL_DEPTH24_STENCIL8_EXT,
screen->width, screen->height,
screen->depth.offset,
screen->depth.pitch,
screen->cpp, /* 4! */
screen->depth.map);
intel_set_span_functions(&depthStencilRb->Base);
/* note: bind RB to two attachment points */
_mesa_add_renderbuffer(fb, BUFFER_DEPTH, &depthStencilRb->Base);
_mesa_add_renderbuffer(fb, BUFFER_STENCIL, &depthStencilRb->Base);
}
else if (mesaVis->depthBits == 24) {
driRenderbuffer *depthRb
= driNewRenderbuffer(GL_DEPTH_COMPONENT24,
screen->depth.map,
screen->cpp,
screen->depth.offset, screen->depth.pitch,
driDrawPriv);
intelSetSpanFunctions(depthRb, mesaVis);
else if (mesaVis->depthBits == 16) {
/* just 16-bit depth buffer, no hw stencil */
struct intel_renderbuffer *depthRb
= intel_create_renderbuffer(GL_DEPTH_COMPONENT16,
screen->width, screen->height,
screen->depth.offset,
screen->depth.pitch,
screen->cpp, /* 2! */
screen->depth.map);
intel_set_span_functions(&depthRb->Base);
_mesa_add_renderbuffer(fb, BUFFER_DEPTH, &depthRb->Base);
}
if (mesaVis->stencilBits > 0 && !swStencil) {
driRenderbuffer *stencilRb
= driNewRenderbuffer(GL_STENCIL_INDEX8_EXT,
screen->depth.map,
screen->cpp,
screen->depth.offset, screen->depth.pitch,
driDrawPriv);
intelSetSpanFunctions(stencilRb, mesaVis);
_mesa_add_renderbuffer(fb, BUFFER_STENCIL, &stencilRb->Base);
}
_mesa_add_soft_renderbuffers(fb,
GL_FALSE, /* color */
GL_FALSE, /* depth */
swStencil,
mesaVis->accumRedBits > 0,
GL_FALSE, /* alpha */
GL_FALSE /* aux */);
/* now add any/all software-based renderbuffers we may need */
_mesa_add_soft_renderbuffers(fb, GL_FALSE, /* never sw color */
GL_FALSE, /* never sw depth */
swStencil, mesaVis->accumRedBits > 0, GL_FALSE, /* never sw alpha */
GL_FALSE /* never sw aux */ );
driDrawPriv->driverPrivate = (void *) fb;
return (driDrawPriv->driverPrivate != NULL);
}
}
static void intelDestroyBuffer(__DRIdrawablePrivate *driDrawPriv)
static void
intelDestroyBuffer(__DRIdrawablePrivate * driDrawPriv)
{
_mesa_destroy_framebuffer((GLframebuffer *) (driDrawPriv->driverPrivate));
}
@@ -454,13 +597,13 @@ static void intelDestroyBuffer(__DRIdrawablePrivate *driDrawPriv)
* Get information about previous buffer swaps.
*/
static int
intelGetSwapInfo( __DRIdrawablePrivate *dPriv, __DRIswapInfo * sInfo )
intelGetSwapInfo(__DRIdrawablePrivate * dPriv, __DRIswapInfo * sInfo)
{
intelContextPtr intel;
struct intel_context *intel;
if ( (dPriv == NULL) || (dPriv->driContextPriv == NULL)
|| (dPriv->driContextPriv->driverPrivate == NULL)
|| (sInfo == NULL) ) {
if ((dPriv == NULL) || (dPriv->driContextPriv == NULL)
|| (dPriv->driContextPriv->driverPrivate == NULL)
|| (sInfo == NULL)) {
return -1;
}
@@ -470,8 +613,8 @@ intelGetSwapInfo( __DRIdrawablePrivate *dPriv, __DRIswapInfo * sInfo )
sInfo->swap_missed_count = intel->swap_missed_count;
sInfo->swap_missed_usage = (sInfo->swap_missed_count != 0)
? driCalculateSwapUsage( dPriv, 0, intel->swap_missed_ust )
: 0.0;
? driCalculateSwapUsage(dPriv, 0, intel->swap_missed_ust)
: 0.0;
return 0;
}
@@ -481,39 +624,40 @@ intelGetSwapInfo( __DRIdrawablePrivate *dPriv, __DRIswapInfo * sInfo )
* init-designated function to register chipids and createcontext
* functions.
*/
extern GLboolean i830CreateContext( const __GLcontextModes *mesaVis,
__DRIcontextPrivate *driContextPriv,
void *sharedContextPrivate);
extern GLboolean i830CreateContext(const __GLcontextModes * mesaVis,
__DRIcontextPrivate * driContextPriv,
void *sharedContextPrivate);
extern GLboolean i915CreateContext( const __GLcontextModes *mesaVis,
__DRIcontextPrivate *driContextPriv,
void *sharedContextPrivate);
extern GLboolean i915CreateContext(const __GLcontextModes * mesaVis,
__DRIcontextPrivate * driContextPriv,
void *sharedContextPrivate);
static GLboolean intelCreateContext( const __GLcontextModes *mesaVis,
__DRIcontextPrivate *driContextPriv,
void *sharedContextPrivate)
static GLboolean
intelCreateContext(const __GLcontextModes * mesaVis,
__DRIcontextPrivate * driContextPriv,
void *sharedContextPrivate)
{
__DRIscreenPrivate *sPriv = driContextPriv->driScreenPriv;
intelScreenPrivate *intelScreen = (intelScreenPrivate *)sPriv->private;
intelScreenPrivate *intelScreen = (intelScreenPrivate *) sPriv->private;
switch (intelScreen->deviceID) {
/* Don't deal with i830 until texture work complete:
*/
case PCI_CHIP_845_G:
case PCI_CHIP_I830_M:
case PCI_CHIP_I855_GM:
case PCI_CHIP_I865_G:
return i830CreateContext( mesaVis, driContextPriv,
sharedContextPrivate );
return i830CreateContext(mesaVis, driContextPriv, sharedContextPrivate);
case PCI_CHIP_I915_G:
case PCI_CHIP_I915_GM:
case PCI_CHIP_I945_G:
case PCI_CHIP_I945_GM:
return i915CreateContext( mesaVis, driContextPriv,
sharedContextPrivate );
return i915CreateContext(mesaVis, driContextPriv, sharedContextPrivate);
default:
fprintf(stderr, "Unrecognized deviceID %x\n", intelScreen->deviceID);
return GL_FALSE;
@@ -522,30 +666,30 @@ static GLboolean intelCreateContext( const __GLcontextModes *mesaVis,
static const struct __DriverAPIRec intelAPI = {
.InitDriver = intelInitDriver,
.DestroyScreen = intelDestroyScreen,
.CreateContext = intelCreateContext,
.DestroyContext = intelDestroyContext,
.CreateBuffer = intelCreateBuffer,
.DestroyBuffer = intelDestroyBuffer,
.SwapBuffers = intelSwapBuffers,
.MakeCurrent = intelMakeCurrent,
.UnbindContext = intelUnbindContext,
.GetSwapInfo = intelGetSwapInfo,
.GetMSC = driGetMSC32,
.WaitForMSC = driWaitForMSC32,
.WaitForSBC = NULL,
.SwapBuffersMSC = NULL,
.CopySubBuffer = intelCopySubBuffer
.InitDriver = intelInitDriver,
.DestroyScreen = intelDestroyScreen,
.CreateContext = intelCreateContext,
.DestroyContext = intelDestroyContext,
.CreateBuffer = intelCreateBuffer,
.DestroyBuffer = intelDestroyBuffer,
.SwapBuffers = intelSwapBuffers,
.MakeCurrent = intelMakeCurrent,
.UnbindContext = intelUnbindContext,
.GetSwapInfo = intelGetSwapInfo,
.GetMSC = driGetMSC32,
.WaitForMSC = driWaitForMSC32,
.WaitForSBC = NULL,
.SwapBuffersMSC = NULL,
.CopySubBuffer = intelCopySubBuffer
};
static __GLcontextModes *
intelFillInModes( unsigned pixel_bits, unsigned depth_bits,
unsigned stencil_bits, GLboolean have_back_buffer )
intelFillInModes(unsigned pixel_bits, unsigned depth_bits,
unsigned stencil_bits, GLboolean have_back_buffer)
{
__GLcontextModes * modes;
__GLcontextModes * m;
__GLcontextModes *modes;
__GLcontextModes *m;
unsigned num_modes;
unsigned depth_buffer_factor;
unsigned back_buffer_factor;
@@ -576,43 +720,45 @@ intelFillInModes( unsigned pixel_bits, unsigned depth_bits,
stencil_bits_array[2] = (stencil_bits == 0) ? 8 : stencil_bits;
depth_buffer_factor = ((depth_bits != 0) || (stencil_bits != 0)) ? 3 : 1;
back_buffer_factor = (have_back_buffer) ? 3 : 1;
back_buffer_factor = (have_back_buffer) ? 3 : 1;
num_modes = depth_buffer_factor * back_buffer_factor * 4;
if ( pixel_bits == 16 ) {
fb_format = GL_RGB;
fb_type = GL_UNSIGNED_SHORT_5_6_5;
}
else {
fb_format = GL_BGRA;
fb_type = GL_UNSIGNED_INT_8_8_8_8_REV;
}
modes = (*dri_interface->createContextModes)( num_modes, sizeof( __GLcontextModes ) );
m = modes;
if ( ! driFillInModes( & m, fb_format, fb_type,
depth_bits_array, stencil_bits_array, depth_buffer_factor,
back_buffer_modes, back_buffer_factor,
GLX_TRUE_COLOR ) ) {
fprintf( stderr, "[%s:%u] Error creating FBConfig!\n",
__func__, __LINE__ );
return NULL;
if (pixel_bits == 16) {
fb_format = GL_RGB;
fb_type = GL_UNSIGNED_SHORT_5_6_5;
}
if ( ! driFillInModes( & m, fb_format, fb_type,
depth_bits_array, stencil_bits_array, depth_buffer_factor,
back_buffer_modes, back_buffer_factor,
GLX_DIRECT_COLOR ) ) {
fprintf( stderr, "[%s:%u] Error creating FBConfig!\n",
__func__, __LINE__ );
return NULL;
else {
fb_format = GL_BGRA;
fb_type = GL_UNSIGNED_INT_8_8_8_8_REV;
}
modes =
(*dri_interface->createContextModes) (num_modes,
sizeof(__GLcontextModes));
m = modes;
if (!driFillInModes(&m, fb_format, fb_type,
depth_bits_array, stencil_bits_array,
depth_buffer_factor, back_buffer_modes,
back_buffer_factor, GLX_TRUE_COLOR)) {
fprintf(stderr, "[%s:%u] Error creating FBConfig!\n", __func__,
__LINE__);
return NULL;
}
if (!driFillInModes(&m, fb_format, fb_type,
depth_bits_array, stencil_bits_array,
depth_buffer_factor, back_buffer_modes,
back_buffer_factor, GLX_DIRECT_COLOR)) {
fprintf(stderr, "[%s:%u] Error creating FBConfig!\n", __func__,
__LINE__);
return NULL;
}
/* Mark the visual as slow if there are "fake" stencil bits.
*/
for ( m = modes ; m != NULL ; m = m->next ) {
if ( (m->stencilBits != 0) && (m->stencilBits != stencil_bits) ) {
m->visualRating = GLX_SLOW_CONFIG;
for (m = modes; m != NULL; m = m->next) {
if ((m->stencilBits != 0) && (m->stencilBits != stencil_bits)) {
m->visualRating = GLX_SLOW_CONFIG;
}
}
@@ -630,43 +776,42 @@ intelFillInModes( unsigned pixel_bits, unsigned depth_bits,
* \return A pointer to a \c __DRIscreenPrivate on success, or \c NULL on
* failure.
*/
PUBLIC
void * __driCreateNewScreen_20050727( __DRInativeDisplay *dpy, int scrn, __DRIscreen *psc,
const __GLcontextModes * modes,
const __DRIversion * ddx_version,
const __DRIversion * dri_version,
const __DRIversion * drm_version,
const __DRIframebuffer * frame_buffer,
drmAddress pSAREA, int fd,
int internal_api_version,
const __DRIinterfaceMethods * interface,
__GLcontextModes ** driver_modes )
PUBLIC void *
__driCreateNewScreen_20050727(__DRInativeDisplay * dpy, int scrn,
__DRIscreen * psc,
const __GLcontextModes * modes,
const __DRIversion * ddx_version,
const __DRIversion * dri_version,
const __DRIversion * drm_version,
const __DRIframebuffer * frame_buffer,
drmAddress pSAREA, int fd,
int internal_api_version,
const __DRIinterfaceMethods * interface,
__GLcontextModes ** driver_modes)
{
__DRIscreenPrivate *psp;
static const __DRIversion ddx_expected = { 1, 5, 0 };
static const __DRIversion dri_expected = { 4, 0, 0 };
static const __DRIversion drm_expected = { 1, 4, 0 };
static const __DRIversion drm_expected = { 1, 6, 0 };
dri_interface = interface;
if ( ! driCheckDriDdxDrmVersions2( "i915",
dri_version, & dri_expected,
ddx_version, & ddx_expected,
drm_version, & drm_expected ) ) {
if (!driCheckDriDdxDrmVersions2("i915",
dri_version, &dri_expected,
ddx_version, &ddx_expected,
drm_version, &drm_expected)) {
return NULL;
}
psp = __driUtilCreateNewScreen(dpy, scrn, psc, NULL,
ddx_version, dri_version, drm_version,
frame_buffer, pSAREA, fd,
internal_api_version, &intelAPI);
if ( psp != NULL ) {
ddx_version, dri_version, drm_version,
frame_buffer, pSAREA, fd,
internal_api_version, &intelAPI);
if (psp != NULL) {
I830DRIPtr dri_priv = (I830DRIPtr) psp->pDevPriv;
*driver_modes = intelFillInModes( dri_priv->cpp * 8,
(dri_priv->cpp == 2) ? 16 : 24,
(dri_priv->cpp == 2) ? 0 : 8,
1 );
*driver_modes = intelFillInModes(dri_priv->cpp * 8,
(dri_priv->cpp == 2) ? 16 : 24,
(dri_priv->cpp == 2) ? 0 : 8, 1);
/* Calling driInitExtensions here, with a NULL context pointer, does not actually
* enable the extensions. It just makes sure that all the dispatch offsets for all
@@ -676,8 +821,24 @@ void * __driCreateNewScreen_20050727( __DRInativeDisplay *dpy, int scrn, __DRIsc
*
* Hello chicken. Hello egg. How are you two today?
*/
driInitExtensions( NULL, card_extensions, GL_FALSE );
driInitExtensions(NULL, card_extensions, GL_FALSE);
}
return (void *) psp;
}
struct intel_context *intelScreenContext(intelScreenPrivate *intelScreen)
{
/*
* This should probably change to have the screen allocate a dummy
* context at screen creation. For now just use the current context.
*/
GET_CURRENT_CONTEXT(ctx);
if (ctx == NULL) {
_mesa_problem(NULL, "No current context in intelScreenContext\n");
return NULL;
}
return intel_context(ctx);
}


@@ -29,39 +29,48 @@
#define _INTEL_INIT_H_
#include <sys/time.h>
#include "xmlconfig.h"
#include "dri_util.h"
#include "intel_rotate.h"
#include "i830_common.h"
#include "xmlconfig.h"
#include "dri_bufpool.h"
/* This roughly corresponds to a gl_renderbuffer (Mesa 6.4) */
typedef struct {
/* XXX: change name or eliminate to avoid conflict with "struct
* intel_region"!!!
*/
typedef struct
{
drm_handle_t handle;
drmSize size; /* region size in bytes */
char *map; /* memory map */
int offset; /* from start of video mem, in bytes */
int pitch; /* row stride, in bytes */
drmSize size; /* region size in bytes */
char *map; /* memory map */
int offset; /* from start of video mem, in bytes */
int pitch; /* row stride, in bytes */
} intelRegion;
typedef struct
typedef struct
{
intelRegion front;
intelRegion back;
intelRegion rotated;
intelRegion depth;
intelRegion tex;
struct intel_region *front_region;
struct intel_region *back_region;
struct intel_region *depth_region;
struct intel_region *rotated_region;
int deviceID;
int width;
int height;
int mem; /* unused */
int cpp; /* for front and back buffers */
int fbFormat;
int mem; /* unused */
int cpp; /* for front and back buffers */
/* int bitsPerPixel; */
int fbFormat; /* XXX FBO: this is obsolete - remove after i830 updates */
int logTextureGranularity;
__DRIscreenPrivate *driScrnPriv;
unsigned int sarea_priv_offset;
@@ -72,41 +81,51 @@ typedef struct
struct matrix23 rotMatrix;
int current_rotation; /* 0, 90, 180 or 270 */
int current_rotation; /* 0, 90, 180 or 270 */
int rotatedWidth, rotatedHeight;
/**
* Configuration cache with default values for all contexts
*/
driOptionCache optionCache;
struct _DriBufferPool *batchPool;
struct _DriBufferPool *texPool;
struct _DriBufferPool *regionPool;
struct _DriBufferPool *staticPool;
} intelScreenPrivate;
extern GLboolean
intelMapScreenRegions(__DRIscreenPrivate *sPriv);
extern GLboolean intelMapScreenRegions(__DRIscreenPrivate * sPriv);
extern void intelUnmapScreenRegions(intelScreenPrivate * intelScreen);
extern void
intelUnmapScreenRegions(intelScreenPrivate *intelScreen);
intelUpdateScreenFromSAREA(intelScreenPrivate * intelScreen,
drmI830Sarea * sarea);
extern void
intelUpdateScreenFromSAREA(intelScreenPrivate *intelScreen,
drmI830Sarea *sarea);
extern void intelDestroyContext(__DRIcontextPrivate * driContextPriv);
extern void
intelDestroyContext(__DRIcontextPrivate *driContextPriv);
extern GLboolean intelUnbindContext(__DRIcontextPrivate * driContextPriv);
extern GLboolean
intelUnbindContext(__DRIcontextPrivate *driContextPriv);
intelMakeCurrent(__DRIcontextPrivate * driContextPriv,
__DRIdrawablePrivate * driDrawPriv,
__DRIdrawablePrivate * driReadPriv);
extern GLboolean
intelMakeCurrent(__DRIcontextPrivate *driContextPriv,
__DRIdrawablePrivate *driDrawPriv,
__DRIdrawablePrivate *driReadPriv);
extern void intelSwapBuffers(__DRIdrawablePrivate * dPriv);
extern void
intelSwapBuffers(__DRIdrawablePrivate *dPriv);
intelCopySubBuffer(__DRIdrawablePrivate * dPriv, int x, int y, int w, int h);
extern struct _DriBufferPool *driBatchPoolInit(int fd, unsigned flags,
unsigned long bufSize,
unsigned numBufs,
unsigned checkDelayed);
extern struct intel_context *intelScreenContext(intelScreenPrivate *intelScreen);
extern void
intelCopySubBuffer( __DRIdrawablePrivate *dPriv, int x, int y, int w, int h );
intelUpdateScreenRotation(__DRIscreenPrivate * sPriv, drmI830Sarea * sarea);
#endif


@@ -30,229 +30,380 @@
#include "mtypes.h"
#include "colormac.h"
#include "intel_fbo.h"
#include "intel_screen.h"
#include "intel_span.h"
#include "intel_regions.h"
#include "intel_ioctl.h"
#include "intel_tex.h"
#include "swrast/swrast.h"
/*
break intelWriteRGBASpan_ARGB8888
*/
#undef DBG
#define DBG 0
#define LOCAL_VARS \
intelContextPtr intel = INTEL_CONTEXT(ctx); \
__DRIdrawablePrivate *dPriv = intel->driDrawable; \
driRenderbuffer *drb = (driRenderbuffer *) rb; \
GLuint pitch = drb->pitch; \
GLuint height = dPriv->h; \
char *buf = (char *) drb->Base.Data + \
dPriv->x * drb->cpp + \
dPriv->y * pitch; \
GLushort p; \
(void) buf; (void) p
#define LOCAL_VARS \
struct intel_context *intel = intel_context(ctx); \
struct intel_renderbuffer *irb = intel_renderbuffer(rb); \
const GLint yScale = irb->RenderToTexture ? 1 : -1; \
const GLint yBias = irb->RenderToTexture ? 0 : irb->Base.Height - 1; \
GLubyte *buf = (GLubyte *) irb->pfMap \
+ (intel->drawY * irb->pfPitch + intel->drawX) * irb->region->cpp;\
GLuint p; \
assert(irb->pfMap);\
(void) p;
#define LOCAL_DEPTH_VARS \
intelContextPtr intel = INTEL_CONTEXT(ctx); \
__DRIdrawablePrivate *dPriv = intel->driDrawable; \
driRenderbuffer *drb = (driRenderbuffer *) rb; \
GLuint pitch = drb->pitch; \
GLuint height = dPriv->h; \
char *buf = (char *) drb->Base.Data + \
dPriv->x * drb->cpp + \
dPriv->y * pitch
/* XXX FBO: this is identical to the macro in spantmp2.h except we get
* the cliprect info from the context, not the driDrawable.
* Move this into spantmp2.h someday.
*/
#define HW_CLIPLOOP() \
do { \
int _nc = intel->numClipRects; \
while ( _nc-- ) { \
int minx = intel->pClipRects[_nc].x1 - intel->drawX; \
int miny = intel->pClipRects[_nc].y1 - intel->drawY; \
int maxx = intel->pClipRects[_nc].x2 - intel->drawX; \
int maxy = intel->pClipRects[_nc].y2 - intel->drawY;
#define LOCAL_STENCIL_VARS LOCAL_DEPTH_VARS
#define INIT_MONO_PIXEL(p,color)\
p = INTEL_PACKCOLOR565(color[0],color[1],color[2])
#define Y_FLIP(_y) (height - _y - 1)
#define Y_FLIP(_y) ((_y) * yScale + yBias)
#define HW_LOCK()
#define HW_UNLOCK()
/* 16 bit, 565 rgb color spanline and pixel functions
/* 16 bit, RGB565 color spanline and pixel functions
*/
#define WRITE_RGBA( _x, _y, r, g, b, a ) \
*(GLushort *)(buf + _x*2 + _y*pitch) = ( (((int)r & 0xf8) << 8) | \
(((int)g & 0xfc) << 3) | \
(((int)b & 0xf8) >> 3))
#define WRITE_PIXEL( _x, _y, p ) \
*(GLushort *)(buf + _x*2 + _y*pitch) = p
#define SPANTMP_PIXEL_FMT GL_RGB
#define SPANTMP_PIXEL_TYPE GL_UNSIGNED_SHORT_5_6_5
#define READ_RGBA( rgba, _x, _y ) \
do { \
GLushort p = *(GLushort *)(buf + _x*2 + _y*pitch); \
rgba[0] = (((p >> 11) & 0x1f) * 255) / 31; \
rgba[1] = (((p >> 5) & 0x3f) * 255) / 63; \
rgba[2] = (((p >> 0) & 0x1f) * 255) / 31; \
rgba[3] = 255; \
} while(0)
#define TAG(x) intel##x##_RGB565
#define TAG2(x,y) intel##x##_RGB565##y
#define GET_PTR(X,Y) (buf + ((Y) * irb->pfPitch + (X)) * 2)
#include "spantmp2.h"
#define TAG(x) intel##x##_565
#include "spantmp.h"
/* 15 bit, 555 rgb color spanline and pixel functions
/* 32 bit, ARGB8888 color spanline and pixel functions
*/
#define WRITE_RGBA( _x, _y, r, g, b, a ) \
*(GLushort *)(buf + _x*2 + _y*pitch) = (((r & 0xf8) << 7) | \
((g & 0xf8) << 3) | \
((b & 0xf8) >> 3))
#define SPANTMP_PIXEL_FMT GL_BGRA
#define SPANTMP_PIXEL_TYPE GL_UNSIGNED_INT_8_8_8_8_REV
#define WRITE_PIXEL( _x, _y, p ) \
*(GLushort *)(buf + _x*2 + _y*pitch) = p
#define TAG(x) intel##x##_ARGB8888
#define TAG2(x,y) intel##x##_ARGB8888##y
#define GET_PTR(X,Y) (buf + ((Y) * irb->pfPitch + (X)) * 4)
#include "spantmp2.h"
#define READ_RGBA( rgba, _x, _y ) \
do { \
GLushort p = *(GLushort *)(buf + _x*2 + _y*pitch); \
rgba[0] = (p >> 7) & 0xf8; \
rgba[1] = (p >> 3) & 0xf8; \
rgba[2] = (p << 3) & 0xf8; \
rgba[3] = 255; \
} while(0)
#define TAG(x) intel##x##_555
#include "spantmp.h"
#define LOCAL_DEPTH_VARS \
struct intel_context *intel = intel_context(ctx); \
struct intel_renderbuffer *irb = intel_renderbuffer(rb); \
const GLuint pitch = irb->pfPitch/***XXX region->pitch*/; /* in pixels */ \
const GLint yScale = irb->RenderToTexture ? 1 : -1; \
const GLint yBias = irb->RenderToTexture ? 0 : irb->Base.Height - 1; \
char *buf = (char *) irb->pfMap/*XXX use region->map*/ + \
(intel->drawY * pitch + intel->drawX) * irb->region->cpp;
/* 16 bit depthbuffer functions.
*/
#define LOCAL_STENCIL_VARS LOCAL_DEPTH_VARS
/**
** 16-bit depthbuffer functions.
**/
#define WRITE_DEPTH( _x, _y, d ) \
*(GLushort *)(buf + (_x)*2 + (_y)*pitch) = d;
((GLushort *)buf)[(_x) + (_y) * pitch] = d;
#define READ_DEPTH( d, _x, _y ) \
d = *(GLushort *)(buf + (_x)*2 + (_y)*pitch);
d = ((GLushort *)buf)[(_x) + (_y) * pitch];
#define TAG(x) intel##x##_z16
#include "depthtmp.h"
#undef LOCAL_VARS
#define LOCAL_VARS \
intelContextPtr intel = INTEL_CONTEXT(ctx); \
__DRIdrawablePrivate *dPriv = intel->driDrawable; \
driRenderbuffer *drb = (driRenderbuffer *) rb; \
GLuint pitch = drb->pitch; \
GLuint height = dPriv->h; \
char *buf = (char *)drb->Base.Data + \
dPriv->x * drb->cpp + \
dPriv->y * pitch; \
GLuint p; \
(void) buf; (void) p
#undef INIT_MONO_PIXEL
#define INIT_MONO_PIXEL(p,color)\
p = INTEL_PACKCOLOR8888(color[0],color[1],color[2],color[3])
/* 32 bit, 8888 argb color spanline and pixel functions
*/
#define WRITE_RGBA(_x, _y, r, g, b, a) \
*(GLuint *)(buf + _x*4 + _y*pitch) = ((r << 16) | \
(g << 8) | \
(b << 0) | \
(a << 24) )
#define WRITE_PIXEL(_x, _y, p) \
*(GLuint *)(buf + _x*4 + _y*pitch) = p
#define READ_RGBA(rgba, _x, _y) \
do { \
GLuint p = *(GLuint *)(buf + _x*4 + _y*pitch); \
rgba[0] = (p >> 16) & 0xff; \
rgba[1] = (p >> 8) & 0xff; \
rgba[2] = (p >> 0) & 0xff; \
rgba[3] = (p >> 24) & 0xff; \
} while (0)
#define TAG(x) intel##x##_8888
#include "spantmp.h"
/* 24/8 bit interleaved depth/stencil functions
*/
#define WRITE_DEPTH( _x, _y, d ) { \
GLuint tmp = *(GLuint *)(buf + (_x)*4 + (_y)*pitch); \
tmp &= 0xff000000; \
tmp |= (d) & 0xffffff; \
*(GLuint *)(buf + (_x)*4 + (_y)*pitch) = tmp; \
/**
** 24/8-bit interleaved depth/stencil functions
** Note: we're actually reading back combined depth+stencil values.
** The wrappers in main/depthstencil.c are used to extract the depth
** and stencil values.
**/
/* Change ZZZS -> SZZZ */
#define WRITE_DEPTH( _x, _y, d ) { \
GLuint tmp = ((d) >> 8) | ((d) << 24); \
((GLuint *)buf)[(_x) + (_y) * pitch] = tmp; \
}
#define READ_DEPTH( d, _x, _y ) \
d = *(GLuint *)(buf + (_x)*4 + (_y)*pitch) & 0xffffff;
/* Change SZZZ -> ZZZS */
#define READ_DEPTH( d, _x, _y ) { \
GLuint tmp = ((GLuint *)buf)[(_x) + (_y) * pitch]; \
d = (tmp << 8) | (tmp >> 24); \
}
#define TAG(x) intel##x##_z24_s8
#include "depthtmp.h"
#define WRITE_STENCIL( _x, _y, d ) { \
GLuint tmp = *(GLuint *)(buf + (_x)*4 + (_y)*pitch); \
tmp &= 0xffffff; \
tmp |= ((d)<<24); \
*(GLuint *)(buf + (_x)*4 + (_y)*pitch) = tmp; \
/**
** 8-bit stencil function (XXX FBO: This is obsolete)
**/
#define WRITE_STENCIL( _x, _y, d ) { \
GLuint tmp = ((GLuint *)buf)[(_x) + (_y) * pitch]; \
tmp &= 0xffffff; \
tmp |= ((d) << 24); \
((GLuint *) buf)[(_x) + (_y) * pitch] = tmp; \
}
#define READ_STENCIL( d, _x, _y ) \
d = *(GLuint *)(buf + (_x)*4 + (_y)*pitch) >> 24;
#define READ_STENCIL( d, _x, _y ) \
d = ((GLuint *)buf)[(_x) + (_y) * pitch] >> 24;
#define TAG(x) intel##x##_z24_s8
#include "stenciltmp.h"
/* Move locking out to get reasonable span performance.
/**
* Map or unmap all the renderbuffers which we may need during
* software rendering.
* XXX in the future, we could probably convey extra information to
* reduce the number of mappings needed. I.e. if doing a glReadPixels
* from the depth buffer, we really only need one mapping.
*
* XXX Rewrite this function someday.
* We can probably just loop over all the renderbuffer attachments,
* map/unmap all of them, and not worry about the _ColorDrawBuffers
* _ColorReadBuffer, _DepthBuffer or _StencilBuffer fields.
*/
void intelSpanRenderStart( GLcontext *ctx )
static void
intel_map_unmap_buffers(struct intel_context *intel, GLboolean map)
{
intelContextPtr intel = INTEL_CONTEXT(ctx);
GLcontext *ctx = &intel->ctx;
GLuint i, j;
struct intel_renderbuffer *irb;
intelFlush(&intel->ctx);
/* color draw buffers */
for (i = 0; i < ctx->Const.MaxDrawBuffers; i++) {
for (j = 0; j < ctx->DrawBuffer->_NumColorDrawBuffers[i]; j++) {
struct gl_renderbuffer *rb =
ctx->DrawBuffer->_ColorDrawBuffers[i][j];
irb = intel_renderbuffer(rb);
if (irb) {
/* this is a user-created intel_renderbuffer */
if (irb->region) {
if (map)
intel_region_map(intel->intelScreen, irb->region);
else
intel_region_unmap(intel->intelScreen, irb->region);
}
irb->pfMap = irb->region->map;
irb->pfPitch = irb->region->pitch;
}
}
}
/* check for render to textures */
for (i = 0; i < BUFFER_COUNT; i++) {
struct gl_renderbuffer_attachment *att =
ctx->DrawBuffer->Attachment + i;
struct gl_texture_object *tex = att->Texture;
if (tex) {
/* render to texture */
ASSERT(att->Renderbuffer);
if (map) {
struct gl_texture_image *texImg;
texImg = tex->Image[att->CubeMapFace][att->TextureLevel];
intel_tex_map_images(intel, intel_texture_object(tex));
}
else {
intel_tex_unmap_images(intel, intel_texture_object(tex));
}
}
}
/* color read buffers */
irb = intel_renderbuffer(ctx->ReadBuffer->_ColorReadBuffer);
if (irb && irb->region) {
if (map)
intel_region_map(intel->intelScreen, irb->region);
else
intel_region_unmap(intel->intelScreen, irb->region);
irb->pfMap = irb->region->map;
irb->pfPitch = irb->region->pitch;
}
/* Account for front/back color page flipping.
* The span routines use the pfMap and pfPitch fields which will
* swap the front/back region map/pitch if we're page flipped.
* Do this after mapping, above, so the map field is valid.
*/
#if 0
if (map && ctx->DrawBuffer->Name == 0) {
struct intel_renderbuffer *irbFront
= intel_get_renderbuffer(ctx->DrawBuffer, BUFFER_FRONT_LEFT);
struct intel_renderbuffer *irbBack
= intel_get_renderbuffer(ctx->DrawBuffer, BUFFER_BACK_LEFT);
if (irbBack) {
/* double buffered */
if (intel->sarea->pf_current_page == 0) {
irbFront->pfMap = irbFront->region->map;
irbFront->pfPitch = irbFront->region->pitch;
irbBack->pfMap = irbBack->region->map;
irbBack->pfPitch = irbBack->region->pitch;
}
else {
irbFront->pfMap = irbBack->region->map;
irbFront->pfPitch = irbBack->region->pitch;
irbBack->pfMap = irbFront->region->map;
irbBack->pfPitch = irbFront->region->pitch;
}
}
}
#endif
/* depth buffer (Note wrapper!) */
if (ctx->DrawBuffer->_DepthBuffer) {
irb = intel_renderbuffer(ctx->DrawBuffer->_DepthBuffer->Wrapped);
if (irb && irb->region && irb->Base.Name != 0) {
if (map) {
intel_region_map(intel->intelScreen, irb->region);
irb->pfMap = irb->region->map;
irb->pfPitch = irb->region->pitch;
}
else {
intel_region_unmap(intel->intelScreen, irb->region);
irb->pfMap = NULL;
irb->pfPitch = 0;
}
}
}
/* stencil buffer (Note wrapper!) */
if (ctx->DrawBuffer->_StencilBuffer) {
irb = intel_renderbuffer(ctx->DrawBuffer->_StencilBuffer->Wrapped);
if (irb && irb->region && irb->Base.Name != 0) {
if (map) {
intel_region_map(intel->intelScreen, irb->region);
irb->pfMap = irb->region->map;
irb->pfPitch = irb->region->pitch;
}
else {
intel_region_unmap(intel->intelScreen, irb->region);
irb->pfMap = NULL;
irb->pfPitch = 0;
}
}
}
}
/**
* Prepare for software rendering.  Map current read/draw framebuffers'
* renderbuffers and all currently bound texture objects.
*
* Old note: Moved locking out to get reasonable span performance.
*/
void
intelSpanRenderStart(GLcontext * ctx)
{
struct intel_context *intel = intel_context(ctx);
GLuint i;
intelFinish(&intel->ctx);
LOCK_HARDWARE(intel);
intelWaitForIdle(intel);
#if 0
/* Just map the framebuffer and all textures. Bufmgr code will
* take care of waiting on the necessary fences:
*/
intel_region_map(intel->intelScreen, intel->front_region);
intel_region_map(intel->intelScreen, intel->back_region);
intel_region_map(intel->intelScreen, intel->intelScreen->depth_region);
#endif
for (i = 0; i < ctx->Const.MaxTextureCoordUnits; i++) {
if (ctx->Texture.Unit[i]._ReallyEnabled) {
struct gl_texture_object *texObj = ctx->Texture.Unit[i]._Current;
intel_tex_map_images(intel, intel_texture_object(texObj));
}
}
intel_map_unmap_buffers(intel, GL_TRUE);
}
void intelSpanRenderFinish( GLcontext *ctx )
/**
* Called when done with software rendering.  Unmap the buffers we mapped in
* the above function.
*/
void
intelSpanRenderFinish(GLcontext * ctx)
{
intelContextPtr intel = INTEL_CONTEXT( ctx );
_swrast_flush( ctx );
UNLOCK_HARDWARE( intel );
struct intel_context *intel = intel_context(ctx);
GLuint i;
_swrast_flush(ctx);
/* Now unmap the framebuffer:
*/
#if 0
intel_region_unmap(intel, intel->front_region);
intel_region_unmap(intel, intel->back_region);
intel_region_unmap(intel, intel->intelScreen->depth_region);
#endif
for (i = 0; i < ctx->Const.MaxTextureCoordUnits; i++) {
if (ctx->Texture.Unit[i]._ReallyEnabled) {
struct gl_texture_object *texObj = ctx->Texture.Unit[i]._Current;
intel_tex_unmap_images(intel, intel_texture_object(texObj));
}
}
intel_map_unmap_buffers(intel, GL_FALSE);
UNLOCK_HARDWARE(intel);
}
void intelInitSpanFuncs( GLcontext *ctx )
void
intelInitSpanFuncs(GLcontext * ctx)
{
struct swrast_device_driver *swdd = _swrast_GetDeviceDriverReference(ctx);
swdd->SpanRenderStart = intelSpanRenderStart;
swdd->SpanRenderFinish = intelSpanRenderFinish;
}
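/* Illustrative note (an aside, not part of the original change): swrast
 * brackets every software fallback with the two hooks installed above, so
 * the effective sequence during a fallback is roughly:
 *
 *    intelSpanRenderStart(ctx);   -- finish GPU work, LOCK_HARDWARE, map
 *                                    bound textures and renderbuffers
 *    ... swrast reads/writes spans through the Get/Put functions ...
 *    intelSpanRenderFinish(ctx);  -- flush swrast, unmap, UNLOCK_HARDWARE
 *
 * The mapping has to happen under the hardware lock so another context
 * cannot move or swap the buffers while the CPU is touching them.
 */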
/**
* Plug in the Get/Put routines for the given driRenderbuffer.
* Plug in appropriate span read/write functions for the given renderbuffer.
* These are used for the software fallbacks.
*/
void
intelSetSpanFunctions(driRenderbuffer *drb, const GLvisual *vis)
intel_set_span_functions(struct gl_renderbuffer *rb)
{
if (drb->Base.InternalFormat == GL_RGBA) {
if (vis->redBits == 5 && vis->greenBits == 5 && vis->blueBits == 5) {
intelInitPointers_555(&drb->Base);
}
else if (vis->redBits == 5 && vis->greenBits == 6 && vis->blueBits == 5) {
intelInitPointers_565(&drb->Base);
}
else {
assert(vis->redBits == 8);
assert(vis->greenBits == 8);
assert(vis->blueBits == 8);
intelInitPointers_8888(&drb->Base);
}
if (rb->_ActualFormat == GL_RGB5) {
/* 565 RGB */
intelInitPointers_RGB565(rb);
}
else if (drb->Base.InternalFormat == GL_DEPTH_COMPONENT16) {
intelInitDepthPointers_z16(&drb->Base);
else if (rb->_ActualFormat == GL_RGBA8) {
/* 8888 RGBA */
intelInitPointers_ARGB8888(rb);
}
else if (drb->Base.InternalFormat == GL_DEPTH_COMPONENT24) {
intelInitDepthPointers_z24_s8(&drb->Base);
else if (rb->_ActualFormat == GL_DEPTH_COMPONENT16) {
intelInitDepthPointers_z16(rb);
}
else if (drb->Base.InternalFormat == GL_STENCIL_INDEX8_EXT) {
intelInitStencilPointers_z24_s8(&drb->Base);
else if (rb->_ActualFormat == GL_DEPTH_COMPONENT24 || /* XXX FBO remove */
rb->_ActualFormat == GL_DEPTH24_STENCIL8_EXT) {
intelInitDepthPointers_z24_s8(rb);
}
else if (rb->_ActualFormat == GL_STENCIL_INDEX8_EXT) { /* XXX FBO remove */
intelInitStencilPointers_z24_s8(rb);
}
else {
_mesa_problem(NULL,
"Unexpected _ActualFormat in intelSetSpanFunctions");
}
}


@@ -28,14 +28,11 @@
#ifndef _INTEL_SPAN_H
#define _INTEL_SPAN_H
#include "drirenderbuffer.h"
extern void intelInitSpanFuncs(GLcontext * ctx);
extern void intelInitSpanFuncs( GLcontext *ctx );
extern void intelSpanRenderFinish(GLcontext * ctx);
extern void intelSpanRenderStart(GLcontext * ctx);
extern void intelSpanRenderFinish( GLcontext *ctx );
extern void intelSpanRenderStart( GLcontext *ctx );
extern void
intelSetSpanFunctions(driRenderbuffer *rb, const GLvisual *vis);
extern void intel_set_span_functions(struct gl_renderbuffer *rb);
#endif


@@ -30,252 +30,334 @@
#include "context.h"
#include "macros.h"
#include "enums.h"
#include "colormac.h"
#include "dd.h"
#include "intel_screen.h"
#include "intel_context.h"
#include "intel_fbo.h"
#include "intel_regions.h"
#include "swrast/swrast.h"
int intel_translate_compare_func( GLenum func )
int
intel_translate_compare_func(GLenum func)
{
switch(func) {
case GL_NEVER:
return COMPAREFUNC_NEVER;
case GL_LESS:
return COMPAREFUNC_LESS;
case GL_LEQUAL:
return COMPAREFUNC_LEQUAL;
case GL_GREATER:
return COMPAREFUNC_GREATER;
case GL_GEQUAL:
return COMPAREFUNC_GEQUAL;
case GL_NOTEQUAL:
return COMPAREFUNC_NOTEQUAL;
case GL_EQUAL:
return COMPAREFUNC_EQUAL;
case GL_ALWAYS:
return COMPAREFUNC_ALWAYS;
switch (func) {
case GL_NEVER:
return COMPAREFUNC_NEVER;
case GL_LESS:
return COMPAREFUNC_LESS;
case GL_LEQUAL:
return COMPAREFUNC_LEQUAL;
case GL_GREATER:
return COMPAREFUNC_GREATER;
case GL_GEQUAL:
return COMPAREFUNC_GEQUAL;
case GL_NOTEQUAL:
return COMPAREFUNC_NOTEQUAL;
case GL_EQUAL:
return COMPAREFUNC_EQUAL;
case GL_ALWAYS:
return COMPAREFUNC_ALWAYS;
}
fprintf(stderr, "Unknown value in %s: %x\n", __FUNCTION__, func);
return COMPAREFUNC_ALWAYS;
}
int intel_translate_stencil_op( GLenum op )
int
intel_translate_stencil_op(GLenum op)
{
switch(op) {
case GL_KEEP:
return STENCILOP_KEEP;
case GL_ZERO:
return STENCILOP_ZERO;
case GL_REPLACE:
return STENCILOP_REPLACE;
case GL_INCR:
switch (op) {
case GL_KEEP:
return STENCILOP_KEEP;
case GL_ZERO:
return STENCILOP_ZERO;
case GL_REPLACE:
return STENCILOP_REPLACE;
case GL_INCR:
return STENCILOP_INCRSAT;
case GL_DECR:
return STENCILOP_DECRSAT;
case GL_INCR_WRAP:
return STENCILOP_INCR;
case GL_DECR_WRAP:
return STENCILOP_DECR;
case GL_INVERT:
return STENCILOP_INVERT;
default:
return STENCILOP_DECR;
case GL_INVERT:
return STENCILOP_INVERT;
default:
return STENCILOP_ZERO;
}
}
int intel_translate_blend_factor( GLenum factor )
int
intel_translate_blend_factor(GLenum factor)
{
switch(factor) {
case GL_ZERO:
return BLENDFACT_ZERO;
case GL_SRC_ALPHA:
return BLENDFACT_SRC_ALPHA;
case GL_ONE:
return BLENDFACT_ONE;
case GL_SRC_COLOR:
return BLENDFACT_SRC_COLR;
case GL_ONE_MINUS_SRC_COLOR:
return BLENDFACT_INV_SRC_COLR;
case GL_DST_COLOR:
return BLENDFACT_DST_COLR;
case GL_ONE_MINUS_DST_COLOR:
return BLENDFACT_INV_DST_COLR;
switch (factor) {
case GL_ZERO:
return BLENDFACT_ZERO;
case GL_SRC_ALPHA:
return BLENDFACT_SRC_ALPHA;
case GL_ONE:
return BLENDFACT_ONE;
case GL_SRC_COLOR:
return BLENDFACT_SRC_COLR;
case GL_ONE_MINUS_SRC_COLOR:
return BLENDFACT_INV_SRC_COLR;
case GL_DST_COLOR:
return BLENDFACT_DST_COLR;
case GL_ONE_MINUS_DST_COLOR:
return BLENDFACT_INV_DST_COLR;
case GL_ONE_MINUS_SRC_ALPHA:
return BLENDFACT_INV_SRC_ALPHA;
case GL_DST_ALPHA:
return BLENDFACT_DST_ALPHA;
return BLENDFACT_INV_SRC_ALPHA;
case GL_DST_ALPHA:
return BLENDFACT_DST_ALPHA;
case GL_ONE_MINUS_DST_ALPHA:
return BLENDFACT_INV_DST_ALPHA;
case GL_SRC_ALPHA_SATURATE:
return BLENDFACT_INV_DST_ALPHA;
case GL_SRC_ALPHA_SATURATE:
return BLENDFACT_SRC_ALPHA_SATURATE;
case GL_CONSTANT_COLOR:
return BLENDFACT_CONST_COLOR;
case GL_ONE_MINUS_CONSTANT_COLOR:
return BLENDFACT_INV_CONST_COLOR;
case GL_CONSTANT_ALPHA:
return BLENDFACT_CONST_ALPHA;
case GL_ONE_MINUS_CONSTANT_ALPHA:
return BLENDFACT_INV_CONST_ALPHA;
}
fprintf(stderr, "Unknown value in %s: %x\n", __FUNCTION__, factor);
return BLENDFACT_ZERO;
}
int intel_translate_logic_op( GLenum opcode )
int
intel_translate_logic_op(GLenum opcode)
{
switch(opcode) {
case GL_CLEAR:
return LOGICOP_CLEAR;
case GL_AND:
return LOGICOP_AND;
case GL_AND_REVERSE:
return LOGICOP_AND_RVRSE;
case GL_COPY:
return LOGICOP_COPY;
case GL_COPY_INVERTED:
return LOGICOP_COPY_INV;
case GL_AND_INVERTED:
return LOGICOP_AND_INV;
case GL_NOOP:
return LOGICOP_NOOP;
case GL_XOR:
return LOGICOP_XOR;
case GL_OR:
return LOGICOP_OR;
case GL_OR_INVERTED:
return LOGICOP_OR_INV;
case GL_NOR:
return LOGICOP_NOR;
case GL_EQUIV:
return LOGICOP_EQUIV;
case GL_INVERT:
return LOGICOP_INV;
case GL_OR_REVERSE:
return LOGICOP_OR_RVRSE;
case GL_NAND:
return LOGICOP_NAND;
case GL_SET:
return LOGICOP_SET;
switch (opcode) {
case GL_CLEAR:
return LOGICOP_CLEAR;
case GL_AND:
return LOGICOP_AND;
case GL_AND_REVERSE:
return LOGICOP_AND_RVRSE;
case GL_COPY:
return LOGICOP_COPY;
case GL_COPY_INVERTED:
return LOGICOP_COPY_INV;
case GL_AND_INVERTED:
return LOGICOP_AND_INV;
case GL_NOOP:
return LOGICOP_NOOP;
case GL_XOR:
return LOGICOP_XOR;
case GL_OR:
return LOGICOP_OR;
case GL_OR_INVERTED:
return LOGICOP_OR_INV;
case GL_NOR:
return LOGICOP_NOR;
case GL_EQUIV:
return LOGICOP_EQUIV;
case GL_INVERT:
return LOGICOP_INV;
case GL_OR_REVERSE:
return LOGICOP_OR_RVRSE;
case GL_NAND:
return LOGICOP_NAND;
case GL_SET:
return LOGICOP_SET;
default:
return LOGICOP_SET;
}
}
static void intelDrawBuffer(GLcontext *ctx, GLenum mode )
static void
intelClearColor(GLcontext * ctx, const GLfloat color[4])
{
intelContextPtr intel = INTEL_CONTEXT(ctx);
int front = 0;
if (!ctx->DrawBuffer)
return;
struct intel_context *intel = intel_context(ctx);
GLubyte clear[4];
switch ( ctx->DrawBuffer->_ColorDrawBufferMask[0] ) {
case BUFFER_BIT_FRONT_LEFT:
front = 1;
FALLBACK( intel, INTEL_FALLBACK_DRAW_BUFFER, GL_FALSE );
break;
case BUFFER_BIT_BACK_LEFT:
front = 0;
FALLBACK( intel, INTEL_FALLBACK_DRAW_BUFFER, GL_FALSE );
break;
default:
FALLBACK( intel, INTEL_FALLBACK_DRAW_BUFFER, GL_TRUE );
return;
}
CLAMPED_FLOAT_TO_UBYTE(clear[0], color[0]);
CLAMPED_FLOAT_TO_UBYTE(clear[1], color[1]);
CLAMPED_FLOAT_TO_UBYTE(clear[2], color[2]);
CLAMPED_FLOAT_TO_UBYTE(clear[3], color[3]);
if ( intel->sarea->pf_current_page == 1 )
front ^= 1;
intelSetFrontClipRects( intel );
if (front) {
intel->drawRegion = &intel->intelScreen->front;
intel->readRegion = &intel->intelScreen->front;
} else {
intel->drawRegion = &intel->intelScreen->back;
intel->readRegion = &intel->intelScreen->back;
}
intel->vtbl.set_color_region( intel, intel->drawRegion );
}
static void intelReadBuffer( GLcontext *ctx, GLenum mode )
{
/* nothing, until we implement h/w glRead/CopyPixels or CopyTexImage */
/* compute both 32 and 16-bit clear values */
intel->ClearColor8888 = INTEL_PACKCOLOR8888(clear[0], clear[1],
clear[2], clear[3]);
intel->ClearColor565 = INTEL_PACKCOLOR565(clear[0], clear[1], clear[2]);
}
static void intelClearColor(GLcontext *ctx, const GLfloat color[4])
/**
* Update the viewport transformation matrix. Depends on:
* - viewport pos/size
* - depthrange
* - window pos/size or FBO size
*/
static void
intelCalcViewport(GLcontext * ctx)
{
intelContextPtr intel = INTEL_CONTEXT(ctx);
intelScreenPrivate *screen = intel->intelScreen;
CLAMPED_FLOAT_TO_UBYTE(intel->clear_red, color[0]);
CLAMPED_FLOAT_TO_UBYTE(intel->clear_green, color[1]);
CLAMPED_FLOAT_TO_UBYTE(intel->clear_blue, color[2]);
CLAMPED_FLOAT_TO_UBYTE(intel->clear_alpha, color[3]);
intel->ClearColor = INTEL_PACKCOLOR(screen->fbFormat,
intel->clear_red,
intel->clear_green,
intel->clear_blue,
intel->clear_alpha);
}
static void intelCalcViewport( GLcontext *ctx )
{
intelContextPtr intel = INTEL_CONTEXT(ctx);
struct intel_context *intel = intel_context(ctx);
const GLfloat *v = ctx->Viewport._WindowMap.m;
const GLfloat depthScale = 1.0F / ctx->DrawBuffer->_DepthMaxF;
GLfloat *m = intel->ViewportMatrix.m;
GLint h = 0;
GLfloat yScale, yBias;
if (intel->driDrawable)
h = intel->driDrawable->h + SUBPIXEL_Y;
if (ctx->DrawBuffer->Name) {
/* User created FBO */
struct intel_renderbuffer *irb
= intel_renderbuffer(ctx->DrawBuffer->_ColorDrawBuffers[0][0]);
if (irb && !irb->RenderToTexture) {
/* y=0=top */
yScale = -1.0;
yBias = irb->Base.Height;
}
else {
/* y=0=bottom */
yScale = 1.0;
yBias = 0.0;
}
}
else {
/* window buffer, y=0=top */
yScale = -1.0;
yBias = (intel->driDrawable) ? intel->driDrawable->h : 0.0F;
}
/* See also intel_translate_vertex. SUBPIXEL adjustments can be done
* via state vars, too.
*/
m[MAT_SX] = v[MAT_SX];
m[MAT_TX] = v[MAT_TX] + SUBPIXEL_X;
m[MAT_SY] = - v[MAT_SY];
m[MAT_TY] = - v[MAT_TY] + h;
m[MAT_SZ] = v[MAT_SZ] * intel->depth_scale;
m[MAT_TZ] = v[MAT_TZ] * intel->depth_scale;
m[MAT_SX] = v[MAT_SX];
m[MAT_TX] = v[MAT_TX] + SUBPIXEL_X;
m[MAT_SY] = v[MAT_SY] * yScale;
m[MAT_TY] = v[MAT_TY] * yScale + yBias + SUBPIXEL_Y;
m[MAT_SZ] = v[MAT_SZ] * depthScale;
m[MAT_TZ] = v[MAT_TZ] * depthScale;
}
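/* Worked example (illustrative, not from the original source): for a
 * 480-pixel-tall window drawable, yScale = -1 and yBias = 480, so a pixel
 * center at GL window y = 0.5 (bottom row in GL's convention) lands near
 * hardware y = 479.5 (ignoring the small SUBPIXEL_Y bias), i.e. the last
 * row of the top-down framebuffer:
 *
 *    y_hw = y_gl * yScale + yBias + SUBPIXEL_Y
 *
 * For a user FBO whose color attachment is a texture (RenderToTexture),
 * no flip is applied (yScale = 1, yBias = 0) since the texture is
 * addressed with row 0 first.
 */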
static void intelViewport( GLcontext *ctx,
GLint x, GLint y,
GLsizei width, GLsizei height )
static void
intelViewport(GLcontext * ctx,
GLint x, GLint y, GLsizei width, GLsizei height)
{
intelCalcViewport( ctx );
intelCalcViewport(ctx);
}
static void intelDepthRange( GLcontext *ctx,
GLclampd nearval, GLclampd farval )
static void
intelDepthRange(GLcontext * ctx, GLclampd nearval, GLclampd farval)
{
intelCalcViewport( ctx );
intelCalcViewport(ctx);
}
/* Fallback to swrast for select and feedback.
*/
static void intelRenderMode( GLcontext *ctx, GLenum mode )
static void
intelRenderMode(GLcontext * ctx, GLenum mode)
{
intelContextPtr intel = INTEL_CONTEXT(ctx);
FALLBACK( intel, INTEL_FALLBACK_RENDERMODE, (mode != GL_RENDER) );
struct intel_context *intel = intel_context(ctx);
FALLBACK(intel, INTEL_FALLBACK_RENDERMODE, (mode != GL_RENDER));
}
void intelInitStateFuncs( struct dd_function_table *functions )
void
intelInitStateFuncs(struct dd_function_table *functions)
{
functions->DrawBuffer = intelDrawBuffer;
functions->ReadBuffer = intelReadBuffer;
functions->RenderMode = intelRenderMode;
functions->Viewport = intelViewport;
functions->DepthRange = intelDepthRange;
functions->ClearColor = intelClearColor;
}
void
intelInitState(GLcontext * ctx)
{
/* Mesa should do this for us:
*/
ctx->Driver.AlphaFunc(ctx, ctx->Color.AlphaFunc, ctx->Color.AlphaRef);
ctx->Driver.BlendColor(ctx, ctx->Color.BlendColor);
ctx->Driver.BlendEquationSeparate(ctx,
ctx->Color.BlendEquationRGB,
ctx->Color.BlendEquationA);
ctx->Driver.BlendFuncSeparate(ctx,
ctx->Color.BlendSrcRGB,
ctx->Color.BlendDstRGB,
ctx->Color.BlendSrcA, ctx->Color.BlendDstA);
ctx->Driver.ColorMask(ctx,
ctx->Color.ColorMask[RCOMP],
ctx->Color.ColorMask[GCOMP],
ctx->Color.ColorMask[BCOMP],
ctx->Color.ColorMask[ACOMP]);
ctx->Driver.CullFace(ctx, ctx->Polygon.CullFaceMode);
ctx->Driver.DepthFunc(ctx, ctx->Depth.Func);
ctx->Driver.DepthMask(ctx, ctx->Depth.Mask);
ctx->Driver.Enable(ctx, GL_ALPHA_TEST, ctx->Color.AlphaEnabled);
ctx->Driver.Enable(ctx, GL_BLEND, ctx->Color.BlendEnabled);
ctx->Driver.Enable(ctx, GL_COLOR_LOGIC_OP, ctx->Color.ColorLogicOpEnabled);
ctx->Driver.Enable(ctx, GL_COLOR_SUM, ctx->Fog.ColorSumEnabled);
ctx->Driver.Enable(ctx, GL_CULL_FACE, ctx->Polygon.CullFlag);
ctx->Driver.Enable(ctx, GL_DEPTH_TEST, ctx->Depth.Test);
ctx->Driver.Enable(ctx, GL_DITHER, ctx->Color.DitherFlag);
ctx->Driver.Enable(ctx, GL_FOG, ctx->Fog.Enabled);
ctx->Driver.Enable(ctx, GL_LIGHTING, ctx->Light.Enabled);
ctx->Driver.Enable(ctx, GL_LINE_SMOOTH, ctx->Line.SmoothFlag);
ctx->Driver.Enable(ctx, GL_POLYGON_STIPPLE, ctx->Polygon.StippleFlag);
ctx->Driver.Enable(ctx, GL_SCISSOR_TEST, ctx->Scissor.Enabled);
ctx->Driver.Enable(ctx, GL_STENCIL_TEST, ctx->Stencil.Enabled);
ctx->Driver.Enable(ctx, GL_TEXTURE_1D, GL_FALSE);
ctx->Driver.Enable(ctx, GL_TEXTURE_2D, GL_FALSE);
ctx->Driver.Enable(ctx, GL_TEXTURE_RECTANGLE_NV, GL_FALSE);
ctx->Driver.Enable(ctx, GL_TEXTURE_3D, GL_FALSE);
ctx->Driver.Enable(ctx, GL_TEXTURE_CUBE_MAP, GL_FALSE);
ctx->Driver.Fogfv(ctx, GL_FOG_COLOR, ctx->Fog.Color);
ctx->Driver.Fogfv(ctx, GL_FOG_MODE, 0);
ctx->Driver.Fogfv(ctx, GL_FOG_DENSITY, &ctx->Fog.Density);
ctx->Driver.Fogfv(ctx, GL_FOG_START, &ctx->Fog.Start);
ctx->Driver.Fogfv(ctx, GL_FOG_END, &ctx->Fog.End);
ctx->Driver.FrontFace(ctx, ctx->Polygon.FrontFace);
{
GLfloat f = (GLfloat) ctx->Light.Model.ColorControl;
ctx->Driver.LightModelfv(ctx, GL_LIGHT_MODEL_COLOR_CONTROL, &f);
}
ctx->Driver.LineWidth(ctx, ctx->Line.Width);
ctx->Driver.LogicOpcode(ctx, ctx->Color.LogicOp);
ctx->Driver.PointSize(ctx, ctx->Point.Size);
ctx->Driver.PolygonStipple(ctx, (const GLubyte *) ctx->PolygonStipple);
ctx->Driver.Scissor(ctx, ctx->Scissor.X, ctx->Scissor.Y,
ctx->Scissor.Width, ctx->Scissor.Height);
ctx->Driver.ShadeModel(ctx, ctx->Light.ShadeModel);
ctx->Driver.StencilFuncSeparate(ctx, GL_FRONT,
ctx->Stencil.Function[0],
ctx->Stencil.Ref[0],
ctx->Stencil.ValueMask[0]);
ctx->Driver.StencilFuncSeparate(ctx, GL_BACK,
ctx->Stencil.Function[1],
ctx->Stencil.Ref[1],
ctx->Stencil.ValueMask[1]);
ctx->Driver.StencilMaskSeparate(ctx, GL_FRONT, ctx->Stencil.WriteMask[0]);
ctx->Driver.StencilMaskSeparate(ctx, GL_BACK, ctx->Stencil.WriteMask[1]);
ctx->Driver.StencilOpSeparate(ctx, GL_FRONT,
ctx->Stencil.FailFunc[0],
ctx->Stencil.ZFailFunc[0],
ctx->Stencil.ZPassFunc[0]);
ctx->Driver.StencilOpSeparate(ctx, GL_BACK,
ctx->Stencil.FailFunc[1],
ctx->Stencil.ZFailFunc[1],
ctx->Stencil.ZPassFunc[1]);
/* XXX this isn't really needed */
ctx->Driver.DrawBuffer(ctx, ctx->Color.DrawBuffer[0]);
}

File diff suppressed because it is too large.


@@ -33,13 +33,102 @@
#include "texmem.h"
void intelInitTextureFuncs( struct dd_function_table *functions );
void intelInitTextureFuncs(struct dd_function_table *functions);
void intelDestroyTexObj( intelContextPtr intel, intelTextureObjectPtr t );
int intelUploadTexImages( intelContextPtr intel, intelTextureObjectPtr t,
GLuint face );
const struct gl_texture_format *intelChooseTextureFormat(GLcontext * ctx,
GLint internalFormat,
GLenum format,
GLenum type);
void intelTexImage3D(GLcontext * ctx,
GLenum target, GLint level,
GLint internalFormat,
GLint width, GLint height, GLint depth,
GLint border,
GLenum format, GLenum type, const void *pixels,
const struct gl_pixelstore_attrib *packing,
struct gl_texture_object *texObj,
struct gl_texture_image *texImage);
void intelTexSubImage3D(GLcontext * ctx,
GLenum target,
GLint level,
GLint xoffset, GLint yoffset, GLint zoffset,
GLsizei width, GLsizei height, GLsizei depth,
GLenum format, GLenum type,
const GLvoid * pixels,
const struct gl_pixelstore_attrib *packing,
struct gl_texture_object *texObj,
struct gl_texture_image *texImage);
void intelTexImage2D(GLcontext * ctx,
GLenum target, GLint level,
GLint internalFormat,
GLint width, GLint height, GLint border,
GLenum format, GLenum type, const void *pixels,
const struct gl_pixelstore_attrib *packing,
struct gl_texture_object *texObj,
struct gl_texture_image *texImage);
void intelTexSubImage2D(GLcontext * ctx,
GLenum target,
GLint level,
GLint xoffset, GLint yoffset,
GLsizei width, GLsizei height,
GLenum format, GLenum type,
const GLvoid * pixels,
const struct gl_pixelstore_attrib *packing,
struct gl_texture_object *texObj,
struct gl_texture_image *texImage);
void intelTexImage1D(GLcontext * ctx,
GLenum target, GLint level,
GLint internalFormat,
GLint width, GLint border,
GLenum format, GLenum type, const void *pixels,
const struct gl_pixelstore_attrib *packing,
struct gl_texture_object *texObj,
struct gl_texture_image *texImage);
void intelTexSubImage1D(GLcontext * ctx,
GLenum target,
GLint level,
GLint xoffset,
GLsizei width,
GLenum format, GLenum type,
const GLvoid * pixels,
const struct gl_pixelstore_attrib *packing,
struct gl_texture_object *texObj,
struct gl_texture_image *texImage);
void intelCopyTexImage1D(GLcontext * ctx, GLenum target, GLint level,
GLenum internalFormat,
GLint x, GLint y, GLsizei width, GLint border);
void intelCopyTexImage2D(GLcontext * ctx, GLenum target, GLint level,
GLenum internalFormat,
GLint x, GLint y, GLsizei width, GLsizei height,
GLint border);
void intelCopyTexSubImage1D(GLcontext * ctx, GLenum target, GLint level,
GLint xoffset, GLint x, GLint y, GLsizei width);
void intelCopyTexSubImage2D(GLcontext * ctx, GLenum target, GLint level,
GLint xoffset, GLint yoffset,
GLint x, GLint y, GLsizei width, GLsizei height);
void intelGetTexImage(GLcontext * ctx, GLenum target, GLint level,
GLenum format, GLenum type, GLvoid * pixels,
struct gl_texture_object *texObj,
struct gl_texture_image *texImage);
GLuint intel_finalize_mipmap_tree(struct intel_context *intel, GLuint unit);
void intel_tex_map_images(struct intel_context *intel,
struct intel_texture_object *intelObj);
void intel_tex_unmap_images(struct intel_context *intel,
struct intel_texture_object *intelObj);
GLboolean
intel_driReinitTextureHeap( driTexHeap *heap,
unsigned size );
#endif


@@ -0,0 +1,291 @@
/**************************************************************************
*
* Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
* IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#include "mtypes.h"
#include "enums.h"
#include "image.h"
#include "teximage.h"
#include "swrast/swrast.h"
#include "intel_screen.h"
#include "intel_context.h"
#include "intel_batchbuffer.h"
#include "intel_buffers.h"
#include "intel_mipmap_tree.h"
#include "intel_regions.h"
#include "intel_fbo.h"
#include "intel_tex.h"
#include "intel_blit.h"
#include "intel_pixel.h"
/**
* Get the intel_region which is the source for any glCopyTex[Sub]Image call.
*
* Do the best we can using the blitter. A future project is to use
* the texture engine and fragment programs for these copies.
*/
static const struct intel_region *
get_teximage_source(struct intel_context *intel, GLenum internalFormat)
{
struct intel_renderbuffer *irb;
if (0)
_mesa_printf("%s %s\n", __FUNCTION__,
_mesa_lookup_enum_by_nr(internalFormat));
switch (internalFormat) {
case GL_DEPTH_COMPONENT:
case GL_DEPTH_COMPONENT16_ARB:
irb = intel_get_renderbuffer(intel->ctx.ReadBuffer, BUFFER_DEPTH);
if (irb && irb->region && irb->region->cpp == 2)
return irb->region;
return NULL;
case GL_DEPTH24_STENCIL8_EXT:
case GL_DEPTH_STENCIL_EXT:
irb = intel_get_renderbuffer(intel->ctx.ReadBuffer, BUFFER_DEPTH);
if (irb && irb->region && irb->region->cpp == 4)
return irb->region;
return NULL;
case GL_RGBA:
return intel_readbuf_region(intel);
case GL_RGB:
if (intel->intelScreen->cpp == 2)
return intel_readbuf_region(intel);
return NULL;
default:
return NULL;
}
}
static GLboolean
do_copy_texsubimage(struct intel_context *intel,
struct intel_texture_image *intelImage,
GLenum internalFormat,
GLint dstx, GLint dsty,
GLint x, GLint y, GLsizei width, GLsizei height)
{
GLcontext *ctx = &intel->ctx;
const struct intel_region *src =
get_teximage_source(intel, internalFormat);
if (!intelImage->mt || !src)
return GL_FALSE;
intelFlush(ctx);
LOCK_HARDWARE(intel);
{
GLuint image_offset = intel_miptree_image_offset(intelImage->mt,
intelImage->face,
intelImage->level);
const GLint orig_x = x;
const GLint orig_y = y;
const struct gl_framebuffer *fb = ctx->DrawBuffer;
if (_mesa_clip_to_region(fb->_Xmin, fb->_Ymin, fb->_Xmax, fb->_Ymax,
&x, &y, &width, &height)) {
/* Update dst for clipped src. Need to also clip the source rect.
*/
dstx += x - orig_x;
dsty += y - orig_y;
if (ctx->ReadBuffer->Name == 0) {
/* reading from a window, adjust x, y */
__DRIdrawablePrivate *dPriv = intel->driDrawable;
GLuint window_y;
/* window_y = position of window on screen if y=0=bottom */
window_y = intel->intelScreen->height - (dPriv->y + dPriv->h);
y = window_y + y;
x += dPriv->x;
}
else {
/* reading from a FBO */
/* invert Y */
y = ctx->ReadBuffer->Height - y - 1;
}
/* A bit of fiddling to get the blitter to work with -ve
* pitches. But we get a nice inverted blit this way, so it's
* worth it:
*/
intelEmitCopyBlit(intel,
intelImage->mt->cpp,
-src->pitch,
src->buffer,
src->height * src->pitch * src->cpp,
intelImage->mt->pitch,
intelImage->mt->region->buffer,
image_offset,
x, y + height, dstx, dsty, width, height);
intel_batchbuffer_flush(intel->batch);
}
}
UNLOCK_HARDWARE(intel);
#if 0
/* GL_SGIS_generate_mipmap -- this can be accelerated now.
* XXX Add a ctx->Driver.GenerateMipmaps() function?
*/
if (level == texObj->BaseLevel && texObj->GenerateMipmap) {
intel_generate_mipmap(ctx, target,
&ctx->Texture.Unit[ctx->Texture.CurrentUnit],
texObj);
}
#endif
return GL_TRUE;
}
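/* Illustrative sketch (not part of the original change): the negative
 * source pitch handed to intelEmitCopyBlit above makes the blitter walk
 * the source bottom-up, folding the y-inversion between GL window
 * coordinates and texture memory into the copy itself.  A CPU analogue of
 * the same trick, assuming tightly packed 32-bit pixels; guarded out of
 * the build:
 */
#if 0
static void
copy_rows_inverted(const unsigned *src_last_row, int src_pitch_pixels,
                   unsigned *dst, int dst_pitch_pixels,
                   int width, int height)
{
   int row, x;
   /* src_last_row points at the first pixel of the *last* source row;
    * stepping by a negative pitch reads the image bottom-up while the
    * destination is written top-down. */
   for (row = 0; row < height; row++)
      for (x = 0; x < width; x++)
         dst[row * dst_pitch_pixels + x] =
            src_last_row[x - row * src_pitch_pixels];
}
#endif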
void
intelCopyTexImage1D(GLcontext * ctx, GLenum target, GLint level,
GLenum internalFormat,
GLint x, GLint y, GLsizei width, GLint border)
{
struct gl_texture_unit *texUnit =
&ctx->Texture.Unit[ctx->Texture.CurrentUnit];
struct gl_texture_object *texObj =
_mesa_select_tex_object(ctx, texUnit, target);
struct gl_texture_image *texImage =
_mesa_select_tex_image(ctx, texUnit, target, level);
if (border)
goto fail;
/* Setup or redefine the texture object, mipmap tree and texture
* image. Don't populate yet.
*/
ctx->Driver.TexImage1D(ctx, target, level, internalFormat,
width, border,
GL_RGBA, CHAN_TYPE, NULL,
&ctx->DefaultPacking, texObj, texImage);
if (!do_copy_texsubimage(intel_context(ctx),
intel_texture_image(texImage),
internalFormat, 0, 0, x, y, width, 1))
goto fail;
return;
fail:
_swrast_copy_teximage1d(ctx, target, level, internalFormat, x, y,
width, border);
}
void
intelCopyTexImage2D(GLcontext * ctx, GLenum target, GLint level,
GLenum internalFormat,
GLint x, GLint y, GLsizei width, GLsizei height,
GLint border)
{
struct gl_texture_unit *texUnit =
&ctx->Texture.Unit[ctx->Texture.CurrentUnit];
struct gl_texture_object *texObj =
_mesa_select_tex_object(ctx, texUnit, target);
struct gl_texture_image *texImage =
_mesa_select_tex_image(ctx, texUnit, target, level);
if (border)
goto fail;
/* Setup or redefine the texture object, mipmap tree and texture
* image. Don't populate yet.
*/
ctx->Driver.TexImage2D(ctx, target, level, internalFormat,
width, height, border,
GL_RGBA, CHAN_TYPE, NULL,
&ctx->DefaultPacking, texObj, texImage);
if (!do_copy_texsubimage(intel_context(ctx),
intel_texture_image(texImage),
internalFormat, 0, 0, x, y, width, height))
goto fail;
return;
fail:
_swrast_copy_teximage2d(ctx, target, level, internalFormat, x, y,
width, height, border);
}
void
intelCopyTexSubImage1D(GLcontext * ctx, GLenum target, GLint level,
GLint xoffset, GLint x, GLint y, GLsizei width)
{
struct gl_texture_unit *texUnit =
&ctx->Texture.Unit[ctx->Texture.CurrentUnit];
struct gl_texture_image *texImage =
_mesa_select_tex_image(ctx, texUnit, target, level);
GLenum internalFormat = texImage->InternalFormat;
/* XXX need to check <border> as in above function? */
/* Need to check texture is compatible with source format.
*/
if (!do_copy_texsubimage(intel_context(ctx),
intel_texture_image(texImage),
internalFormat, xoffset, 0, x, y, width, 1)) {
_swrast_copy_texsubimage1d(ctx, target, level, xoffset, x, y, width);
}
}
void
intelCopyTexSubImage2D(GLcontext * ctx, GLenum target, GLint level,
GLint xoffset, GLint yoffset,
GLint x, GLint y, GLsizei width, GLsizei height)
{
struct gl_texture_unit *texUnit =
&ctx->Texture.Unit[ctx->Texture.CurrentUnit];
struct gl_texture_image *texImage =
_mesa_select_tex_image(ctx, texUnit, target, level);
GLenum internalFormat = texImage->InternalFormat;
/* Need to check texture is compatible with source format.
*/
if (!do_copy_texsubimage(intel_context(ctx),
intel_texture_image(texImage),
internalFormat,
xoffset, yoffset, x, y, width, height)) {
_swrast_copy_texsubimage2d(ctx, target, level,
xoffset, yoffset, x, y, width, height);
}
}


@@ -0,0 +1,146 @@
#include "intel_context.h"
#include "intel_tex.h"
#include "texformat.h"
#include "enums.h"
/* It works out that this function is fine for all the supported
* hardware. However, there is still a need to map the formats onto
* hardware descriptors.
*/
/* Note that the i915 can actually support many more formats than
* these if we take the step of simply swizzling the colors
* immediately after sampling...
*/
const struct gl_texture_format *
intelChooseTextureFormat(GLcontext * ctx, GLint internalFormat,
GLenum format, GLenum type)
{
struct intel_context *intel = intel_context(ctx);
const GLboolean do32bpt = (intel->intelScreen->cpp == 4);
switch (internalFormat) {
case 4:
case GL_RGBA:
case GL_COMPRESSED_RGBA:
if (format == GL_BGRA) {
if (type == GL_UNSIGNED_BYTE || type == GL_UNSIGNED_INT_8_8_8_8_REV) {
return &_mesa_texformat_argb8888;
}
else if (type == GL_UNSIGNED_SHORT_4_4_4_4_REV) {
return &_mesa_texformat_argb4444;
}
else if (type == GL_UNSIGNED_SHORT_1_5_5_5_REV) {
return &_mesa_texformat_argb1555;
}
}
return do32bpt ? &_mesa_texformat_argb8888 : &_mesa_texformat_argb4444;
case 3:
case GL_RGB:
case GL_COMPRESSED_RGB:
if (format == GL_RGB && type == GL_UNSIGNED_SHORT_5_6_5) {
return &_mesa_texformat_rgb565;
}
return do32bpt ? &_mesa_texformat_argb8888 : &_mesa_texformat_rgb565;
case GL_RGBA8:
case GL_RGB10_A2:
case GL_RGBA12:
case GL_RGBA16:
return do32bpt ? &_mesa_texformat_argb8888 : &_mesa_texformat_argb4444;
case GL_RGBA4:
case GL_RGBA2:
return &_mesa_texformat_argb4444;
case GL_RGB5_A1:
return &_mesa_texformat_argb1555;
case GL_RGB8:
case GL_RGB10:
case GL_RGB12:
case GL_RGB16:
return &_mesa_texformat_argb8888;
case GL_RGB5:
case GL_RGB4:
case GL_R3_G3_B2:
return &_mesa_texformat_rgb565;
case GL_ALPHA:
case GL_ALPHA4:
case GL_ALPHA8:
case GL_ALPHA12:
case GL_ALPHA16:
case GL_COMPRESSED_ALPHA:
return &_mesa_texformat_a8;
case 1:
case GL_LUMINANCE:
case GL_LUMINANCE4:
case GL_LUMINANCE8:
case GL_LUMINANCE12:
case GL_LUMINANCE16:
case GL_COMPRESSED_LUMINANCE:
return &_mesa_texformat_l8;
case 2:
case GL_LUMINANCE_ALPHA:
case GL_LUMINANCE4_ALPHA4:
case GL_LUMINANCE6_ALPHA2:
case GL_LUMINANCE8_ALPHA8:
case GL_LUMINANCE12_ALPHA4:
case GL_LUMINANCE12_ALPHA12:
case GL_LUMINANCE16_ALPHA16:
case GL_COMPRESSED_LUMINANCE_ALPHA:
return &_mesa_texformat_al88;
case GL_INTENSITY:
case GL_INTENSITY4:
case GL_INTENSITY8:
case GL_INTENSITY12:
case GL_INTENSITY16:
case GL_COMPRESSED_INTENSITY:
return &_mesa_texformat_i8;
case GL_YCBCR_MESA:
if (type == GL_UNSIGNED_SHORT_8_8_MESA || type == GL_UNSIGNED_BYTE)
return &_mesa_texformat_ycbcr;
else
return &_mesa_texformat_ycbcr_rev;
case GL_COMPRESSED_RGB_FXT1_3DFX:
return &_mesa_texformat_rgb_fxt1;
case GL_COMPRESSED_RGBA_FXT1_3DFX:
return &_mesa_texformat_rgba_fxt1;
case GL_RGB_S3TC:
case GL_RGB4_S3TC:
case GL_COMPRESSED_RGB_S3TC_DXT1_EXT:
return &_mesa_texformat_rgb_dxt1;
case GL_COMPRESSED_RGBA_S3TC_DXT1_EXT:
return &_mesa_texformat_rgba_dxt1;
case GL_RGBA_S3TC:
case GL_RGBA4_S3TC:
case GL_COMPRESSED_RGBA_S3TC_DXT3_EXT:
return &_mesa_texformat_rgba_dxt3;
case GL_COMPRESSED_RGBA_S3TC_DXT5_EXT:
return &_mesa_texformat_rgba_dxt5;
case GL_DEPTH_COMPONENT:
case GL_DEPTH_COMPONENT16:
case GL_DEPTH_COMPONENT24:
case GL_DEPTH_COMPONENT32:
return &_mesa_texformat_z16;
default:
fprintf(stderr, "unexpected texture format %s in %s\n",
_mesa_lookup_enum_by_nr(internalFormat), __FUNCTION__);
return NULL;
}
return NULL; /* never get here */
}
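/* Illustrative note (not from the original source): a few concrete
 * outcomes of the selection above:
 *
 *   internalFormat GL_RGBA, format GL_BGRA, type GL_UNSIGNED_BYTE
 *      -> _mesa_texformat_argb8888 (matches the upload, no conversion)
 *   internalFormat GL_RGB, format GL_RGB, type GL_UNSIGNED_SHORT_5_6_5
 *      -> _mesa_texformat_rgb565
 *   internalFormat GL_RGBA with any other format/type on a 16bpp screen
 *      -> _mesa_texformat_argb4444 (do32bpt is false)
 *
 * Note that all of the GL_DEPTH_COMPONENT* internal formats fall through
 * to _mesa_texformat_z16 here, even the 24- and 32-bit ones.
 */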


@@ -0,0 +1,626 @@
#include <stdlib.h>
#include <stdio.h>
#include "glheader.h"
#include "macros.h"
#include "mtypes.h"
#include "enums.h"
#include "colortab.h"
#include "convolve.h"
#include "context.h"
#include "simple_list.h"
#include "texcompress.h"
#include "texformat.h"
#include "texobj.h"
#include "texstore.h"
#include "intel_context.h"
#include "intel_mipmap_tree.h"
#include "intel_buffer_objects.h"
#include "intel_batchbuffer.h"
#include "intel_tex.h"
#include "intel_ioctl.h"
#include "intel_blit.h"
#define FILE_DEBUG_FLAG DEBUG_TEXTURE
/* Functions to store texture images. Where possible, mipmap_tree's
* will be created or further instantiated with image data, otherwise
* images will be stored in malloc'd memory. A validation step is
* required to pull those images into a mipmap tree, or otherwise
* decide a fallback is required.
*/
static int
logbase2(int n)
{
GLint i = 1;
GLint log2 = 0;
while (n > i) {
i *= 2;
log2++;
}
return log2;
}
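/* Illustrative note (not from the original source): this returns the
 * ceiling of log2 for n > 1, e.g. logbase2(1) == 0, logbase2(8) == 3,
 * logbase2(9) == 4, which is what the mipmap level-count guess below wants
 * for non-power-of-two base sizes.
 */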
/* Otherwise, store it in memory if (Border != 0) or (any dimension ==
* 1).
*
* Otherwise, if max_level >= level >= min_level, create tree with
* space for textures from min_level down to max_level.
*
* Otherwise, create tree with space for textures from (level
* 0)..(1x1). Consider pruning this tree at a validation if the
* saving is worth it.
*/
static void
guess_and_alloc_mipmap_tree(struct intel_context *intel,
struct intel_texture_object *intelObj,
struct intel_texture_image *intelImage)
{
GLuint firstLevel;
GLuint lastLevel;
GLuint width = intelImage->base.Width;
GLuint height = intelImage->base.Height;
GLuint depth = intelImage->base.Depth;
GLuint l2width, l2height, l2depth;
GLuint i;
DBG("%s\n", __FUNCTION__);
if (intelImage->base.Border)
return;
if (intelImage->level > intelObj->base.BaseLevel &&
(intelImage->base.Width == 1 ||
(intelObj->base.Target != GL_TEXTURE_1D &&
intelImage->base.Height == 1) ||
(intelObj->base.Target == GL_TEXTURE_3D &&
intelImage->base.Depth == 1)))
return;
/* If this image disrespects BaseLevel, allocate from level zero.
* Usually BaseLevel == 0, so it's unlikely to happen.
*/
if (intelImage->level < intelObj->base.BaseLevel)
firstLevel = 0;
else
firstLevel = intelObj->base.BaseLevel;
/* Figure out image dimensions at start level.
*/
for (i = intelImage->level; i > firstLevel; i--) {
width <<= 1;
if (height != 1)
height <<= 1;
if (depth != 1)
depth <<= 1;
}
/* Guess a reasonable value for lastLevel. This is probably going
* to be wrong fairly often and might mean that we have to look at
* resizable buffers, or require that buffers implement lazy
* pagetable arrangements.
*/
if ((intelObj->base.MinFilter == GL_NEAREST ||
intelObj->base.MinFilter == GL_LINEAR) &&
intelImage->level == firstLevel) {
lastLevel = firstLevel;
}
else {
l2width = logbase2(width);
l2height = logbase2(height);
l2depth = logbase2(depth);
lastLevel = firstLevel + MAX2(MAX2(l2width, l2height), l2depth);
}
assert(!intelObj->mt);
intelObj->mt = intel_miptree_create(intel,
intelObj->base.Target,
intelImage->base.InternalFormat,
firstLevel,
lastLevel,
width,
height,
depth,
intelImage->base.TexFormat->TexelBytes,
intelImage->base.IsCompressed);
DBG("%s - success\n", __FUNCTION__);
}
static GLuint
target_to_face(GLenum target)
{
switch (target) {
case GL_TEXTURE_CUBE_MAP_POSITIVE_X_ARB:
case GL_TEXTURE_CUBE_MAP_NEGATIVE_X_ARB:
case GL_TEXTURE_CUBE_MAP_POSITIVE_Y_ARB:
case GL_TEXTURE_CUBE_MAP_NEGATIVE_Y_ARB:
case GL_TEXTURE_CUBE_MAP_POSITIVE_Z_ARB:
case GL_TEXTURE_CUBE_MAP_NEGATIVE_Z_ARB:
return ((GLuint) target - (GLuint) GL_TEXTURE_CUBE_MAP_POSITIVE_X);
default:
return 0;
}
}
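/* Illustrative note (not from the original source): the cube-map face
 * enums are contiguous, so the subtraction above yields faces 0..5
 * (GL_TEXTURE_CUBE_MAP_NEGATIVE_Y maps to face 3, for example), and any
 * non-cube target maps to face 0.
 */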
/* There are actually quite a few combinations this will work for,
* more than what I've listed here.
*/
static GLboolean
check_pbo_format(GLint internalFormat,
GLenum format, GLenum type,
const struct gl_texture_format *mesa_format)
{
switch (internalFormat) {
case 4:
case GL_RGBA:
return (format == GL_BGRA &&
(type == GL_UNSIGNED_BYTE ||
type == GL_UNSIGNED_INT_8_8_8_8_REV) &&
mesa_format == &_mesa_texformat_argb8888);
case 3:
case GL_RGB:
return (format == GL_RGB &&
type == GL_UNSIGNED_SHORT_5_6_5 &&
mesa_format == &_mesa_texformat_rgb565);
case GL_YCBCR_MESA:
return (type == GL_UNSIGNED_SHORT_8_8_MESA || type == GL_UNSIGNED_BYTE);
default:
return GL_FALSE;
}
}
/* XXX: Do this for TexSubImage also:
*/
static GLboolean
try_pbo_upload(struct intel_context *intel,
struct intel_texture_image *intelImage,
const struct gl_pixelstore_attrib *unpack,
GLint internalFormat,
GLint width, GLint height,
GLenum format, GLenum type, const void *pixels)
{
struct intel_buffer_object *pbo = intel_buffer_object(unpack->BufferObj);
GLuint src_offset, src_stride;
GLuint dst_offset, dst_stride;
if (!pbo ||
intel->ctx._ImageTransferState ||
unpack->SkipPixels || unpack->SkipRows) {
_mesa_printf("%s: failure 1\n", __FUNCTION__);
return GL_FALSE;
}
src_offset = (GLuint) pixels;
if (unpack->RowLength > 0)
src_stride = unpack->RowLength;
else
src_stride = width;
dst_offset = intel_miptree_image_offset(intelImage->mt,
intelImage->face,
intelImage->level);
dst_stride = intelImage->mt->pitch;
intelFlush(&intel->ctx);
LOCK_HARDWARE(intel);
{
struct _DriBufferObject *src_buffer =
intel_bufferobj_buffer(intel, pbo, INTEL_READ);
struct _DriBufferObject *dst_buffer =
intel_region_buffer(intel->intelScreen, intelImage->mt->region,
INTEL_WRITE_FULL);
intelEmitCopyBlit(intel,
intelImage->mt->cpp,
src_stride, src_buffer, src_offset,
dst_stride, dst_buffer, dst_offset,
0, 0, 0, 0, width, height);
intel_batchbuffer_flush(intel->batch);
}
UNLOCK_HARDWARE(intel);
return GL_TRUE;
}
static GLboolean
try_pbo_zcopy(struct intel_context *intel,
struct intel_texture_image *intelImage,
const struct gl_pixelstore_attrib *unpack,
GLint internalFormat,
GLint width, GLint height,
GLenum format, GLenum type, const void *pixels)
{
struct intel_buffer_object *pbo = intel_buffer_object(unpack->BufferObj);
GLuint src_offset, src_stride;
GLuint dst_offset, dst_stride;
if (!pbo ||
intel->ctx._ImageTransferState ||
unpack->SkipPixels || unpack->SkipRows) {
_mesa_printf("%s: failure 1\n", __FUNCTION__);
return GL_FALSE;
}
src_offset = (GLuint) pixels;
if (unpack->RowLength > 0)
src_stride = unpack->RowLength;
else
src_stride = width;
dst_offset = intel_miptree_image_offset(intelImage->mt,
intelImage->face,
intelImage->level);
dst_stride = intelImage->mt->pitch;
if (src_stride != dst_stride || dst_offset != 0 || src_offset != 0) {
_mesa_printf("%s: failure 2\n", __FUNCTION__);
return GL_FALSE;
}
intel_region_attach_pbo(intel->intelScreen, intelImage->mt->region, pbo);
return GL_TRUE;
}
static void
intelTexImage(GLcontext * ctx,
GLint dims,
GLenum target, GLint level,
GLint internalFormat,
GLint width, GLint height, GLint depth,
GLint border,
GLenum format, GLenum type, const void *pixels,
const struct gl_pixelstore_attrib *unpack,
struct gl_texture_object *texObj,
struct gl_texture_image *texImage)
{
struct intel_context *intel = intel_context(ctx);
struct intel_texture_object *intelObj = intel_texture_object(texObj);
struct intel_texture_image *intelImage = intel_texture_image(texImage);
GLint postConvWidth = width;
GLint postConvHeight = height;
GLint texelBytes, sizeInBytes;
GLuint dstRowStride;
DBG("%s target %s level %d %dx%dx%d border %d\n", __FUNCTION__,
_mesa_lookup_enum_by_nr(target), level, width, height, depth, border);
intelFlush(ctx);
intelImage->face = target_to_face(target);
intelImage->level = level;
if (ctx->_ImageTransferState & IMAGE_CONVOLUTION_BIT) {
_mesa_adjust_image_for_convolution(ctx, dims, &postConvWidth,
&postConvHeight);
}
/* choose the texture format */
texImage->TexFormat = intelChooseTextureFormat(ctx, internalFormat,
format, type);
assert(texImage->TexFormat);
switch (dims) {
case 1:
texImage->FetchTexelc = texImage->TexFormat->FetchTexel1D;
texImage->FetchTexelf = texImage->TexFormat->FetchTexel1Df;
break;
case 2:
texImage->FetchTexelc = texImage->TexFormat->FetchTexel2D;
texImage->FetchTexelf = texImage->TexFormat->FetchTexel2Df;
break;
case 3:
texImage->FetchTexelc = texImage->TexFormat->FetchTexel3D;
texImage->FetchTexelf = texImage->TexFormat->FetchTexel3Df;
break;
default:
assert(0);
break;
}
texelBytes = texImage->TexFormat->TexelBytes;
/* Minimum pitch of 32 bytes */
if (postConvWidth * texelBytes < 32) {
postConvWidth = 32 / texelBytes;
texImage->RowStride = postConvWidth;
}
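/* Worked example (illustrative, not from the original source): with a
 * 2-byte texel format such as rgb565, a 4-texel-wide level gives
 * postConvWidth * texelBytes == 8 bytes, so postConvWidth is bumped to
 * 32 / 2 == 16 texels and RowStride becomes 16, keeping each row at least
 * 32 bytes apart as the comment above requires.
 */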
assert(texImage->RowStride == postConvWidth);
/* Release the reference to a potentially orphaned buffer.
* Release any old malloced memory.
*/
if (intelImage->mt) {
intel_miptree_release(intel, &intelImage->mt);
assert(!texImage->Data);
}
else if (texImage->Data) {
free(texImage->Data);
}
/* If this is the only texture image in the tree, could call
* bmBufferData with NULL data to free the old block and avoid
* waiting on any outstanding fences.
*/
if (intelObj->mt &&
intelObj->mt->first_level == level &&
intelObj->mt->last_level == level &&
intelObj->mt->target != GL_TEXTURE_CUBE_MAP_ARB &&
!intel_miptree_match_image(intelObj->mt, &intelImage->base,
intelImage->face, intelImage->level)) {
DBG("release it\n");
intel_miptree_release(intel, &intelObj->mt);
assert(!intelObj->mt);
}
if (!intelObj->mt) {
guess_and_alloc_mipmap_tree(intel, intelObj, intelImage);
if (!intelObj->mt) {
DBG("guess_and_alloc_mipmap_tree: failed\n");
}
}
assert(!intelImage->mt);
if (intelObj->mt &&
intel_miptree_match_image(intelObj->mt, &intelImage->base,
intelImage->face, intelImage->level)) {
intel_miptree_reference(&intelImage->mt, intelObj->mt);
assert(intelImage->mt);
}
if (!intelImage->mt)
DBG("XXX: Image did not fit into tree - storing in local memory!\n");
/* PBO fastpaths:
*/
if (dims <= 2 &&
intelImage->mt &&
intel_buffer_object(unpack->BufferObj) &&
check_pbo_format(internalFormat, format,
type, intelImage->base.TexFormat)) {
DBG("trying pbo upload\n");
/* Attempt to texture directly from PBO data (zero copy upload).
*
* Currently disabled as it can lead to worse as well as better
* performance (in particular when intel_region_cow() is
* required).
*/
if (intelObj->mt == intelImage->mt &&
intelObj->mt->first_level == level &&
intelObj->mt->last_level == level) {
if (try_pbo_zcopy(intel, intelImage, unpack,
internalFormat,
width, height, format, type, pixels)) {
DBG("pbo zcopy upload succeeded\n");
return;
}
}
/* Otherwise, attempt to use the blitter for PBO image uploads.
*/
if (try_pbo_upload(intel, intelImage, unpack,
internalFormat,
width, height, format, type, pixels)) {
DBG("pbo upload succeeded\n");
return;
}
DBG("pbo upload failed\n");
}
/* intelCopyTexImage calls this function with pixels == NULL, with
* the expectation that the mipmap tree will be set up but nothing
* more will be done. This is where those calls return:
*/
pixels = _mesa_validate_pbo_teximage(ctx, dims, width, height, 1,
format, type,
pixels, unpack, "glTexImage");
if (!pixels)
return;
if (intelImage->mt)
intel_region_idle(intel->intelScreen, intelImage->mt->region);
LOCK_HARDWARE(intel);
if (intelImage->mt) {
texImage->Data = intel_miptree_image_map(intel,
intelImage->mt,
intelImage->face,
intelImage->level,
&dstRowStride,
intelImage->base.ImageOffsets);
}
else {
/* Allocate regular memory and store the image there temporarily. */
if (texImage->IsCompressed) {
sizeInBytes = texImage->CompressedSize;
dstRowStride =
_mesa_compressed_row_stride(texImage->InternalFormat, width);
assert(dims != 3);
}
else {
dstRowStride = postConvWidth * texelBytes;
sizeInBytes = depth * dstRowStride * postConvHeight;
}
texImage->Data = malloc(sizeInBytes);
}
DBG("Upload image %dx%dx%d row_len %x "
"pitch %x\n",
width, height, depth, width * texelBytes, dstRowStride);
/* Copy data. Would like to know when it's ok for us to eg. use
* the blitter to copy. Or, use the hardware to do the format
* conversion and copy:
*/
if (!texImage->TexFormat->StoreImage(ctx, dims,
texImage->_BaseFormat,
texImage->TexFormat,
texImage->Data, 0, 0, 0, /* dstX/Y/Zoffset */
dstRowStride,
texImage->ImageOffsets,
width, height, depth,
format, type, pixels, unpack)) {
_mesa_error(ctx, GL_OUT_OF_MEMORY, "glTexImage");
}
_mesa_unmap_teximage_pbo(ctx, unpack);
if (intelImage->mt) {
intel_miptree_image_unmap(intel, intelImage->mt);
texImage->Data = NULL;
}
UNLOCK_HARDWARE(intel);
#if 0
/* GL_SGIS_generate_mipmap -- this can be accelerated now.
*/
if (level == texObj->BaseLevel && texObj->GenerateMipmap) {
intel_generate_mipmap(ctx, target,
&ctx->Texture.Unit[ctx->Texture.CurrentUnit],
texObj);
}
#endif
}
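/* Summary (illustrative, not from the original source) of the upload paths
 * tried above, in order:
 *
 *   1. PBO zero-copy: when the source is a PBO whose layout exactly
 *      matches a single-level miptree, try_pbo_zcopy() attaches the PBO's
 *      buffer to the region instead of copying (see the performance caveat
 *      in the comment above).
 *   2. PBO blit: otherwise try_pbo_upload() blits from the PBO buffer into
 *      the miptree with intelEmitCopyBlit().
 *   3. CPU store: map the miptree (or fall back to malloc'd memory) and
 *      let texImage->TexFormat->StoreImage() convert and copy the user
 *      data.
 */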
void
intelTexImage3D(GLcontext * ctx,
GLenum target, GLint level,
GLint internalFormat,
GLint width, GLint height, GLint depth,
GLint border,
GLenum format, GLenum type, const void *pixels,
const struct gl_pixelstore_attrib *unpack,
struct gl_texture_object *texObj,
struct gl_texture_image *texImage)
{
intelTexImage(ctx, 3, target, level,
internalFormat, width, height, depth, border,
format, type, pixels, unpack, texObj, texImage);
}
void
intelTexImage2D(GLcontext * ctx,
GLenum target, GLint level,
GLint internalFormat,
GLint width, GLint height, GLint border,
GLenum format, GLenum type, const void *pixels,
const struct gl_pixelstore_attrib *unpack,
struct gl_texture_object *texObj,
struct gl_texture_image *texImage)
{
intelTexImage(ctx, 2, target, level,
internalFormat, width, height, 1, border,
format, type, pixels, unpack, texObj, texImage);
}
void
intelTexImage1D(GLcontext * ctx,
GLenum target, GLint level,
GLint internalFormat,
GLint width, GLint border,
GLenum format, GLenum type, const void *pixels,
const struct gl_pixelstore_attrib *unpack,
struct gl_texture_object *texObj,
struct gl_texture_image *texImage)
{
intelTexImage(ctx, 1, target, level,
internalFormat, width, 1, 1, border,
format, type, pixels, unpack, texObj, texImage);
}
/**
* Need to map texture image into memory before copying image data,
* then unmap it.
*/
void
intelGetTexImage(GLcontext * ctx, GLenum target, GLint level,
GLenum format, GLenum type, GLvoid * pixels,
struct gl_texture_object *texObj,
struct gl_texture_image *texImage)
{
struct intel_context *intel = intel_context(ctx);
struct intel_texture_image *intelImage = intel_texture_image(texImage);
/* Map */
if (intelImage->mt) {
/* Image is stored in hardware format in a buffer managed by the
* kernel. Need to explicitly map and unmap it.
*/
intelImage->base.Data =
intel_miptree_image_map(intel,
intelImage->mt,
intelImage->face,
intelImage->level,
&intelImage->base.RowStride,
intelImage->base.ImageOffsets);
}
else {
/* Otherwise, the image should actually be stored in
* intelImage->base.Data. This is pretty confusing for
* everybody, I'd much prefer to separate the two functions of
* texImage->Data - storage for texture images in main memory
* and access (ie mappings) of images. In other words, we'd
* create a new texImage->Map field and leave Data simply for
* storage.
*/
assert(intelImage->base.Data);
}
_mesa_get_teximage(ctx, target, level, format, type, pixels,
texObj, texImage);
/* Unmap */
if (intelImage->mt) {
intel_miptree_image_unmap(intel, intelImage->mt);
intelImage->base.Data = NULL;
}
}


@@ -0,0 +1,183 @@
/**************************************************************************
*
* Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
* IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#include "mtypes.h"
#include "texobj.h"
#include "texstore.h"
#include "enums.h"
#include "intel_context.h"
#include "intel_tex.h"
#include "intel_mipmap_tree.h"
#define FILE_DEBUG_FLAG DEBUG_TEXTURE
static void
intelTexSubimage(GLcontext * ctx,
GLint dims,
GLenum target, GLint level,
GLint xoffset, GLint yoffset, GLint zoffset,
GLint width, GLint height, GLint depth,
GLenum format, GLenum type, const void *pixels,
const struct gl_pixelstore_attrib *packing,
struct gl_texture_object *texObj,
struct gl_texture_image *texImage)
{
struct intel_context *intel = intel_context(ctx);
struct intel_texture_image *intelImage = intel_texture_image(texImage);
GLuint dstImageStride;
GLuint dstRowStride;
DBG("%s target %s level %d offset %d,%d %dx%d\n", __FUNCTION__,
_mesa_lookup_enum_by_nr(target),
level, xoffset, yoffset, width, height);
intelFlush(ctx);
pixels =
_mesa_validate_pbo_teximage(ctx, dims, width, height, depth, format,
type, pixels, packing, "glTexSubImage2D");
if (!pixels)
return;
if (intelImage->mt)
intel_region_idle(intel->intelScreen, intelImage->mt->region);
LOCK_HARDWARE(intel);
/* Map buffer if necessary. Need to lock to prevent other contexts
* from uploading the buffer under us.
*/
if (intelImage->mt)
texImage->Data = intel_miptree_image_map(intel,
intelImage->mt,
intelImage->face,
intelImage->level,
&dstRowStride,
&dstImageStride);
assert(dstRowStride);
if (!texImage->TexFormat->StoreImage(ctx, dims, texImage->_BaseFormat,
texImage->TexFormat,
texImage->Data,
xoffset, yoffset, zoffset,
dstRowStride,
texImage->ImageOffsets,
width, height, depth,
format, type, pixels, packing)) {
_mesa_error(ctx, GL_OUT_OF_MEMORY, "intelTexSubImage");
}
#if 0
/* GL_SGIS_generate_mipmap */
if (level == texObj->BaseLevel && texObj->GenerateMipmap) {
_mesa_generate_mipmap(ctx, target,
&ctx->Texture.Unit[ctx->Texture.CurrentUnit],
texObj);
}
#endif
_mesa_unmap_teximage_pbo(ctx, packing);
if (intelImage->mt) {
intel_miptree_image_unmap(intel, intelImage->mt);
texImage->Data = NULL;
}
UNLOCK_HARDWARE(intel);
}
void
intelTexSubImage3D(GLcontext * ctx,
GLenum target,
GLint level,
GLint xoffset, GLint yoffset, GLint zoffset,
GLsizei width, GLsizei height, GLsizei depth,
GLenum format, GLenum type,
const GLvoid * pixels,
const struct gl_pixelstore_attrib *packing,
struct gl_texture_object *texObj,
struct gl_texture_image *texImage)
{
intelTexSubimage(ctx, 3,
target, level,
xoffset, yoffset, zoffset,
width, height, depth,
format, type, pixels, packing, texObj, texImage);
}
void
intelTexSubImage2D(GLcontext * ctx,
GLenum target,
GLint level,
GLint xoffset, GLint yoffset,
GLsizei width, GLsizei height,
GLenum format, GLenum type,
const GLvoid * pixels,
const struct gl_pixelstore_attrib *packing,
struct gl_texture_object *texObj,
struct gl_texture_image *texImage)
{
intelTexSubimage(ctx, 2,
target, level,
xoffset, yoffset, 0,
width, height, 1,
format, type, pixels, packing, texObj, texImage);
}
void
intelTexSubImage1D(GLcontext * ctx,
GLenum target,
GLint level,
GLint xoffset,
GLsizei width,
GLenum format, GLenum type,
const GLvoid * pixels,
const struct gl_pixelstore_attrib *packing,
struct gl_texture_object *texObj,
struct gl_texture_image *texImage)
{
intelTexSubimage(ctx, 1,
target, level,
xoffset, 0, 0,
width, 1, 1,
format, type, pixels, packing, texObj, texImage);
}


@@ -0,0 +1,253 @@
#include "mtypes.h"
#include "macros.h"
#include "intel_context.h"
#include "intel_mipmap_tree.h"
#include "intel_tex.h"
#define FILE_DEBUG_FLAG DEBUG_TEXTURE
/**
* Compute which mipmap levels really need to be sent to the hardware.
* This depends on the base image size, GL_TEXTURE_MIN_LOD,
* GL_TEXTURE_MAX_LOD, GL_TEXTURE_BASE_LEVEL, and GL_TEXTURE_MAX_LEVEL.
*/
static void
intel_calculate_first_last_level(struct intel_texture_object *intelObj)
{
struct gl_texture_object *tObj = &intelObj->base;
const struct gl_texture_image *const baseImage =
tObj->Image[0][tObj->BaseLevel];
/* These must be signed values. MinLod and MaxLod can be negative numbers,
* and having firstLevel and lastLevel as signed prevents the need for
* extra sign checks.
*/
int firstLevel;
int lastLevel;
/* Yes, this looks overly complicated, but it's all needed.
*/
switch (tObj->Target) {
case GL_TEXTURE_1D:
case GL_TEXTURE_2D:
case GL_TEXTURE_3D:
case GL_TEXTURE_CUBE_MAP:
if (tObj->MinFilter == GL_NEAREST || tObj->MinFilter == GL_LINEAR) {
/* GL_NEAREST and GL_LINEAR only care about GL_TEXTURE_BASE_LEVEL.
*/
firstLevel = lastLevel = tObj->BaseLevel;
}
else {
firstLevel = tObj->BaseLevel + (GLint) (tObj->MinLod + 0.5);
firstLevel = MAX2(firstLevel, tObj->BaseLevel);
lastLevel = tObj->BaseLevel + (GLint) (tObj->MaxLod + 0.5);
lastLevel = MAX2(lastLevel, tObj->BaseLevel);
lastLevel = MIN2(lastLevel, tObj->BaseLevel + baseImage->MaxLog2);
lastLevel = MIN2(lastLevel, tObj->MaxLevel);
lastLevel = MAX2(firstLevel, lastLevel); /* need at least one level */
}
break;
case GL_TEXTURE_RECTANGLE_NV:
case GL_TEXTURE_4D_SGIS:
firstLevel = lastLevel = 0;
break;
default:
return;
}
/* save these values */
intelObj->firstLevel = firstLevel;
intelObj->lastLevel = lastLevel;
}
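/* Worked example (illustrative, not from the original source): for a 2D
 * texture with a 64x64 base image (MaxLog2 == 6), BaseLevel == 0,
 * MaxLevel == 1000, default MinLod/MaxLod and a mipmapped MinFilter, the
 * code above yields firstLevel == 0 and lastLevel == 6 (clamped by
 * MaxLog2).  Raising GL_TEXTURE_MIN_LOD to 2.0 would raise firstLevel to
 * 2, shrinking the range the miptree must cover.
 */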
static void
copy_image_data_to_tree(struct intel_context *intel,
struct intel_texture_object *intelObj,
struct intel_texture_image *intelImage)
{
if (intelImage->mt) {
/* Copy potentially with the blitter:
*/
intel_miptree_image_copy(intel,
intelObj->mt,
intelImage->face,
intelImage->level, intelImage->mt);
intel_miptree_release(intel, &intelImage->mt);
}
else {
assert(intelImage->base.Data != NULL);
/* More straightforward upload.
*/
intel_miptree_image_data(intel,
intelObj->mt,
intelImage->face,
intelImage->level,
intelImage->base.Data,
intelImage->base.RowStride,
intelImage->base.RowStride *
intelImage->base.Height);
free(intelImage->base.Data);
intelImage->base.Data = NULL;
}
intel_miptree_reference(&intelImage->mt, intelObj->mt);
}
/*
*/
GLuint
intel_finalize_mipmap_tree(struct intel_context *intel, GLuint unit)
{
struct gl_texture_object *tObj = intel->ctx.Texture.Unit[unit]._Current;
struct intel_texture_object *intelObj = intel_texture_object(tObj);
GLuint face, i;
GLuint nr_faces = 0;
struct intel_texture_image *firstImage;
/* We know/require this is true by now:
*/
assert(intelObj->base.Complete);
/* What levels must the tree include at a minimum?
*/
intel_calculate_first_last_level(intelObj);
firstImage =
intel_texture_image(intelObj->base.Image[0][intelObj->firstLevel]);
/* Fallback case:
*/
if (firstImage->base.Border) {
if (intelObj->mt) {
intel_miptree_release(intel, &intelObj->mt);
}
return GL_FALSE;
}
/* If both firstImage and intelObj have a tree which can contain
* all active images, favour firstImage. Note that because of the
* completeness requirement, we know that the image dimensions
* will match.
*/
if (firstImage->mt &&
firstImage->mt != intelObj->mt &&
firstImage->mt->first_level <= intelObj->firstLevel &&
firstImage->mt->last_level >= intelObj->lastLevel) {
if (intelObj->mt)
intel_miptree_release(intel, &intelObj->mt);
intel_miptree_reference(&intelObj->mt, firstImage->mt);
}
/* Check tree can hold all active levels. Check tree matches
* target, imageFormat, etc.
*
* XXX: For some layouts (eg i945?), the test might have to be
* first_level == firstLevel, as the tree isn't valid except at the
* original start level. Hope to get around this by
* programming minLod, maxLod, baseLevel into the hardware and
* leaving the tree alone.
*/
if (intelObj->mt &&
((intelObj->mt->first_level > intelObj->firstLevel) ||
(intelObj->mt->last_level < intelObj->lastLevel) ||
(intelObj->mt->internal_format != firstImage->base.InternalFormat))) {
intel_miptree_release(intel, &intelObj->mt);
}
/* May need to create a new tree:
*/
if (!intelObj->mt) {
intelObj->mt = intel_miptree_create(intel,
intelObj->base.Target,
firstImage->base.InternalFormat,
intelObj->firstLevel,
intelObj->lastLevel,
firstImage->base.Width,
firstImage->base.Height,
firstImage->base.Depth,
firstImage->base.TexFormat->
TexelBytes,
firstImage->base.IsCompressed);
}
/* Pull in any images not in the object's tree:
*/
nr_faces = (intelObj->base.Target == GL_TEXTURE_CUBE_MAP) ? 6 : 1;
for (face = 0; face < nr_faces; face++) {
for (i = intelObj->firstLevel; i <= intelObj->lastLevel; i++) {
struct intel_texture_image *intelImage =
intel_texture_image(intelObj->base.Image[face][i]);
/* Need to import images held in main memory or in other trees.
*/
if (intelObj->mt != intelImage->mt) {
copy_image_data_to_tree(intel, intelObj, intelImage);
}
}
}
return GL_TRUE;
}
void
intel_tex_map_images(struct intel_context *intel,
struct intel_texture_object *intelObj)
{
GLuint nr_faces = (intelObj->base.Target == GL_TEXTURE_CUBE_MAP) ? 6 : 1;
GLuint face, i;
DBG("%s\n", __FUNCTION__);
for (face = 0; face < nr_faces; face++) {
for (i = intelObj->firstLevel; i <= intelObj->lastLevel; i++) {
struct intel_texture_image *intelImage =
intel_texture_image(intelObj->base.Image[face][i]);
if (intelImage->mt) {
intelImage->base.Data =
intel_miptree_image_map(intel,
intelImage->mt,
intelImage->face,
intelImage->level,
&intelImage->base.RowStride,
intelImage->base.ImageOffsets);
/* convert stride to texels, not bytes */
intelImage->base.RowStride /= intelImage->mt->cpp;
/* intelImage->base.ImageStride /= intelImage->mt->cpp; */
}
}
}
}
void
intel_tex_unmap_images(struct intel_context *intel,
struct intel_texture_object *intelObj)
{
GLuint nr_faces = (intelObj->base.Target == GL_TEXTURE_CUBE_MAP) ? 6 : 1;
GLuint face, i;
for (face = 0; face < nr_faces; face++) {
for (i = intelObj->firstLevel; i <= intelObj->lastLevel; i++) {
struct intel_texture_image *intelImage =
intel_texture_image(intelObj->base.Image[face][i]);
if (intelImage->mt) {
intel_miptree_image_unmap(intel, intelImage->mt);
intelImage->base.Data = NULL;
}
}
}
}

@@ -1,72 +0,0 @@
#include "texmem.h"
#include "simple_list.h"
#include "imports.h"
#include "macros.h"
#include "intel_tex.h"
static GLuint
driLog2( GLuint n )
{
GLuint log2;
for ( log2 = 1 ; n > 1 ; log2++ ) {
n >>= 1;
}
return log2;
}
static void calculate_heap_size( driTexHeap * heap, unsigned size,
unsigned nr_regions, unsigned alignmentShift )
{
unsigned l;
l = driLog2( (size - 1) / nr_regions );
if ( l < alignmentShift )
{
l = alignmentShift;
}
heap->logGranularity = l;
heap->size = size & ~((1L << l) - 1);
}
GLboolean
intel_driReinitTextureHeap( driTexHeap *heap,
unsigned size )
{
driTextureObject *t, *tmp;
/* Kick out everything:
*/
foreach_s ( t, tmp, & heap->texture_objects ) {
if ( t->tObj != NULL ) {
driSwapOutTextureObject( t );
}
else {
driDestroyTextureObject( t );
}
}
/* Destroy the memory manager:
*/
mmDestroy( heap->memory_heap );
/* Recreate the memory manager:
*/
calculate_heap_size(heap, size, heap->nrRegions, heap->alignmentShift);
heap->memory_heap = mmInit( 0, heap->size );
if ( heap->memory_heap == NULL ) {
fprintf(stderr, "driReinitTextureHeap: couldn't recreate memory heap\n");
FREE( heap );
return GL_FALSE;
}
make_empty_list( & heap->texture_objects );
return GL_TRUE;
}
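The granularity computation in calculate_heap_size() above decides how coarsely the heap is carved up. A standalone sketch with hypothetical numbers (32 MB heap, 64 regions, 4 KB alignment) shows the effect:

#include <stdio.h>

/* Same arithmetic as driLog2()/calculate_heap_size() above, applied to
 * made-up numbers purely for illustration.
 */
static unsigned int log2_granularity(unsigned int n)
{
   unsigned int log2;
   for (log2 = 1; n > 1; log2++)
      n >>= 1;
   return log2;
}

int main(void)
{
   const unsigned int size = 32u * 1024 * 1024;  /* 32 MB heap */
   const unsigned int nr_regions = 64;
   const unsigned int alignmentShift = 12;       /* 4 KB alignment */
   unsigned int l, heapSize;

   l = log2_granularity((size - 1) / nr_regions);
   if (l < alignmentShift)
      l = alignmentShift;
   heapSize = size & ~((1u << l) - 1);

   /* prints "granularity shift 19 (512 KB), usable size 33554432" */
   printf("granularity shift %u (%u KB), usable size %u\n",
          l, (1u << l) / 1024, heapSize);
   return 0;
}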

File diff suppressed because it is too large.

@@ -30,6 +30,8 @@
#include "mtypes.h"
#define _INTEL_NEW_RENDERSTATE (_DD_NEW_LINE_STIPPLE | \
_DD_NEW_TRI_UNFILLED | \
_DD_NEW_TRI_LIGHT_TWOSIDE | \
@@ -38,9 +40,30 @@
_NEW_PROGRAM | \
_NEW_POLYGONSTIPPLE)
extern void intelInitTriFuncs( GLcontext *ctx );
extern void intelInitTriFuncs(GLcontext * ctx);
extern void intelChooseRenderState(GLcontext * ctx);
extern void intelStartInlinePrimitive(struct intel_context *intel,
GLuint prim, GLuint flags);
extern void intelWrapInlinePrimitive(struct intel_context *intel);
GLuint *intelExtendInlinePrimitive(struct intel_context *intel,
GLuint dwords);
void intel_meta_draw_quad(struct intel_context *intel,
GLfloat x0, GLfloat x1,
GLfloat y0, GLfloat y1,
GLfloat z,
GLuint color,
GLfloat s0, GLfloat s1, GLfloat t0, GLfloat t1);
void intel_meta_draw_poly(struct intel_context *intel,
GLuint n,
GLfloat xy[][2],
GLfloat z, GLuint color, GLfloat tex[][2]);
extern void intelPrintRenderState( const char *msg, GLuint state );
extern void intelChooseRenderState( GLcontext *ctx );
#endif

@@ -276,6 +276,8 @@ _mesa_PushAttrib(GLbitfield mask)
struct gl_pixel_attrib *attr;
attr = MALLOC_STRUCT( gl_pixel_attrib );
MEMCPY( attr, &ctx->Pixel, sizeof(struct gl_pixel_attrib) );
/* push the Read FBO's ReadBuffer state, not ctx->Pixel.ReadBuffer */
attr->ReadBuffer = ctx->ReadBuffer->ColorReadBuffer;
newnode = new_attrib_node( GL_PIXEL_MODE_BIT );
newnode->data = attr;
newnode->next = head;

@@ -591,11 +591,9 @@ _mesa_ReadBuffer(GLenum buffer)
* \note This function should only be called through the GL API, not
* from device drivers (as was done in the past).
*/
void GLAPIENTRY
_mesa_ResizeBuffersMESA( void )
{
GET_CURRENT_CONTEXT(ctx);
void _mesa_resizebuffers( GLcontext *ctx )
{
ASSERT_OUTSIDE_BEGIN_END_AND_FLUSH( ctx );
if (MESA_VERBOSE & VERBOSE_API)
@@ -637,6 +635,14 @@ _mesa_ResizeBuffersMESA( void )
ctx->NewState |= _NEW_BUFFERS; /* to update scissor / window bounds */
}
void GLAPIENTRY
_mesa_ResizeBuffersMESA( void )
{
GET_CURRENT_CONTEXT(ctx);
_mesa_resizebuffers( ctx );
}
/*
* XXX move somewhere else someday?

@@ -78,4 +78,6 @@ extern void
_mesa_set_scissor(GLcontext *ctx,
GLint x, GLint y, GLsizei width, GLsizei height);
extern void _mesa_resizebuffers( GLcontext *ctx );
#endif

@@ -704,9 +704,7 @@ alloc_shared_state( GLcontext *ctx )
ss->ArrayObjects = _mesa_NewHashTable();
#if FEATURE_ARB_shader_objects
ss->GL2Objects = _mesa_NewHashTable ();
#endif
ss->Default1D = (*ctx->Driver.NewTextureObject)(ctx, 0, GL_TEXTURE_1D);
if (!ss->Default1D)
@@ -777,10 +775,8 @@ alloc_shared_state( GLcontext *ctx )
if (ss->ArrayObjects)
_mesa_DeleteHashTable (ss->ArrayObjects);
#if FEATURE_ARB_shader_objects
if (ss->GL2Objects)
_mesa_DeleteHashTable (ss->GL2Objects);
#endif
#if FEATURE_EXT_framebuffer_object
if (ss->FrameBuffers)
@@ -933,9 +929,7 @@ free_shared_state( GLcontext *ctx, struct gl_shared_state *ss )
_mesa_HashDeleteAll(ss->ArrayObjects, delete_arrayobj_cb, ctx);
_mesa_DeleteHashTable(ss->ArrayObjects);
#if FEATURE_ARB_shader_objects
_mesa_DeleteHashTable(ss->GL2Objects);
#endif
#if FEATURE_EXT_framebuffer_object
_mesa_DeleteHashTable(ss->FrameBuffers);
@@ -1713,6 +1707,8 @@ _mesa_make_current( GLcontext *newCtx, GLframebuffer *drawBuffer,
if (readBuffer != drawBuffer && !readBuffer->Initialized) {
initialize_framebuffer_size(newCtx, readBuffer);
}
_mesa_resizebuffers(newCtx);
#endif
if (newCtx->FirstTimeCurrent) {
/* set initial viewport and scissor size now */

@@ -570,6 +570,11 @@ _mesa_BindRenderbufferEXT(GLenum target, GLuint renderbuffer)
}
FLUSH_VERTICES(ctx, _NEW_BUFFERS);
/* The above doesn't fully flush the drivers in the way that a
* glFlush does, but that is required here:
*/
ctx->Driver.Flush(ctx);
if (renderbuffer) {
newRb = _mesa_lookup_renderbuffer(ctx, renderbuffer);
@@ -972,7 +977,9 @@ _mesa_BindFramebufferEXT(GLenum target, GLuint framebuffer)
}
FLUSH_VERTICES(ctx, _NEW_BUFFERS);
if (ctx->Driver.Flush) {
ctx->Driver.Flush(ctx);
}
if (framebuffer) {
/* Binding a user-created framebuffer object */
newFb = _mesa_lookup_framebuffer(ctx, framebuffer);
@@ -1047,6 +1054,10 @@ _mesa_DeleteFramebuffersEXT(GLsizei n, const GLuint *framebuffers)
ASSERT_OUTSIDE_BEGIN_END(ctx);
FLUSH_VERTICES(ctx, _NEW_BUFFERS);
/* The above doesn't fully flush the drivers in the way that a
* glFlush does, but that is required here:
*/
ctx->Driver.Flush(ctx);
for (i = 0; i < n; i++) {
if (framebuffers[i] > 0) {
@@ -1262,6 +1273,10 @@ framebuffer_texture(GLuint dims, GLenum target, GLenum attachment,
}
FLUSH_VERTICES(ctx, _NEW_BUFFERS);
/* The above doesn't fully flush the drivers in the way that a
* glFlush does, but that is required here:
*/
ctx->Driver.Flush(ctx);
_glthread_LOCK_MUTEX(fb->Mutex);
if (texObj) {
@@ -1380,6 +1395,10 @@ _mesa_FramebufferRenderbufferEXT(GLenum target, GLenum attachment,
}
FLUSH_VERTICES(ctx, _NEW_BUFFERS);
/* The above doesn't fully flush the drivers in the way that a
* glFlush does, but that is required here:
*/
ctx->Driver.Flush(ctx);
assert(ctx->Driver.FramebufferRenderbuffer);
ctx->Driver.FramebufferRenderbuffer(ctx, fb, attachment, rb);
@@ -1443,6 +1462,10 @@ _mesa_GetFramebufferAttachmentParameterivEXT(GLenum target, GLenum attachment,
}
FLUSH_VERTICES(ctx, _NEW_BUFFERS);
/* The above doesn't fully flush the drivers in the way that a
* glFlush does, but that is required here:
*/
ctx->Driver.Flush(ctx);
switch (pname) {
case GL_FRAMEBUFFER_ATTACHMENT_OBJECT_TYPE_EXT:

@@ -604,6 +604,17 @@ do { \
#endif
/**
* Return 1 if this is a little endian machine, 0 if big endian.
*/
static INLINE GLboolean
_mesa_little_endian(void)
{
const GLuint ui = 1; /* intentionally not static */
return *((const GLubyte *) &ui);
}
/**********************************************************************
* Functions

@@ -658,7 +658,7 @@ _mesa_free_texture_image_data(GLcontext *ctx,
void
_mesa_delete_texture_image( GLcontext *ctx, struct gl_texture_image *texImage )
{
if (texImage->Data) {
if (texImage->Data || ctx->Driver.FreeTexImageData) {
ctx->Driver.FreeTexImageData( ctx, texImage );
}
ASSERT(texImage->Data == NULL);

@@ -63,11 +63,21 @@
#include "texformat.h"
#include "teximage.h"
#include "texstore.h"
#include "enums.h"
static const GLint ZERO = 4, ONE = 5;
enum {
ZERO = 4,
ONE = 5
};
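Switching ZERO and ONE from const GLints to enumerators is presumably what allows them to appear in the file-scope mappings[] initializer added below: in C, only integer constant expressions are permitted there. A minimal illustration, with hypothetical names:

/* Enumerators are integer constant expressions, so this compiles: */
enum { ZERO_IDX = 4, ONE_IDX = 5 };
static const unsigned char ok_map[2] = { ZERO_IDX, ONE_IDX };

/* A "static const" variable is not a constant expression in C, so the
 * equivalent table is rejected at file scope:
 *
 *    static const int ZERO_VAR = 4;
 *    static const unsigned char bad_map[1] = { ZERO_VAR };  // error
 */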
static GLboolean can_swizzle(GLenum logicalBaseFormat)
/**
* Return GL_TRUE if the given image format is one that can be converted
* to another format by swizzling.
*/
static GLboolean
can_swizzle(GLenum logicalBaseFormat)
{
switch (logicalBaseFormat) {
case GL_RGBA:
@@ -76,6 +86,12 @@ static GLboolean can_swizzle(GLenum logicalBaseFormat)
case GL_INTENSITY:
case GL_ALPHA:
case GL_LUMINANCE:
case GL_RED:
case GL_GREEN:
case GL_BLUE:
case GL_BGR:
case GL_BGRA:
case GL_ABGR_EXT:
return GL_TRUE;
default:
return GL_FALSE;
@@ -83,89 +99,172 @@ static GLboolean can_swizzle(GLenum logicalBaseFormat)
}
enum {
IDX_LUMINANCE = 0,
IDX_ALPHA,
IDX_INTENSITY,
IDX_LUMINANCE_ALPHA,
IDX_RGB,
IDX_RGBA,
IDX_RED,
IDX_GREEN,
IDX_BLUE,
IDX_BGR,
IDX_BGRA,
IDX_ABGR,
MAX_IDX
};
#define MAP1(x) MAP4(x, ZERO, ZERO, ZERO)
#define MAP2(x,y) MAP4(x, y, ZERO, ZERO)
#define MAP3(x,y,z) MAP4(x, y, z, ZERO)
#define MAP4(x,y,z,w) { x, y, z, w, ZERO, ONE }
static const struct {
GLubyte format_idx;
GLubyte to_rgba[6];
GLubyte from_rgba[6];
} mappings[MAX_IDX] =
{
{
IDX_LUMINANCE,
MAP4(0,0,0,ONE),
MAP1(0)
},
{
IDX_ALPHA,
MAP4(ZERO, ZERO, ZERO, 0),
MAP1(3)
},
{
IDX_INTENSITY,
MAP4(0, 0, 0, 0),
MAP1(0),
},
{
IDX_LUMINANCE_ALPHA,
MAP4(0,0,0,1),
MAP2(0,3)
},
{
IDX_RGB,
MAP4(0,1,2,ONE),
MAP3(0,1,2)
},
{
IDX_RGBA,
MAP4(0,1,2,3),
MAP4(0,1,2,3),
},
{
IDX_RED,
MAP4(0, ZERO, ZERO, ONE),
MAP1(0),
},
{
IDX_GREEN,
MAP4(ZERO, 0, ZERO, ONE),
MAP1(1),
},
{
IDX_BLUE,
MAP4(ZERO, ZERO, 0, ONE),
MAP1(2),
},
{
IDX_BGR,
MAP4(2,1,0,ONE),
MAP3(2,1,0)
},
{
IDX_BGRA,
MAP4(2,1,0,3),
MAP4(2,1,0,3)
},
{
IDX_ABGR,
MAP4(3,2,1,0),
MAP4(3,2,1,0)
},
};
/**
* Convert a GL image format enum to an IDX_* value (see above).
*/
static int
get_map_idx(GLenum value)
{
switch (value) {
case GL_LUMINANCE: return IDX_LUMINANCE;
case GL_ALPHA: return IDX_ALPHA;
case GL_INTENSITY: return IDX_INTENSITY;
case GL_LUMINANCE_ALPHA: return IDX_LUMINANCE_ALPHA;
case GL_RGB: return IDX_RGB;
case GL_RGBA: return IDX_RGBA;
case GL_RED: return IDX_RED;
case GL_GREEN: return IDX_GREEN;
case GL_BLUE: return IDX_BLUE;
case GL_BGR: return IDX_BGR;
case GL_BGRA: return IDX_BGRA;
case GL_ABGR_EXT: return IDX_ABGR;
default:
_mesa_problem(NULL, "Unexpected inFormat");
return 0;
}
}
/**
* When promoting texture formats (see below) we need to compute the
* mapping of dest components back to source components.
* This function does that.
* \param logicalBaseFormat the logical format of the texture
* \param textureBaseFormat the final texture format
* \return map[4] the four mapping values
* \param inFormat the incoming format of the texture
* \param outFormat the final texture format
* \return map[6] a full 6-component map
*/
static void
compute_component_mapping(GLenum logicalBaseFormat, GLenum textureBaseFormat,
GLubyte map[6])
compute_component_mapping(GLenum inFormat, GLenum outFormat,
GLubyte *map)
{
map[ZERO] = ZERO;
map[ONE] = ONE;
const int inFmt = get_map_idx(inFormat);
const int outFmt = get_map_idx(outFormat);
const GLubyte *in2rgba = mappings[inFmt].to_rgba;
const GLubyte *rgba2out = mappings[outFmt].from_rgba;
int i;
for (i = 0; i < 4; i++)
map[i] = in2rgba[rgba2out[i]];
/* compute mapping from dest components back to src components */
switch (textureBaseFormat) {
case GL_RGB:
case GL_RGBA:
switch (logicalBaseFormat) {
case GL_LUMINANCE:
map[0] = map[1] = map[2] = 0;
if (textureBaseFormat == GL_RGBA)
map[3] = ONE;
break;
case GL_ALPHA:
ASSERT(textureBaseFormat == GL_RGBA);
map[0] = map[1] = map[2] = ZERO;
map[3] = 0;
break;
case GL_INTENSITY:
map[0] = map[1] = map[2] = 0;
if (textureBaseFormat == GL_RGBA)
map[3] = 0;
break;
case GL_LUMINANCE_ALPHA:
ASSERT(textureBaseFormat == GL_RGBA);
map[0] = map[1] = map[2] = 0;
map[3] = 1;
break;
case GL_RGB:
ASSERT(textureBaseFormat == GL_RGBA);
map[0] = 0;
map[1] = 1;
map[2] = 2;
map[3] = ONE;
break;
case GL_RGBA:
ASSERT(textureBaseFormat == GL_RGBA);
map[0] = 0;
map[1] = 1;
map[2] = 2;
map[3] = 3;
break;
default:
_mesa_problem(NULL, "Unexpected logicalBaseFormat");
map[0] = map[1] = map[2] = map[3] = 0;
}
break;
case GL_LUMINANCE_ALPHA:
switch (logicalBaseFormat) {
case GL_LUMINANCE:
map[0] = 0;
map[1] = ONE;
break;
case GL_ALPHA:
map[0] = ZERO;
map[1] = 0;
break;
case GL_INTENSITY:
map[0] = 0;
map[1] = 0;
break;
default:
_mesa_problem(NULL, "Unexpected logicalBaseFormat");
map[0] = map[1] = 0;
}
break;
default:
_mesa_problem(NULL, "Unexpected logicalBaseFormat");
map[0] = map[1] = 0;
break;
}
map[ZERO] = ZERO;
map[ONE] = ONE;
/*
_mesa_printf("from %x/%s to %x/%s map %d %d %d %d %d %d\n",
inFormat, _mesa_lookup_enum_by_nr(inFormat),
outFormat, _mesa_lookup_enum_by_nr(outFormat),
map[0],
map[1],
map[2],
map[3],
map[4],
map[5]);
*/
}
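As a concrete trace of the two-table composition above, this standalone sketch works through one hypothetical conversion, GL_BGRA source data stored into a GL_LUMINANCE_ALPHA base format; the two arrays are copied from the corresponding mappings[] entries (ZERO = 4, ONE = 5):

#include <stdio.h>

enum { ZERO = 4, ONE = 5 };

int main(void)
{
   /* mappings[IDX_BGRA].to_rgba: where each RGBA component sits in a
    * BGRA source pixel.
    */
   const unsigned char bgra_to_rgba[6] = { 2, 1, 0, 3, ZERO, ONE };
   /* mappings[IDX_LUMINANCE_ALPHA].from_rgba: which RGBA component feeds
    * each destination component.
    */
   const unsigned char la_from_rgba[6] = { 0, 3, ZERO, ZERO, ZERO, ONE };
   unsigned char map[6];
   int i;

   for (i = 0; i < 4; i++)
      map[i] = bgra_to_rgba[la_from_rgba[i]];
   map[ZERO] = ZERO;
   map[ONE] = ONE;

   /* Prints "2 3 4 4 4 5": luminance comes from source red (index 2),
    * alpha from source alpha (index 3), the rest are the 0/1 constants.
    */
   for (i = 0; i < 6; i++)
      printf("%d ", map[i]);
   printf("\n");
   return 0;
}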
@@ -552,14 +651,16 @@ _mesa_make_temp_chan_image(GLcontext *ctx, GLuint dims,
* \param dstComponents number of color components in destination pixels
* \param src source pixels
* \param srcComponents number of color components in source pixels
* \param map the swizzle mapping
* \param map the swizzle mapping. map[X] says where to find the X component
* in the source image's pixels. For example, if the source image
* is GL_BGRA and X = red, map[0] yields 2.
* \param count number of pixels to copy/swizzle.
*/
static void
swizzle_copy(GLubyte *dst, GLuint dstComponents, const GLubyte *src,
GLuint srcComponents, const GLubyte *map, GLuint count)
{
GLubyte tmp[8];
GLubyte tmp[6];
GLuint i;
tmp[ZERO] = 0x0;
@@ -596,10 +697,60 @@ swizzle_copy(GLubyte *dst, GLuint dstComponents, const GLubyte *src,
dst += 2;
}
break;
case 1:
for (i = 0; i < count; i++) {
COPY_4UBV(tmp, src);
src += srcComponents;
dst[0] = tmp[map[0]];
dst += 1;
}
break;
}
}
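To see the map[] convention from the comment above in action, here is a standalone one-pixel trace of the same inner-loop logic: a hypothetical GL_BGRA byte pixel swizzled into a 3-component RGB destination with map = {2, 1, 0, ONE}:

#include <stdio.h>
#include <string.h>

enum { ZERO = 4, ONE = 5 };

int main(void)
{
   const unsigned char src[4] = { 0x10, 0x20, 0x30, 0x40 };  /* B, G, R, A */
   const unsigned char map[4] = { 2, 1, 0, ONE };
   unsigned char tmp[6], dst[3];
   int j;

   memcpy(tmp, src, 4);      /* stands in for COPY_4UBV(tmp, src) */
   tmp[ZERO] = 0x00;         /* constant 0 slot */
   tmp[ONE] = 0xff;          /* constant 1 (opaque) slot */

   for (j = 0; j < 3; j++)   /* dstComponents == 3 */
      dst[j] = tmp[map[j]];

   /* Prints "30 20 10": red, green, blue pulled out of the BGRA source. */
   printf("%02x %02x %02x\n", dst[0], dst[1], dst[2]);
   return 0;
}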
static const GLubyte map_identity[6] = { 0, 1, 2, 3, ZERO, ONE };
static const GLubyte map_3210[6] = { 3, 2, 1, 0, ZERO, ONE };
/* Deal with the _REV input types:
*/
static const GLubyte *
type_mapping( GLenum srcType )
{
switch (srcType) {
case GL_UNSIGNED_BYTE:
return map_identity;
case GL_UNSIGNED_INT_8_8_8_8:
return map_3210;
case GL_UNSIGNED_INT_8_8_8_8_REV:
return map_identity;
default:
return NULL;
}
}
/* Mapping required when the pixel unpacking state requests byte swapping:
 */
static const GLubyte *
byteswap_mapping( GLboolean swapBytes,
GLenum srcType )
{
if (!swapBytes)
return map_identity;
switch (srcType) {
case GL_UNSIGNED_BYTE:
return map_identity;
case GL_UNSIGNED_INT_8_8_8_8:
case GL_UNSIGNED_INT_8_8_8_8_REV:
return map_3210;
default:
return NULL;
}
}
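The 3-2-1-0 entry for GL_UNSIGNED_INT_8_8_8_8 exists because the swizzle path reads packed pixels back as individual GLubytes; on a little-endian machine that reverses the component order. A small standalone demo with a hypothetical pixel value:

#include <stdio.h>

int main(void)
{
   const unsigned int packed = 0x11223344u;  /* components 11, 22, 33, 44 */
   const unsigned char *bytes = (const unsigned char *) &packed;

   /* On a little-endian machine this prints "44 33 22 11": byte i in
    * memory holds packed component 3 - i, which is exactly the map_3210
    * table above.
    */
   printf("%02x %02x %02x %02x\n", bytes[0], bytes[1], bytes[2], bytes[3]);
   return 0;
}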
/**
* Transfer a GLubyte texture image with component swizzling.
*/
@@ -607,7 +758,12 @@ static void
_mesa_swizzle_ubyte_image(GLcontext *ctx,
GLuint dimensions,
GLenum srcFormat,
const GLubyte *dstmap, GLint dstComponents,
GLenum srcType,
GLenum baseInternalFormat,
const GLubyte *rgba2dst,
GLuint dstComponents,
GLvoid *dstAddr,
GLint dstXoffset, GLint dstYoffset, GLint dstZoffset,
@@ -619,9 +775,9 @@ _mesa_swizzle_ubyte_image(GLcontext *ctx,
const struct gl_pixelstore_attrib *srcPacking )
{
GLint srcComponents = _mesa_components_in_format(srcFormat);
GLubyte srcmap[6], map[4];
const GLubyte *srctype2ubyte, *swap;
GLubyte map[4], src2base[6], base2rgba[6];
GLint i;
const GLint srcRowStride =
_mesa_image_row_stride(srcPacking, srcWidth,
srcFormat, GL_UNSIGNED_BYTE);
@@ -635,10 +791,20 @@ _mesa_swizzle_ubyte_image(GLcontext *ctx,
(void) ctx;
compute_component_mapping(srcFormat, GL_RGBA, srcmap);
/* Translate from src->baseInternal->GL_RGBA->dst. This will
* correctly deal with RGBA->RGB->RGBA conversions where the final
* A value must be 0xff regardless of the incoming alpha values.
*/
compute_component_mapping(srcFormat, baseInternalFormat, src2base);
compute_component_mapping(baseInternalFormat, GL_RGBA, base2rgba);
swap = byteswap_mapping(srcPacking->SwapBytes, srcType);
srctype2ubyte = type_mapping(srcType);
for (i = 0; i < 4; i++)
map[i] = srcmap[dstmap[i]];
map[i] = srctype2ubyte[swap[src2base[base2rgba[rgba2dst[i]]]]];
/* _mesa_printf("map %d %d %d %d\n", map[0], map[1], map[2], map[3]); */
if (srcRowStride == srcWidth * srcComponents &&
dimensions < 3) {
@@ -817,6 +983,59 @@ _mesa_texstore_rgba(TEXSTORE_PARAMS)
}
}
}
else if (!ctx->_ImageTransferState &&
_mesa_little_endian() &&
CHAN_TYPE == GL_UNSIGNED_BYTE &&
(srcType == GL_UNSIGNED_BYTE ||
srcType == GL_UNSIGNED_INT_8_8_8_8 ||
srcType == GL_UNSIGNED_INT_8_8_8_8_REV) &&
can_swizzle(baseInternalFormat) &&
can_swizzle(srcFormat)) {
const GLubyte *dstmap;
GLuint components;
/* dstmap - how to swizzle from RGBA to dst format:
*/
if (dstFormat == &_mesa_texformat_rgba) {
dstmap = mappings[IDX_RGBA].from_rgba;
components = 4;
}
else if (dstFormat == &_mesa_texformat_rgb) {
dstmap = mappings[IDX_RGB].from_rgba;
components = 3;
}
else if (dstFormat == &_mesa_texformat_alpha) {
dstmap = mappings[IDX_ALPHA].from_rgba;
components = 1;
}
else if (dstFormat == &_mesa_texformat_luminance) {
dstmap = mappings[IDX_LUMINANCE].from_rgba;
components = 1;
}
else if (dstFormat == &_mesa_texformat_luminance_alpha) {
dstmap = mappings[IDX_LUMINANCE_ALPHA].from_rgba;
components = 2;
}
else if (dstFormat == &_mesa_texformat_intensity) {
dstmap = mappings[IDX_INTENSITY].from_rgba;
components = 1;
}
else {
_mesa_problem(ctx, "Unexpected dstFormat in _mesa_texstore_rgba");
return GL_FALSE;
}
_mesa_swizzle_ubyte_image(ctx, dims,
srcFormat,
srcType,
baseInternalFormat,
dstmap, components,
dstAddr, dstXoffset, dstYoffset, dstZoffset,
dstRowStride, dstImageOffsets,
srcWidth, srcHeight, srcDepth, srcAddr,
srcPacking);
}
else {
/* general path */
const GLchan *tempImage = _mesa_make_temp_chan_image(ctx, dims,
@@ -1050,13 +1269,14 @@ _mesa_texstore_rgb565(TEXSTORE_PARAMS)
}
/**
* Store a texture in MESA_FORMAT_RGBA8888 or MESA_FORMAT_RGBA8888_REV.
*/
GLboolean
_mesa_texstore_rgba8888(TEXSTORE_PARAMS)
{
const GLuint ui = 1;
const GLubyte littleEndian = *((const GLubyte *) &ui);
const GLboolean littleEndian = _mesa_little_endian();
(void)littleEndian;
ASSERT(dstFormat == &_mesa_texformat_rgba8888 ||
dstFormat == &_mesa_texformat_rgba8888_rev);
ASSERT(dstFormat->TexelBytes == 4);
@@ -1066,7 +1286,25 @@ _mesa_texstore_rgba8888(TEXSTORE_PARAMS)
dstFormat == &_mesa_texformat_rgba8888 &&
baseInternalFormat == GL_RGBA &&
((srcFormat == GL_RGBA && srcType == GL_UNSIGNED_INT_8_8_8_8) ||
(srcFormat == GL_ABGR_EXT && srcType == GL_UNSIGNED_INT_8_8_8_8_REV))) {
(srcFormat == GL_RGBA && srcType == GL_UNSIGNED_BYTE && !littleEndian) ||
(srcFormat == GL_ABGR_EXT && srcType == GL_UNSIGNED_INT_8_8_8_8_REV) ||
(srcFormat == GL_ABGR_EXT && srcType == GL_UNSIGNED_BYTE && littleEndian))) {
/* simple memcpy path */
memcpy_texture(ctx, dims,
dstFormat, dstAddr, dstXoffset, dstYoffset, dstZoffset,
dstRowStride,
dstImageOffsets,
srcWidth, srcHeight, srcDepth, srcFormat, srcType,
srcAddr, srcPacking);
}
else if (!ctx->_ImageTransferState &&
!srcPacking->SwapBytes &&
dstFormat == &_mesa_texformat_rgba8888_rev &&
baseInternalFormat == GL_RGBA &&
((srcFormat == GL_RGBA && srcType == GL_UNSIGNED_INT_8_8_8_8_REV) ||
(srcFormat == GL_RGBA && srcType == GL_UNSIGNED_BYTE && littleEndian) ||
(srcFormat == GL_ABGR_EXT && srcType == GL_UNSIGNED_INT_8_8_8_8) ||
(srcFormat == GL_ABGR_EXT && srcType == GL_UNSIGNED_BYTE && !littleEndian))) {
/* simple memcpy path */
memcpy_texture(ctx, dims,
dstFormat, dstAddr, dstXoffset, dstYoffset, dstZoffset,
@@ -1075,40 +1313,41 @@ _mesa_texstore_rgba8888(TEXSTORE_PARAMS)
srcWidth, srcHeight, srcDepth, srcFormat, srcType,
srcAddr, srcPacking);
}
#if 0
/* broken? */
else if (!ctx->_ImageTransferState &&
!srcPacking->SwapBytes &&
srcType == GL_UNSIGNED_BYTE &&
dstFormat == &_mesa_texformat_rgba8888 &&
littleEndian &&
/* Three texture formats involved: srcFormat,
* baseInternalFormat and destFormat (GL_RGBA). Only two
* may differ. _mesa_swizzle_ubyte_image can't handle two
* propagations at once correctly. */
(srcFormat == baseInternalFormat ||
baseInternalFormat == GL_RGBA) &&
littleEndian &&
(srcType == GL_UNSIGNED_BYTE ||
srcType == GL_UNSIGNED_INT_8_8_8_8 ||
srcType == GL_UNSIGNED_INT_8_8_8_8_REV) &&
can_swizzle(baseInternalFormat) &&
can_swizzle(srcFormat)) {
GLubyte dstmap[4];
/* dstmap - how to swizzle from GL_RGBA to dst format:
*
* FIXME - add !litteEndian and _rev varients:
/* dstmap - how to swizzle from RGBA to dst format:
*/
dstmap[3] = 0;
dstmap[2] = 1;
dstmap[1] = 2;
dstmap[0] = 3;
if (dstFormat == &_mesa_texformat_rgba8888) {
dstmap[3] = 0;
dstmap[2] = 1;
dstmap[1] = 2;
dstmap[0] = 3;
}
else {
dstmap[3] = 3;
dstmap[2] = 2;
dstmap[1] = 1;
dstmap[0] = 0;
}
_mesa_swizzle_ubyte_image(ctx, dims,
srcFormat,
srcType,
baseInternalFormat,
dstmap, 4,
dstAddr, dstXoffset, dstYoffset, dstZoffset,
dstRowStride, dstImageStride,
dstRowStride, dstImageOffsets,
srcWidth, srcHeight, srcDepth, srcAddr,
srcPacking);
}
#endif
else {
/* general path */
const GLchan *tempImage = _mesa_make_temp_chan_image(ctx, dims,
@@ -1159,8 +1398,7 @@ _mesa_texstore_rgba8888(TEXSTORE_PARAMS)
GLboolean
_mesa_texstore_argb8888(TEXSTORE_PARAMS)
{
const GLuint ui = 1;
const GLubyte littleEndian = *((const GLubyte *) &ui);
const GLboolean littleEndian = _mesa_little_endian();
ASSERT(dstFormat == &_mesa_texformat_argb8888 ||
dstFormat == &_mesa_texformat_argb8888_rev);
@@ -1200,6 +1438,8 @@ _mesa_texstore_argb8888(TEXSTORE_PARAMS)
!srcPacking->SwapBytes &&
dstFormat == &_mesa_texformat_argb8888 &&
srcFormat == GL_RGB &&
(baseInternalFormat == GL_RGBA ||
baseInternalFormat == GL_RGB) &&
srcType == GL_UNSIGNED_BYTE) {
int img, row, col;
@@ -1228,6 +1468,7 @@ _mesa_texstore_argb8888(TEXSTORE_PARAMS)
!srcPacking->SwapBytes &&
dstFormat == &_mesa_texformat_argb8888 &&
srcFormat == GL_RGBA &&
baseInternalFormat == GL_RGBA &&
(srcType == GL_UNSIGNED_BYTE && littleEndian)) {
GLint img, row, col;
/* For some reason, streaming copies to write-combined regions
@@ -1262,6 +1503,7 @@ _mesa_texstore_argb8888(TEXSTORE_PARAMS)
!srcPacking->SwapBytes &&
dstFormat == &_mesa_texformat_argb8888 &&
srcFormat == GL_RGBA &&
baseInternalFormat == GL_RGBA &&
srcType == GL_UNSIGNED_BYTE) {
GLint img, row, col;
@@ -1287,29 +1529,36 @@ _mesa_texstore_argb8888(TEXSTORE_PARAMS)
}
}
else if (!ctx->_ImageTransferState &&
!srcPacking->SwapBytes &&
dstFormat == &_mesa_texformat_argb8888 &&
srcType == GL_UNSIGNED_BYTE &&
littleEndian &&
/* Three texture formats involved: srcFormat,
* baseInternalFormat and destFormat (GL_RGBA). Only two
* may differ. _mesa_swizzle_ubyte_image can't handle two
* propagations at once correctly. */
(srcFormat == baseInternalFormat ||
baseInternalFormat == GL_RGBA) &&
(srcType == GL_UNSIGNED_BYTE ||
srcType == GL_UNSIGNED_INT_8_8_8_8 ||
srcType == GL_UNSIGNED_INT_8_8_8_8_REV) &&
can_swizzle(baseInternalFormat) &&
can_swizzle(srcFormat)) {
GLubyte dstmap[4];
/* dstmap - how to swizzle from GL_RGBA to dst format:
/* dstmap - how to swizzle from RGBA to dst format:
*/
dstmap[3] = 3; /* alpha */
dstmap[2] = 0; /* red */
dstmap[1] = 1; /* green */
dstmap[0] = 2; /* blue */
if (dstFormat == &_mesa_texformat_argb8888) {
dstmap[3] = 3; /* alpha */
dstmap[2] = 0; /* red */
dstmap[1] = 1; /* green */
dstmap[0] = 2; /* blue */
}
else {
assert(dstFormat == &_mesa_texformat_argb8888_rev);
dstmap[3] = 2;
dstmap[2] = 1;
dstmap[1] = 0;
dstmap[0] = 3;
}
_mesa_swizzle_ubyte_image(ctx, dims,
srcFormat,
srcType,
baseInternalFormat,
dstmap, 4,
dstAddr, dstXoffset, dstYoffset, dstZoffset,
dstRowStride,
@@ -1367,8 +1616,7 @@ _mesa_texstore_argb8888(TEXSTORE_PARAMS)
GLboolean
_mesa_texstore_rgb888(TEXSTORE_PARAMS)
{
const GLuint ui = 1;
const GLubyte littleEndian = *((const GLubyte *) &ui);
const GLboolean littleEndian = _mesa_little_endian();
ASSERT(dstFormat == &_mesa_texformat_rgb888);
ASSERT(dstFormat->TexelBytes == 3);
@@ -1413,6 +1661,31 @@ _mesa_texstore_rgb888(TEXSTORE_PARAMS)
}
}
}
else if (!ctx->_ImageTransferState &&
littleEndian &&
srcType == GL_UNSIGNED_BYTE &&
can_swizzle(baseInternalFormat) &&
can_swizzle(srcFormat)) {
GLubyte dstmap[4];
/* dstmap - how to swizzle from RGBA to dst format:
*/
dstmap[0] = 2;
dstmap[1] = 1;
dstmap[2] = 0;
dstmap[3] = ONE; /* ? */
_mesa_swizzle_ubyte_image(ctx, dims,
srcFormat,
srcType,
baseInternalFormat,
dstmap, 3,
dstAddr, dstXoffset, dstYoffset, dstZoffset,
dstRowStride, dstImageOffsets,
srcWidth, srcHeight, srcDepth, srcAddr,
srcPacking);
}
else {
/* general path */
const GLchan *tempImage = _mesa_make_temp_chan_image(ctx, dims,
@@ -1469,8 +1742,7 @@ _mesa_texstore_rgb888(TEXSTORE_PARAMS)
GLboolean
_mesa_texstore_bgr888(TEXSTORE_PARAMS)
{
const GLuint ui = 1;
const GLubyte littleEndian = *((const GLubyte *) &ui);
const GLboolean littleEndian = _mesa_little_endian();
ASSERT(dstFormat == &_mesa_texformat_bgr888);
ASSERT(dstFormat->TexelBytes == 3);
@@ -1515,6 +1787,31 @@ _mesa_texstore_bgr888(TEXSTORE_PARAMS)
}
}
}
else if (!ctx->_ImageTransferState &&
littleEndian &&
srcType == GL_UNSIGNED_BYTE &&
can_swizzle(baseInternalFormat) &&
can_swizzle(srcFormat)) {
GLubyte dstmap[4];
/* dstmap - how to swizzle from RGBA to dst format:
*/
dstmap[0] = 0;
dstmap[1] = 1;
dstmap[2] = 2;
dstmap[3] = ONE; /* ? */
_mesa_swizzle_ubyte_image(ctx, dims,
srcFormat,
srcType,
baseInternalFormat,
dstmap, 3,
dstAddr, dstXoffset, dstYoffset, dstZoffset,
dstRowStride, dstImageOffsets,
srcWidth, srcHeight, srcDepth, srcAddr,
srcPacking);
}
else {
/* general path */
const GLchan *tempImage = _mesa_make_temp_chan_image(ctx, dims,
@@ -1689,8 +1986,7 @@ _mesa_texstore_argb1555(TEXSTORE_PARAMS)
GLboolean
_mesa_texstore_al88(TEXSTORE_PARAMS)
{
const GLuint ui = 1;
const GLubyte littleEndian = *((const GLubyte *) &ui);
const GLboolean littleEndian = _mesa_little_endian();
ASSERT(dstFormat == &_mesa_texformat_al88 ||
dstFormat == &_mesa_texformat_al88_rev);
@@ -1711,6 +2007,37 @@ _mesa_texstore_al88(TEXSTORE_PARAMS)
srcWidth, srcHeight, srcDepth, srcFormat, srcType,
srcAddr, srcPacking);
}
else if (!ctx->_ImageTransferState &&
littleEndian &&
srcType == GL_UNSIGNED_BYTE &&
can_swizzle(baseInternalFormat) &&
can_swizzle(srcFormat)) {
GLubyte dstmap[4];
/* dstmap - how to swizzle from RGBA to dst format:
*/
if (dstFormat == &_mesa_texformat_al88) {
dstmap[0] = 0;
dstmap[1] = 3;
}
else {
dstmap[0] = 3;
dstmap[1] = 0;
}
dstmap[2] = ZERO; /* ? */
dstmap[3] = ONE; /* ? */
_mesa_swizzle_ubyte_image(ctx, dims,
srcFormat,
srcType,
baseInternalFormat,
dstmap, 2,
dstAddr, dstXoffset, dstYoffset, dstZoffset,
dstRowStride, dstImageOffsets,
srcWidth, srcHeight, srcDepth, srcAddr,
srcPacking);
}
else {
/* general path */
const GLchan *tempImage = _mesa_make_temp_chan_image(ctx, dims,
@@ -1831,6 +2158,36 @@ _mesa_texstore_a8(TEXSTORE_PARAMS)
srcWidth, srcHeight, srcDepth, srcFormat, srcType,
srcAddr, srcPacking);
}
else if (!ctx->_ImageTransferState &&
_mesa_little_endian() &&
srcType == GL_UNSIGNED_BYTE &&
can_swizzle(baseInternalFormat) &&
can_swizzle(srcFormat)) {
GLubyte dstmap[4];
/* dstmap - how to swizzle from RGBA to dst format:
*/
if (dstFormat == &_mesa_texformat_a8) {
dstmap[0] = 3;
}
else {
dstmap[0] = 0;
}
dstmap[1] = ZERO; /* ? */
dstmap[2] = ZERO; /* ? */
dstmap[3] = ONE; /* ? */
_mesa_swizzle_ubyte_image(ctx, dims,
srcFormat,
srcType,
baseInternalFormat,
dstmap, 1,
dstAddr, dstXoffset, dstYoffset, dstZoffset,
dstRowStride, dstImageOffsets,
srcWidth, srcHeight, srcDepth, srcAddr,
srcPacking);
}
else {
/* general path */
const GLchan *tempImage = _mesa_make_temp_chan_image(ctx, dims,
@@ -1912,8 +2269,7 @@ _mesa_texstore_ci8(TEXSTORE_PARAMS)
GLboolean
_mesa_texstore_ycbcr(TEXSTORE_PARAMS)
{
const GLuint ui = 1;
const GLubyte littleEndian = *((const GLubyte *) &ui);
const GLboolean littleEndian = _mesa_little_endian();
(void) ctx; (void) dims; (void) baseInternalFormat;
ASSERT((dstFormat == &_mesa_texformat_ycbcr) ||
@@ -2164,6 +2520,113 @@ _mesa_texstore_rgba_float16(TEXSTORE_PARAMS)
}
#if FEATURE_EXT_texture_sRGB
GLboolean
_mesa_texstore_srgb8(TEXSTORE_PARAMS)
{
const GLboolean littleEndian = _mesa_little_endian();
const struct gl_texture_format *newDstFormat;
StoreTexImageFunc store;
GLboolean k;
ASSERT(dstFormat == &_mesa_texformat_srgb8);
/* reuse normal rgb texstore code */
if (littleEndian) {
newDstFormat = &_mesa_texformat_bgr888;
store = _mesa_texstore_bgr888;
}
else {
newDstFormat = &_mesa_texformat_rgb888;
store = _mesa_texstore_rgb888;
}
k = store(ctx, dims, baseInternalFormat,
newDstFormat, dstAddr,
dstXoffset, dstYoffset, dstZoffset,
dstRowStride, dstImageOffsets,
srcWidth, srcHeight, srcDepth,
srcFormat, srcType,
srcAddr, srcPacking);
return k;
}
GLboolean
_mesa_texstore_srgba8(TEXSTORE_PARAMS)
{
const GLboolean littleEndian = _mesa_little_endian();
const struct gl_texture_format *newDstFormat;
GLboolean k;
ASSERT(dstFormat == &_mesa_texformat_srgba8);
/* reuse normal rgba texstore code */
if (littleEndian)
newDstFormat = &_mesa_texformat_rgba8888_rev;
else
newDstFormat = &_mesa_texformat_rgba8888;
k = _mesa_texstore_rgba8888(ctx, dims, baseInternalFormat,
newDstFormat, dstAddr,
dstXoffset, dstYoffset, dstZoffset,
dstRowStride, dstImageOffsets,
srcWidth, srcHeight, srcDepth,
srcFormat, srcType,
srcAddr, srcPacking);
return k;
}
GLboolean
_mesa_texstore_sl8(TEXSTORE_PARAMS)
{
const struct gl_texture_format *newDstFormat;
GLboolean k;
ASSERT(dstFormat == &_mesa_texformat_sl8);
newDstFormat = &_mesa_texformat_l8;
/* _mesa_texstore_a8 handles luminance8 too */
k = _mesa_texstore_a8(ctx, dims, baseInternalFormat,
newDstFormat, dstAddr,
dstXoffset, dstYoffset, dstZoffset,
dstRowStride, dstImageOffsets,
srcWidth, srcHeight, srcDepth,
srcFormat, srcType,
srcAddr, srcPacking);
return k;
}
GLboolean
_mesa_texstore_sla8(TEXSTORE_PARAMS)
{
const GLboolean littleEndian = _mesa_little_endian();
const struct gl_texture_format *newDstFormat;
GLboolean k;
ASSERT(dstFormat == &_mesa_texformat_sla8);
/* reuse normal luminance/alpha texstore code */
if (littleEndian)
newDstFormat = &_mesa_texformat_al88;
else
newDstFormat = &_mesa_texformat_al88_rev;
k = _mesa_texstore_al88(ctx, dims, baseInternalFormat,
newDstFormat, dstAddr,
dstXoffset, dstYoffset, dstZoffset,
dstRowStride, dstImageOffsets,
srcWidth, srcHeight, srcDepth,
srcFormat, srcType,
srcAddr, srcPacking);
return k;
}
#endif /* FEATURE_EXT_texture_sRGB */
/**
* Check if an unpack PBO is active prior to fetching a texture image.

@@ -98,7 +98,7 @@ _swrast_update_rasterflags( GLcontext *ctx )
rasterMask |= MULTI_DRAW_BIT; /* all color index bits disabled */
}
if (ctx->FragmentProgram._Active) {
if (ctx->FragmentProgram._Enabled) {
rasterMask |= FRAGPROG_BIT;
}
@@ -206,7 +206,7 @@ _swrast_update_fog_state( GLcontext *ctx )
/* determine if fog is needed, and if so, which fog mode */
swrast->_FogEnabled = GL_FALSE;
if (ctx->FragmentProgram._Active) {
if (ctx->FragmentProgram._Enabled) {
if (ctx->FragmentProgram._Current->Base.Target==GL_FRAGMENT_PROGRAM_ARB) {
const struct gl_fragment_program *fp
= ctx->FragmentProgram._Current;
@@ -230,7 +230,7 @@ _swrast_update_fog_state( GLcontext *ctx )
static void
_swrast_update_fragment_program( GLcontext *ctx )
{
if (ctx->FragmentProgram._Active) {
if (ctx->FragmentProgram._Enabled) {
const struct gl_fragment_program *fp = ctx->FragmentProgram._Current;
_mesa_load_state_parameters(ctx, fp->Base.Parameters);
}
@@ -299,7 +299,7 @@ _swrast_validate_triangle( GLcontext *ctx,
if (ctx->Texture._EnabledUnits == 0
&& NEED_SECONDARY_COLOR(ctx)
&& !ctx->FragmentProgram._Active) {
&& !ctx->FragmentProgram._Enabled) {
/* separate specular color, but no texture */
swrast->SpecTriangle = swrast->Triangle;
swrast->Triangle = _swrast_add_spec_terms_triangle;
@@ -322,7 +322,7 @@ _swrast_validate_line( GLcontext *ctx, const SWvertex *v0, const SWvertex *v1 )
if (ctx->Texture._EnabledUnits == 0
&& NEED_SECONDARY_COLOR(ctx)
&& !ctx->FragmentProgram._Active) {
&& !ctx->FragmentProgram._Enabled) {
swrast->SpecLine = swrast->Line;
swrast->Line = _swrast_add_spec_terms_line;
}
@@ -345,7 +345,7 @@ _swrast_validate_point( GLcontext *ctx, const SWvertex *v0 )
if (ctx->Texture._EnabledUnits == 0
&& NEED_SECONDARY_COLOR(ctx)
&& !ctx->FragmentProgram._Active) {
&& !ctx->FragmentProgram._Enabled) {
swrast->SpecPoint = swrast->Point;
swrast->Point = _swrast_add_spec_terms_point;
}

@@ -129,7 +129,7 @@ _swrast_span_default_texcoords( GLcontext *ctx, struct sw_span *span )
GLuint i;
for (i = 0; i < ctx->Const.MaxTextureCoordUnits; i++) {
const GLfloat *tc = ctx->Current.RasterTexCoords[i];
if (ctx->FragmentProgram._Active || ctx->ATIFragmentShader._Enabled) {
if (ctx->FragmentProgram._Enabled || ctx->ATIFragmentShader._Enabled) {
COPY_4V(span->tex[i], tc);
}
else if (tc[3] > 0.0F) {
@@ -410,7 +410,7 @@ interpolate_texcoords(GLcontext *ctx, struct sw_span *span)
if (obj) {
const struct gl_texture_image *img = obj->Image[0][obj->BaseLevel];
needLambda = (obj->MinFilter != obj->MagFilter)
|| ctx->FragmentProgram._Active;
|| ctx->FragmentProgram._Enabled;
texW = img->WidthScale;
texH = img->HeightScale;
}
@@ -435,7 +435,7 @@ interpolate_texcoords(GLcontext *ctx, struct sw_span *span)
GLfloat r = span->tex[u][2];
GLfloat q = span->tex[u][3];
GLuint i;
if (ctx->FragmentProgram._Active || ctx->ATIFragmentShader._Enabled ||
if (ctx->FragmentProgram._Enabled || ctx->ATIFragmentShader._Enabled ||
ctx->ShaderObjects._FragmentShaderPresent) {
/* do perspective correction but don't divide s, t, r by q */
const GLfloat dwdx = span->dwdx;
@@ -487,7 +487,7 @@ interpolate_texcoords(GLcontext *ctx, struct sw_span *span)
GLfloat r = span->tex[u][2];
GLfloat q = span->tex[u][3];
GLuint i;
if (ctx->FragmentProgram._Active || ctx->ATIFragmentShader._Enabled ||
if (ctx->FragmentProgram._Enabled || ctx->ATIFragmentShader._Enabled ||
ctx->ShaderObjects._FragmentShaderPresent) {
/* do perspective correction but don't divide s, t, r by q */
const GLfloat dwdx = span->dwdx;
@@ -546,7 +546,7 @@ interpolate_texcoords(GLcontext *ctx, struct sw_span *span)
if (obj) {
const struct gl_texture_image *img = obj->Image[0][obj->BaseLevel];
needLambda = (obj->MinFilter != obj->MagFilter)
|| ctx->FragmentProgram._Active;
|| ctx->FragmentProgram._Enabled;
texW = (GLfloat) img->WidthScale;
texH = (GLfloat) img->HeightScale;
}
@@ -571,7 +571,7 @@ interpolate_texcoords(GLcontext *ctx, struct sw_span *span)
GLfloat r = span->tex[0][2];
GLfloat q = span->tex[0][3];
GLuint i;
if (ctx->FragmentProgram._Active || ctx->ATIFragmentShader._Enabled ||
if (ctx->FragmentProgram._Enabled || ctx->ATIFragmentShader._Enabled ||
ctx->ShaderObjects._FragmentShaderPresent) {
/* do perspective correction but don't divide s, t, r by q */
const GLfloat dwdx = span->dwdx;
@@ -623,7 +623,7 @@ interpolate_texcoords(GLcontext *ctx, struct sw_span *span)
GLfloat r = span->tex[0][2];
GLfloat q = span->tex[0][3];
GLuint i;
if (ctx->FragmentProgram._Active || ctx->ATIFragmentShader._Enabled ||
if (ctx->FragmentProgram._Enabled || ctx->ATIFragmentShader._Enabled ||
ctx->ShaderObjects._FragmentShaderPresent) {
/* do perspective correction but don't divide s, t, r by q */
const GLfloat dwdx = span->dwdx;
@@ -1120,7 +1120,7 @@ _swrast_write_rgba_span( GLcontext *ctx, struct sw_span *span)
const GLbitfield origInterpMask = span->interpMask;
const GLbitfield origArrayMask = span->arrayMask;
const GLboolean deferredTexture = !(ctx->Color.AlphaEnabled ||
ctx->FragmentProgram._Active ||
ctx->FragmentProgram._Enabled ||
ctx->ShaderObjects._FragmentShaderPresent);
ASSERT(span->primitive == GL_POINT || span->primitive == GL_LINE ||
@@ -1197,15 +1197,12 @@ _swrast_write_rgba_span( GLcontext *ctx, struct sw_span *span)
interpolate_fog(ctx, span);
/* Compute fragment colors with fragment program or texture lookups */
#if FEATURE_ARB_fragment_shader
if (ctx->ShaderObjects._FragmentShaderPresent) {
if (span->interpMask & SPAN_Z)
_swrast_span_interpolate_z (ctx, span);
_swrast_exec_arbshader (ctx, span);
}
else
#endif
if (ctx->FragmentProgram._Active) {
else if (ctx->FragmentProgram._Enabled) {
/* frag prog may need Z values */
if (span->interpMask & SPAN_Z)
_swrast_span_interpolate_z(ctx, span);
@@ -1284,15 +1281,12 @@ _swrast_write_rgba_span( GLcontext *ctx, struct sw_span *span)
if (span->interpMask & SPAN_FOG)
interpolate_fog(ctx, span);
#if FEATURE_ARB_fragment_shader
if (ctx->ShaderObjects._FragmentShaderPresent) {
if (span->interpMask & SPAN_Z)
_swrast_span_interpolate_z (ctx, span);
_swrast_exec_arbshader (ctx, span);
}
else
#endif
if (ctx->FragmentProgram._Active)
else if (ctx->FragmentProgram._Enabled)
_swrast_exec_fragment_program( ctx, span );
else if (ctx->ATIFragmentShader._Enabled)
_swrast_exec_fragment_shader( ctx, span );

@@ -1072,7 +1072,7 @@ _swrast_choose_triangle( GLcontext *ctx )
}
}
if (ctx->Texture._EnabledCoordUnits || ctx->FragmentProgram._Active ||
if (ctx->Texture._EnabledCoordUnits || ctx->FragmentProgram._Enabled ||
ctx->ATIFragmentShader._Enabled || ctx->ShaderObjects._FragmentShaderPresent) {
/* Ugh, we do a _lot_ of tests to pick the best textured tri func */
const struct gl_texture_object *texObj2D;
@@ -1088,7 +1088,7 @@ _swrast_choose_triangle( GLcontext *ctx )
/* First see if we can use an optimized 2-D texture function */
if (ctx->Texture._EnabledCoordUnits == 0x1
&& !ctx->FragmentProgram._Active
&& !ctx->FragmentProgram._Enabled
&& !ctx->ATIFragmentShader._Enabled
&& !ctx->ShaderObjects._FragmentShaderPresent
&& ctx->Texture.Unit[0]._ReallyEnabled == TEXTURE_2D_BIT