Compare commits
mesa-10.3- ... mesa-10.3-

32 Commits:
4e1ca4a190, 06f1f1ea81, e842a02df3, 96bca3617c, c221e96a13, 640ddefd96, 7cd0fa023e, cd94c64421,
e9923b2194, 2e56334a2a, ead7f72a2c, 139d176f54, 941b2ae35f, 4b38838ef4, 3fdd08c9b4, f8ff31e528,
ab53a29892, 4073e96a3b, 4eed41b967, c546523b4d, 282a3098e6, ec4a333c37, 35bb6b058c, 24e226d0f5,
39ad62ce51, f2b2309281, a4b3c4e3ec, 01dda9d0bd, 49cd42aab1, eaa9e14ce5, 58be4ab741, 447785af9d
@@ -64,7 +64,6 @@ IGNORE_FILES = \

parsers: configure
$(MAKE) -C src/glsl glsl_parser.cpp glsl_parser.h glsl_lexer.cpp glcpp/glcpp-lex.c glcpp/glcpp-parse.c glcpp/glcpp-parse.h
$(MAKE) -C src/mesa ../../src/mesa/program/lex.yy.c ../../src/mesa/program/program_parse.tab.c ../../src/mesa/program/program_parse.tab.h

# Everything for new a Mesa release:

ARCHIVES = $(PACKAGE_NAME).tar.gz \
configure.ac
@@ -355,6 +355,24 @@ AC_LINK_IFELSE(
LDFLAGS=$save_LDFLAGS
AM_CONDITIONAL(HAVE_LD_VERSION_SCRIPT, test "$have_ld_version_script" = "yes")

dnl
dnl Check if linker supports dynamic list files
dnl
AC_MSG_CHECKING([if the linker supports --dynamic-list])
save_LDFLAGS=$LDFLAGS
LDFLAGS="$LDFLAGS -Wl,--dynamic-list=conftest.dyn"
cat > conftest.dyn <<EOF
{
radeon_drm_winsys_create;
};
EOF
AC_LINK_IFELSE(
[AC_LANG_SOURCE([int main() { return 0;}])],
[have_ld_dynamic_list=yes;AC_MSG_RESULT(yes)],
[have_ld_dynamic_list=no; AC_MSG_RESULT(no)])
LDFLAGS=$save_LDFLAGS
AM_CONDITIONAL(HAVE_LD_DYNAMIC_LIST, test "$have_ld_dynamic_list" = "yes")

dnl
dnl compatibility symlinks
dnl
@@ -98,6 +98,7 @@ fd2_context_create(struct pipe_screen *pscreen, void *priv)
pctx = &fd2_ctx->base.base;

fd2_ctx->base.dev = fd_device_ref(screen->dev);
fd2_ctx->base.screen = fd_screen(pscreen);

pctx->destroy = fd2_context_destroy;
pctx->create_blend_state = fd2_blend_state_create;
@@ -215,14 +215,19 @@ emit_textures(struct fd_ringbuffer *ring,
OUT_RING(ring, CP_LOAD_STATE_1_STATE_TYPE(ST_CONSTANTS) |
CP_LOAD_STATE_1_EXT_SRC_ADDR(0));
for (i = 0; i < tex->num_textures; i++) {
static const struct fd3_pipe_sampler_view dummy_view = {};
static const struct fd3_pipe_sampler_view dummy_view = {
.base.u.tex.first_level = 1,
};
const struct fd3_pipe_sampler_view *view = tex->textures[i] ?
fd3_pipe_sampler_view(tex->textures[i]) :
&dummy_view;
struct fd_resource *rsc = view->tex_resource;
unsigned start = view->base.u.tex.first_level;
unsigned end = view->base.u.tex.last_level;

for (j = 0; j < view->mipaddrs; j++) {
struct fd_resource_slice *slice = fd_resource_slice(rsc, j);
for (j = 0; j < (end - start + 1); j++) {
struct fd_resource_slice *slice =
fd_resource_slice(rsc, j + start);
OUT_RELOC(ring, rsc->bo, slice->offset, 0, 0);
}
@@ -144,7 +144,8 @@ fd3_sampler_view_create(struct pipe_context *pctx, struct pipe_resource *prsc,
{
struct fd3_pipe_sampler_view *so = CALLOC_STRUCT(fd3_pipe_sampler_view);
struct fd_resource *rsc = fd_resource(prsc);
unsigned miplevels = cso->u.tex.last_level - cso->u.tex.first_level;
unsigned lvl = cso->u.tex.first_level;
unsigned miplevels = cso->u.tex.last_level - lvl;

if (!so)
return NULL;
@@ -156,7 +157,6 @@ fd3_sampler_view_create(struct pipe_context *pctx, struct pipe_resource *prsc,
so->base.context = pctx;

so->tex_resource = rsc;
so->mipaddrs = 1 + miplevels;

so->texconst0 =
A3XX_TEX_CONST_0_TYPE(tex_type(prsc->target)) |
@@ -170,11 +170,11 @@ fd3_sampler_view_create(struct pipe_context *pctx, struct pipe_resource *prsc,

so->texconst1 =
A3XX_TEX_CONST_1_FETCHSIZE(fd3_pipe2fetchsize(cso->format)) |
A3XX_TEX_CONST_1_WIDTH(prsc->width0) |
A3XX_TEX_CONST_1_HEIGHT(prsc->height0);
A3XX_TEX_CONST_1_WIDTH(u_minify(prsc->width0, lvl)) |
A3XX_TEX_CONST_1_HEIGHT(u_minify(prsc->height0, lvl));
/* when emitted, A3XX_TEX_CONST_2_INDX() must be OR'd in: */
so->texconst2 =
A3XX_TEX_CONST_2_PITCH(rsc->slices[0].pitch * rsc->cpp);
A3XX_TEX_CONST_2_PITCH(rsc->slices[lvl].pitch * rsc->cpp);
so->texconst3 = 0x00000000; /* ??? */

return &so->base;
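For context, the sampler-view change above sizes TEX_CONST_1 and the pitch from the view's first_level rather than the base level. A minimal sketch of that level-size math, written out by hand rather than calling Gallium's u_minify() helper, so treat the helper name and exact definition here as an assumption:

/* Size of mip level `lvl`: halve per level, but never drop below 1. */
static unsigned minify(unsigned base, unsigned lvl)
{
   return (base >> lvl) ? (base >> lvl) : 1;
}

/* Example: a 100x64 texture viewed with first_level = 2 programs a 25x16 level size,
 * and the pitch is likewise taken from rsc->slices[lvl] instead of slices[0]. */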
@@ -51,7 +51,6 @@ fd3_sampler_stateobj(struct pipe_sampler_state *samp)
struct fd3_pipe_sampler_view {
struct pipe_sampler_view base;
struct fd_resource *tex_resource;
uint32_t mipaddrs;
uint32_t texconst0, texconst1, texconst2, texconst3;
};
@@ -304,7 +304,36 @@ fail:
return NULL;
}

static bool render_blit(struct pipe_context *pctx, struct pipe_blit_info *info);
static void fd_blitter_pipe_begin(struct fd_context *ctx);
static void fd_blitter_pipe_end(struct fd_context *ctx);

/**
* _copy_region using pipe (3d engine)
*/
static bool
fd_blitter_pipe_copy_region(struct fd_context *ctx,
struct pipe_resource *dst,
unsigned dst_level,
unsigned dstx, unsigned dsty, unsigned dstz,
struct pipe_resource *src,
unsigned src_level,
const struct pipe_box *src_box)
{
/* not until we allow rendertargets to be buffers */
if (dst->target == PIPE_BUFFER || src->target == PIPE_BUFFER)
return false;

if (!util_blitter_is_copy_supported(ctx->blitter, dst, src))
return false;

fd_blitter_pipe_begin(ctx);
util_blitter_copy_texture(ctx->blitter,
dst, dst_level, dstx, dsty, dstz,
src, src_level, src_box);
fd_blitter_pipe_end(ctx);

return true;
}

/**
* Copy a block of pixels from one resource to another.
@@ -320,40 +349,33 @@ fd_resource_copy_region(struct pipe_context *pctx,
unsigned src_level,
const struct pipe_box *src_box)
{
struct fd_context *ctx = fd_context(pctx);

/* TODO if we have 2d core, or other DMA engine that could be used
* for simple copies and reasonably easily synchronized with the 3d
* core, this is where we'd plug it in..
*/
struct pipe_blit_info info = {
.dst = {
.resource = dst,
.box = {
.x = dstx,
.y = dsty,
.z = dstz,
.width = src_box->width,
.height = src_box->height,
.depth = src_box->depth,
},
.format = util_format_linear(dst->format),
},
.src = {
.resource = src,
.box = *src_box,
.format = util_format_linear(src->format),
},
.mask = PIPE_MASK_RGBA,
.filter = PIPE_TEX_FILTER_NEAREST,
};
render_blit(pctx, &info);

/* try blit on 3d pipe: */
if (fd_blitter_pipe_copy_region(ctx,
dst, dst_level, dstx, dsty, dstz,
src, src_level, src_box))
return;

/* else fallback to pure sw: */
util_resource_copy_region(pctx,
dst, dst_level, dstx, dsty, dstz,
src, src_level, src_box);
}

/* Optimal hardware path for blitting pixels.
/**
* Optimal hardware path for blitting pixels.
* Scaling, format conversion, up- and downsampling (resolve) are allowed.
*/
static void
fd_blit(struct pipe_context *pctx, const struct pipe_blit_info *blit_info)
{
struct fd_context *ctx = fd_context(pctx);
struct pipe_blit_info info = *blit_info;

if (info.src.resource->nr_samples > 1 &&
@@ -373,21 +395,21 @@ fd_blit(struct pipe_context *pctx, const struct pipe_blit_info *blit_info)
info.mask &= ~PIPE_MASK_S;
}

render_blit(pctx, &info);
}

static bool
render_blit(struct pipe_context *pctx, struct pipe_blit_info *info)
{
struct fd_context *ctx = fd_context(pctx);

if (!util_blitter_is_blit_supported(ctx->blitter, info)) {
if (!util_blitter_is_blit_supported(ctx->blitter, &info)) {
DBG("blit unsupported %s -> %s",
util_format_short_name(info->src.resource->format),
util_format_short_name(info->dst.resource->format));
return false;
util_format_short_name(info.src.resource->format),
util_format_short_name(info.dst.resource->format));
return;
}

fd_blitter_pipe_begin(ctx);
util_blitter_blit(ctx->blitter, &info);
fd_blitter_pipe_end(ctx);
}

static void
fd_blitter_pipe_begin(struct fd_context *ctx)
{
util_blitter_save_vertex_buffer_slot(ctx->blitter, ctx->vertexbuf.vb);
util_blitter_save_vertex_elements(ctx->blitter, ctx->vtx);
util_blitter_save_vertex_shader(ctx->blitter, ctx->prog.vp);
@@ -407,15 +429,21 @@ render_blit(struct pipe_context *pctx, struct pipe_blit_info *info)
ctx->fragtex.num_textures, ctx->fragtex.textures);

fd_hw_query_set_stage(ctx, ctx->ring, FD_STAGE_BLIT);
util_blitter_blit(ctx->blitter, info);
fd_hw_query_set_stage(ctx, ctx->ring, FD_STAGE_NULL);

return true;
}

static void
fd_flush_resource(struct pipe_context *ctx, struct pipe_resource *resource)
fd_blitter_pipe_end(struct fd_context *ctx)
{
fd_hw_query_set_stage(ctx, ctx->ring, FD_STAGE_NULL);
}

static void
fd_flush_resource(struct pipe_context *pctx, struct pipe_resource *prsc)
{
struct fd_resource *rsc = fd_resource(prsc);

if (rsc->dirty)
fd_context_render(pctx);
}

void
@@ -322,7 +322,8 @@ static void ir3_block_dump(struct ir3_dump_ctx *ctx,

/* draw instruction graph: */
for (i = 0; i < block->noutputs; i++)
dump_instr(ctx, block->outputs[i]);
if (block->outputs[i])
dump_instr(ctx, block->outputs[i]);

/* draw outputs: */
fprintf(ctx->f, "output%lx [shape=record,label=\"outputs", PTRID(block));
@@ -174,15 +174,31 @@ NVC0LegalizePostRA::findOverwritingDefs(const Instruction *texi,
}

void
NVC0LegalizePostRA::findFirstUses(const Instruction *texi,
const Instruction *insn,
std::list<TexUse> &uses)
NVC0LegalizePostRA::findFirstUses(
const Instruction *texi,
const Instruction *insn,
std::list<TexUse> &uses,
std::tr1::unordered_set<const Instruction *>& visited)
{
for (int d = 0; insn->defExists(d); ++d) {
Value *v = insn->getDef(d);
for (Value::UseIterator u = v->uses.begin(); u != v->uses.end(); ++u) {
Instruction *usei = (*u)->getInsn();

/* XXX HACK ALERT XXX
*
* This shouldn't have to be here, we should always be making forward
* progress by looking at the uses. However this somehow does not
* appear to be the case. Probably because this is being done right
* after RA, when the defs/uses lists have been messed with by node
* merging. This should probably be moved to being done right before
* RA. But this will do for now.
*/
if (visited.find(usei) != visited.end())
continue;

visited.insert(usei);

if (usei->op == OP_PHI || usei->op == OP_UNION) {
// need a barrier before WAW cases
for (int s = 0; usei->srcExists(s); ++s) {
@@ -197,11 +213,11 @@ NVC0LegalizePostRA::findFirstUses(const Instruction *texi,
usei->op == OP_PHI ||
usei->op == OP_UNION) {
// these uses don't manifest in the machine code
findFirstUses(texi, usei, uses);
findFirstUses(texi, usei, uses, visited);
} else
if (usei->op == OP_MOV && usei->getDef(0)->equals(usei->getSrc(0)) &&
usei->subOp != NV50_IR_SUBOP_MOV_FINAL) {
findFirstUses(texi, usei, uses);
findFirstUses(texi, usei, uses, visited);
} else {
addTexUse(uses, usei, insn);
}
@@ -257,8 +273,10 @@ NVC0LegalizePostRA::insertTextureBarriers(Function *fn)
uses = new std::list<TexUse>[texes.size()];
if (!uses)
return false;
for (size_t i = 0; i < texes.size(); ++i)
findFirstUses(texes[i], texes[i], uses[i]);
for (size_t i = 0; i < texes.size(); ++i) {
std::tr1::unordered_set<const Instruction *> visited;
findFirstUses(texes[i], texes[i], uses[i], visited);
}

// determine the barrier level at each use
for (size_t i = 0; i < texes.size(); ++i) {
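The fix above threads a visited set through findFirstUses() so each use is expanded at most once, which is the standard guard when a def/use walk can revisit nodes (here because post-RA node merging perturbed the lists). A stand-alone sketch of the idea in plain C, using a toy node type and a linear scratch array instead of the nv50 IR classes and std::tr1::unordered_set:

#include <stddef.h>

struct node { struct node *succ[4]; int nsucc; };

/* Walk successors, visiting each node at most once; `seen`/`nseen` form the
 * caller-provided visited set, so cycles or re-merged edges cannot recurse forever. */
static void walk(struct node *n, struct node **seen, size_t *nseen, size_t max)
{
   for (size_t i = 0; i < *nseen; i++)
      if (seen[i] == n)
         return;                 /* already handled */
   if (*nseen < max)
      seen[(*nseen)++] = n;

   for (int s = 0; s < n->nsucc; s++)
      walk(n->succ[s], seen, nseen, max);
}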
@@ -20,6 +20,8 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/

#include <tr1/unordered_set>

#include "codegen/nv50_ir.h"
#include "codegen/nv50_ir_build_util.h"

@@ -69,7 +71,8 @@ private:
bool insertTextureBarriers(Function *);
inline bool insnDominatedBy(const Instruction *, const Instruction *) const;
void findFirstUses(const Instruction *tex, const Instruction *def,
std::list<TexUse>&);
std::list<TexUse>&,
std::tr1::unordered_set<const Instruction *>&);
void findOverwritingDefs(const Instruction *tex, Instruction *insn,
const BasicBlock *term,
std::list<TexUse>&);
@@ -567,6 +567,10 @@ ConstantFolding::expr(Instruction *i,
ImmediateValue src0;
if (i->src(0).getImmediate(src0))
expr(i, src0, *i->getSrc(1)->asImm());
if (i->saturate && !prog->getTarget()->isSatSupported(i)) {
bld.setPosition(i, false);
i->setSrc(1, bld.loadImm(NULL, res.data.u32));
}
} else {
i->op = i->saturate ? OP_SAT : OP_MOV; /* SAT handled by unary() */
}
@@ -585,9 +585,12 @@ nv50_stage_sampler_states_bind(struct nv50_context *nv50, int s,
nv50_screen_tsc_unlock(nv50->screen, old);
}
assert(nv50->num_samplers[s] <= PIPE_MAX_SAMPLERS);
for (; i < nv50->num_samplers[s]; ++i)
if (nv50->samplers[s][i])
for (; i < nv50->num_samplers[s]; ++i) {
if (nv50->samplers[s][i]) {
nv50_screen_tsc_unlock(nv50->screen, nv50->samplers[s][i]);
nv50->samplers[s][i] = NULL;
}
}

nv50->num_samplers[s] = nr;
@@ -54,8 +54,8 @@ nv50_validate_fb(struct nv50_context *nv50)
assert(mt->layout_3d || !array_mode || array_size == 1);

BEGIN_NV04(push, NV50_3D(RT_ADDRESS_HIGH(i)), 5);
PUSH_DATAh(push, bo->offset + sf->offset);
PUSH_DATA (push, bo->offset + sf->offset);
PUSH_DATAh(push, mt->base.address + sf->offset);
PUSH_DATA (push, mt->base.address + sf->offset);
PUSH_DATA (push, nv50_format_table[sf->base.format].rt);
if (likely(nouveau_bo_memtype(bo))) {
PUSH_DATA (push, mt->level[sf->base.u.tex.level].tile_mode);
@@ -97,8 +97,8 @@ nv50_validate_fb(struct nv50_context *nv50)
int unk = mt->base.base.target == PIPE_TEXTURE_3D || sf->depth == 1;

BEGIN_NV04(push, NV50_3D(ZETA_ADDRESS_HIGH), 5);
PUSH_DATAh(push, bo->offset + sf->offset);
PUSH_DATA (push, bo->offset + sf->offset);
PUSH_DATAh(push, mt->base.address + sf->offset);
PUSH_DATA (push, mt->base.address + sf->offset);
PUSH_DATA (push, nv50_format_table[fb->zsbuf->format].rt);
PUSH_DATA (push, mt->level[sf->base.u.tex.level].tile_mode);
PUSH_DATA (push, mt->layer_stride >> 2);
@@ -114,8 +114,8 @@ nv50_2d_texture_set(struct nouveau_pushbuf *push, int dst,
PUSH_DATA (push, mt->level[level].pitch);
PUSH_DATA (push, width);
PUSH_DATA (push, height);
PUSH_DATAh(push, bo->offset + offset);
PUSH_DATA (push, bo->offset + offset);
PUSH_DATAh(push, mt->base.address + offset);
PUSH_DATA (push, mt->base.address + offset);
} else {
BEGIN_NV04(push, SUBC_2D(mthd), 5);
PUSH_DATA (push, format);
@@ -126,8 +126,8 @@ nv50_2d_texture_set(struct nouveau_pushbuf *push, int dst,
BEGIN_NV04(push, SUBC_2D(mthd + 0x18), 4);
PUSH_DATA (push, width);
PUSH_DATA (push, height);
PUSH_DATAh(push, bo->offset + offset);
PUSH_DATA (push, bo->offset + offset);
PUSH_DATAh(push, mt->base.address + offset);
PUSH_DATA (push, mt->base.address + offset);
}

#if 0
@@ -299,8 +299,8 @@ nv50_clear_render_target(struct pipe_context *pipe,
BEGIN_NV04(push, NV50_3D(RT_CONTROL), 1);
PUSH_DATA (push, 1);
BEGIN_NV04(push, NV50_3D(RT_ADDRESS_HIGH(0)), 5);
PUSH_DATAh(push, bo->offset + sf->offset);
PUSH_DATA (push, bo->offset + sf->offset);
PUSH_DATAh(push, mt->base.address + sf->offset);
PUSH_DATA (push, mt->base.address + sf->offset);
PUSH_DATA (push, nv50_format_table[dst->format].rt);
PUSH_DATA (push, mt->level[sf->base.u.tex.level].tile_mode);
PUSH_DATA (push, mt->layer_stride >> 2);
@@ -381,8 +381,8 @@ nv50_clear_depth_stencil(struct pipe_context *pipe,
nv50->scissors_dirty |= 1;

BEGIN_NV04(push, NV50_3D(ZETA_ADDRESS_HIGH), 5);
PUSH_DATAh(push, bo->offset + sf->offset);
PUSH_DATA (push, bo->offset + sf->offset);
PUSH_DATAh(push, mt->base.address + sf->offset);
PUSH_DATA (push, mt->base.address + sf->offset);
PUSH_DATA (push, nv50_format_table[dst->format].rt);
PUSH_DATA (push, mt->level[sf->base.u.tex.level].tile_mode);
PUSH_DATA (push, mt->layer_stride >> 2);
@@ -24,6 +24,8 @@ nv50_m2mf_rect_setup(struct nv50_m2mf_rect *rect,
rect->bo = mt->base.bo;
rect->domain = mt->base.domain;
rect->base = mt->level[l].offset;
if (mt->base.bo->offset != mt->base.address)
rect->base += mt->base.address - mt->base.bo->offset;
rect->pitch = mt->level[l].pitch;
if (util_format_is_plain(res->format)) {
rect->width = w << mt->ms_x;
@@ -482,12 +482,14 @@ nv84_create_decoder(struct pipe_context *context,
mip.level[0].pitch = surf.width * 4;
mip.base.domain = NOUVEAU_BO_VRAM;
mip.base.bo = dec->mbring;
mip.base.address = dec->mbring->offset;
context->clear_render_target(context, &surf.base, &color, 0, 0, 64, 4760);
surf.offset = dec->vpring->size / 2 - 0x1000;
surf.width = 1024;
surf.height = 1;
mip.level[0].pitch = surf.width * 4;
mip.base.bo = dec->vpring;
mip.base.address = dec->vpring->offset;
context->clear_render_target(context, &surf.base, &color, 0, 0, 1024, 1);
surf.offset = dec->vpring->size - 0x1000;
context->clear_render_target(context, &surf.base, &color, 0, 0, 1024, 1);
@@ -683,17 +685,14 @@ nv84_video_buffer_create(struct pipe_context *pipe,
bo_size, &cfg, &buffer->full))
goto error;

mt0->base.bo = buffer->interlaced;
nouveau_bo_ref(buffer->interlaced, &mt0->base.bo);
mt0->base.domain = NOUVEAU_BO_VRAM;
mt0->base.offset = 0;
mt0->base.address = buffer->interlaced->offset + mt0->base.offset;
nouveau_bo_ref(buffer->interlaced, &empty);
mt0->base.address = buffer->interlaced->offset;

mt1->base.bo = buffer->interlaced;
nouveau_bo_ref(buffer->interlaced, &mt1->base.bo);
mt1->base.domain = NOUVEAU_BO_VRAM;
mt1->base.offset = mt0->layer_stride * 2;
mt1->base.address = buffer->interlaced->offset + mt1->base.offset;
nouveau_bo_ref(buffer->interlaced, &empty);
mt1->base.offset = mt0->total_size;
mt1->base.address = buffer->interlaced->offset + mt0->total_size;

memset(&sv_templ, 0, sizeof(sv_templ));
for (component = 0, i = 0; i < 2; ++i ) {
@@ -261,7 +261,6 @@ nvc0_miptree_create(struct pipe_screen *pscreen,

if (pt->usage == PIPE_USAGE_STAGING) {
switch (pt->target) {
case PIPE_TEXTURE_1D:
case PIPE_TEXTURE_2D:
case PIPE_TEXTURE_RECT:
if (pt->last_level == 0 &&
@@ -440,7 +440,8 @@ static void r600_clear(struct pipe_context *ctx, unsigned buffers,
}

r600_blitter_begin(ctx, R600_CLEAR);
util_blitter_clear(rctx->blitter, fb->width, fb->height, 1,
util_blitter_clear(rctx->blitter, fb->width, fb->height,
util_framebuffer_get_num_layers(fb),
buffers, color, depth, stencil);
r600_blitter_end(ctx);
@@ -1245,12 +1245,6 @@ static bool r600_update_derived_state(struct r600_context *rctx)
}
}

if (rctx->b.chip_class >= EVERGREEN) {
evergreen_update_db_shader_control(rctx);
} else {
r600_update_db_shader_control(rctx);
}

if (unlikely(!ps_dirty && rctx->ps_shader && rctx->rasterizer &&
((rctx->rasterizer->sprite_coord_enable != rctx->ps_shader->current->sprite_coord_enable) ||
(rctx->rasterizer->flatshade != rctx->ps_shader->current->flatshade)))) {
@@ -1264,6 +1258,12 @@ static bool r600_update_derived_state(struct r600_context *rctx)
update_shader_atom(ctx, &rctx->pixel_shader, rctx->ps_shader->current);
}

if (rctx->b.chip_class >= EVERGREEN) {
evergreen_update_db_shader_control(rctx);
} else {
r600_update_db_shader_control(rctx);
}

/* on R600 we stuff masks + txq info into one constant buffer */
/* on evergreen we only need a txq info one */
if (rctx->b.chip_class < EVERGREEN) {
@@ -807,12 +807,40 @@ void r600_suspend_nontimer_queries(struct r600_common_context *ctx)
assert(ctx->num_cs_dw_nontimer_queries_suspend == 0);
}

static unsigned r600_queries_num_cs_dw_for_resuming(struct r600_common_context *ctx)
{
struct r600_query *query;
unsigned num_dw = 0;

LIST_FOR_EACH_ENTRY(query, &ctx->active_nontimer_queries, list) {
/* begin + end */
num_dw += query->num_cs_dw * 2;

/* Workaround for the fact that
* num_cs_dw_nontimer_queries_suspend is incremented for every
* resumed query, which raises the bar in need_cs_space for
* queries about to be resumed.
*/
num_dw += query->num_cs_dw;
}
/* primitives generated query */
num_dw += ctx->streamout.enable_atom.num_dw;
/* guess for ZPASS enable or PERFECT_ZPASS_COUNT enable updates */
num_dw += 13;

return num_dw;
}

void r600_resume_nontimer_queries(struct r600_common_context *ctx)
{
struct r600_query *query;

assert(ctx->num_cs_dw_nontimer_queries_suspend == 0);

/* Check CS space here. Resuming must not be interrupted by flushes. */
ctx->need_gfx_cs_space(&ctx->b,
r600_queries_num_cs_dw_for_resuming(ctx), TRUE);

LIST_FOR_EACH_ENTRY(query, &ctx->active_nontimer_queries, list) {
r600_emit_query_begin(ctx, query);
}
@@ -1328,6 +1328,7 @@ dri_kms_init_screen(__DRIscreen * sPriv)
const __DRIconfig **configs;
struct dri_screen *screen;
struct pipe_screen *pscreen = NULL;
uint64_t cap;

screen = CALLOC_STRUCT(dri_screen);
if (!screen)
@@ -1339,6 +1340,13 @@ dri_kms_init_screen(__DRIscreen * sPriv)
sPriv->driverPrivate = (void *)screen;

pscreen = kms_swrast_create_screen(screen->fd);

if (drmGetCap(sPriv->fd, DRM_CAP_PRIME, &cap) == 0 &&
(cap & DRM_PRIME_CAP_IMPORT)) {
dri2ImageExtension.createImageFromFds = dri2_from_fds;
dri2ImageExtension.createImageFromDmaBufs = dri2_from_dma_bufs;
}

sPriv->extensions = dri_screen_extensions;

/* dri_init_screen_helper checks pscreen for us */
@@ -26,7 +26,6 @@ gallium_dri_la_LDFLAGS = \
-shrext .so \
-module \
-avoid-version \
-Wl,--dynamic-list=$(top_srcdir)/src/gallium/targets/dri-vdpau.dyn \
$(GC_SECTIONS)

if HAVE_LD_VERSION_SCRIPT
@@ -34,6 +33,11 @@ gallium_dri_la_LDFLAGS += \
-Wl,--version-script=$(top_srcdir)/src/gallium/targets/dri/dri.sym
endif # HAVE_LD_VERSION_SCRIPT

if HAVE_LD_DYNAMIC_LIST
gallium_dri_la_LDFLAGS += \
-Wl,--dynamic-list=$(top_srcdir)/src/gallium/targets/dri-vdpau.dyn
endif # HAVE_LD_DYNAMIC_LIST

gallium_dri_la_LIBADD = \
$(top_builddir)/src/mesa/libmesagallium.la \
$(top_builddir)/src/mesa/drivers/dri/common/libdricommon.la \
@@ -15,7 +15,6 @@ libvdpau_gallium_la_LDFLAGS = \
-module \
-no-undefined \
-version-number $(VDPAU_MAJOR):$(VDPAU_MINOR) \
-Wl,--dynamic-list=$(top_srcdir)/src/gallium/targets/dri-vdpau.dyn \
$(GC_SECTIONS) \
$(LD_NO_UNDEFINED)

@@ -24,6 +23,11 @@ libvdpau_gallium_la_LDFLAGS += \
-Wl,--version-script=$(top_srcdir)/src/gallium/targets/vdpau/vdpau.sym
endif # HAVE_LD_VERSION_SCRIPT

if HAVE_LD_DYNAMIC_LIST
libvdpau_gallium_la_LDFLAGS += \
-Wl,--dynamic-list=$(top_srcdir)/src/gallium/targets/dri-vdpau.dyn
endif # HAVE_LD_DYNAMIC_LIST

libvdpau_gallium_la_LIBADD = \
$(top_builddir)/src/gallium/state_trackers/vdpau/libvdpautracker.la \
$(top_builddir)/src/gallium/auxiliary/libgallium.la \
@@ -238,7 +238,7 @@ out_mip:

static struct svga_winsys_surface *
vmw_drm_surface_from_handle(struct svga_winsys_screen *sws,
struct winsys_handle *whandle,
struct winsys_handle *whandle,
SVGA3dSurfaceFormat *format)
{
struct vmw_svga_winsys_surface *vsrf;
@@ -248,7 +248,8 @@ vmw_drm_surface_from_handle(struct svga_winsys_screen *sws,
struct drm_vmw_surface_arg *req = &arg.req;
struct drm_vmw_surface_create_req *rep = &arg.rep;
uint32_t handle = 0;
SVGA3dSize size;
struct drm_vmw_size size;
SVGA3dSize base_size;
int ret;
int i;

@@ -274,7 +275,7 @@ vmw_drm_surface_from_handle(struct svga_winsys_screen *sws,

memset(&arg, 0, sizeof(arg));
req->sid = handle;
rep->size_addr = (size_t)&size;
rep->size_addr = (unsigned long)&size;

ret = drmCommandWriteRead(vws->ioctl.drm_fd, DRM_VMW_REF_SURFACE,
&arg, sizeof(arg));
@@ -324,7 +325,11 @@ vmw_drm_surface_from_handle(struct svga_winsys_screen *sws,
*format = rep->format;

/* Estimate usage, for early flushing. */
vsrf->size = svga3dsurface_get_serialized_size(rep->format, size,

base_size.width = size.width;
base_size.height = size.height;
base_size.depth = size.depth;
vsrf->size = svga3dsurface_get_serialized_size(rep->format, base_size,
rep->mip_levels[0],
FALSE);
@@ -38,6 +38,7 @@
#include <sys/mman.h>
#include <unistd.h>
#include <dlfcn.h>
#include <fcntl.h>
#include <xf86drm.h>

#include "pipe/p_compiler.h"
@@ -121,7 +122,7 @@ kms_sw_displaytarget_create(struct sw_winsys *ws,
int ret;

kms_sw_dt = CALLOC_STRUCT(kms_sw_displaytarget);
if(!kms_sw_dt)
if (!kms_sw_dt)
goto no_dt;

kms_sw_dt->ref_count = 1;
@@ -210,6 +211,38 @@ kms_sw_displaytarget_map(struct sw_winsys *ws,
return kms_sw_dt->mapped;
}

static struct kms_sw_displaytarget *
kms_sw_displaytarget_add_from_prime(struct kms_sw_winsys *kms_sw, int fd)
{
uint32_t handle = -1;
struct kms_sw_displaytarget * kms_sw_dt;
int ret;

ret = drmPrimeFDToHandle(kms_sw->fd, fd, &handle);

if (ret)
return NULL;

kms_sw_dt = CALLOC_STRUCT(kms_sw_displaytarget);
if (!kms_sw_dt)
return NULL;

kms_sw_dt->ref_count = 1;
kms_sw_dt->handle = handle;
kms_sw_dt->size = lseek(fd, 0, SEEK_END);

if (kms_sw_dt->size == (off_t)-1) {
FREE(kms_sw_dt);
return NULL;
}

lseek(fd, 0, SEEK_SET);

list_add(&kms_sw_dt->link, &kms_sw->bo_list);

return kms_sw_dt;
}

static void
kms_sw_displaytarget_unmap(struct sw_winsys *ws,
struct sw_displaytarget *dt)
@@ -231,17 +264,34 @@ kms_sw_displaytarget_from_handle(struct sw_winsys *ws,
struct kms_sw_winsys *kms_sw = kms_sw_winsys(ws);
struct kms_sw_displaytarget *kms_sw_dt;

assert(whandle->type == DRM_API_HANDLE_TYPE_KMS);
assert(whandle->type == DRM_API_HANDLE_TYPE_KMS ||
whandle->type == DRM_API_HANDLE_TYPE_FD);

LIST_FOR_EACH_ENTRY(kms_sw_dt, &kms_sw->bo_list, link) {
if (kms_sw_dt->handle == whandle->handle) {
switch(whandle->type) {
case DRM_API_HANDLE_TYPE_FD:
kms_sw_dt = kms_sw_displaytarget_add_from_prime(kms_sw, whandle->handle);
if (kms_sw_dt) {
kms_sw_dt->ref_count++;

DEBUG("KMS-DEBUG: imported buffer %u (size %u)\n", kms_sw_dt->handle, kms_sw_dt->size);

kms_sw_dt->width = templ->width0;
kms_sw_dt->height = templ->height0;
kms_sw_dt->stride = whandle->stride;
*stride = kms_sw_dt->stride;
return (struct sw_displaytarget *)kms_sw_dt;
}
return (struct sw_displaytarget *)kms_sw_dt;
case DRM_API_HANDLE_TYPE_KMS:
LIST_FOR_EACH_ENTRY(kms_sw_dt, &kms_sw->bo_list, link) {
if (kms_sw_dt->handle == whandle->handle) {
kms_sw_dt->ref_count++;

DEBUG("KMS-DEBUG: imported buffer %u (size %u)\n", kms_sw_dt->handle, kms_sw_dt->size);

*stride = kms_sw_dt->stride;
return (struct sw_displaytarget *)kms_sw_dt;
}
}
/* fallthrough */
default:
break;
}

assert(0);
@@ -253,16 +303,26 @@ kms_sw_displaytarget_get_handle(struct sw_winsys *winsys,
struct sw_displaytarget *dt,
struct winsys_handle *whandle)
{
struct kms_sw_winsys *kms_sw = kms_sw_winsys(winsys);
struct kms_sw_displaytarget *kms_sw_dt = kms_sw_displaytarget(dt);

if (whandle->type == DRM_API_HANDLE_TYPE_KMS) {
switch(whandle->type) {
case DRM_API_HANDLE_TYPE_KMS:
whandle->handle = kms_sw_dt->handle;
whandle->stride = kms_sw_dt->stride;
} else {
return TRUE;
case DRM_API_HANDLE_TYPE_FD:
if (!drmPrimeHandleToFD(kms_sw->fd, kms_sw_dt->handle,
DRM_CLOEXEC, &whandle->handle)) {
whandle->stride = kms_sw_dt->stride;
return TRUE;
}
/* fallthrough */
default:
whandle->handle = 0;
whandle->stride = 0;
return FALSE;
}
return TRUE;
}

static void
@@ -315,4 +375,4 @@ kms_dri_create_winsys(int fd)
return &ws->base;
}

/* vim: set sw=3 ts=8 sts=3 expandtab: */
/* vim: set sw=3 ts=8 sts=3 expandtab: */
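The import path added above reduces to one libdrm call plus an lseek() to learn the dma-buf's size; a self-contained sketch of just that step (the helper name here is made up, while the drmPrimeFDToHandle() call and the lseek() sizing trick are the same ones the patch uses):

#include <stdint.h>
#include <unistd.h>
#include <xf86drm.h>

/* Turn a dma-buf fd into a GEM handle on drm_fd and measure the buffer. */
static int import_prime(int drm_fd, int prime_fd, uint32_t *handle, off_t *size)
{
   if (drmPrimeFDToHandle(drm_fd, prime_fd, handle))
      return -1;

   *size = lseek(prime_fd, 0, SEEK_END);  /* dma-buf fds report their size via lseek */
   if (*size == (off_t)-1)
      return -1;
   lseek(prime_fd, 0, SEEK_SET);
   return 0;
}

The export direction in kms_sw_displaytarget_get_handle() is the mirror image: drmPrimeHandleToFD() with DRM_CLOEXEC turns the GEM handle back into a file descriptor.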
@@ -76,7 +76,7 @@ compare_index_block(exec_list *instructions, ir_variable *index,
ir_rvalue *broadcast_index = new(mem_ctx) ir_dereference_variable(index);

assert(index->type->is_scalar());
assert(index->type->base_type == GLSL_TYPE_INT);
assert(index->type->base_type == GLSL_TYPE_INT || index->type->base_type == GLSL_TYPE_UINT);
assert(components >= 1 && components <= 4);

if (components > 1) {
@@ -36,8 +36,7 @@ endif
gldir = $(includedir)/GL
gl_HEADERS = $(top_srcdir)/include/GL/*.h

.PHONY: $(BUILDDIR)main/git_sha1.h.tmp $(BUILDDIR)program/lex.yy.c $(BUILDDIR)program/program_parse.tab.c $(BUILDDIR)program/program_parse.tab.h

.PHONY: $(BUILDDIR)main/git_sha1.h.tmp
$(BUILDDIR)main/git_sha1.h.tmp:
@touch main/git_sha1.h.tmp
@if test -d $(top_srcdir)/.git; then \
@@ -396,25 +396,6 @@ _mesa_meta_init(struct gl_context *ctx)
ctx->Meta = CALLOC_STRUCT(gl_meta_state);
}

static GLenum
gl_buffer_index_to_drawbuffers_enum(gl_buffer_index bufindex)
{
assert(bufindex < BUFFER_COUNT);

if (bufindex >= BUFFER_COLOR0)
return GL_COLOR_ATTACHMENT0 + bufindex - BUFFER_COLOR0;
else if (bufindex == BUFFER_FRONT_LEFT)
return GL_FRONT_LEFT;
else if (bufindex == BUFFER_FRONT_RIGHT)
return GL_FRONT_RIGHT;
else if (bufindex == BUFFER_BACK_LEFT)
return GL_BACK_LEFT;
else if (bufindex == BUFFER_BACK_RIGHT)
return GL_BACK_RIGHT;

return GL_NONE;
}

/**
* Free context meta-op state.
* To be called once during context destruction.
@@ -806,20 +787,9 @@ _mesa_meta_begin(struct gl_context *ctx, GLbitfield state)
}

if (state & MESA_META_DRAW_BUFFERS) {
int buf, real_color_buffers = 0;
memset(save->ColorDrawBuffers, 0, sizeof(save->ColorDrawBuffers));

for (buf = 0; buf < ctx->Const.MaxDrawBuffers; buf++) {
int buf_index = ctx->DrawBuffer->_ColorDrawBufferIndexes[buf];
if (buf_index == -1)
continue;

save->ColorDrawBuffers[buf] =
gl_buffer_index_to_drawbuffers_enum(buf_index);

if (++real_color_buffers >= ctx->DrawBuffer->_NumColorDrawBuffers)
break;
}
struct gl_framebuffer *fb = ctx->DrawBuffer;
memcpy(save->ColorDrawBuffers, fb->ColorDrawBuffer,
sizeof(save->ColorDrawBuffers));
}

/* misc */
@@ -1224,7 +1194,7 @@ _mesa_meta_end(struct gl_context *ctx)
_mesa_BindRenderbuffer(GL_RENDERBUFFER, save->RenderbufferName);

if (state & MESA_META_DRAW_BUFFERS) {
_mesa_DrawBuffers(ctx->Const.MaxDrawBuffers, save->ColorDrawBuffers);
_mesa_drawbuffers(ctx, ctx->Const.MaxDrawBuffers, save->ColorDrawBuffers, NULL);
}

ctx->Meta->SaveStackDepth--;
@@ -74,7 +74,7 @@ make_view(struct gl_context *ctx, struct gl_texture_image *tex_image,
tex_image->Depth,
0, internal_format, tex_format);

view_tex_obj->MinLevel = 0;
view_tex_obj->MinLevel = tex_image->Level;
view_tex_obj->NumLevels = 1;
view_tex_obj->MinLayer = tex_obj->MinLayer;
view_tex_obj->NumLayers = tex_obj->NumLayers;
@@ -2246,10 +2246,10 @@ fs_visitor::emit_bool_to_cond_code(ir_rvalue *ir)
return;
}

fs_reg op[2];
fs_reg op[3];
fs_inst *inst;

assert(expr->get_num_operands() <= 2);
assert(expr->get_num_operands() <= 3);
for (unsigned int i = 0; i < expr->get_num_operands(); i++) {
assert(expr->operands[i]->type->is_scalar());

@@ -2336,6 +2336,22 @@ fs_visitor::emit_bool_to_cond_code(ir_rvalue *ir)
brw_conditional_for_comparison(expr->operation)));
break;

case ir_triop_csel: {
/* Expand the boolean condition into the flag register. */
inst = emit(MOV(reg_null_d, op[0]));
inst->conditional_mod = BRW_CONDITIONAL_NZ;

/* Select which boolean to return. */
fs_reg temp(this, expr->operands[1]->type);
inst = emit(SEL(temp, op[1], op[2]));
inst->predicate = BRW_PREDICATE_NORMAL;

/* Expand the result to a condition code. */
inst = emit(MOV(reg_null_d, temp));
inst->conditional_mod = BRW_CONDITIONAL_NZ;
break;
}

default:
unreachable("not reached");
}
@@ -777,10 +777,10 @@ vec4_visitor::emit_bool_to_cond_code(ir_rvalue *ir,
*predicate = BRW_PREDICATE_NORMAL;

if (expr) {
src_reg op[2];
src_reg op[3];
vec4_instruction *inst;

assert(expr->get_num_operands() <= 2);
assert(expr->get_num_operands() <= 3);
for (unsigned int i = 0; i < expr->get_num_operands(); i++) {
expr->operands[i]->accept(this);
op[i] = this->result;
@@ -852,6 +852,22 @@ vec4_visitor::emit_bool_to_cond_code(ir_rvalue *ir,
brw_conditional_for_comparison(expr->operation)));
break;

case ir_triop_csel: {
/* Expand the boolean condition into the flag register. */
inst = emit(MOV(dst_null_d(), op[0]));
inst->conditional_mod = BRW_CONDITIONAL_NZ;

/* Select which boolean to return. */
dst_reg temp(this, expr->operands[1]->type);
inst = emit(BRW_OPCODE_SEL, temp, op[1], op[2]);
inst->predicate = BRW_PREDICATE_NORMAL;

/* Expand the result to a condition code. */
inst = emit(MOV(dst_null_d(), src_reg(temp)));
inst->conditional_mod = BRW_CONDITIONAL_NZ;
break;
}

default:
unreachable("not reached");
}
@@ -2596,7 +2612,7 @@ vec4_visitor::visit(ir_texture *ir)
} else if (ir->op == ir_txf_ms) {
emit(MOV(dst_reg(MRF, param_base + 1, sample_index_type, WRITEMASK_X),
sample_index));
if (brw->gen >= 7)
if (brw->gen >= 7) {
/* MCS data is in the first channel of `mcs`, but we need to get it into
* the .y channel of the second vec4 of params, so replicate .x across
* the whole vec4 and then mask off everything except .y
@@ -2604,6 +2620,7 @@ vec4_visitor::visit(ir_texture *ir)
mcs.swizzle = BRW_SWIZZLE_XXXX;
emit(MOV(dst_reg(MRF, param_base + 1, glsl_type::uint_type, WRITEMASK_Y),
mcs));
}
inst->mlen++;
} else if (ir->op == ir_txd) {
const glsl_type *type = lod_type;
@@ -40,6 +40,7 @@ copy_image_with_blitter(struct brw_context *brw,
int src_width, int src_height)
{
GLuint bw, bh;
uint32_t src_image_x, src_image_y, dst_image_x, dst_image_y;
int cpp;

/* The blitter doesn't understand multisampling at all. */
@@ -70,43 +71,53 @@ copy_image_with_blitter(struct brw_context *brw,
return false;
}

intel_miptree_get_image_offset(src_mt, src_level, src_z,
&src_image_x, &src_image_y);

if (_mesa_is_format_compressed(src_mt->format)) {
_mesa_get_format_block_size(src_mt->format, &bw, &bh);

assert(src_x % bw == 0);
assert(src_y % bw == 0);
assert(src_y % bh == 0);
assert(src_width % bw == 0);
assert(src_height % bw == 0);
assert(src_height % bh == 0);

src_x /= (int)bw;
src_y /= (int)bw;
src_y /= (int)bh;
src_width /= (int)bw;
src_height /= (int)bw;
src_height /= (int)bh;

/* Inside of the miptree, the x offsets are stored in pixels while
* the y offsets are stored in blocks. We need to scale just the x
* offset.
*/
src_image_x /= bw;

cpp = _mesa_get_format_bytes(src_mt->format);
} else {
cpp = src_mt->cpp;
}
src_x += src_image_x;
src_y += src_image_y;

intel_miptree_get_image_offset(dst_mt, dst_level, dst_z,
&dst_image_x, &dst_image_y);

if (_mesa_is_format_compressed(dst_mt->format)) {
_mesa_get_format_block_size(dst_mt->format, &bw, &bh);

assert(dst_x % bw == 0);
assert(dst_y % bw == 0);
assert(dst_y % bh == 0);

dst_x /= (int)bw;
dst_y /= (int)bw;
dst_y /= (int)bh;

/* Inside of the miptree, the x offsets are stored in pixels while
* the y offsets are stored in blocks. We need to scale just the x
* offset.
*/
dst_image_x /= bw;
}

uint32_t src_image_x, src_image_y;
intel_miptree_get_image_offset(src_mt, src_level, src_z,
&src_image_x, &src_image_y);
src_x += src_image_x;
src_y += src_image_y;

uint32_t dst_image_x, dst_image_y;
intel_miptree_get_image_offset(dst_mt, dst_level, dst_z,
&dst_image_x, &dst_image_y);
dst_x += dst_image_x;
dst_y += dst_image_y;

@@ -243,9 +254,11 @@ intel_copy_image_sub_data(struct gl_context *ctx,
intel_miptree_all_slices_resolve_depth(brw, intel_dst_image->mt);
intel_miptree_resolve_color(brw, intel_dst_image->mt);

if (copy_image_with_blitter(brw, intel_src_image->mt, src_image->Level,
unsigned src_level = src_image->Level + src_image->TexObject->MinLevel;
unsigned dst_level = dst_image->Level + dst_image->TexObject->MinLevel;
if (copy_image_with_blitter(brw, intel_src_image->mt, src_level,
src_x, src_y, src_z,
intel_dst_image->mt, src_image->Level,
intel_dst_image->mt, dst_level,
dst_x, dst_y, dst_z,
src_width, src_height))
return;
@@ -253,9 +266,9 @@ intel_copy_image_sub_data(struct gl_context *ctx,
/* This is a worst-case scenario software fallback that maps the two
* textures and does a memcpy between them.
*/
copy_image_with_memcpy(brw, intel_src_image->mt, src_image->Level,
copy_image_with_memcpy(brw, intel_src_image->mt, src_level,
src_x, src_y, src_z,
intel_dst_image->mt, src_image->Level,
intel_dst_image->mt, dst_level,
dst_x, dst_y, dst_z,
src_width, src_height);
}
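The blitter fix above is about coordinate spaces: for a compressed format the region must be converted from pixels to blocks, dividing x and width by the block width and y and height by the block height, where the old code divided the y terms by bw (which only happened to work because the common formats use square 4x4 blocks). A minimal sketch of the corrected conversion, with a hypothetical helper name but the same arithmetic as the patch:

/* Convert a pixel-space region to block space for a bw x bh compressed block. */
static void pixels_to_blocks(unsigned bw, unsigned bh,
                             int *x, int *y, int *width, int *height)
{
   *x /= (int)bw;
   *y /= (int)bh;
   *width /= (int)bw;
   *height /= (int)bh;
}

/* Example: a 16x16 pixel region of a 4x4-block format becomes a 4x4 block region. */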
@@ -1488,6 +1488,10 @@ copy_array_attrib(struct gl_context *ctx,

/* skip ArrayBufferObj */
/* skip IndexBufferObj */

/* Invalidate draw state. It will be updated during the next draw. */
dest->DrawMethod = DRAW_NONE;
dest->_DrawArrays = NULL;
}

/**
@@ -653,6 +653,9 @@ _mesa_init_constants(struct gl_constants *consts, gl_api api)
/* GL_ARB_framebuffer_object */
consts->MaxSamples = 0;

/* GLSL default if NativeIntegers == FALSE */
consts->UniformBooleanTrue = FLT_AS_UINT(1.0f);

/* GL_ARB_sync */
consts->MaxServerWaitTimeout = 0x1fff7fffffffULL;
@@ -184,6 +184,13 @@ static inline GLfloat UINT_AS_FLT(GLuint u)
return tmp.f;
}

static inline unsigned FLT_AS_UINT(float f)
{
fi_type tmp;
tmp.f = f;
return tmp.u;
}

/**
* Convert a floating point value to an unsigned fixed point value.
*
@@ -34,6 +34,7 @@
#include "pipe/p_context.h"
#include "pipe/p_defines.h"
#include "pipe/p_screen.h"
#include "util/u_math.h"

#include "st_context.h"
#include "st_extensions.h"
@@ -274,8 +275,6 @@ void st_init_limits(struct pipe_screen *screen,
c->MinProgramTextureGatherOffset = screen->get_param(screen, PIPE_CAP_MIN_TEXTURE_GATHER_OFFSET);
c->MaxProgramTextureGatherOffset = screen->get_param(screen, PIPE_CAP_MAX_TEXTURE_GATHER_OFFSET);

c->UniformBooleanTrue = ~0;

c->MaxTransformFeedbackBuffers =
screen->get_param(screen, PIPE_CAP_MAX_STREAM_OUTPUT_BUFFERS);
c->MaxTransformFeedbackBuffers = MIN2(c->MaxTransformFeedbackBuffers, MAX_FEEDBACK_BUFFERS);
@@ -697,6 +696,8 @@ void st_init_extensions(struct pipe_screen *screen,
}
}

consts->UniformBooleanTrue = consts->NativeIntegers ? ~0 : fui(1.0f);

/* Below are the cases which cannot be moved into tables easily. */

if (!has_lib_dxtc && !options->force_s3tc_enable) {
@@ -74,14 +74,6 @@ extern "C" {
(1 << PROGRAM_CONSTANT) | \
(1 << PROGRAM_UNIFORM))

/**
* Maximum number of temporary registers.
*
* It is too big for stack allocated arrays -- it will cause stack overflow on
* Windows and likely Mac OS X.
*/
#define MAX_TEMPS 4096

/**
* Maximum number of arrays
*/
@@ -3301,14 +3293,10 @@ get_src_arg_mask(st_dst_reg dst, st_src_reg src)
void
glsl_to_tgsi_visitor::simplify_cmp(void)
{
unsigned *tempWrites;
int tempWritesSize = 0;
unsigned *tempWrites = NULL;
unsigned outputWrites[MAX_PROGRAM_OUTPUTS];

tempWrites = new unsigned[MAX_TEMPS];
if (!tempWrites) {
return;
}
memset(tempWrites, 0, sizeof(unsigned) * MAX_TEMPS);
memset(outputWrites, 0, sizeof(outputWrites));

foreach_in_list(glsl_to_tgsi_instruction, inst, &this->instructions) {
@@ -3330,7 +3318,19 @@ glsl_to_tgsi_visitor::simplify_cmp(void)
prevWriteMask = outputWrites[inst->dst.index];
outputWrites[inst->dst.index] |= inst->dst.writemask;
} else if (inst->dst.file == PROGRAM_TEMPORARY) {
assert(inst->dst.index < MAX_TEMPS);
if (inst->dst.index >= tempWritesSize) {
const int inc = 4096;

tempWrites = (unsigned*)
realloc(tempWrites,
(tempWritesSize + inc) * sizeof(unsigned));
if (!tempWrites)
return;

memset(tempWrites + tempWritesSize, 0, inc * sizeof(unsigned));
tempWritesSize += inc;
}

prevWriteMask = tempWrites[inst->dst.index];
tempWrites[inst->dst.index] |= inst->dst.writemask;
} else
@@ -3349,7 +3349,7 @@ glsl_to_tgsi_visitor::simplify_cmp(void)
}
}

delete [] tempWrites;
free(tempWrites);
}

/* Replaces all references to a temporary register index with another index. */
@@ -4158,7 +4158,9 @@ struct label {
struct st_translate {
struct ureg_program *ureg;

struct ureg_dst temps[MAX_TEMPS];
unsigned temps_size;
struct ureg_dst *temps;

struct ureg_dst arrays[MAX_ARRAYS];
struct ureg_src *constants;
struct ureg_src *immediates;
@@ -4299,7 +4301,19 @@ dst_register(struct st_translate *t,
return ureg_dst_undef();

case PROGRAM_TEMPORARY:
assert(index < Elements(t->temps));
/* Allocate space for temporaries on demand. */
if (index >= t->temps_size) {
const int inc = 4096;

t->temps = (struct ureg_dst*)
realloc(t->temps,
(t->temps_size + inc) * sizeof(struct ureg_dst));
if (!t->temps)
return ureg_dst_undef();

memset(t->temps + t->temps_size, 0, inc * sizeof(struct ureg_dst));
t->temps_size += inc;
}

if (ureg_dst_is_undef(t->temps[index]))
t->temps[index] = ureg_DECL_local_temporary(t->ureg);
@@ -5158,6 +5172,7 @@ st_translate_program(

out:
if (t) {
free(t->temps);
free(t->insn);
free(t->labels);
free(t->constants);
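Both the simplify_cmp() and st_translate changes above replace fixed MAX_TEMPS-sized arrays with buffers grown on demand in 4096-entry chunks. A minimal stand-alone sketch of that pattern (hypothetical helper, plain realloc/memset as in the patch):

#include <stdlib.h>
#include <string.h>

/* Grow a zero-initialized array in fixed-size chunks so `index` becomes valid. */
static unsigned *ensure_slot(unsigned *arr, unsigned *size, unsigned index, unsigned inc)
{
   if (index < *size)
      return arr;

   unsigned new_size = *size + ((index - *size) / inc + 1) * inc;
   unsigned *tmp = realloc(arr, new_size * sizeof(*arr));
   if (!tmp) {
      free(arr);                  /* don't leak the old block on failure */
      return NULL;
   }
   memset(tmp + *size, 0, (new_size - *size) * sizeof(*tmp));
   *size = new_size;
   return tmp;
}

Zeroing the newly added tail matters because both call sites rely on a fresh slot reading as "not written yet" (a zero writemask, or an undefined ureg_dst).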