Compare commits
33 Commits
mesa-19.3. ... mesa-19.3.

SHA1
cd736de7aa
b7ab6e9470
addf63dbd7
2b4459973b
48f8f0edca
3b8461cf16
79610494f9
32aba91c07
35182247fc
ab4df0ec72
37d13ecca7
a3d52fd4ab
36fbe5b292
9445d96d5c
5cd8c67a7f
d7c0a1d3d4
7c61e5192f
f393c92345
4fbe772b23
17ad67c6dc
61366cdf05
001e7305ab
1b8f93550a
992bff94f7
51a15eabe6
f3c0d5aa3a
512ed9899a
35c196025b
4910128bab
14c8323774
c78901c124
2a497735ec
87efb9f3a4

@@ -1,2 +1,6 @@
# This is reverted shortly after landing
4432a2d14d80081d062f7939a950d65ea3a16eed

# This was manually backported
21be5c8edd3ad156f6cbfbceb96e7939716d9f2c
4b392ced2d744fccffe95490ff57e6b41033c266

@@ -1362,20 +1362,6 @@ EGLAPI EGLuint64NV EGLAPIENTRY eglGetSystemTimeNV (void);
#define EGL_NATIVE_SURFACE_TIZEN 0x32A1
#endif /* EGL_TIZEN_image_native_surface */

#ifndef EGL_EXT_image_flush_external
#define EGL_EXT_image_flush_external 1
#define EGL_IMAGE_EXTERNAL_FLUSH_EXT 0x32A2
typedef EGLBoolean (EGLAPIENTRYP PFNEGLIMAGEFLUSHEXTERNALEXTPROC) (EGLDisplay dpy, EGLImageKHR image, const EGLAttrib *attrib_list);
typedef EGLBoolean (EGLAPIENTRYP PFNEGLIMAGEINVALIDATEEXTERNALEXTPROC) (EGLDisplay dpy, EGLImageKHR image, const EGLAttrib *attrib_list);
#ifdef EGL_EGLEXT_PROTOTYPES
EGLAPI EGLBoolean EGLAPIENTRY eglImageFlushExternalEXT (EGLDisplay dpy, EGLImageKHR image, const EGLAttrib *attrib_list);
EGLAPI EGLBoolean EGLAPIENTRY eglImageInvalidateExternalEXT (EGLDisplay dpy, EGLImageKHR image, const EGLAttrib *attrib_list);
#endif
#endif /* EGL_EXT_image_flush_external */

#include <EGL/eglmesaext.h>
#include <EGL/eglextchromium.h>

#ifdef __cplusplus
}
#endif

@@ -53,6 +53,17 @@ typedef EGLBoolean (EGLAPIENTRYP PFNEGLGETSYNCVALUESCHROMIUMPROC)
#endif
#endif

#ifndef EGL_EXT_image_flush_external
#define EGL_EXT_image_flush_external 1
#define EGL_IMAGE_EXTERNAL_FLUSH_EXT 0x32A2
typedef EGLBoolean (EGLAPIENTRYP PFNEGLIMAGEFLUSHEXTERNALEXTPROC) (EGLDisplay dpy, EGLImageKHR image, const EGLAttrib *attrib_list);
typedef EGLBoolean (EGLAPIENTRYP PFNEGLIMAGEINVALIDATEEXTERNALEXTPROC) (EGLDisplay dpy, EGLImageKHR image, const EGLAttrib *attrib_list);
#ifdef EGL_EGLEXT_PROTOTYPES
EGLAPI EGLBoolean EGLAPIENTRY eglImageFlushExternalEXT (EGLDisplay dpy, EGLImageKHR image, const EGLAttrib *attrib_list);
EGLAPI EGLBoolean EGLAPIENTRY eglImageInvalidateExternalEXT (EGLDisplay dpy, EGLImageKHR image, const EGLAttrib *attrib_list);
#endif
#endif /* EGL_EXT_image_flush_external */

#ifdef __cplusplus
}
#endif

@@ -1306,7 +1306,7 @@ struct __DRIdri2ExtensionRec {
* extensions.
*/
#define __DRI_IMAGE "DRI_IMAGE"
#define __DRI_IMAGE_VERSION 18
#define __DRI_IMAGE_VERSION 17

/**
* These formats correspond to the similarly named MESA_FORMAT_*
@@ -1353,8 +1353,6 @@ struct __DRIdri2ExtensionRec {
* could be read after a flush."
*/
#define __DRI_IMAGE_USE_BACKBUFFER 0x0010
/* Whether to expect explicit flushes for external consumers. */
#define __DRI_IMAGE_USE_FLUSH_EXTERNAL 0x0020

#define __DRI_IMAGE_TRANSFER_READ 0x1
@@ -1755,53 +1753,6 @@ struct __DRIimageExtensionRec {
int renderbuffer,
void *loaderPrivate,
unsigned *error);

/**
* Flush the image for external consumers. This is called when
* the current context is the producer.
*
* \since 18
*/
void (*imageFlushExternal)(__DRIcontext *context, __DRIimage *image,
unsigned flags);

/**
* This call indicates that the image has been modified outside of
* the current context. This is called when the current context is
* the consumer of the image.
*
* \since 18
*/
void (*imageInvalidateExternal)(__DRIcontext *context, __DRIimage *image,
unsigned flags);

/**
* Same as createImageFromName, but also specifies use.
*
* \since 18
*/
__DRIimage *(*createImageFromName2)(__DRIscreen *screen,
int width, int height, int format,
int name, int pitch, unsigned use,
void *loaderPrivate);

/**
* Same as createImageFromDmaBufs, but also specifies modifier and use.
* Set modifier to DRM_FORMAT_MOD_INVALID if not using it.
*
* \since 18
*/
__DRIimage *(*createImageFromDmaBufs3)(__DRIscreen *screen,
int width, int height, int fourcc,
uint64_t modifier, unsigned use,
int *fds, int num_fds,
int *strides, int *offsets,
enum __DRIYUVColorSpace color_space,
enum __DRISampleRange sample_range,
enum __DRIChromaSiting horiz_siting,
enum __DRIChromaSiting vert_siting,
unsigned *error,
void *loaderPrivate);
};

@@ -134,10 +134,7 @@ Temp emit_wqm(isel_context *ctx, Temp src, Temp dst=Temp(0, s1), bool program_ne
if (!dst.id())
return src;

if (src.type() == RegType::vgpr || src.size() > 1)
bld.copy(Definition(dst), src);
else
bld.sop1(aco_opcode::s_mov_b32, Definition(dst), src);
bld.copy(Definition(dst), src);
return dst;
}

@@ -148,6 +145,9 @@ Temp emit_wqm(isel_context *ctx, Temp src, Temp dst=Temp(0, s1), bool program_ne

static Temp emit_bpermute(isel_context *ctx, Builder &bld, Temp index, Temp data)
{
if (index.regClass() == s1)
return bld.vop3(aco_opcode::v_readlane_b32, bld.def(s1), data, index);

Temp index_x4 = bld.vop2(aco_opcode::v_lshlrev_b32, bld.def(v1), Operand(2u), index);

/* Currently not implemented on GFX6-7 */
@@ -1647,7 +1647,7 @@ void visit_alu_instr(isel_context *ctx, nir_alu_instr *instr)
} else if (dst.size() == 2) {
Temp cond = bld.vopc(aco_opcode::v_cmp_nlt_f64, bld.hint_vcc(bld.def(s2)), Operand(0u), src);
Temp tmp = bld.vop1(aco_opcode::v_mov_b32, bld.def(v1), Operand(0x3FF00000u));
Temp upper = bld.vop2_e64(aco_opcode::v_cndmask_b32, bld.def(v1), tmp, src, cond);
Temp upper = bld.vop2_e64(aco_opcode::v_cndmask_b32, bld.def(v1), tmp, emit_extract_vector(ctx, src, 1, v1), cond);

cond = bld.vopc(aco_opcode::v_cmp_le_f64, bld.hint_vcc(bld.def(s2)), Operand(0u), src);
tmp = bld.vop1(aco_opcode::v_mov_b32, bld.def(v1), Operand(0xBFF00000u));
@@ -5557,11 +5557,11 @@ void visit_intrinsic(isel_context *ctx, nir_intrinsic_instr *instr)
}
case nir_intrinsic_shuffle: {
Temp src = get_ssa_temp(ctx, instr->src[0].ssa);
if (!ctx->divergent_vals[instr->dest.ssa.index]) {
if (!ctx->divergent_vals[instr->dest.ssa.index] &&
!ctx->divergent_vals[instr->src[0].ssa->index]) {
emit_uniform_subgroup(ctx, instr, src);
} else {
Temp tid = get_ssa_temp(ctx, instr->src[1].ssa);
assert(tid.regClass() == v1);
Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
if (src.regClass() == v1) {
emit_wqm(ctx, emit_bpermute(ctx, bld, tid, src), dst);
@@ -5626,9 +5626,8 @@ void visit_intrinsic(isel_context *ctx, nir_intrinsic_instr *instr)
}
case nir_intrinsic_read_invocation: {
Temp src = get_ssa_temp(ctx, instr->src[0].ssa);
Temp lane = get_ssa_temp(ctx, instr->src[1].ssa);
Temp lane = bld.as_uniform(get_ssa_temp(ctx, instr->src[1].ssa));
Temp dst = get_ssa_temp(ctx, &instr->dest.ssa);
assert(lane.regClass() == s1);
if (src.regClass() == v1) {
emit_wqm(ctx, bld.vop3(aco_opcode::v_readlane_b32, bld.def(s1), src, lane), dst);
} else if (src.regClass() == v2) {

@@ -469,11 +469,24 @@ bool can_accept_constant(aco_ptr<Instruction>& instr, unsigned operand)

bool valu_can_accept_literal(opt_ctx& ctx, aco_ptr<Instruction>& instr, unsigned operand)
{
/* instructions like v_cndmask_b32 can't take a literal because they always
* read SGPRs */
if (instr->operands.size() >= 3 &&
instr->operands[2].isTemp() && instr->operands[2].regClass().type() == RegType::sgpr)
return false;

// TODO: VOP3 can take a literal on GFX10
return !instr->isSDWA() && !instr->isDPP() && !instr->isVOP3() &&
operand == 0 && can_accept_constant(instr, operand);
}

bool valu_can_accept_vgpr(aco_ptr<Instruction>& instr, unsigned operand)
{
if (instr->opcode == aco_opcode::v_readlane_b32 || instr->opcode == aco_opcode::v_writelane_b32)
return operand != 1;
return true;
}

bool parse_base_offset(opt_ctx &ctx, Instruction* instr, unsigned op_index, Temp *base, uint32_t *offset)
{
Operand op = instr->operands[op_index];
@@ -576,7 +589,7 @@ void label_instruction(opt_ctx &ctx, aco_ptr<Instruction>& instr)

/* VALU: propagate neg, abs & inline constants */
else if (instr->isVALU()) {
if (info.is_temp() && info.temp.type() == RegType::vgpr) {
if (info.is_temp() && info.temp.type() == RegType::vgpr && valu_can_accept_vgpr(instr, i)) {
instr->operands[i].setTemp(info.temp);
info = ctx.info[info.temp.id()];
}

@@ -881,7 +881,15 @@ void handle_pseudo(ra_ctx& ctx,
break;
}
}
if (!writes_sgpr)
/* if all operands are constant, no need to care either */
bool reads_sgpr = false;
for (Operand& op : instr->operands) {
if (op.isTemp() && op.getTemp().type() == RegType::sgpr) {
reads_sgpr = true;
break;
}
}
if (!(writes_sgpr && reads_sgpr))
return;

Pseudo_instruction *pi = (Pseudo_instruction *)instr;

@@ -1414,7 +1422,7 @@ void register_allocation(Program *program, std::vector<std::set<Temp>> live_out_
for (unsigned j = 0; j < i; j++) {
Operand& op = instr->operands[j];
if (op.isTemp() && op.tempId() == blocking_id) {
op = Operand(pc_def.getTemp());
op.setTemp(pc_def.getTemp());
op.setFixed(reg);
}
}

@@ -265,7 +265,7 @@ aco_ptr<Instruction> do_reload(spill_ctx& ctx, Temp tmp, Temp new_name, uint32_t
} else if (instr->format == Format::SOP1) {
res.reset(create_instruction<SOP1_instruction>(instr->opcode, instr->format, instr->operands.size(), instr->definitions.size()));
} else if (instr->format == Format::PSEUDO) {
res.reset(create_instruction<Instruction>(instr->opcode, instr->format, instr->operands.size(), instr->definitions.size()));
res.reset(create_instruction<Pseudo_instruction>(instr->opcode, instr->format, instr->operands.size(), instr->definitions.size()));
}
for (unsigned i = 0; i < instr->operands.size(); i++) {
res->operands[i] = instr->operands[i];

@@ -200,7 +200,7 @@ class Value(object):
${val.cond if val.cond else 'NULL'},
${val.swizzle()},
% elif isinstance(val, Expression):
${'true' if val.inexact else 'false'},
${'true' if val.inexact else 'false'}, ${'true' if val.exact else 'false'},
${val.comm_expr_idx}, ${val.comm_exprs},
${val.c_opcode()},
{ ${', '.join(src.c_value_ptr(cache) for src in val.sources)} },
@@ -348,7 +348,7 @@ class Variable(Value):
return '{' + ', '.join([str(swizzles[c]) for c in self.swiz[1:]]) + '}'
return '{0, 1, 2, 3}'

_opcode_re = re.compile(r"(?P<inexact>~)?(?P<opcode>\w+)(?:@(?P<bits>\d+))?"
_opcode_re = re.compile(r"(?P<inexact>~)?(?P<exact>!)?(?P<opcode>\w+)(?:@(?P<bits>\d+))?"
r"(?P<cond>\([^\)]+\))?")

class Expression(Value):
@@ -362,8 +362,12 @@ class Expression(Value):
self.opcode = m.group('opcode')
self._bit_size = int(m.group('bits')) if m.group('bits') else None
self.inexact = m.group('inexact') is not None
self.exact = m.group('exact') is not None
self.cond = m.group('cond')

assert not self.inexact or not self.exact, \
'Expression cannot be both exact and inexact.'

# "many-comm-expr" isn't really a condition. It's notification to the
# generator that this pattern is known to have too many commutative
# expressions, and an error should not be generated for this case.

@@ -69,6 +69,9 @@ e = 'e'
# expression this indicates that the constructed value should have that
# bit-size.
#
# If the opcode in a replacement expression is prefixed by a '!' character,
# this indicated that the new expression will be marked exact.
#
# A special condition "many-comm-expr" can be used with expressions to note
# that the expression and its subexpressions have more commutative expressions
# than nir_replace_instr can handle. If this special condition is needed with
@@ -1351,8 +1354,8 @@ optimizations += [(bitfield_reverse('x@32'), ('bitfield_reverse', 'x'), '!option
# and, if a is a NaN then the second comparison will fail anyway.
for op in ['flt', 'fge', 'feq']:
optimizations += [
(('iand', ('feq', a, a), (op, a, b)), (op, a, b)),
(('iand', ('feq', a, a), (op, b, a)), (op, b, a)),
(('iand', ('feq', a, a), (op, a, b)), ('!' + op, a, b)),
(('iand', ('feq', a, a), (op, b, a)), ('!' + op, b, a)),
]

# Add optimizations to handle the case where the result of a ternary is

@@ -474,7 +474,7 @@ construct_value(nir_builder *build,
* expression we are replacing has any exact values, the entire
* replacement should be exact.
*/
alu->exact = state->has_exact_alu;
alu->exact = state->has_exact_alu || expr->exact;

for (unsigned i = 0; i < nir_op_infos[op].num_inputs; i++) {
/* If the source is an explicitly sized source, then we need to reset

@@ -138,6 +138,9 @@ typedef struct {
*/
bool inexact;

/** In a replacement, requests that the instruction be marked exact. */
bool exact;

/* Commutative expression index. This is assigned by opt_algebraic.py when
* search structures are constructed and is a unique (to this structure)
* index within the commutative operation bitfield used for searching for

@@ -801,7 +801,6 @@ dri2_setup_screen(_EGLDisplay *disp)
|
||||
}
|
||||
|
||||
disp->Extensions.KHR_image_base = EGL_TRUE;
|
||||
disp->Extensions.EXT_image_flush_external = EGL_TRUE;
|
||||
disp->Extensions.KHR_gl_renderbuffer_image = EGL_TRUE;
|
||||
if (dri2_dpy->image->base.version >= 5 &&
|
||||
dri2_dpy->image->createImageFromTexture) {
|
||||
@@ -2298,27 +2297,14 @@ dri2_create_image_mesa_drm_buffer(_EGLDisplay *disp, _EGLContext *ctx,
|
||||
return NULL;
|
||||
}
|
||||
|
||||
if (dri2_dpy->image->base.version >= 18) {
|
||||
unsigned use = 0;
|
||||
|
||||
if (attrs.ImageFlushExternal)
|
||||
use |= __DRI_IMAGE_USE_FLUSH_EXTERNAL;
|
||||
|
||||
dri_image =
|
||||
dri2_dpy->image->createImageFromName2(dri2_dpy->dri_screen,
|
||||
attrs.Width, attrs.Height,
|
||||
format, name, pitch, use,
|
||||
NULL);
|
||||
} else {
|
||||
dri_image =
|
||||
dri2_dpy->image->createImageFromName(dri2_dpy->dri_screen,
|
||||
attrs.Width,
|
||||
attrs.Height,
|
||||
format,
|
||||
name,
|
||||
pitch,
|
||||
NULL);
|
||||
}
|
||||
dri_image =
|
||||
dri2_dpy->image->createImageFromName(dri2_dpy->dri_screen,
|
||||
attrs.Width,
|
||||
attrs.Height,
|
||||
format,
|
||||
name,
|
||||
pitch,
|
||||
NULL);
|
||||
|
||||
return dri2_create_image_from_dri(disp, dri_image);
|
||||
}
|
||||
@@ -2660,26 +2646,7 @@ dri2_create_image_dma_buf(_EGLDisplay *disp, _EGLContext *ctx,
|
||||
has_modifier = true;
|
||||
}
|
||||
|
||||
if (dri2_dpy->image->base.version >= 18) {
|
||||
unsigned use = 0;
|
||||
|
||||
if (attrs.ImageFlushExternal)
|
||||
use |= __DRI_IMAGE_USE_FLUSH_EXTERNAL;
|
||||
|
||||
if (!has_modifier)
|
||||
modifier = DRM_FORMAT_MOD_INVALID;
|
||||
|
||||
dri_image =
|
||||
dri2_dpy->image->createImageFromDmaBufs3(dri2_dpy->dri_screen,
|
||||
attrs.Width, attrs.Height, attrs.DMABufFourCC.Value,
|
||||
modifier, use, fds, num_fds, pitches, offsets,
|
||||
attrs.DMABufYuvColorSpaceHint.Value,
|
||||
attrs.DMABufSampleRangeHint.Value,
|
||||
attrs.DMABufChromaHorizontalSiting.Value,
|
||||
attrs.DMABufChromaVerticalSiting.Value,
|
||||
&error,
|
||||
NULL);
|
||||
} else if (has_modifier) {
|
||||
if (has_modifier) {
|
||||
if (dri2_dpy->image->base.version < 15 ||
|
||||
dri2_dpy->image->createImageFromDmaBufs2 == NULL) {
|
||||
_eglError(EGL_BAD_MATCH, "unsupported dma_buf format modifier");
|
||||
@@ -2695,7 +2662,8 @@ dri2_create_image_dma_buf(_EGLDisplay *disp, _EGLContext *ctx,
|
||||
attrs.DMABufChromaVerticalSiting.Value,
|
||||
&error,
|
||||
NULL);
|
||||
} else {
|
||||
}
|
||||
else {
|
||||
dri_image =
|
||||
dri2_dpy->image->createImageFromDmaBufs(dri2_dpy->dri_screen,
|
||||
attrs.Width, attrs.Height, attrs.DMABufFourCC.Value,
|
||||
@@ -2766,8 +2734,6 @@ dri2_create_drm_image_mesa(_EGLDriver *drv, _EGLDisplay *disp,
|
||||
dri_use |= __DRI_IMAGE_USE_SCANOUT;
|
||||
if (attrs.DRMBufferUseMESA & EGL_DRM_BUFFER_USE_CURSOR_MESA)
|
||||
dri_use |= __DRI_IMAGE_USE_CURSOR;
|
||||
if (attrs.ImageFlushExternal)
|
||||
dri_use |= __DRI_IMAGE_USE_FLUSH_EXTERNAL;
|
||||
|
||||
dri2_img = malloc(sizeof *dri2_img);
|
||||
if (!dri2_img) {
|
||||
@@ -3476,37 +3442,6 @@ dri2_interop_export_object(_EGLDisplay *disp, _EGLContext *ctx,
|
||||
return dri2_dpy->interop->export_object(dri2_ctx->dri_context, in, out);
|
||||
}
|
||||
|
||||
static void
|
||||
dri2_image_flush_external(_EGLDisplay *disp, _EGLContext *ctx,
|
||||
_EGLImage *image)
|
||||
{
|
||||
struct dri2_egl_display *dri2_dpy = dri2_egl_display(disp);
|
||||
struct dri2_egl_context *dri2_ctx = dri2_egl_context(ctx);
|
||||
struct dri2_egl_image *dri2_img = dri2_egl_image(image);
|
||||
|
||||
if (dri2_dpy->image->base.version < 18)
|
||||
return;
|
||||
|
||||
dri2_dpy->image->imageFlushExternal(dri2_ctx->dri_context,
|
||||
dri2_img->dri_image, 0);
|
||||
}
|
||||
|
||||
static void
|
||||
dri2_image_invalidate_external(_EGLDisplay *disp, _EGLContext *ctx,
|
||||
_EGLImage *image)
|
||||
{
|
||||
struct dri2_egl_display *dri2_dpy = dri2_egl_display(disp);
|
||||
struct dri2_egl_context *dri2_ctx = dri2_egl_context(ctx);
|
||||
struct dri2_egl_image *dri2_img = dri2_egl_image(image);
|
||||
|
||||
if (dri2_dpy->image->base.version < 18)
|
||||
return;
|
||||
|
||||
dri2_dpy->image->imageInvalidateExternal(dri2_ctx->dri_context,
|
||||
dri2_img->dri_image, 0);
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* This is the main entrypoint into the driver, called by libEGL.
|
||||
* Gets an _EGLDriver object and init its dispatch table.
|
||||
@@ -3565,6 +3500,4 @@ _eglInitDriver(_EGLDriver *dri2_drv)
|
||||
dri2_drv->API.GLInteropExportObject = dri2_interop_export_object;
|
||||
dri2_drv->API.DupNativeFenceFDANDROID = dri2_dup_native_fence_fd;
|
||||
dri2_drv->API.SetBlobCacheFuncsANDROID = dri2_set_blob_cache_funcs;
|
||||
dri2_drv->API.ImageFlushExternal = dri2_image_flush_external;
|
||||
dri2_drv->API.ImageInvalidateExternal = dri2_image_invalidate_external;
|
||||
}
|
||||
|
@@ -1795,18 +1795,6 @@
<param><ptype>EGLint</ptype> <name>external_win_id</name></param>
<param><ptype>EGLint</ptype> <name>policy</name></param>
</command>
<command>
<proto><ptype>EGLBoolean</ptype> <name>eglImageFlushExternalEXT</name></proto>
<param><ptype>EGLDisplay</ptype> <name>dpy</name></param>
<param><ptype>EGLImageKHR</ptype> <name>image</name></param>
<param>const <ptype>EGLAttrib</ptype> *<name>attrib_list</name></param>
</command>
<command>
<proto><ptype>EGLBoolean</ptype> <name>eglImageInvalidateExternalEXT</name></proto>
<param><ptype>EGLDisplay</ptype> <name>dpy</name></param>
<param><ptype>EGLImageKHR</ptype> <name>image</name></param>
<param>const <ptype>EGLAttrib</ptype> *<name>attrib_list</name></param>
</command>
</commands>

<!-- SECTION: EGL API interface definitions. -->
|
@@ -213,7 +213,5 @@ EGL_FUNCTIONS = (
_eglFunc("eglGetDisplayDriverName", "display"),
_eglFunc("eglGetDisplayDriverConfig", "display"),

_eglFunc("eglImageFlushExternalEXT", "display"),
_eglFunc("eglImageInvalidateExternalEXT", "display"),
)
|
||||
|
@@ -100,6 +100,8 @@ def generateHeader(functions):

#include <EGL/egl.h>
#include <EGL/eglext.h>
#include <EGL/eglmesaext.h>
#include <EGL/eglextchromium.h>
#include "glvnd/libeglabi.h"

""".lstrip("\n"))
|
@@ -499,7 +499,6 @@ _eglCreateExtensionsString(_EGLDisplay *disp)
|
||||
_EGL_CHECK_EXTENSION(EXT_create_context_robustness);
|
||||
_EGL_CHECK_EXTENSION(EXT_image_dma_buf_import);
|
||||
_EGL_CHECK_EXTENSION(EXT_image_dma_buf_import_modifiers);
|
||||
_EGL_CHECK_EXTENSION(EXT_image_flush_external);
|
||||
_EGL_CHECK_EXTENSION(EXT_surface_CTA861_3_metadata);
|
||||
_EGL_CHECK_EXTENSION(EXT_surface_SMPTE2086_metadata);
|
||||
_EGL_CHECK_EXTENSION(EXT_swap_buffers_with_damage);
|
||||
@@ -2748,55 +2747,6 @@ eglGetDisplayDriverName(EGLDisplay dpy)
|
||||
RETURN_EGL_EVAL(disp, ret);
|
||||
}
|
||||
|
||||
static EGLBoolean EGLAPIENTRY
|
||||
eglImageFlushExternalEXT(EGLDisplay dpy, EGLImageKHR image,
|
||||
const EGLAttrib *attrib_list)
|
||||
{
|
||||
_EGLDisplay *disp = _eglLockDisplay(dpy);
|
||||
_EGLContext *ctx = _eglGetCurrentContext();
|
||||
_EGLImage *img = _eglLookupImage(image, disp);
|
||||
_EGLDriver *drv;
|
||||
|
||||
_EGL_FUNC_START(disp, EGL_OBJECT_IMAGE_KHR, img, EGL_FALSE);
|
||||
_EGL_CHECK_DISPLAY(disp, EGL_FALSE, drv);
|
||||
|
||||
if (attrib_list && attrib_list[0] != EGL_NONE)
|
||||
RETURN_EGL_ERROR(disp, EGL_BAD_PARAMETER, EGL_FALSE);
|
||||
|
||||
if (!ctx || !disp->Extensions.EXT_image_flush_external)
|
||||
RETURN_EGL_EVAL(disp, EGL_FALSE);
|
||||
if (!img)
|
||||
RETURN_EGL_ERROR(disp, EGL_BAD_PARAMETER, EGL_FALSE);
|
||||
|
||||
|
||||
drv->API.ImageFlushExternal(disp, ctx, img);
|
||||
RETURN_EGL_EVAL(disp, EGL_TRUE);
|
||||
}
|
||||
|
||||
static EGLBoolean EGLAPIENTRY
|
||||
eglImageInvalidateExternalEXT(EGLDisplay dpy, EGLImageKHR image,
|
||||
const EGLAttrib *attrib_list)
|
||||
{
|
||||
_EGLDisplay *disp = _eglLockDisplay(dpy);
|
||||
_EGLContext *ctx = _eglGetCurrentContext();
|
||||
_EGLImage *img = _eglLookupImage(image, disp);
|
||||
_EGLDriver *drv;
|
||||
|
||||
_EGL_FUNC_START(disp, EGL_OBJECT_IMAGE_KHR, img, EGL_FALSE);
|
||||
_EGL_CHECK_DISPLAY(disp, EGL_FALSE, drv);
|
||||
|
||||
if (attrib_list && attrib_list[0] != EGL_NONE)
|
||||
RETURN_EGL_ERROR(disp, EGL_BAD_PARAMETER, EGL_FALSE);
|
||||
|
||||
if (!ctx || !disp->Extensions.EXT_image_flush_external)
|
||||
RETURN_EGL_EVAL(disp, EGL_FALSE);
|
||||
if (!img)
|
||||
RETURN_EGL_ERROR(disp, EGL_BAD_PARAMETER, EGL_FALSE);
|
||||
|
||||
drv->API.ImageInvalidateExternal(disp, ctx, img);
|
||||
RETURN_EGL_EVAL(disp, EGL_TRUE);
|
||||
}
|
||||
|
||||
__eglMustCastToProperFunctionPointerType EGLAPIENTRY
|
||||
eglGetProcAddress(const char *procname)
|
||||
{
|
||||
|
@@ -189,11 +189,6 @@ struct _egl_api
void (*SetBlobCacheFuncsANDROID) (_EGLDriver *drv, _EGLDisplay *disp,
EGLSetBlobFuncANDROID set,
EGLGetBlobFuncANDROID get);

void (*ImageFlushExternal)(_EGLDisplay *disp, _EGLContext *ctx,
_EGLImage *image);
void (*ImageInvalidateExternal)(_EGLDisplay *disp, _EGLContext *ctx,
_EGLImage *image);
};

#ifdef __cplusplus
|
@@ -105,7 +105,6 @@ struct _egl_extensions
EGLBoolean EXT_create_context_robustness;
EGLBoolean EXT_image_dma_buf_import;
EGLBoolean EXT_image_dma_buf_import_modifiers;
EGLBoolean EXT_image_flush_external;
EGLBoolean EXT_pixel_format_float;
EGLBoolean EXT_surface_CTA861_3_metadata;
EGLBoolean EXT_surface_SMPTE2086_metadata;
|
@@ -51,8 +51,6 @@ EGL_ENTRYPOINT(eglGetProcAddress)
EGL_ENTRYPOINT(eglGetSyncAttrib)
EGL_ENTRYPOINT(eglGetSyncAttribKHR)
EGL_ENTRYPOINT(eglGetSyncValuesCHROMIUM)
EGL_ENTRYPOINT(eglImageFlushExternalEXT)
EGL_ENTRYPOINT(eglImageInvalidateExternalEXT)
EGL_ENTRYPOINT(eglInitialize)
EGL_ENTRYPOINT(eglLabelObjectKHR)
EGL_ENTRYPOINT(eglMakeCurrent)
|
@@ -265,25 +265,6 @@ _eglParseEXTImageDmaBufImportModifiersAttribs(_EGLImageAttribs *attrs,
|
||||
return EGL_SUCCESS;
|
||||
}
|
||||
|
||||
static EGLint
|
||||
_eglParseEXTImageFlushExternalAttribs(_EGLImageAttribs *attrs,
|
||||
_EGLDisplay *disp,
|
||||
EGLint attr, EGLint val)
|
||||
{
|
||||
if (!disp->Extensions.EXT_image_flush_external)
|
||||
return EGL_BAD_PARAMETER;
|
||||
|
||||
switch (attr) {
|
||||
case EGL_IMAGE_EXTERNAL_FLUSH_EXT:
|
||||
attrs->ImageFlushExternal = val;
|
||||
break;
|
||||
default:
|
||||
return EGL_BAD_PARAMETER;
|
||||
}
|
||||
|
||||
return EGL_SUCCESS;
|
||||
}
|
||||
|
||||
/**
|
||||
* Parse the list of image attributes.
|
||||
*
|
||||
@@ -305,10 +286,6 @@ _eglParseImageAttribList(_EGLImageAttribs *attrs, _EGLDisplay *disp,
|
||||
EGLint attr = attrib_list[i++];
|
||||
EGLint val = attrib_list[i];
|
||||
|
||||
err = _eglParseEXTImageFlushExternalAttribs(attrs, disp, attr, val);
|
||||
if (err == EGL_SUCCESS)
|
||||
continue;
|
||||
|
||||
err = _eglParseKHRImageAttribs(attrs, disp, attr, val);
|
||||
if (err == EGL_SUCCESS)
|
||||
continue;
|
||||
|
@@ -50,9 +50,6 @@ struct _egl_image_attrib_int

struct _egl_image_attribs
{
/* EGL_EXT_image_flush_external */
EGLBoolean ImageFlushExternal;

/* EGL_KHR_image_base */
EGLBoolean ImagePreserved;
|
||||
|
@@ -33,6 +33,8 @@

#include <EGL/egl.h>
#include <EGL/eglext.h>
#include <EGL/eglmesaext.h>
#include <EGL/eglextchromium.h>

#ifdef __cplusplus
extern "C" {
|
@@ -96,12 +96,6 @@ ir3_context_init(struct ir3_compiler *compiler,
NIR_PASS_V(ctx->s, nir_opt_constant_folding);
}

/* Enable the texture pre-fetch feature only a4xx onwards. But
* only enable it on generations that have been tested:
*/
if ((so->type == MESA_SHADER_FRAGMENT) && (compiler->gpu_id >= 600))
NIR_PASS_V(ctx->s, ir3_nir_lower_tex_prefetch);

NIR_PASS_V(ctx->s, nir_convert_from_ssa, true);

if (shader_debug_enabled(so->type)) {
|
@@ -99,13 +99,8 @@ coord_offset(nir_ssa_def *ssa)
|
||||
int
|
||||
ir3_nir_coord_offset(nir_ssa_def *ssa)
|
||||
{
|
||||
/* only prefetch for simple 2d tex fetch case. Note this check only
|
||||
* applies to the tex coord src itself, and not in the case where
|
||||
* we recursively chase a vecN's src.
|
||||
*/
|
||||
if (ssa->num_components != 2)
|
||||
return -1;
|
||||
|
||||
assert (ssa->num_components == 2);
|
||||
return coord_offset(ssa);
|
||||
}
|
||||
|
||||
@@ -140,6 +135,10 @@ lower_tex_prefetch_block(nir_block *block)
|
||||
has_src(tex, nir_tex_src_sampler_offset))
|
||||
continue;
|
||||
|
||||
/* only prefetch for simple 2d tex fetch case */
|
||||
if (tex->sampler_dim != GLSL_SAMPLER_DIM_2D || tex->is_array)
|
||||
continue;
|
||||
|
||||
int idx = nir_tex_instr_src_index(tex, nir_tex_src_coord);
|
||||
/* First source should be the sampling coordinate. */
|
||||
nir_tex_src *coord = &tex->src[idx];
|
||||
|
@@ -702,6 +702,15 @@ ra_block_compute_live_ranges(struct ir3_ra_ctx *ctx, struct ir3_block *block)
|
||||
|
||||
block->data = bd;
|
||||
|
||||
struct ir3_instruction *first_non_input = NULL;
|
||||
list_for_each_entry (struct ir3_instruction, instr, &block->instr_list, node) {
|
||||
if (instr->opc != OPC_META_INPUT) {
|
||||
first_non_input = instr;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
list_for_each_entry (struct ir3_instruction, instr, &block->instr_list, node) {
|
||||
struct ir3_instruction *src;
|
||||
struct ir3_register *reg;
|
||||
@@ -771,6 +780,9 @@ ra_block_compute_live_ranges(struct ir3_ra_ctx *ctx, struct ir3_block *block)
|
||||
|
||||
def(name, id->defn);
|
||||
|
||||
if (instr->opc == OPC_META_INPUT)
|
||||
use(name, first_non_input);
|
||||
|
||||
if (is_high(id->defn)) {
|
||||
ra_set_node_class(ctx->g, name,
|
||||
ctx->set->high_classes[id->cls - HIGH_OFFSET]);
|
||||
|
@@ -783,18 +783,28 @@ sched_block(struct ir3_sched_ctx *ctx, struct ir3_block *block)
|
||||
list_inithead(&block->instr_list);
|
||||
list_inithead(&ctx->depth_list);
|
||||
|
||||
/* first a pre-pass to schedule all meta:input instructions
|
||||
* (which need to appear first so that RA knows the register is
|
||||
* occupied), and move remaining to depth sorted list:
|
||||
/* First schedule all meta:input instructions, followed by
|
||||
* tex-prefetch. We want all of the instructions that load
|
||||
* values into registers before the shader starts to go
|
||||
* before any other instructions. But in particular we
|
||||
* want inputs to come before prefetches. This is because
|
||||
* a FS's bary_ij input may not actually be live in the
|
||||
* shader, but it should not be scheduled on top of any
|
||||
* other input (but can be overwritten by a tex prefetch)
|
||||
*
|
||||
* Finally, move all the remaining instructions to the depth-
|
||||
* list
|
||||
*/
|
||||
list_for_each_entry_safe (struct ir3_instruction, instr, &unscheduled_list, node) {
|
||||
if ((instr->opc == OPC_META_INPUT) ||
|
||||
(instr->opc == OPC_META_TEX_PREFETCH)) {
|
||||
list_for_each_entry_safe (struct ir3_instruction, instr, &unscheduled_list, node)
|
||||
if (instr->opc == OPC_META_INPUT)
|
||||
schedule(ctx, instr);
|
||||
} else {
|
||||
ir3_insert_by_depth(instr, &ctx->depth_list);
|
||||
}
|
||||
}
|
||||
|
||||
list_for_each_entry_safe (struct ir3_instruction, instr, &unscheduled_list, node)
|
||||
if (instr->opc == OPC_META_TEX_PREFETCH)
|
||||
schedule(ctx, instr);
|
||||
|
||||
list_for_each_entry_safe (struct ir3_instruction, instr, &unscheduled_list, node)
|
||||
ir3_insert_by_depth(instr, &ctx->depth_list);
|
||||
|
||||
while (!list_is_empty(&ctx->depth_list)) {
|
||||
struct ir3_sched_notes notes = {0};
|
||||
|
@@ -2006,8 +2006,7 @@ to upconvert to 32b float internally?
</enum>

<bitset name="a6xx_2d_blit_cntl" inline="yes">
<bitfield name="ROTATE" low="0" high="1" type="a6xx_rotation"/>
<bitfield name="HORIZONTAL_FLIP" low="2" high="2" type="boolean"/>
<bitfield name="ROTATE" low="0" high="2" type="a6xx_rotation"/>
<bitfield name="SOLID_COLOR" pos="7" type="boolean"/>
<bitfield name="COLOR_FORMAT" low="8" high="15" type="a6xx_color_fmt"/>
<bitfield name="SCISSOR" pos="16" type="boolean"/>
|
@@ -456,7 +456,20 @@ lp_build_create_jit_compiler_for_module(LLVMExecutionEngineRef *OutJIT,
* when not using MCJIT so no instructions are generated which the old JIT
* can't handle. Not entirely sure if we really need to do anything yet.
*/
#if defined(PIPE_ARCH_LITTLE_ENDIAN) && defined(PIPE_ARCH_PPC_64)

#ifdef PIPE_ARCH_PPC_64
/*
* Large programs, e.g. gnome-shell and firefox, may tax the addressability
* of the Medium code model once dynamically generated JIT-compiled shader
* programs are linked in and relocated. Yet the default code model as of
* LLVM 8 is Medium or even Small.
* The cost of changing from Medium to Large is negligible:
* - an additional 8-byte pointer stored immediately before the shader entrypoint;
* - change an add-immediate (addis) instruction to a load (ld).
*/
builder.setCodeModel(CodeModel::Large);

#if PIPE_ARCH_LITTLE_ENDIAN
/*
* Versions of LLVM prior to 4.0 lacked a table entry for "POWER8NVL",
* resulting in (big-endian) "generic" being returned on
@@ -468,6 +481,7 @@ lp_build_create_jit_compiler_for_module(LLVMExecutionEngineRef *OutJIT,
*/
if (MCPU == "generic")
MCPU = "pwr8";
#endif
#endif
builder.setMCPU(MCPU);
if (gallivm_debug & (GALLIVM_DEBUG_IR | GALLIVM_DEBUG_ASM | GALLIVM_DEBUG_DUMP_BC)) {
|
@@ -20,6 +20,7 @@ DRI_CONF_SECTION_DEBUG
DRI_CONF_FORCE_GLSL_EXTENSIONS_WARN("false")
DRI_CONF_DISABLE_GLSL_LINE_CONTINUATIONS("false")
DRI_CONF_DISABLE_BLEND_FUNC_EXTENDED("false")
DRI_CONF_DISABLE_ARB_GPU_SHADER5("false")
DRI_CONF_FORCE_GLSL_VERSION(0)
DRI_CONF_ALLOW_GLSL_EXTENSION_DIRECTIVE_MIDSHADER("false")
DRI_CONF_ALLOW_GLSL_BUILTIN_CONST_EXPRESSION("false")
|
@@ -219,6 +219,7 @@ struct st_config_options
{
bool disable_blend_func_extended;
bool disable_glsl_line_continuations;
bool disable_arb_gpu_shader5;
bool force_glsl_extensions_warn;
unsigned force_glsl_version;
bool allow_glsl_extension_directive_midshader;
|
@@ -727,8 +727,7 @@ dri2_update_tex_buffer(struct dri_drawable *drawable,
|
||||
|
||||
static __DRIimage *
|
||||
dri2_create_image_from_winsys(__DRIscreen *_screen,
|
||||
int width, int height, unsigned use,
|
||||
const struct dri2_format_mapping *map,
|
||||
int width, int height, const struct dri2_format_mapping *map,
|
||||
int num_handles, struct winsys_handle *whandle,
|
||||
void *loaderPrivate)
|
||||
{
|
||||
@@ -793,10 +792,7 @@ dri2_create_image_from_winsys(__DRIscreen *_screen,
|
||||
assert(templ.format != PIPE_FORMAT_NONE);
|
||||
|
||||
tex = pscreen->resource_from_handle(pscreen,
|
||||
&templ, &whandle[i],
|
||||
PIPE_HANDLE_USAGE_FRAMEBUFFER_WRITE |
|
||||
(use & __DRI_IMAGE_USE_FLUSH_EXTERNAL ?
|
||||
PIPE_HANDLE_USAGE_EXPLICIT_FLUSH : 0));
|
||||
&templ, &whandle[i], PIPE_HANDLE_USAGE_FRAMEBUFFER_WRITE);
|
||||
if (!tex) {
|
||||
pipe_resource_reference(&img->texture, NULL);
|
||||
FREE(img);
|
||||
@@ -809,17 +805,16 @@ dri2_create_image_from_winsys(__DRIscreen *_screen,
|
||||
|
||||
img->level = 0;
|
||||
img->layer = 0;
|
||||
img->use = use;
|
||||
img->use = 0;
|
||||
img->loader_private = loaderPrivate;
|
||||
|
||||
return img;
|
||||
}
|
||||
|
||||
static __DRIimage *
|
||||
dri2_create_image_from_name2(__DRIscreen *_screen,
|
||||
dri2_create_image_from_name(__DRIscreen *_screen,
|
||||
int width, int height, int format,
|
||||
int name, int pitch, unsigned use,
|
||||
void *loaderPrivate)
|
||||
int name, int pitch, void *loaderPrivate)
|
||||
{
|
||||
const struct dri2_format_mapping *map = dri2_get_mapping_by_format(format);
|
||||
struct winsys_handle whandle;
|
||||
@@ -835,7 +830,7 @@ dri2_create_image_from_name2(__DRIscreen *_screen,
|
||||
|
||||
whandle.stride = pitch * util_format_get_blocksize(map->pipe_format);
|
||||
|
||||
img = dri2_create_image_from_winsys(_screen, width, height, use, map,
|
||||
img = dri2_create_image_from_winsys(_screen, width, height, map,
|
||||
1, &whandle, loaderPrivate);
|
||||
|
||||
if (!img)
|
||||
@@ -848,15 +843,6 @@ dri2_create_image_from_name2(__DRIscreen *_screen,
|
||||
return img;
|
||||
}
|
||||
|
||||
static __DRIimage *
|
||||
dri2_create_image_from_name(__DRIscreen *_screen,
|
||||
int width, int height, int format,
|
||||
int name, int pitch, void *loaderPrivate)
|
||||
{
|
||||
return dri2_create_image_from_name2(_screen, width, height, format, name,
|
||||
pitch, 0, loaderPrivate);
|
||||
}
|
||||
|
||||
static unsigned
|
||||
dri2_get_modifier_num_planes(uint64_t modifier)
|
||||
{
|
||||
@@ -891,8 +877,7 @@ dri2_get_modifier_num_planes(uint64_t modifier)
|
||||
static __DRIimage *
|
||||
dri2_create_image_from_fd(__DRIscreen *_screen,
|
||||
int width, int height, int fourcc,
|
||||
uint64_t modifier, unsigned use,
|
||||
int *fds, int num_fds,
|
||||
uint64_t modifier, int *fds, int num_fds,
|
||||
int *strides, int *offsets, unsigned *error,
|
||||
void *loaderPrivate)
|
||||
{
|
||||
@@ -943,7 +928,7 @@ dri2_create_image_from_fd(__DRIscreen *_screen,
|
||||
whandles[i].plane = index;
|
||||
}
|
||||
|
||||
img = dri2_create_image_from_winsys(_screen, width, height, use, map,
|
||||
img = dri2_create_image_from_winsys(_screen, width, height, map,
|
||||
num_handles, whandles, loaderPrivate);
|
||||
if(img == NULL) {
|
||||
err = __DRI_IMAGE_ERROR_BAD_ALLOC;
|
||||
@@ -1123,11 +1108,10 @@ dri2_query_image_by_resource_handle(__DRIimage *image, int attrib, int *value)
|
||||
return false;
|
||||
}
|
||||
|
||||
usage = PIPE_HANDLE_USAGE_FRAMEBUFFER_WRITE;
|
||||
|
||||
if (image->use & (__DRI_IMAGE_USE_BACKBUFFER |
|
||||
__DRI_IMAGE_USE_FLUSH_EXTERNAL))
|
||||
usage |= PIPE_HANDLE_USAGE_EXPLICIT_FLUSH;
|
||||
if (image->use & __DRI_IMAGE_USE_BACKBUFFER)
|
||||
usage = PIPE_HANDLE_USAGE_EXPLICIT_FLUSH;
|
||||
else
|
||||
usage = PIPE_HANDLE_USAGE_FRAMEBUFFER_WRITE;
|
||||
|
||||
if (!pscreen->resource_get_handle(pscreen, NULL, image->texture,
|
||||
&whandle, usage))
|
||||
@@ -1210,11 +1194,10 @@ dri2_query_image_by_resource_param(__DRIimage *image, int attrib, int *value)
|
||||
return false;
|
||||
}
|
||||
|
||||
handle_usage = PIPE_HANDLE_USAGE_FRAMEBUFFER_WRITE;
|
||||
|
||||
if (image->use & (__DRI_IMAGE_USE_BACKBUFFER |
|
||||
__DRI_IMAGE_USE_FLUSH_EXTERNAL))
|
||||
handle_usage |= PIPE_HANDLE_USAGE_EXPLICIT_FLUSH;
|
||||
if (image->use & __DRI_IMAGE_USE_BACKBUFFER)
|
||||
handle_usage = PIPE_HANDLE_USAGE_EXPLICIT_FLUSH;
|
||||
else
|
||||
handle_usage = PIPE_HANDLE_USAGE_FRAMEBUFFER_WRITE;
|
||||
|
||||
if (!dri2_resource_get_param(image, param, handle_usage, &res_param))
|
||||
return false;
|
||||
@@ -1333,7 +1316,7 @@ dri2_from_names(__DRIscreen *screen, int width, int height, int format,
|
||||
whandle.offset = offsets[0];
|
||||
whandle.modifier = DRM_FORMAT_MOD_INVALID;
|
||||
|
||||
img = dri2_create_image_from_winsys(screen, width, height, 0, map,
|
||||
img = dri2_create_image_from_winsys(screen, width, height, map,
|
||||
1, &whandle, loaderPrivate);
|
||||
if (img == NULL)
|
||||
return NULL;
|
||||
@@ -1390,7 +1373,7 @@ dri2_from_fds(__DRIscreen *screen, int width, int height, int fourcc,
|
||||
void *loaderPrivate)
|
||||
{
|
||||
return dri2_create_image_from_fd(screen, width, height, fourcc,
|
||||
DRM_FORMAT_MOD_INVALID, 0, fds, num_fds,
|
||||
DRM_FORMAT_MOD_INVALID, fds, num_fds,
|
||||
strides, offsets, NULL, loaderPrivate);
|
||||
}
|
||||
|
||||
@@ -1438,36 +1421,6 @@ dri2_query_dma_buf_format_modifier_attribs(__DRIscreen *_screen,
|
||||
}
|
||||
}
|
||||
|
||||
static __DRIimage *
|
||||
dri2_from_dma_bufs3(__DRIscreen *screen,
|
||||
int width, int height, int fourcc,
|
||||
uint64_t modifier, unsigned use,
|
||||
int *fds, int num_fds,
|
||||
int *strides, int *offsets,
|
||||
enum __DRIYUVColorSpace yuv_color_space,
|
||||
enum __DRISampleRange sample_range,
|
||||
enum __DRIChromaSiting horizontal_siting,
|
||||
enum __DRIChromaSiting vertical_siting,
|
||||
unsigned *error,
|
||||
void *loaderPrivate)
|
||||
{
|
||||
__DRIimage *img;
|
||||
|
||||
img = dri2_create_image_from_fd(screen, width, height, fourcc,
|
||||
modifier, use, fds, num_fds, strides, offsets,
|
||||
error, loaderPrivate);
|
||||
if (img == NULL)
|
||||
return NULL;
|
||||
|
||||
img->yuv_color_space = yuv_color_space;
|
||||
img->sample_range = sample_range;
|
||||
img->horizontal_siting = horizontal_siting;
|
||||
img->vertical_siting = vertical_siting;
|
||||
|
||||
*error = __DRI_IMAGE_ERROR_SUCCESS;
|
||||
return img;
|
||||
}
|
||||
|
||||
static __DRIimage *
|
||||
dri2_from_dma_bufs(__DRIscreen *screen,
|
||||
int width, int height, int fourcc,
|
||||
@@ -1480,12 +1433,21 @@ dri2_from_dma_bufs(__DRIscreen *screen,
|
||||
unsigned *error,
|
||||
void *loaderPrivate)
|
||||
{
|
||||
return dri2_from_dma_bufs3(screen, width, height, fourcc,
|
||||
DRM_FORMAT_MOD_INVALID, 0,
|
||||
fds, num_fds, strides, offsets,
|
||||
yuv_color_space, sample_range,
|
||||
horizontal_siting, vertical_siting, error,
|
||||
loaderPrivate);
|
||||
__DRIimage *img;
|
||||
|
||||
img = dri2_create_image_from_fd(screen, width, height, fourcc,
|
||||
DRM_FORMAT_MOD_INVALID, fds, num_fds,
|
||||
strides, offsets, error, loaderPrivate);
|
||||
if (img == NULL)
|
||||
return NULL;
|
||||
|
||||
img->yuv_color_space = yuv_color_space;
|
||||
img->sample_range = sample_range;
|
||||
img->horizontal_siting = horizontal_siting;
|
||||
img->vertical_siting = vertical_siting;
|
||||
|
||||
*error = __DRI_IMAGE_ERROR_SUCCESS;
|
||||
return img;
|
||||
}
|
||||
|
||||
static __DRIimage *
|
||||
@@ -1500,11 +1462,21 @@ dri2_from_dma_bufs2(__DRIscreen *screen,
|
||||
unsigned *error,
|
||||
void *loaderPrivate)
|
||||
{
|
||||
return dri2_from_dma_bufs3(screen, width, height, fourcc, modifier, 0,
|
||||
fds, num_fds, strides, offsets,
|
||||
yuv_color_space, sample_range,
|
||||
horizontal_siting, vertical_siting, error,
|
||||
loaderPrivate);
|
||||
__DRIimage *img;
|
||||
|
||||
img = dri2_create_image_from_fd(screen, width, height, fourcc,
|
||||
modifier, fds, num_fds, strides, offsets,
|
||||
error, loaderPrivate);
|
||||
if (img == NULL)
|
||||
return NULL;
|
||||
|
||||
img->yuv_color_space = yuv_color_space;
|
||||
img->sample_range = sample_range;
|
||||
img->horizontal_siting = horizontal_siting;
|
||||
img->vertical_siting = vertical_siting;
|
||||
|
||||
*error = __DRI_IMAGE_ERROR_SUCCESS;
|
||||
return img;
|
||||
}
|
||||
|
||||
static void
|
||||
@@ -1601,26 +1573,9 @@ dri2_get_capabilities(__DRIscreen *_screen)
|
||||
return (screen->can_share_buffer ? __DRI_IMAGE_CAP_GLOBAL_NAMES : 0);
|
||||
}
|
||||
|
||||
static void
|
||||
dri2_image_flush_external(__DRIcontext *context, __DRIimage *image,
|
||||
unsigned flags)
|
||||
{
|
||||
struct dri_context *ctx = dri_context(context);
|
||||
struct pipe_context *pipe = ctx->st->pipe;
|
||||
|
||||
pipe->flush_resource(pipe, image->texture);
|
||||
}
|
||||
|
||||
static void
|
||||
dri2_image_invalidate_external(__DRIcontext *context, __DRIimage *image,
|
||||
unsigned flags)
|
||||
{
|
||||
/* nothing to do */
|
||||
}
|
||||
|
||||
/* The extension is modified during runtime if DRI_PRIME is detected */
|
||||
static __DRIimageExtension dri2ImageExtension = {
|
||||
.base = { __DRI_IMAGE, 18 },
|
||||
.base = { __DRI_IMAGE, 17 },
|
||||
|
||||
.createImageFromName = dri2_create_image_from_name,
|
||||
.createImageFromRenderbuffer = dri2_create_image_from_renderbuffer,
|
||||
@@ -1644,10 +1599,6 @@ static __DRIimageExtension dri2ImageExtension = {
|
||||
.queryDmaBufModifiers = NULL,
|
||||
.queryDmaBufFormatModifierAttribs = NULL,
|
||||
.createImageFromRenderbuffer2 = dri2_create_image_from_renderbuffer2,
|
||||
.imageFlushExternal = dri2_image_flush_external,
|
||||
.imageInvalidateExternal = dri2_image_invalidate_external,
|
||||
.createImageFromName2 = dri2_create_image_from_name2,
|
||||
.createImageFromDmaBufs3 = NULL,
|
||||
};
|
||||
|
||||
static const __DRIrobustnessExtension dri2Robustness = {
|
||||
@@ -2120,7 +2071,6 @@ dri2_init_screen(__DRIscreen * sPriv)
|
||||
dri2ImageExtension.createImageFromFds = dri2_from_fds;
|
||||
dri2ImageExtension.createImageFromDmaBufs = dri2_from_dma_bufs;
|
||||
dri2ImageExtension.createImageFromDmaBufs2 = dri2_from_dma_bufs2;
|
||||
dri2ImageExtension.createImageFromDmaBufs3 = dri2_from_dma_bufs3;
|
||||
if (pscreen->query_dmabuf_modifiers) {
|
||||
dri2ImageExtension.queryDmaBufFormats = dri2_query_dma_buf_formats;
|
||||
dri2ImageExtension.queryDmaBufModifiers =
|
||||
@@ -2203,7 +2153,6 @@ dri_kms_init_screen(__DRIscreen * sPriv)
|
||||
dri2ImageExtension.createImageFromFds = dri2_from_fds;
|
||||
dri2ImageExtension.createImageFromDmaBufs = dri2_from_dma_bufs;
|
||||
dri2ImageExtension.createImageFromDmaBufs2 = dri2_from_dma_bufs2;
|
||||
dri2ImageExtension.createImageFromDmaBufs3 = dri2_from_dma_bufs3;
|
||||
if (pscreen->query_dmabuf_modifiers) {
|
||||
dri2ImageExtension.queryDmaBufFormats = dri2_query_dma_buf_formats;
|
||||
dri2ImageExtension.queryDmaBufModifiers = dri2_query_dma_buf_modifiers;
|
||||
|
@@ -65,6 +65,8 @@ dri_fill_st_options(struct dri_screen *screen)
|
||||
|
||||
options->disable_blend_func_extended =
|
||||
driQueryOptionb(optionCache, "disable_blend_func_extended");
|
||||
options->disable_arb_gpu_shader5 =
|
||||
driQueryOptionb(optionCache, "disable_arb_gpu_shader5");
|
||||
options->disable_glsl_line_continuations =
|
||||
driQueryOptionb(optionCache, "disable_glsl_line_continuations");
|
||||
options->force_glsl_extensions_warn =
|
||||
|
@@ -92,7 +92,8 @@ alloc_shm(struct dri_sw_displaytarget *dri_sw_dt, unsigned size)
|
||||
{
|
||||
char *addr;
|
||||
|
||||
dri_sw_dt->shmid = shmget(IPC_PRIVATE, size, IPC_CREAT|0777);
|
||||
/* 0600 = user read+write */
|
||||
dri_sw_dt->shmid = shmget(IPC_PRIVATE, size, IPC_CREAT | 0600);
|
||||
if (dri_sw_dt->shmid < 0)
|
||||
return NULL;
|
||||
|
||||
|
@@ -126,7 +126,8 @@ alloc_shm(struct xlib_displaytarget *buf, unsigned size)
|
||||
shminfo->shmid = -1;
|
||||
shminfo->shmaddr = (char *) -1;
|
||||
|
||||
shminfo->shmid = shmget(IPC_PRIVATE, size, IPC_CREAT|0777);
|
||||
/* 0600 = user read+write */
|
||||
shminfo->shmid = shmget(IPC_PRIVATE, size, IPC_CREAT | 0600);
|
||||
if (shminfo->shmid < 0) {
|
||||
return NULL;
|
||||
}
|
||||
|
@@ -1400,7 +1400,7 @@ fs_visitor::nir_emit_alu(const fs_builder &bld, nir_alu_instr *instr,
|
||||
temp_op[0] = bld.fix_byte_src(op[0]);
|
||||
temp_op[1] = bld.fix_byte_src(op[1]);
|
||||
|
||||
const uint32_t bit_size = nir_src_bit_size(instr->src[0].src);
|
||||
const uint32_t bit_size = type_sz(temp_op[0].type) * 8;
|
||||
if (bit_size != 32)
|
||||
dest = bld.vgrf(temp_op[0].type, 1);
|
||||
|
||||
|
@@ -482,8 +482,10 @@ anv_batch_bo_list_clone(const struct list_head *list,
|
||||
}
|
||||
|
||||
if (result != VK_SUCCESS) {
|
||||
list_for_each_entry_safe(struct anv_batch_bo, bbo, new_list, link)
|
||||
list_for_each_entry_safe(struct anv_batch_bo, bbo, new_list, link) {
|
||||
list_del(&bbo->link);
|
||||
anv_batch_bo_destroy(bbo, cmd_buffer);
|
||||
}
|
||||
}
|
||||
|
||||
return result;
|
||||
@@ -808,6 +810,7 @@ anv_cmd_buffer_fini_batch_bo_chain(struct anv_cmd_buffer *cmd_buffer)
|
||||
/* Destroy all of the batch buffers */
|
||||
list_for_each_entry_safe(struct anv_batch_bo, bbo,
|
||||
&cmd_buffer->batch_bos, link) {
|
||||
list_del(&bbo->link);
|
||||
anv_batch_bo_destroy(bbo, cmd_buffer);
|
||||
}
|
||||
}
|
||||
@@ -1624,6 +1627,9 @@ anv_cmd_buffer_execbuf(struct anv_device *device,
|
||||
assert(!pdevice->has_syncobj);
|
||||
if (in_fence == -1) {
|
||||
in_fence = impl->fd;
|
||||
if (in_fence == -1)
|
||||
return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
|
||||
impl->fd = -1;
|
||||
} else {
|
||||
int merge = anv_gem_sync_file_merge(device, in_fence, impl->fd);
|
||||
if (merge == -1)
|
||||
@@ -1631,10 +1637,9 @@ anv_cmd_buffer_execbuf(struct anv_device *device,
|
||||
|
||||
close(impl->fd);
|
||||
close(in_fence);
|
||||
impl->fd = -1;
|
||||
in_fence = merge;
|
||||
}
|
||||
|
||||
impl->fd = -1;
|
||||
break;
|
||||
|
||||
case ANV_SEMAPHORE_TYPE_DRM_SYNCOBJ:
|
||||
|
@@ -247,12 +247,28 @@ VkResult anv_AcquireNextImage2KHR(
|
||||
pAcquireInfo,
|
||||
pImageIndex);
|
||||
|
||||
/* Thanks to implicit sync, the image is ready immediately. However, we
|
||||
* should wait for the current GPU state to finish.
|
||||
/* Thanks to implicit sync, the image is ready immediately. However, we
|
||||
* should wait for the current GPU state to finish. Regardless of the
|
||||
* result of the presentation, we need to signal the semaphore & fence.
|
||||
*/
|
||||
|
||||
if (pAcquireInfo->semaphore != VK_NULL_HANDLE) {
|
||||
/* Put a dummy semaphore in temporary, this is the fastest way to avoid
|
||||
* any kind of work yet still provide some kind of synchronization. This
|
||||
* only works because the Mesa WSI code always returns an image
|
||||
* immediately if available.
|
||||
*/
|
||||
ANV_FROM_HANDLE(anv_semaphore, semaphore, pAcquireInfo->semaphore);
|
||||
anv_semaphore_reset_temporary(device, semaphore);
|
||||
|
||||
struct anv_semaphore_impl *impl = &semaphore->temporary;
|
||||
|
||||
impl->type = ANV_SEMAPHORE_TYPE_DUMMY;
|
||||
}
|
||||
|
||||
if (pAcquireInfo->fence != VK_NULL_HANDLE) {
|
||||
anv_QueueSubmit(anv_queue_to_handle(&device->queue), 0, NULL,
|
||||
pAcquireInfo->fence);
|
||||
result = anv_QueueSubmit(anv_queue_to_handle(&device->queue),
|
||||
0, NULL, pAcquireInfo->fence);
|
||||
}
|
||||
|
||||
return result;
|
||||
|
@@ -2602,20 +2602,12 @@ cmd_buffer_flush_push_constants(struct anv_cmd_buffer *cmd_buffer,
|
||||
const struct anv_pipeline_binding *binding =
|
||||
&bind_map->surface_to_descriptor[surface];
|
||||
|
||||
struct anv_address read_addr;
|
||||
uint32_t read_len;
|
||||
struct anv_address addr;
|
||||
if (binding->set == ANV_DESCRIPTOR_SET_SHADER_CONSTANTS) {
|
||||
struct anv_address constant_data = {
|
||||
addr = (struct anv_address) {
|
||||
.bo = pipeline->device->dynamic_state_pool.block_pool.bo,
|
||||
.offset = pipeline->shaders[stage]->constant_data.offset,
|
||||
};
|
||||
unsigned constant_data_size =
|
||||
pipeline->shaders[stage]->constant_data_size;
|
||||
|
||||
read_len = MIN2(range->length,
|
||||
DIV_ROUND_UP(constant_data_size, 32) - range->start);
|
||||
read_addr = anv_address_add(constant_data,
|
||||
range->start * 32);
|
||||
} else if (binding->set == ANV_DESCRIPTOR_SET_DESCRIPTORS) {
|
||||
/* This is a descriptor set buffer so the set index is
|
||||
* actually given by binding->binding. (Yes, that's
|
||||
@@ -2623,45 +2615,27 @@ cmd_buffer_flush_push_constants(struct anv_cmd_buffer *cmd_buffer,
|
||||
*/
|
||||
struct anv_descriptor_set *set =
|
||||
gfx_state->base.descriptors[binding->binding];
|
||||
struct anv_address desc_buffer_addr =
|
||||
anv_descriptor_set_address(cmd_buffer, set);
|
||||
const unsigned desc_buffer_size = set->desc_mem.alloc_size;
|
||||
|
||||
read_len = MIN2(range->length,
|
||||
DIV_ROUND_UP(desc_buffer_size, 32) - range->start);
|
||||
read_addr = anv_address_add(desc_buffer_addr,
|
||||
range->start * 32);
|
||||
addr = anv_descriptor_set_address(cmd_buffer, set);
|
||||
} else {
|
||||
const struct anv_descriptor *desc =
|
||||
anv_descriptor_for_binding(&gfx_state->base, binding);
|
||||
|
||||
if (desc->type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER) {
|
||||
read_len = MIN2(range->length,
|
||||
DIV_ROUND_UP(desc->buffer_view->range, 32) - range->start);
|
||||
read_addr = anv_address_add(desc->buffer_view->address,
|
||||
range->start * 32);
|
||||
addr = desc->buffer_view->address;
|
||||
} else {
|
||||
assert(desc->type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC);
|
||||
|
||||
uint32_t dynamic_offset =
|
||||
dynamic_offset_for_binding(&gfx_state->base, binding);
|
||||
uint32_t buf_offset =
|
||||
MIN2(desc->offset + dynamic_offset, desc->buffer->size);
|
||||
uint32_t buf_range =
|
||||
MIN2(desc->range, desc->buffer->size - buf_offset);
|
||||
|
||||
read_len = MIN2(range->length,
|
||||
DIV_ROUND_UP(buf_range, 32) - range->start);
|
||||
read_addr = anv_address_add(desc->buffer->address,
|
||||
buf_offset + range->start * 32);
|
||||
addr = anv_address_add(desc->buffer->address,
|
||||
desc->offset + dynamic_offset);
|
||||
}
|
||||
}
|
||||
|
||||
if (read_len > 0) {
|
||||
c.ConstantBody.Buffer[n] = read_addr;
|
||||
c.ConstantBody.ReadLength[n] = read_len;
|
||||
n--;
|
||||
}
|
||||
c.ConstantBody.Buffer[n] =
|
||||
anv_address_add(addr, range->start * 32);
|
||||
c.ConstantBody.ReadLength[n] = range->length;
|
||||
n--;
|
||||
}
|
||||
|
||||
struct anv_state state =
|
||||
|
@@ -1005,6 +1005,7 @@ emit_ds_state(struct anv_pipeline *pipeline,
|
||||
pipeline->stencil_test_enable = false;
|
||||
pipeline->writes_depth = false;
|
||||
pipeline->depth_test_enable = false;
|
||||
pipeline->depth_bounds_test_enable = false;
|
||||
memset(depth_stencil_dw, 0, sizeof(depth_stencil_dw));
|
||||
return;
|
||||
}
|
||||
@@ -1023,8 +1024,6 @@ emit_ds_state(struct anv_pipeline *pipeline,
|
||||
pipeline->depth_test_enable = info.depthTestEnable;
|
||||
pipeline->depth_bounds_test_enable = info.depthBoundsTestEnable;
|
||||
|
||||
/* VkBool32 depthBoundsTestEnable; // optional (depth_bounds_test) */
|
||||
|
||||
#if GEN_GEN <= 7
|
||||
struct GENX(DEPTH_STENCIL_STATE) depth_stencil = {
|
||||
#else
|
||||
|
@@ -3051,7 +3051,7 @@ genX(upload_blend_state)(struct brw_context *brw)
|
||||
#endif
|
||||
}
|
||||
|
||||
static const struct brw_tracked_state genX(blend_state) = {
|
||||
UNUSED static const struct brw_tracked_state genX(blend_state) = {
|
||||
.dirty = {
|
||||
.mesa = _NEW_BUFFERS |
|
||||
_NEW_COLOR |
|
||||
@@ -3412,7 +3412,7 @@ genX(upload_color_calc_state)(struct brw_context *brw)
|
||||
#endif
|
||||
}
|
||||
|
||||
static const struct brw_tracked_state genX(color_calc_state) = {
|
||||
UNUSED static const struct brw_tracked_state genX(color_calc_state) = {
|
||||
.dirty = {
|
||||
.mesa = _NEW_COLOR |
|
||||
_NEW_STENCIL |
|
||||
@@ -3430,6 +3430,35 @@ static const struct brw_tracked_state genX(color_calc_state) = {
|
||||
};
|
||||
|
||||
|
||||
/* ---------------------------------------------------------------------- */
|
||||
|
||||
#if GEN_IS_HASWELL
|
||||
static void
|
||||
genX(upload_color_calc_and_blend_state)(struct brw_context *brw)
|
||||
{
|
||||
genX(upload_blend_state)(brw);
|
||||
genX(upload_color_calc_state)(brw);
|
||||
}
|
||||
|
||||
/* On Haswell when BLEND_STATE is emitted CC_STATE should also be re-emitted,
|
||||
* this workarounds the flickering shadows in several games.
|
||||
*/
|
||||
static const struct brw_tracked_state genX(cc_and_blend_state) = {
|
||||
.dirty = {
|
||||
.mesa = _NEW_BUFFERS |
|
||||
_NEW_COLOR |
|
||||
_NEW_STENCIL |
|
||||
_NEW_MULTISAMPLE,
|
||||
.brw = BRW_NEW_BATCH |
|
||||
BRW_NEW_BLORP |
|
||||
BRW_NEW_CC_STATE |
|
||||
BRW_NEW_FS_PROG_DATA |
|
||||
BRW_NEW_STATE_BASE_ADDRESS,
|
||||
},
|
||||
.emit = genX(upload_color_calc_and_blend_state),
|
||||
};
|
||||
#endif
|
||||
|
||||
/* ---------------------------------------------------------------------- */
|
||||
|
||||
#if GEN_GEN >= 7
|
||||
@@ -5697,8 +5726,12 @@ genX(init_atoms)(struct brw_context *brw)
|
||||
&gen7_l3_state,
|
||||
&gen7_push_constant_space,
|
||||
&gen7_urb,
|
||||
#if GEN_IS_HASWELL
|
||||
&genX(cc_and_blend_state),
|
||||
#else
|
||||
&genX(blend_state), /* must do before cc unit */
|
||||
&genX(color_calc_state), /* must do before cc unit */
|
||||
#endif
|
||||
&genX(depth_stencil_state), /* must do before cc unit */
|
||||
|
||||
&brw_vs_image_surfaces, /* Before vs push/pull constants and binding table */
|
||||
|
@@ -89,8 +89,9 @@ alloc_back_shm_ximage(XMesaBuffer b, GLuint width, GLuint height)
|
||||
return GL_FALSE;
|
||||
}
|
||||
|
||||
/* 0600 = user read+write */
|
||||
b->shminfo.shmid = shmget(IPC_PRIVATE, b->backxrb->ximage->bytes_per_line
|
||||
* b->backxrb->ximage->height, IPC_CREAT|0777);
|
||||
* b->backxrb->ximage->height, IPC_CREAT | 0600);
|
||||
if (b->shminfo.shmid < 0) {
|
||||
_mesa_warning(NULL, "shmget failed while allocating back buffer.\n");
|
||||
XDestroyImage(b->backxrb->ximage);
|
||||
|
@@ -692,12 +692,6 @@ clear_bufferfi(struct gl_context *ctx, GLenum buffer, GLint drawbuffer,
|
||||
drawbuffer);
|
||||
return;
|
||||
}
|
||||
|
||||
if (ctx->DrawBuffer->_Status != GL_FRAMEBUFFER_COMPLETE_EXT) {
|
||||
_mesa_error(ctx, GL_INVALID_FRAMEBUFFER_OPERATION_EXT,
|
||||
"glClearBufferfi(incomplete framebuffer)");
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
if (ctx->RasterDiscard)
|
||||
@@ -707,6 +701,12 @@ clear_bufferfi(struct gl_context *ctx, GLenum buffer, GLint drawbuffer,
|
||||
_mesa_update_state( ctx );
|
||||
}
|
||||
|
||||
if (!no_error && ctx->DrawBuffer->_Status != GL_FRAMEBUFFER_COMPLETE_EXT) {
|
||||
_mesa_error(ctx, GL_INVALID_FRAMEBUFFER_OPERATION_EXT,
|
||||
"glClearBufferfi(incomplete framebuffer)");
|
||||
return;
|
||||
}
|
||||
|
||||
if (ctx->DrawBuffer->Attachment[BUFFER_DEPTH].Renderbuffer)
|
||||
mask |= BUFFER_BIT_DEPTH;
|
||||
if (ctx->DrawBuffer->Attachment[BUFFER_STENCIL].Renderbuffer)
|
||||
|
@@ -1095,7 +1095,7 @@ void st_init_extensions(struct pipe_screen *screen,
|
||||
if (api == API_OPENGLES2 && ESSLVersion >= 320)
|
||||
extensions->ARB_gpu_shader5 = GL_TRUE;
|
||||
|
||||
if (GLSLVersion >= 400)
|
||||
if (GLSLVersion >= 400 && !options->disable_arb_gpu_shader5)
|
||||
extensions->ARB_gpu_shader5 = GL_TRUE;
|
||||
if (GLSLVersion >= 410)
|
||||
extensions->ARB_shader_precision = GL_TRUE;
|
||||
|
@@ -56,11 +56,13 @@ TODO: document the other workarounds.
|
||||
<application name="Unigine Sanctuary" executable="Sanctuary">
|
||||
<option name="force_glsl_extensions_warn" value="true" />
|
||||
<option name="disable_blend_func_extended" value="true" />
|
||||
<option name="disable_arb_gpu_shader5" value="true" />
|
||||
</application>
|
||||
|
||||
<application name="Unigine Tropics" executable="Tropics">
|
||||
<option name="force_glsl_extensions_warn" value="true" />
|
||||
<option name="disable_blend_func_extended" value="true" />
|
||||
<option name="disable_arb_gpu_shader5" value="true" />
|
||||
</application>
|
||||
|
||||
<application name="Unigine Heaven (32-bit)" executable="heaven_x86">
|
||||
|
@@ -80,6 +80,11 @@ DRI_CONF_OPT_BEGIN_B(disable_blend_func_extended, def) \
|
||||
DRI_CONF_DESC(en,gettext("Disable dual source blending")) \
|
||||
DRI_CONF_OPT_END
|
||||
|
||||
#define DRI_CONF_DISABLE_ARB_GPU_SHADER5(def) \
|
||||
DRI_CONF_OPT_BEGIN_B(disable_arb_gpu_shader5, def) \
|
||||
DRI_CONF_DESC(en,"Disable GL_ARB_gpu_shader5") \
|
||||
DRI_CONF_OPT_END
|
||||
|
||||
#define DRI_CONF_DUAL_COLOR_BLEND_BY_LOCATION(def) \
|
||||
DRI_CONF_OPT_BEGIN_B(dual_color_blend_by_location, def) \
|
||||
DRI_CONF_DESC(en,gettext("Identify dual color blending sources by location rather than index")) \
|
||||
|