author     Andres Freund    2018-11-16 06:00:30 +0000
committer  Andres Freund    2018-11-16 06:00:30 +0000
commit     675af5c01e297262cd20d7416f7e568393c22c6e (patch)
tree       3af0a63c0fb58bfe930f1e7fdff3c4cea1ea448d /src
parent     1a0586de3657cd35581f0639c87d5050c6197bb7 (diff)
Compute information about EEOP_*_FETCHSOME at expression init time.
Previously this information was computed when JIT-compiling an expression. But the information is useful for assertions in the non-JIT case too, therefore it makes sense to move the computation to expression initialization.

This will, in a followup commit, allow different slot types to be treated differently. E.g. for virtual slots there is no need to generate a JIT function to deform the slot.

Author: Andres Freund
Discussion: https://postgr.es/m/[email protected]
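For context, here is a minimal sketch (not part of this commit) of the kind of non-JIT assertion the precomputed fixed/kind information makes possible. The helper name CheckFetchSlotCompatibility and its call site are assumptions for illustration only, not PostgreSQL API:

#include "postgres.h"

#include "executor/execExpr.h"
#include "executor/tuptable.h"

/*
 * Hypothetical sketch: with d.fetch.fixed and d.fetch.kind filled in at
 * expression init time, an interpreted EEOP_*_FETCHSOME step could assert
 * that the slot it actually receives matches what ExecComputeSlotInfo
 * predicted, instead of relying on JIT-only knowledge.
 */
static inline void
CheckFetchSlotCompatibility(const ExprEvalStep *op, const TupleTableSlot *slot)
{
#ifdef USE_ASSERT_CHECKING
	if (op->d.fetch.fixed)
	{
		/* a 'fixed' step guarantees the slot type never changes */
		Assert(op->d.fetch.kind == slot->tts_ops);
	}
#endif
}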
Diffstat (limited to 'src')
-rw-r--r--  src/backend/executor/execExpr.c      104
-rw-r--r--  src/backend/jit/llvm/llvmjit_expr.c   44
-rw-r--r--  src/include/executor/execExpr.h        5
3 files changed, 124 insertions, 29 deletions
diff --git a/src/backend/executor/execExpr.c b/src/backend/executor/execExpr.c
index cd5ee91cd43..d64dab43b8d 100644
--- a/src/backend/executor/execExpr.c
+++ b/src/backend/executor/execExpr.c
@@ -65,6 +65,7 @@ static void ExecInitFunc(ExprEvalStep *scratch, Expr *node, List *args,
static void ExecInitExprSlots(ExprState *state, Node *node);
static void ExecPushExprSlots(ExprState *state, LastAttnumInfo *info);
static bool get_last_attnums_walker(Node *node, LastAttnumInfo *info);
+static void ExecComputeSlotInfo(ExprState *state, ExprEvalStep *op);
static void ExecInitWholeRowVar(ExprEvalStep *scratch, Var *variable,
ExprState *state);
static void ExecInitArrayRef(ExprEvalStep *scratch, ArrayRef *aref,
@@ -2288,21 +2289,30 @@ ExecPushExprSlots(ExprState *state, LastAttnumInfo *info)
{
scratch.opcode = EEOP_INNER_FETCHSOME;
scratch.d.fetch.last_var = info->last_inner;
+ scratch.d.fetch.fixed = false;
+ scratch.d.fetch.kind = NULL;
scratch.d.fetch.known_desc = NULL;
+ ExecComputeSlotInfo(state, &scratch);
ExprEvalPushStep(state, &scratch);
}
if (info->last_outer > 0)
{
scratch.opcode = EEOP_OUTER_FETCHSOME;
scratch.d.fetch.last_var = info->last_outer;
+ scratch.d.fetch.fixed = false;
+ scratch.d.fetch.kind = NULL;
scratch.d.fetch.known_desc = NULL;
+ ExecComputeSlotInfo(state, &scratch);
ExprEvalPushStep(state, &scratch);
}
if (info->last_scan > 0)
{
scratch.opcode = EEOP_SCAN_FETCHSOME;
scratch.d.fetch.last_var = info->last_scan;
+ scratch.d.fetch.fixed = false;
+ scratch.d.fetch.kind = NULL;
scratch.d.fetch.known_desc = NULL;
+ ExecComputeSlotInfo(state, &scratch);
ExprEvalPushStep(state, &scratch);
}
}
@@ -2356,6 +2366,94 @@ get_last_attnums_walker(Node *node, LastAttnumInfo *info)
}
/*
+ * Compute additional information for EEOP_*_FETCHSOME ops.
+ *
+ * The goal is to determine whether a slot is 'fixed', that is, every
+ * evaluation of the expression will have the same type of slot, with an
+ * equivalent descriptor.
+ */
+static void
+ExecComputeSlotInfo(ExprState *state, ExprEvalStep *op)
+{
+ PlanState *parent = state->parent;
+ TupleDesc desc = NULL;
+ const TupleTableSlotOps *tts_ops = NULL;
+ bool isfixed = false;
+
+ if (op->d.fetch.known_desc != NULL)
+ {
+ desc = op->d.fetch.known_desc;
+ tts_ops = op->d.fetch.kind;
+ isfixed = op->d.fetch.kind != NULL;
+ }
+ else if (!parent)
+ {
+ isfixed = false;
+ }
+ else if (op->opcode == EEOP_INNER_FETCHSOME)
+ {
+ PlanState *is = innerPlanState(parent);
+
+ if (parent->inneropsset && !parent->inneropsfixed)
+ {
+ isfixed = false;
+ }
+ else if (parent->inneropsset && parent->innerops)
+ {
+ isfixed = true;
+ tts_ops = parent->innerops;
+ }
+ else if (is)
+ {
+ tts_ops = ExecGetResultSlotOps(is, &isfixed);
+ desc = ExecGetResultType(is);
+ }
+ }
+ else if (op->opcode == EEOP_OUTER_FETCHSOME)
+ {
+ PlanState *os = outerPlanState(parent);
+
+ if (parent->outeropsset && !parent->outeropsfixed)
+ {
+ isfixed = false;
+ }
+ else if (parent->outeropsset && parent->outerops)
+ {
+ isfixed = true;
+ tts_ops = parent->outerops;
+ }
+ else if (os)
+ {
+ tts_ops = ExecGetResultSlotOps(os, &isfixed);
+ desc = ExecGetResultType(os);
+ }
+ }
+ else if (op->opcode == EEOP_SCAN_FETCHSOME)
+ {
+ desc = parent->scandesc;
+
+ if (parent && parent->scanops)
+ tts_ops = parent->scanops;
+
+ if (parent->scanopsset)
+ isfixed = parent->scanopsfixed;
+ }
+
+ if (isfixed && desc != NULL && tts_ops != NULL)
+ {
+ op->d.fetch.fixed = true;
+ op->d.fetch.kind = tts_ops;
+ op->d.fetch.known_desc = desc;
+ }
+ else
+ {
+ op->d.fetch.fixed = false;
+ op->d.fetch.kind = NULL;
+ op->d.fetch.known_desc = NULL;
+ }
+}
+
+/*
* Prepare step for the evaluation of a whole-row variable.
* The caller still has to push the step.
*/
@@ -3255,12 +3353,18 @@ ExecBuildGroupingEqual(TupleDesc ldesc, TupleDesc rdesc,
/* push deform steps */
scratch.opcode = EEOP_INNER_FETCHSOME;
scratch.d.fetch.last_var = maxatt;
+ scratch.d.fetch.fixed = false;
scratch.d.fetch.known_desc = ldesc;
+ scratch.d.fetch.kind = lops;
+ ExecComputeSlotInfo(state, &scratch);
ExprEvalPushStep(state, &scratch);
scratch.opcode = EEOP_OUTER_FETCHSOME;
scratch.d.fetch.last_var = maxatt;
+ scratch.d.fetch.fixed = false;
scratch.d.fetch.known_desc = rdesc;
+ scratch.d.fetch.kind = rops;
+ ExecComputeSlotInfo(state, &scratch);
ExprEvalPushStep(state, &scratch);
/*
diff --git a/src/backend/jit/llvm/llvmjit_expr.c b/src/backend/jit/llvm/llvmjit_expr.c
index 0dbc1e41062..be9b2aecffe 100644
--- a/src/backend/jit/llvm/llvmjit_expr.c
+++ b/src/backend/jit/llvm/llvmjit_expr.c
@@ -276,6 +276,8 @@ llvm_compile_expr(ExprState *state)
LLVMValueRef v_slot;
LLVMBasicBlockRef b_fetch;
LLVMValueRef v_nvalid;
+ LLVMValueRef l_jit_deform = NULL;
+ const TupleTableSlotOps *tts_ops = NULL;
b_fetch = l_bb_before_v(opblocks[i + 1],
"op.%d.fetch", i);
@@ -283,40 +285,22 @@ llvm_compile_expr(ExprState *state)
if (op->d.fetch.known_desc)
desc = op->d.fetch.known_desc;
- if (opcode == EEOP_INNER_FETCHSOME)
- {
- PlanState *is = innerPlanState(parent);
+ if (op->d.fetch.fixed)
+ tts_ops = op->d.fetch.kind;
+ if (opcode == EEOP_INNER_FETCHSOME)
v_slot = v_innerslot;
-
- if (!desc &&
- is &&
- is->ps_ResultTupleSlot &&
- TTS_FIXED(is->ps_ResultTupleSlot))
- desc = is->ps_ResultTupleSlot->tts_tupleDescriptor;
- }
else if (opcode == EEOP_OUTER_FETCHSOME)
- {
- PlanState *os = outerPlanState(parent);
-
v_slot = v_outerslot;
-
- if (!desc &&
- os &&
- os->ps_ResultTupleSlot &&
- TTS_FIXED(os->ps_ResultTupleSlot))
- desc = os->ps_ResultTupleSlot->tts_tupleDescriptor;
- }
else
- {
v_slot = v_scanslot;
- if (!desc && parent)
- desc = parent->scandesc;
- }
/*
* Check if all required attributes are available, or
* whether deforming is required.
+ *
+ * TODO: skip nvalid check if slot is fixed and known to
+ * be a virtual slot.
*/
v_nvalid =
l_load_struct_gep(b, v_slot,
@@ -336,19 +320,21 @@ llvm_compile_expr(ExprState *state)
* function specific to tupledesc and the exact number of
* to-be-extracted attributes.
*/
- if (desc && (context->base.flags & PGJIT_DEFORM))
+ if (tts_ops && desc && (context->base.flags & PGJIT_DEFORM))
{
- LLVMValueRef params[1];
- LLVMValueRef l_jit_deform;
-
l_jit_deform =
slot_compile_deform(context, desc,
op->d.fetch.last_var);
+ }
+
+ if (l_jit_deform)
+ {
+ LLVMValueRef params[1];
+
params[0] = v_slot;
LLVMBuildCall(b, l_jit_deform,
params, lengthof(params), "");
-
}
else
{
diff --git a/src/include/executor/execExpr.h b/src/include/executor/execExpr.h
index ac53935d700..194bf46e0f5 100644
--- a/src/include/executor/execExpr.h
+++ b/src/include/executor/execExpr.h
@@ -262,7 +262,12 @@ typedef struct ExprEvalStep
{
/* attribute number up to which to fetch (inclusive) */
int last_var;
+ /* will the type of slot be the same for every invocation */
+ bool fixed;
+ /* tuple descriptor, if known */
TupleDesc known_desc;
+ /* type of slot, can only be relied upon if fixed is set */
+ const TupleTableSlotOps *kind;
} fetch;
/* for EEOP_INNER/OUTER/SCAN_[SYS]VAR[_FIRST] */
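The in-diff TODO ("skip nvalid check if slot is fixed and known to be a virtual slot") and the commit message both point at how these new fields are meant to be consumed later. Below is a rough sketch of that idea, assuming TTSOpsVirtual from executor/tuptable.h can be compared against d.fetch.kind; the predicate name ExprStepNeedsDeform is invented for this illustration:

#include "postgres.h"

#include "executor/execExpr.h"
#include "executor/tuptable.h"

/*
 * Hypothetical predicate for the followup work: a FETCHSOME step whose slot
 * is fixed and known to be virtual never needs deforming, since a virtual
 * slot already has every attribute materialized in tts_values/tts_isnull.
 */
static inline bool
ExprStepNeedsDeform(const ExprEvalStep *op)
{
	if (op->d.fetch.fixed && op->d.fetch.kind == &TTSOpsVirtual)
		return false;			/* no slot_getsomeattrs()/JIT deform needed */

	return true;
}

This mirrors the JIT path above, which now only emits a deform function when both the slot ops (tts_ops) and the tuple descriptor are known at expression init time.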