author      Michal Krol <[email protected]>    2009-09-16 19:24:50 +0200
committer   Michal Krol <[email protected]>    2009-09-16 19:24:50 +0200
commit      eeb5202e5ddf1cc95c35d46fd425afd0695b85bb (patch)
tree        bdf22a4858cf54bb46c9766dcc3443deca016a1a /src
parent      0f302b60fd6d43a47e208979d0677e09f4a802fc (diff)
slang: Invoke the preprocessor from within the slang compiler.
This allows us to validate the shader version number.
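In rough outline, the slang compiler now drives the preprocessor itself and hands the resulting token stream to the grammar. A minimal sketch of the new call sequence in compile_with_grammar(), with error paths and cleanup abbreviated (see the slang_compile.c hunk below for the full version):

    struct sl_pp_context context;
    struct sl_pp_token_info *intokens;
    struct sl_pp_token_info *tokens;
    struct sl_pp_purify_options options;
    char *outbuf;
    unsigned int version;
    unsigned int tokens_eaten;

    memset(&options, 0, sizeof(options));
    sl_pp_purify(source, &options, &outbuf);       /* first cleanup pass over the raw source */
    sl_pp_context_init(&context);
    sl_pp_tokenise(&context, outbuf, &intokens);   /* raw token stream */
    free(outbuf);
    sl_pp_version(&context, intokens, &version, &tokens_eaten);  /* read the #version directive, if any */
    sl_pp_process(&context, &intokens[tokens_eaten], &tokens);   /* run the preprocessor proper */
    free(intokens);

    /* 'version' can now be validated against maxVersion before parsing;
       the grammar consumes the preprocessed tokens directly. */
    grammar_fast_check(id, &context, tokens, &prod, &size, 65536);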
Diffstat (limited to 'src')
-rw-r--r--   src/mesa/shader/grammar/grammar.c        125
-rw-r--r--   src/mesa/shader/grammar/grammar.h          9
-rw-r--r--   src/mesa/shader/grammar/grammar_mesa.h     4
-rw-r--r--   src/mesa/shader/slang/slang_compile.c    131
4 files changed, 132 insertions, 137 deletions
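The grammar_fast_check() signature changes accordingly: it no longer takes raw source text, but a preprocessor context and token stream prepared by the caller, which also becomes responsible for tearing them down afterwards. A sketch of the new contract, mirroring the call site in the slang_compile.c hunk below:

    int result = grammar_fast_check(id, &context, tokens, &prod, &size, 65536);

    /* The caller now owns the preprocessor state and the token buffer. */
    sl_pp_context_destroy(&context);
    free(tokens);

    if (!result) {
        /* report the syntax error via the info log */
    }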
diff --git a/src/mesa/shader/grammar/grammar.c b/src/mesa/shader/grammar/grammar.c
index ebfcef06800..eb58e0cddd6 100644
--- a/src/mesa/shader/grammar/grammar.c
+++ b/src/mesa/shader/grammar/grammar.c
@@ -3106,14 +3106,13 @@ int grammar_set_reg8 (grammar id, const byte *name, byte value)
int
grammar_fast_check (grammar id,
- const byte *text,
+ struct sl_pp_context *context,
+ struct sl_pp_token_info *tokens,
byte **prod,
unsigned int *size,
unsigned int estimate_prod_size)
{
dict *di = NULL;
- struct sl_pp_context context;
- struct sl_pp_token_info *tokens;
int index = 0;
regbyte_ctx *rbc = NULL;
bytepool *bp = NULL;
@@ -3131,135 +3130,17 @@ grammar_fast_check (grammar id,
*prod = NULL;
*size = 0;
- /*
- * Preprocess the source string with a GLSL preprocessor.
- * This is a hack but since nowadays we use grammar only for
- * GLSL compiler, and that also is going away, we'll do it anyway.
- */
-
- {
- struct sl_pp_purify_options options;
- char *outbuf;
- struct sl_pp_token_info *intokens;
- unsigned int version;
- unsigned int tokens_eaten;
-
- memset(&options, 0, sizeof(options));
- if (sl_pp_purify((const char *)text, &options, &outbuf)) {
- return 0;
- }
-
- if (sl_pp_context_init(&context)) {
- free(outbuf);
- return 1;
- }
-
- if (sl_pp_tokenise(&context, outbuf, &intokens)) {
- sl_pp_context_destroy(&context);
- free(outbuf);
- return 0;
- }
-
- free(outbuf);
-
- if (sl_pp_version(&context, intokens, &version, &tokens_eaten)) {
- sl_pp_context_destroy(&context);
- free(intokens);
- return 0;
- }
-
- if (sl_pp_process(&context, &intokens[tokens_eaten], &tokens)) {
- sl_pp_context_destroy(&context);
- free(intokens);
- return 0;
- }
-
- free(intokens);
-
- /* For the time being we care about only a handful of tokens. */
- {
- const struct sl_pp_token_info *src = tokens;
- struct sl_pp_token_info *dst = tokens;
-
- while (src->token != SL_PP_EOF) {
- switch (src->token) {
- case SL_PP_COMMA:
- case SL_PP_SEMICOLON:
- case SL_PP_LBRACE:
- case SL_PP_RBRACE:
- case SL_PP_LPAREN:
- case SL_PP_RPAREN:
- case SL_PP_LBRACKET:
- case SL_PP_RBRACKET:
- case SL_PP_DOT:
- case SL_PP_INCREMENT:
- case SL_PP_ADDASSIGN:
- case SL_PP_PLUS:
- case SL_PP_DECREMENT:
- case SL_PP_SUBASSIGN:
- case SL_PP_MINUS:
- case SL_PP_BITNOT:
- case SL_PP_NOTEQUAL:
- case SL_PP_NOT:
- case SL_PP_MULASSIGN:
- case SL_PP_STAR:
- case SL_PP_DIVASSIGN:
- case SL_PP_SLASH:
- case SL_PP_MODASSIGN:
- case SL_PP_MODULO:
- case SL_PP_LSHIFTASSIGN:
- case SL_PP_LSHIFT:
- case SL_PP_LESSEQUAL:
- case SL_PP_LESS:
- case SL_PP_RSHIFTASSIGN:
- case SL_PP_RSHIFT:
- case SL_PP_GREATEREQUAL:
- case SL_PP_GREATER:
- case SL_PP_EQUAL:
- case SL_PP_ASSIGN:
- case SL_PP_AND:
- case SL_PP_BITANDASSIGN:
- case SL_PP_BITAND:
- case SL_PP_XOR:
- case SL_PP_BITXORASSIGN:
- case SL_PP_BITXOR:
- case SL_PP_OR:
- case SL_PP_BITORASSIGN:
- case SL_PP_BITOR:
- case SL_PP_QUESTION:
- case SL_PP_COLON:
- case SL_PP_IDENTIFIER:
- case SL_PP_NUMBER:
- *dst++ = *src++;
- break;
-
- default:
- src++;
- }
- }
-
- /* The end of stream token. */
- *dst = *src;
- }
- }
-
bytepool_create(&bp, estimate_prod_size);
if (bp == NULL) {
- sl_pp_context_destroy(&context);
- free(tokens);
return 0;
}
- if (fast_match(di, tokens, &index, di->m_syntax, &_P, bp, &rbc, &context) != mr_matched) {
- sl_pp_context_destroy(&context);
- free(tokens);
+ if (fast_match(di, tokens, &index, di->m_syntax, &_P, bp, &rbc, context) != mr_matched) {
bytepool_destroy (&bp);
free_regbyte_ctx_stack (rbc, NULL);
return 0;
}
- sl_pp_context_destroy(&context);
- free(tokens);
free_regbyte_ctx_stack(rbc, NULL);
*prod = bp->_F;
diff --git a/src/mesa/shader/grammar/grammar.h b/src/mesa/shader/grammar/grammar.h
index 151b5f082b9..c3c21659d6c 100644
--- a/src/mesa/shader/grammar/grammar.h
+++ b/src/mesa/shader/grammar/grammar.h
@@ -69,8 +69,13 @@ int grammar_set_reg8 (grammar id, const byte *name, byte value);
<estimate_prod_size> is a hint - the initial production buffer size will be of this size,
but if more room is needed it will be safely resized; set it to 0x1000 or so
*/
-int grammar_fast_check (grammar id, const byte *text, byte **prod, unsigned int *size,
- unsigned int estimate_prod_size);
+int
+grammar_fast_check (grammar id,
+ struct sl_pp_context *context,
+ struct sl_pp_token_info *tokens,
+ byte **prod,
+ unsigned int *size,
+ unsigned int estimate_prod_size);
/*
destroys grammar object identified by <id>
diff --git a/src/mesa/shader/grammar/grammar_mesa.h b/src/mesa/shader/grammar/grammar_mesa.h
index 7f4370f32d0..20d13da8381 100644
--- a/src/mesa/shader/grammar/grammar_mesa.h
+++ b/src/mesa/shader/grammar/grammar_mesa.h
@@ -27,10 +27,6 @@
#include "../../glsl/pp/sl_pp_context.h"
-#include "../../glsl/pp/sl_pp_purify.h"
-#include "../../glsl/pp/sl_pp_version.h"
-#include "../../glsl/pp/sl_pp_process.h"
-
#include "main/imports.h"
/* NOTE: include Mesa 3-D specific headers here */
diff --git a/src/mesa/shader/slang/slang_compile.c b/src/mesa/shader/slang/slang_compile.c
index 75dd8a045bd..8ea86756184 100644
--- a/src/mesa/shader/slang/slang_compile.c
+++ b/src/mesa/shader/slang/slang_compile.c
@@ -36,6 +36,10 @@
#include "shader/prog_print.h"
#include "shader/prog_parameter.h"
#include "shader/grammar/grammar_mesa.h"
+#include "../../glsl/pp/sl_pp_context.h"
+#include "../../glsl/pp/sl_pp_purify.h"
+#include "../../glsl/pp/sl_pp_version.h"
+#include "../../glsl/pp/sl_pp_process.h"
#include "slang_codegen.h"
#include "slang_compile.h"
#include "slang_storage.h"
@@ -2579,9 +2583,115 @@ compile_with_grammar(grammar id, const char *source, slang_code_unit * unit,
const struct gl_extensions *extensions,
struct gl_sl_pragmas *pragmas)
{
+ struct sl_pp_context context;
+ struct sl_pp_token_info *tokens;
byte *prod;
- GLuint size, version;
- GLuint maxVersion;
+ GLuint size;
+ unsigned int version;
+ unsigned int maxVersion;
+ int result;
+ struct sl_pp_purify_options options;
+ char *outbuf;
+ struct sl_pp_token_info *intokens;
+ unsigned int tokens_eaten;
+
+ memset(&options, 0, sizeof(options));
+ if (sl_pp_purify(source, &options, &outbuf)) {
+ return GL_FALSE;
+ }
+
+ if (sl_pp_context_init(&context)) {
+ free(outbuf);
+ return GL_FALSE;
+ }
+
+ if (sl_pp_tokenise(&context, outbuf, &intokens)) {
+ sl_pp_context_destroy(&context);
+ free(outbuf);
+ return GL_FALSE;
+ }
+
+ free(outbuf);
+
+ if (sl_pp_version(&context, intokens, &version, &tokens_eaten)) {
+ sl_pp_context_destroy(&context);
+ free(intokens);
+ return GL_FALSE;
+ }
+
+ if (sl_pp_process(&context, &intokens[tokens_eaten], &tokens)) {
+ sl_pp_context_destroy(&context);
+ free(intokens);
+ return GL_FALSE;
+ }
+
+ free(intokens);
+
+ /* For the time being we care about only a handful of tokens. */
+ {
+ const struct sl_pp_token_info *src = tokens;
+ struct sl_pp_token_info *dst = tokens;
+
+ while (src->token != SL_PP_EOF) {
+ switch (src->token) {
+ case SL_PP_COMMA:
+ case SL_PP_SEMICOLON:
+ case SL_PP_LBRACE:
+ case SL_PP_RBRACE:
+ case SL_PP_LPAREN:
+ case SL_PP_RPAREN:
+ case SL_PP_LBRACKET:
+ case SL_PP_RBRACKET:
+ case SL_PP_DOT:
+ case SL_PP_INCREMENT:
+ case SL_PP_ADDASSIGN:
+ case SL_PP_PLUS:
+ case SL_PP_DECREMENT:
+ case SL_PP_SUBASSIGN:
+ case SL_PP_MINUS:
+ case SL_PP_BITNOT:
+ case SL_PP_NOTEQUAL:
+ case SL_PP_NOT:
+ case SL_PP_MULASSIGN:
+ case SL_PP_STAR:
+ case SL_PP_DIVASSIGN:
+ case SL_PP_SLASH:
+ case SL_PP_MODASSIGN:
+ case SL_PP_MODULO:
+ case SL_PP_LSHIFTASSIGN:
+ case SL_PP_LSHIFT:
+ case SL_PP_LESSEQUAL:
+ case SL_PP_LESS:
+ case SL_PP_RSHIFTASSIGN:
+ case SL_PP_RSHIFT:
+ case SL_PP_GREATEREQUAL:
+ case SL_PP_GREATER:
+ case SL_PP_EQUAL:
+ case SL_PP_ASSIGN:
+ case SL_PP_AND:
+ case SL_PP_BITANDASSIGN:
+ case SL_PP_BITAND:
+ case SL_PP_XOR:
+ case SL_PP_BITXORASSIGN:
+ case SL_PP_BITXOR:
+ case SL_PP_OR:
+ case SL_PP_BITORASSIGN:
+ case SL_PP_BITOR:
+ case SL_PP_QUESTION:
+ case SL_PP_COLON:
+ case SL_PP_IDENTIFIER:
+ case SL_PP_NUMBER:
+ *dst++ = *src++;
+ break;
+
+ default:
+ src++;
+ }
+ }
+
+ /* The end of stream token. */
+ *dst = *src;
+ }
#if FEATURE_ARB_shading_language_120
maxVersion = 120;
@@ -2591,20 +2701,23 @@ compile_with_grammar(grammar id, const char *source, slang_code_unit * unit,
maxVersion = 110;
#endif
- /* First retrieve the version number. */
- version = 110;
-
- if (version > maxVersion) {
+ if (version > maxVersion ||
+ (version != 100 && version != 110 && version != 120)) {
slang_info_log_error(infolog,
"language version %.2f is not supported.",
version * 0.01);
+ sl_pp_context_destroy(&context);
+ free(tokens);
return GL_FALSE;
}
/* Finally check the syntax and generate its binary representation. */
- if (!grammar_fast_check(id,
- (const byte *)source,
- &prod, &size, 65536)) {
+ result = grammar_fast_check(id, &context, tokens, &prod, &size, 65536);
+
+ sl_pp_context_destroy(&context);
+ free(tokens);
+
+ if (!result) {
char buf[1024];
GLint pos;