Diffstat (limited to 'qobject/json-lexer.c')
-rw-r--r--  qobject/json-lexer.c | 313
1 file changed, 139 insertions, 174 deletions
diff --git a/qobject/json-lexer.c b/qobject/json-lexer.c
index 980ba159d6..e1745a3d95 100644
--- a/qobject/json-lexer.c
+++ b/qobject/json-lexer.c
@@ -12,63 +12,116 @@
*/
#include "qemu/osdep.h"
-#include "qemu-common.h"
-#include "qapi/qmp/json-lexer.h"
+#include "json-parser-int.h"
#define MAX_TOKEN_SIZE (64ULL << 20)
/*
- * Required by JSON (RFC 7159):
+ * From RFC 8259 "The JavaScript Object Notation (JSON) Data
+ * Interchange Format", with [comments in brackets]:
*
- * \"([^\\\"]|\\[\"'\\/bfnrt]|\\u[0-9a-fA-F]{4})*\"
- * -?(0|[1-9][0-9]*)(.[0-9]+)?([eE][-+]?[0-9]+)?
- * [{}\[\],:]
- * [a-z]+ # covers null, true, false
+ * The set of tokens includes six structural characters, strings,
+ * numbers, and three literal names.
*
- * Extension of '' strings:
+ * These are the six structural characters:
*
- * '([^\\']|\\[\"'\\/bfnrt]|\\u[0-9a-fA-F]{4})*'
+ * begin-array = ws %x5B ws ; [ left square bracket
+ * begin-object = ws %x7B ws ; { left curly bracket
+ * end-array = ws %x5D ws ; ] right square bracket
+ * end-object = ws %x7D ws ; } right curly bracket
+ * name-separator = ws %x3A ws ; : colon
+ * value-separator = ws %x2C ws ; , comma
*
- * Extension for vararg handling in JSON construction:
+ * Insignificant whitespace is allowed before or after any of the six
+ * structural characters.
+ * [This lexer accepts it before or after any token, which is actually
+ * the same, as the grammar always has structural characters between
+ * other tokens.]
*
- * %((l|ll|I64)?d|[ipsf])
+ * ws = *(
+ * %x20 / ; Space
+ * %x09 / ; Horizontal tab
+ * %x0A / ; Line feed or New line
+ * %x0D ) ; Carriage return
*
+ * [...] three literal names:
+ * false null true
+ * [This lexer accepts [a-z]+, and leaves rejecting unknown literal
+ * names to the parser.]
+ *
+ * [Numbers:]
+ *
+ * number = [ minus ] int [ frac ] [ exp ]
+ * decimal-point = %x2E ; .
+ * digit1-9 = %x31-39 ; 1-9
+ * e = %x65 / %x45 ; e E
+ * exp = e [ minus / plus ] 1*DIGIT
+ * frac = decimal-point 1*DIGIT
+ * int = zero / ( digit1-9 *DIGIT )
+ * minus = %x2D ; -
+ * plus = %x2B ; +
+ * zero = %x30 ; 0
+ *
+ * [Strings:]
+ * string = quotation-mark *char quotation-mark
+ *
+ * char = unescaped /
+ * escape (
+ * %x22 / ; " quotation mark U+0022
+ * %x5C / ; \ reverse solidus U+005C
+ * %x2F / ; / solidus U+002F
+ * %x62 / ; b backspace U+0008
+ * %x66 / ; f form feed U+000C
+ * %x6E / ; n line feed U+000A
+ * %x72 / ; r carriage return U+000D
+ * %x74 / ; t tab U+0009
+ * %x75 4HEXDIG ) ; uXXXX U+XXXX
+ * escape = %x5C ; \
+ * quotation-mark = %x22 ; "
+ * unescaped = %x20-21 / %x23-5B / %x5D-10FFFF
+ * [This lexer accepts any non-control character after escape, and
+ * leaves rejecting invalid ones to the parser.]
+ *
+ *
+ * Extensions over RFC 8259:
+ * - Extra escape sequence in strings:
+ * 0x27 (apostrophe) is recognized after escape, too
+ * - Single-quoted strings:
+ * Like double-quoted strings, except they're delimited by %x27
+ * (apostrophe) instead of %x22 (quotation mark), and can't contain
+ * unescaped apostrophe, but can contain unescaped quotation mark.
+ * - Interpolation, if enabled:
+ * The lexer accepts %[A-Za-z0-9]*, and leaves rejecting invalid
+ * ones to the parser.
+ *
+ * Note:
+ * - Input must be encoded in modified UTF-8.
+ * - Decoding and validating is left to the parser.
*/
enum json_lexer_state {
IN_ERROR = 0, /* must really be 0, see json_lexer[] */
- IN_DQ_UCODE3,
- IN_DQ_UCODE2,
- IN_DQ_UCODE1,
- IN_DQ_UCODE0,
IN_DQ_STRING_ESCAPE,
IN_DQ_STRING,
- IN_SQ_UCODE3,
- IN_SQ_UCODE2,
- IN_SQ_UCODE1,
- IN_SQ_UCODE0,
IN_SQ_STRING_ESCAPE,
IN_SQ_STRING,
IN_ZERO,
- IN_DIGITS,
- IN_DIGIT,
+ IN_EXP_DIGITS,
+ IN_EXP_SIGN,
IN_EXP_E,
IN_MANTISSA,
IN_MANTISSA_DIGITS,
- IN_NONZERO_NUMBER,
- IN_NEG_NONZERO_NUMBER,
+ IN_DIGITS,
+ IN_SIGN,
IN_KEYWORD,
- IN_ESCAPE,
- IN_ESCAPE_L,
- IN_ESCAPE_LL,
- IN_ESCAPE_I,
- IN_ESCAPE_I6,
- IN_ESCAPE_I64,
+ IN_INTERP,
IN_WHITESPACE,
IN_START,
+ IN_START_INTERP, /* must be IN_START + 1 */
};
-QEMU_BUILD_BUG_ON((int)JSON_MIN <= (int)IN_START);
+QEMU_BUILD_BUG_ON((int)JSON_MIN <= (int)IN_START_INTERP);
+QEMU_BUILD_BUG_ON(IN_START_INTERP != IN_START + 1);
#define TERMINAL(state) [0 ... 0x7F] = (state)
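
The grammar comment above reduces to a handful of token classes. For orientation, here are a few sample inputs and the token each one lexes to (illustrative only, not part of the patch; the struct and array names below are made up, the token names are the ones used in json-parser-int.h):

/* Illustrative only, not part of the patch. */
static const struct {
    const char *input;
    const char *token;
} lex_examples[] = {
    { "\"caf\\u00e9\"", "JSON_STRING"  },  /* double-quoted, \u escape        */
    { "'don\\'t'",      "JSON_STRING"  },  /* single-quote extension          */
    { "42",             "JSON_INTEGER" },
    { "-0.5e+3",        "JSON_FLOAT"   },
    { "null",           "JSON_KEYWORD" },  /* [a-z]+; parser checks the name  */
    { "%d",             "JSON_INTERP"  },  /* only with interpolation enabled */
    { "{",              "JSON_LCURLY"  },
};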
@@ -76,87 +129,27 @@ QEMU_BUILD_BUG_ON((int)JSON_MIN <= (int)IN_START);
from OLD_STATE required lookahead. This happens whenever the table
below uses the TERMINAL macro. */
#define TERMINAL_NEEDED_LOOKAHEAD(old_state, terminal) \
- (json_lexer[(old_state)][0] == (terminal))
+ (terminal != IN_ERROR && json_lexer[(old_state)][0] == (terminal))
static const uint8_t json_lexer[][256] = {
/* Relies on default initialization to IN_ERROR! */
/* double quote string */
- [IN_DQ_UCODE3] = {
- ['0' ... '9'] = IN_DQ_STRING,
- ['a' ... 'f'] = IN_DQ_STRING,
- ['A' ... 'F'] = IN_DQ_STRING,
- },
- [IN_DQ_UCODE2] = {
- ['0' ... '9'] = IN_DQ_UCODE3,
- ['a' ... 'f'] = IN_DQ_UCODE3,
- ['A' ... 'F'] = IN_DQ_UCODE3,
- },
- [IN_DQ_UCODE1] = {
- ['0' ... '9'] = IN_DQ_UCODE2,
- ['a' ... 'f'] = IN_DQ_UCODE2,
- ['A' ... 'F'] = IN_DQ_UCODE2,
- },
- [IN_DQ_UCODE0] = {
- ['0' ... '9'] = IN_DQ_UCODE1,
- ['a' ... 'f'] = IN_DQ_UCODE1,
- ['A' ... 'F'] = IN_DQ_UCODE1,
- },
[IN_DQ_STRING_ESCAPE] = {
- ['b'] = IN_DQ_STRING,
- ['f'] = IN_DQ_STRING,
- ['n'] = IN_DQ_STRING,
- ['r'] = IN_DQ_STRING,
- ['t'] = IN_DQ_STRING,
- ['/'] = IN_DQ_STRING,
- ['\\'] = IN_DQ_STRING,
- ['\''] = IN_DQ_STRING,
- ['\"'] = IN_DQ_STRING,
- ['u'] = IN_DQ_UCODE0,
+ [0x20 ... 0xFD] = IN_DQ_STRING,
},
[IN_DQ_STRING] = {
- [1 ... 0xBF] = IN_DQ_STRING,
- [0xC2 ... 0xF4] = IN_DQ_STRING,
+ [0x20 ... 0xFD] = IN_DQ_STRING,
['\\'] = IN_DQ_STRING_ESCAPE,
['"'] = JSON_STRING,
},
/* single quote string */
- [IN_SQ_UCODE3] = {
- ['0' ... '9'] = IN_SQ_STRING,
- ['a' ... 'f'] = IN_SQ_STRING,
- ['A' ... 'F'] = IN_SQ_STRING,
- },
- [IN_SQ_UCODE2] = {
- ['0' ... '9'] = IN_SQ_UCODE3,
- ['a' ... 'f'] = IN_SQ_UCODE3,
- ['A' ... 'F'] = IN_SQ_UCODE3,
- },
- [IN_SQ_UCODE1] = {
- ['0' ... '9'] = IN_SQ_UCODE2,
- ['a' ... 'f'] = IN_SQ_UCODE2,
- ['A' ... 'F'] = IN_SQ_UCODE2,
- },
- [IN_SQ_UCODE0] = {
- ['0' ... '9'] = IN_SQ_UCODE1,
- ['a' ... 'f'] = IN_SQ_UCODE1,
- ['A' ... 'F'] = IN_SQ_UCODE1,
- },
[IN_SQ_STRING_ESCAPE] = {
- ['b'] = IN_SQ_STRING,
- ['f'] = IN_SQ_STRING,
- ['n'] = IN_SQ_STRING,
- ['r'] = IN_SQ_STRING,
- ['t'] = IN_SQ_STRING,
- ['/'] = IN_SQ_STRING,
- ['\\'] = IN_SQ_STRING,
- ['\''] = IN_SQ_STRING,
- ['\"'] = IN_SQ_STRING,
- ['u'] = IN_SQ_UCODE0,
+ [0x20 ... 0xFD] = IN_SQ_STRING,
},
[IN_SQ_STRING] = {
- [1 ... 0xBF] = IN_SQ_STRING,
- [0xC2 ... 0xF4] = IN_SQ_STRING,
+ [0x20 ... 0xFD] = IN_SQ_STRING,
['\\'] = IN_SQ_STRING_ESCAPE,
['\''] = JSON_STRING,
},
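
The TERMINAL default and TERMINAL_NEEDED_LOOKAHEAD() are easiest to follow on a concrete input; here is a worked trace (illustrative, not from the patch) of feeding the bytes "123,":

/*
 * Worked example: feeding "123,"
 *
 *   state       byte  new state      consumed?
 *   IN_START    '1'   IN_DIGITS      yes
 *   IN_DIGITS   '2'   IN_DIGITS      yes
 *   IN_DIGITS   '3'   IN_DIGITS      yes
 *   IN_DIGITS   ','   JSON_INTEGER   no: ',' hit the TERMINAL default, so
 *                                    "123" is emitted and ',' is rescanned
 *                                    from the start state
 *   IN_START    ','   JSON_COMMA     yes, emitted as its own token
 *
 * The new "terminal != IN_ERROR" guard keeps a transition into IN_ERROR
 * (whose value is 0) from being mistaken for such a lookahead, so the
 * offending byte is consumed and lands in the JSON_ERROR token.
 */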
@@ -169,19 +162,19 @@ static const uint8_t json_lexer[][256] = {
},
/* Float */
- [IN_DIGITS] = {
+ [IN_EXP_DIGITS] = {
TERMINAL(JSON_FLOAT),
- ['0' ... '9'] = IN_DIGITS,
+ ['0' ... '9'] = IN_EXP_DIGITS,
},
- [IN_DIGIT] = {
- ['0' ... '9'] = IN_DIGITS,
+ [IN_EXP_SIGN] = {
+ ['0' ... '9'] = IN_EXP_DIGITS,
},
[IN_EXP_E] = {
- ['-'] = IN_DIGIT,
- ['+'] = IN_DIGIT,
- ['0' ... '9'] = IN_DIGITS,
+ ['-'] = IN_EXP_SIGN,
+ ['+'] = IN_EXP_SIGN,
+ ['0' ... '9'] = IN_EXP_DIGITS,
},
[IN_MANTISSA_DIGITS] = {
@@ -196,17 +189,17 @@ static const uint8_t json_lexer[][256] = {
},
/* Number */
- [IN_NONZERO_NUMBER] = {
+ [IN_DIGITS] = {
TERMINAL(JSON_INTEGER),
- ['0' ... '9'] = IN_NONZERO_NUMBER,
+ ['0' ... '9'] = IN_DIGITS,
['e'] = IN_EXP_E,
['E'] = IN_EXP_E,
['.'] = IN_MANTISSA,
},
- [IN_NEG_NONZERO_NUMBER] = {
+ [IN_SIGN] = {
['0'] = IN_ZERO,
- ['1' ... '9'] = IN_NONZERO_NUMBER,
+ ['1' ... '9'] = IN_DIGITS,
},
/* keywords */
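
The renamed number states transcribe the RFC 8259 number grammar quoted above almost literally. An illustrative trace of "-10.5e+3" (the IN_MANTISSA and IN_MANTISSA_DIGITS rows are unchanged context and not shown in full here):

/*
 * Illustrative trace of "-10.5e+3":
 *
 *   '-'  IN_START           -> IN_SIGN
 *   '1'  IN_SIGN            -> IN_DIGITS
 *   '0'  IN_DIGITS          -> IN_DIGITS
 *   '.'  IN_DIGITS          -> IN_MANTISSA
 *   '5'  IN_MANTISSA        -> IN_MANTISSA_DIGITS
 *   'e'  IN_MANTISSA_DIGITS -> IN_EXP_E
 *   '+'  IN_EXP_E           -> IN_EXP_SIGN
 *   '3'  IN_EXP_SIGN        -> IN_EXP_DIGITS
 *
 * Whatever delimiter follows then terminates the token as JSON_FLOAT via
 * the TERMINAL entry of IN_EXP_DIGITS; a plain "-10" would instead stop
 * in IN_DIGITS and terminate as JSON_INTEGER.
 */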
@@ -224,49 +217,25 @@ static const uint8_t json_lexer[][256] = {
['\n'] = IN_WHITESPACE,
},
- /* escape */
- [IN_ESCAPE_LL] = {
- ['d'] = JSON_ESCAPE,
- ['u'] = JSON_ESCAPE,
- },
-
- [IN_ESCAPE_L] = {
- ['d'] = JSON_ESCAPE,
- ['l'] = IN_ESCAPE_LL,
- ['u'] = JSON_ESCAPE,
- },
-
- [IN_ESCAPE_I64] = {
- ['d'] = JSON_ESCAPE,
- ['u'] = JSON_ESCAPE,
- },
-
- [IN_ESCAPE_I6] = {
- ['4'] = IN_ESCAPE_I64,
+ /* interpolation */
+ [IN_INTERP] = {
+ TERMINAL(JSON_INTERP),
+ ['A' ... 'Z'] = IN_INTERP,
+ ['a' ... 'z'] = IN_INTERP,
+ ['0' ... '9'] = IN_INTERP,
},
- [IN_ESCAPE_I] = {
- ['6'] = IN_ESCAPE_I6,
- },
-
- [IN_ESCAPE] = {
- ['d'] = JSON_ESCAPE,
- ['i'] = JSON_ESCAPE,
- ['p'] = JSON_ESCAPE,
- ['s'] = JSON_ESCAPE,
- ['u'] = JSON_ESCAPE,
- ['f'] = JSON_ESCAPE,
- ['l'] = IN_ESCAPE_L,
- ['I'] = IN_ESCAPE_I,
- },
-
- /* top level rule */
- [IN_START] = {
+ /*
+ * Two start states:
+ * - IN_START recognizes JSON tokens with our string extensions
+ * - IN_START_INTERP additionally recognizes interpolation.
+ */
+ [IN_START ... IN_START_INTERP] = {
['"'] = IN_DQ_STRING,
['\''] = IN_SQ_STRING,
['0'] = IN_ZERO,
- ['1' ... '9'] = IN_NONZERO_NUMBER,
- ['-'] = IN_NEG_NONZERO_NUMBER,
+ ['1' ... '9'] = IN_DIGITS,
+ ['-'] = IN_SIGN,
['{'] = JSON_LCURLY,
['}'] = JSON_RCURLY,
['['] = JSON_LSQUARE,
@@ -274,23 +243,23 @@ static const uint8_t json_lexer[][256] = {
[','] = JSON_COMMA,
[':'] = JSON_COLON,
['a' ... 'z'] = IN_KEYWORD,
- ['%'] = IN_ESCAPE,
[' '] = IN_WHITESPACE,
['\t'] = IN_WHITESPACE,
['\r'] = IN_WHITESPACE,
['\n'] = IN_WHITESPACE,
},
+ [IN_START_INTERP]['%'] = IN_INTERP,
};
-void json_lexer_init(JSONLexer *lexer, JSONLexerEmitter func)
+void json_lexer_init(JSONLexer *lexer, bool enable_interpolation)
{
- lexer->emit = func;
- lexer->state = IN_START;
+ lexer->start_state = lexer->state = enable_interpolation
+ ? IN_START_INTERP : IN_START;
lexer->token = g_string_sized_new(3);
lexer->x = lexer->y = 0;
}
-static int json_lexer_feed_char(JSONLexer *lexer, char ch, bool flush)
+static void json_lexer_feed_char(JSONLexer *lexer, char ch, bool flush)
{
int char_consumed, new_state;
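
The two start states share one row of the table through the range designator and differ only in '%'. A sketch of what the new enable_interpolation argument changes (illustrative, derived from the rows above):

/*
 * json_lexer_init(&lexer, false):  start state IN_START
 *     "%d" : '%' has no entry in IN_START's row, so the lexer enters
 *            IN_ERROR and emits a JSON_ERROR token
 * json_lexer_init(&lexer, true):   start state IN_START_INTERP
 *     "%d" : '%' -> IN_INTERP, 'd' -> IN_INTERP; the next delimiter
 *            terminates it as a JSON_INTERP token ("%d")
 *
 * IN_START_INTERP must equal IN_START + 1 so that the range designator
 * [IN_START ... IN_START_INTERP] covers exactly these two rows.
 */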
@@ -304,7 +273,7 @@ static int json_lexer_feed_char(JSONLexer *lexer, char ch, bool flush)
assert(lexer->state <= ARRAY_SIZE(json_lexer));
new_state = json_lexer[lexer->state][(uint8_t)ch];
char_consumed = !TERMINAL_NEEDED_LOOKAHEAD(lexer->state, new_state);
- if (char_consumed) {
+ if (char_consumed && !flush) {
g_string_append_c(lexer->token, ch);
}
@@ -315,23 +284,23 @@ static int json_lexer_feed_char(JSONLexer *lexer, char ch, bool flush)
case JSON_RSQUARE:
case JSON_COLON:
case JSON_COMMA:
- case JSON_ESCAPE:
+ case JSON_INTERP:
case JSON_INTEGER:
case JSON_FLOAT:
case JSON_KEYWORD:
case JSON_STRING:
- lexer->emit(lexer, lexer->token, new_state, lexer->x, lexer->y);
+ json_message_process_token(lexer, lexer->token, new_state,
+ lexer->x, lexer->y);
/* fall through */
case JSON_SKIP:
g_string_truncate(lexer->token, 0);
- new_state = IN_START;
+ new_state = lexer->start_state;
break;
case IN_ERROR:
/* XXX: To avoid having previous bad input leaving the parser in an
* unresponsive state where we consume unpredictable amounts of
* subsequent "good" input, percolate this error state up to the
- * tokenizer/parser by forcing a NULL object to be emitted, then
- * reset state.
+ * parser by emitting a JSON_ERROR token, then reset lexer state.
*
* Also note that this handling is required for reliable channel
* negotiation between QMP and the guest agent, since chr(0xFF)
@@ -340,11 +309,11 @@ static int json_lexer_feed_char(JSONLexer *lexer, char ch, bool flush)
* never a valid ASCII/UTF-8 sequence, so this should reliably
* induce an error/flush state.
*/
- lexer->emit(lexer, lexer->token, JSON_ERROR, lexer->x, lexer->y);
+ json_message_process_token(lexer, lexer->token, JSON_ERROR,
+ lexer->x, lexer->y);
g_string_truncate(lexer->token, 0);
- new_state = IN_START;
- lexer->state = new_state;
- return 0;
+ lexer->state = lexer->start_state;
+ return;
default:
break;
}
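
The channel-negotiation remark can be checked against the tables above: no row accepts anything beyond 0xFD, so 0xFF always falls through to IN_ERROR (illustrative):

/*
 * IN_DQ_STRING   0xFF -> IN_ERROR -> JSON_ERROR emitted, lexer reset
 * IN_START       0xFF -> IN_ERROR -> likewise
 *
 * which is why a client can send a single 0xFF byte to force the lexer
 * back into a known-good state, as the comment above describes.
 */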
@@ -355,33 +324,29 @@ static int json_lexer_feed_char(JSONLexer *lexer, char ch, bool flush)
* this is a security consideration.
*/
if (lexer->token->len > MAX_TOKEN_SIZE) {
- lexer->emit(lexer, lexer->token, lexer->state, lexer->x, lexer->y);
+ json_message_process_token(lexer, lexer->token, lexer->state,
+ lexer->x, lexer->y);
g_string_truncate(lexer->token, 0);
- lexer->state = IN_START;
+ lexer->state = lexer->start_state;
}
-
- return 0;
}
-int json_lexer_feed(JSONLexer *lexer, const char *buffer, size_t size)
+void json_lexer_feed(JSONLexer *lexer, const char *buffer, size_t size)
{
size_t i;
for (i = 0; i < size; i++) {
- int err;
-
- err = json_lexer_feed_char(lexer, buffer[i], false);
- if (err < 0) {
- return err;
- }
+ json_lexer_feed_char(lexer, buffer[i], false);
}
-
- return 0;
}
-int json_lexer_flush(JSONLexer *lexer)
+void json_lexer_flush(JSONLexer *lexer)
{
- return lexer->state == IN_START ? 0 : json_lexer_feed_char(lexer, 0, true);
+ if (lexer->state != lexer->start_state) {
+ json_lexer_feed_char(lexer, 0, true);
+ }
+ json_message_process_token(lexer, lexer->token, JSON_END_OF_INPUT,
+ lexer->x, lexer->y);
}
void json_lexer_destroy(JSONLexer *lexer)
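
With feed and flush now returning void, lexer errors surface only as JSON_ERROR tokens and end of input as JSON_END_OF_INPUT. A minimal sketch of the call order (illustrative: in the tree the JSONLexer is embedded in a JSONMessageParser, which supplies json_message_process_token(); a bare lexer as shown here is only meant to show the API shape):

JSONLexer lexer;

json_lexer_init(&lexer, false);            /* plain JSON, no interpolation    */
json_lexer_feed(&lexer, "{\"a\": 1}", 8);  /* completed tokens go to the      */
                                           /* streamer as they are recognized */
json_lexer_flush(&lexer);                  /* finish any pending token, then  */
                                           /* emit JSON_END_OF_INPUT          */
json_lexer_destroy(&lexer);

Keeping the functions void matches the fact that the lexer itself can no longer fail; bad input becomes a JSON_ERROR token for the parser to report.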