diff --git a/client/mysqltest.cc b/client/mysqltest.cc index b48caa0c2d694..c5a11f854d5d7 100644 --- a/client/mysqltest.cc +++ b/client/mysqltest.cc @@ -271,12 +271,25 @@ static const char *opt_plugin_dir; static const char *opt_suite_dir, *opt_overlay_dir; static size_t suite_dir_len, overlay_dir_len; +/* ReplayTest mode variables */ +static MYSQL *replay_server_mysql= NULL; +static const char *replay_server_socket= NULL; +static my_bool replay_test_mode= FALSE; +static FILE *replay_log_file= NULL; +static const char *replay_log_path= NULL; +static my_bool disable_replay_next_query= FALSE; +static char disable_replay_reason[512]= {0}; +static my_bool disable_replay_testfile= FALSE; +static char disable_replay_testfile_reason[512]= {0}; + /* Precompiled re's */ static regex_t ps_re; /* the query can be run using PS protocol */ static regex_t ps2_re; /* the query can be run using PS protocol with second execution*/ static regex_t sp_re; /* the query can be run as a SP */ static regex_t view_re; /* the query can be run as a view*/ static regex_t cursor_re; /* the query can be run with cursor protocol*/ +static regex_t explain_re; /* the query is EXPLAIN (any variant) */ +static regex_t explain_for_conn_re; /* the query is EXPLAIN ... FOR CONNECTION ... */ static void init_re(void); static int match_re(regex_t *, char *); @@ -423,6 +436,7 @@ enum enum_commands { Q_PS_BIND, Q_PS_EXECUTE, Q_PS_CLOSE, + Q_DISABLE_REPLAY, Q_UNKNOWN, /* Unknown command. */ Q_COMMENT, /* Comments, ignored. 
*/ Q_COMMENT_WITH_COMMAND, @@ -548,6 +562,7 @@ const char *command_names[]= "PS_bind", "PS_execute", "PS_close", + "disable_replay", 0 }; @@ -1506,6 +1521,24 @@ void free_used_memory() uint i; DBUG_ENTER("free_used_memory"); + if (replay_server_mysql) + { + mysql_close(replay_server_mysql); + replay_server_mysql= NULL; + } + + if (replay_log_file) + { + fclose(replay_log_file); + replay_log_file= NULL; + } + + if (replay_log_path) + { + my_free((void*)replay_log_path); + replay_log_path= NULL; + } + if (connections) { close_connections(); @@ -8318,6 +8351,544 @@ void run_execute_stmt(struct st_connection *cn, struct st_command *command, cons void run_close_stmt(struct st_connection *cn, struct st_command *command, const char *query, size_t query_len, DYNAMIC_STRING *ds, DYNAMIC_STRING *ds_warnings); +/* + ReplayTest mode helper functions +*/ + +/* + Ensure connection to replay server is established + Returns 0 on success, non-zero on error +*/ +static int ensure_replay_server_connection() +{ + DBUG_ENTER("ensure_replay_server_connection"); + + if (replay_server_mysql) + DBUG_RETURN(0); + + replay_server_mysql= mysql_init(NULL); + if (!replay_server_mysql) + { + fprintf(stdout, "ReplayTest: Failed to initialize MySQL handle for replay server\n"); + DBUG_RETURN(1); + } + + if (!mysql_real_connect(replay_server_mysql, NULL, NULL, NULL, "test", 0, + replay_server_socket, CLIENT_MULTI_STATEMENTS)) + { + fprintf(stdout, "ReplayTest: Failed to connect to replay server at socket '%s': %d %s\n", + replay_server_socket, mysql_errno(replay_server_mysql), + mysql_error(replay_server_mysql)); + mysql_close(replay_server_mysql); + replay_server_mysql= NULL; + DBUG_RETURN(1); + } + + verbose_msg("ReplayTest: Connected to replay server (database: test)"); + DBUG_RETURN(0); +} + +/* + Check if query is of the form "EXPLAIN ..." that we want to handle via the + replay server. + + Returns TRUE if the query starts with the EXPLAIN keyword. + Returns FALSE for "EXPLAIN ... 
FOR CONNECTION ..." forms (e.g. + "EXPLAIN FOR CONNECTION " or "EXPLAIN FORMAT=JSON FOR CONNECTION "), + since that form does not trigger query optimization/recording. + + Uses the precompiled regexes explain_re / explain_for_conn_re (see init_re). +*/ +static my_bool is_explain_query(const char *query, size_t query_len) +{ + /* match_re / regexec need a null-terminated string; query isn't guaranteed + to be null-terminated at query_len. Copy into a temp buffer. */ + char stack_buf[512]; + char *buf; + my_bool result; + + if (query_len + 1 <= sizeof(stack_buf)) + buf= stack_buf; + else + buf= (char*) my_malloc(PSI_NOT_INSTRUMENTED, query_len + 1, MYF(MY_WME)); + if (!buf) + return FALSE; + + memcpy(buf, query, query_len); + buf[query_len]= '\0'; + + result= match_re(&explain_re, buf) && !match_re(&explain_for_conn_re, buf); + + if (buf != stack_buf) + my_free(buf); + return result; +} + +/* + Log the start of a new replay session +*/ +static void log_replay_session_start() +{ + if (replay_log_file) + { + fprintf(replay_log_file, "### REPLAY SESSION ###\n"); + fflush(replay_log_file); + } +} + +/* + Log a query being sent to the replay server +*/ +static void log_replay_query(const char *query, size_t query_len) +{ + if (replay_log_file) + { + fprintf(replay_log_file, "%.*s;\n", (int)query_len, query); + fflush(replay_log_file); + } +} + +/* + Print the current test-file location (file, line, and include stack) to the + given stream. Mirrors the format used by make_error_message() for regular + mysqltest errors, but prefixed with "ReplayTest: " to group with surrounding + replay diagnostics. +*/ +static void print_replay_test_location(FILE *f) +{ + if (cur_file && cur_file != file_stack) + { + /* Enough for the full 16-entry include stack. 
*/ + char buf[4096]; + buf[0]= '\0'; + fprintf(f, "ReplayTest: In included file \"%s\":\n", + cur_file->file_name); + print_file_stack(buf, buf + sizeof(buf)); + if (buf[0]) + fprintf(f, "ReplayTest: %s", buf); + } + else if (cur_file && cur_file->file_name) + { + fprintf(f, "ReplayTest: In file \"%s\"\n", cur_file->file_name); + } + if (start_lineno > 0) + fprintf(f, "ReplayTest: At line %u\n", start_lineno); +} + + +/* + Handle "disable_replay " command. + + Syntax: + disable_replay next_query + disable_replay testfile + + The first token after the command must be "next_query" or "testfile". + Everything after the scope token is the reason string (spaces allowed). + + - "next_query": one-shot; the next SQL query executed via run_query_normal() + bypasses replay-server processing (if it is EXPLAIN). The flag is consumed + by that one query regardless of whether it is EXPLAIN. + - "testfile": sticky; disables replay-server processing for every EXPLAIN + until mysqltest exits. + + Any syntax violation is a hard error (die). 
+*/ +static void do_disable_replay(struct st_command *command) +{ + const char *p= command->first_argument; + const char *end= command->end; + const char *tok; + size_t tok_len; + size_t reason_len; + my_bool is_testfile; + char *reason_buf; + size_t reason_buf_size; + DBUG_ENTER("do_disable_replay"); + + /* Skip leading whitespace */ + while (p < end && my_isspace(charset_info, *p)) + p++; + + tok= p; + while (p < end && !my_isspace(charset_info, *p)) + p++; + tok_len= (size_t)(p - tok); + + if (tok_len == 10 && strncmp(tok, "next_query", 10) == 0) + is_testfile= FALSE; + else if (tok_len == 8 && strncmp(tok, "testfile", 8) == 0) + is_testfile= TRUE; + else + die("Syntax: disable_replay next_query|testfile "); + + /* Skip whitespace between the scope token and the reason */ + while (p < end && my_isspace(charset_info, *p)) + p++; + + if (p >= end) + die("Syntax: disable_replay next_query|testfile (reason missing)"); + + if (is_testfile) + { + reason_buf= disable_replay_testfile_reason; + reason_buf_size= sizeof(disable_replay_testfile_reason); + } + else + { + reason_buf= disable_replay_reason; + reason_buf_size= sizeof(disable_replay_reason); + } + + /* Copy reason, trim trailing whitespace */ + reason_len= (size_t)(end - p); + while (reason_len > 0 && + my_isspace(charset_info, p[reason_len - 1])) + reason_len--; + + if (reason_len >= reason_buf_size) + reason_len= reason_buf_size - 1; + memcpy(reason_buf, p, reason_len); + reason_buf[reason_len]= '\0'; + + if (is_testfile) + { + disable_replay_testfile= TRUE; + verbose_msg("disable_replay: replay disabled for the rest of this test " + "file (reason: %s)", disable_replay_testfile_reason); + } + else + { + disable_replay_next_query= TRUE; + verbose_msg("disable_replay: next query will bypass replay server " + "(reason: %s)", disable_replay_reason); + } + + command->last_argument= command->end; + DBUG_VOID_RETURN; +} + + +/* + Execute queries from SQL script on replay server + Split by ";\n" and execute each query 
+ Append output from last query to ds +*/ +static void execute_replay_queries(const char *sql_script, DYNAMIC_STRING *ds) +{ + DYNAMIC_STRING result; + const char *p= sql_script; + const char *query_start= p; + my_bool found_explain= FALSE; + DBUG_ENTER("execute_replay_queries"); + + verbose_msg("ReplayTest: SQL script from optimizer_context:\n%s", sql_script); + + log_replay_session_start(); + + init_dynamic_string(&result, "", 1024, 1024); + + /* Single-pass execution: stop at first EXPLAIN FORMAT=JSON */ + while (*p) + { + if (p[0] == ';' && p[1] == '\n') + { + size_t query_len= p - query_start; + const char *q= query_start; + const char *q_end= query_start + query_len; + + /* Skip leading whitespace */ + while (q < q_end && my_isspace(charset_info, *q)) + q++; + + if (q < q_end) + { + /* Check if this is EXPLAIN FORMAT=JSON */ + my_bool is_explain= is_explain_query(query_start, query_len); + + verbose_msg("ReplayTest: Executing query on replay server (%s): %.*s", + is_explain ? "EXPLAIN - will stop after this" : "intermediate", + (int)query_len, query_start); + + /* Log the query */ + log_replay_query(query_start, query_len); + + /* Execute the query */ + if (mysql_real_query(replay_server_mysql, query_start, query_len)) + { + char buf[512]; + int len= my_snprintf(buf, sizeof(buf), + "ReplayTest: Query error: %s\n", + mysql_error(replay_server_mysql)); + fputs(buf, stdout); + print_replay_test_location(stdout); + verbose_msg("%s", buf); + + /* Add failure marker to result output */ + dynstr_append_mem(&result, buf, len); + + goto cleanup; + } + + /* Process results */ + do + { + MYSQL_RES *res= mysql_store_result(replay_server_mysql); + if (res) + { + /* Capture output only if this is EXPLAIN FORMAT=JSON */ + if (is_explain) + { + MYSQL_FIELD *fields= mysql_fetch_fields(res); + uint num_fields= mysql_num_fields(res); + MYSQL_ROW row; + + if (!display_result_vertically) + { + append_table_headings(&result, fields, num_fields); + } + + while ((row= 
mysql_fetch_row(res))) + { + uint i; + ulong *lengths= mysql_fetch_lengths(res); + for (i= 0; i < num_fields; i++) + append_field(&result, i, &fields[i], + row[i], lengths[i], !row[i]); + if (!display_result_vertically) + dynstr_append_mem(&result, "\n", 1); + } + } + mysql_free_result(res); + } + if (mysql_errno(replay_server_mysql)) + { + char buf[512]; + int len= my_snprintf(buf, sizeof(buf), + "ReplayTest: Query error: %s\n", + mysql_error(replay_server_mysql)); + fputs(buf, stdout); + if (is_explain) + dynstr_append_mem(&result, buf, len); + } + } while (mysql_next_result(replay_server_mysql) == 0); + + /* Collect warnings from the EXPLAIN query (replay server) */ + if (is_explain && !disable_warnings) + { + DYNAMIC_STRING ds_warn; + init_dynamic_string(&ds_warn, "", 256, 256); + if (append_warnings(&ds_warn, replay_server_mysql) || ds_warn.length) + { + dynstr_append_mem(&result, "Warnings:\n", 10); + dynstr_append_mem(&result, ds_warn.str, ds_warn.length); + } + dynstr_free(&ds_warn); + } + + /* If this was EXPLAIN, we're done - stop processing */ + if (is_explain) + { + found_explain= TRUE; + verbose_msg("ReplayTest: Found EXPLAIN, stopping script execution"); + break; + } + } + + p += 2; + query_start= p; + } + else + { + p++; + } + } + + /* Handle final query if we haven't found EXPLAIN yet */ + if (!found_explain && query_start < p) + { + const char *q= query_start; + const char *q_end= p; + while (q < q_end && my_isspace(charset_info, *q)) + q++; + if (q < q_end) + { + size_t query_len= p - query_start; + my_bool is_explain= is_explain_query(query_start, query_len); + + verbose_msg("ReplayTest: Executing final query on replay server (%s): %.*s", + is_explain ? 
"EXPLAIN" : "last query", + (int)query_len, query_start); + + /* Log the query */ + log_replay_query(query_start, query_len); + + if (mysql_real_query(replay_server_mysql, query_start, query_len)) + { + fprintf(stdout, "ReplayTest: Query failed on replay server: %d %s\n", + mysql_errno(replay_server_mysql), + mysql_error(replay_server_mysql)); + fprintf(stdout, "ReplayTest: Failed query was: %.*s\n", (int)query_len, query_start); + print_replay_test_location(stdout); + goto cleanup; + } + + do + { + MYSQL_RES *res= mysql_store_result(replay_server_mysql); + if (res) + { + MYSQL_FIELD *fields= mysql_fetch_fields(res); + uint num_fields= mysql_num_fields(res); + MYSQL_ROW row; + + if (!display_result_vertically) + { + append_table_headings(&result, fields, num_fields); + } + + while ((row= mysql_fetch_row(res))) + { + uint i; + ulong *lengths= mysql_fetch_lengths(res); + for (i= 0; i < num_fields; i++) + append_field(&result, i, &fields[i], + row[i], lengths[i], !row[i]); + if (!display_result_vertically) + dynstr_append_mem(&result, "\n", 1); + } + mysql_free_result(res); + } + } while (mysql_next_result(replay_server_mysql) == 0); + + /* Collect warnings from the EXPLAIN query (replay server) */ + if (is_explain && !disable_warnings) + { + DYNAMIC_STRING ds_warn; + init_dynamic_string(&ds_warn, "", 256, 256); + if (append_warnings(&ds_warn, replay_server_mysql) || ds_warn.length) + { + dynstr_append_mem(&result, "Warnings:\n", 10); + dynstr_append_mem(&result, ds_warn.str, ds_warn.length); + } + dynstr_free(&ds_warn); + } + + if (is_explain) + found_explain= TRUE; + } + } + + if (!found_explain) + { + verbose_msg("ReplayTest: Warning - no EXPLAIN FORMAT=JSON found in script"); + } + +cleanup: + /* Preserve accumulated output (EXPLAIN / last-query) in ds BEFORE cleanup query */ + dynstr_append_mem(ds, result.str, result.length); + dynstr_free(&result); + + /* Reset optimizer_replay_context on the replay server, regardless of errors. 
+ Drain and discard any output so ds is not affected. */ + if (replay_server_mysql) + { + if (mysql_real_query(replay_server_mysql, + "set optimizer_replay_context=''", 31)) + { + fprintf(stdout, "ReplayTest: Warning - failed to reset optimizer_replay_context: %d %s\n", + mysql_errno(replay_server_mysql), + mysql_error(replay_server_mysql)); + } + else + { + do + { + MYSQL_RES *res= mysql_store_result(replay_server_mysql); + if (res) + mysql_free_result(res); + } while (mysql_next_result(replay_server_mysql) == 0); + } + } + + DBUG_VOID_RETURN; +} + + +/* + Run an EXPLAIN query directly on the replay server (no context replay), + appending its output (headings + rows + warnings) to ds. + + This is the fallback used when the test server produced an empty + optimizer_context for an EXPLAIN query. +*/ +static void run_explain_directly_on_replay(const char *query, size_t query_len, + DYNAMIC_STRING *ds) +{ + DBUG_ENTER("run_explain_directly_on_replay"); + + if (ensure_replay_server_connection() != 0) + { + fprintf(stdout, "ReplayTest: Failed to connect to replay server\n"); + DBUG_VOID_RETURN; + } + + if (mysql_real_query(replay_server_mysql, query, (ulong)query_len)) + { + char buf[512]; + int len= my_snprintf(buf, sizeof(buf), + "ReplayTest: Direct EXPLAIN failed on replay server: %d %s\n", + mysql_errno(replay_server_mysql), + mysql_error(replay_server_mysql)); + fputs(buf, stdout); + fprintf(stdout, "ReplayTest: Failed query was: %.*s\n", + (int)query_len, query); + dynstr_append_mem(ds, buf, len); + DBUG_VOID_RETURN; + } + + do + { + MYSQL_RES *res= mysql_store_result(replay_server_mysql); + if (res) + { + MYSQL_FIELD *fields= mysql_fetch_fields(res); + uint num_fields= mysql_num_fields(res); + MYSQL_ROW row; + + if (!display_result_vertically) + append_table_headings(ds, fields, num_fields); + + while ((row= mysql_fetch_row(res))) + { + uint i; + ulong *lengths= mysql_fetch_lengths(res); + for (i= 0; i < num_fields; i++) + append_field(ds, i, &fields[i], row[i], 
lengths[i], !row[i]); + if (!display_result_vertically) + dynstr_append_mem(ds, "\n", 1); + } + mysql_free_result(res); + } + } while (mysql_next_result(replay_server_mysql) == 0); + + /* Append warnings from the EXPLAIN query */ + if (!disable_warnings) + { + DYNAMIC_STRING ds_warn; + init_dynamic_string(&ds_warn, "", 256, 256); + if (append_warnings(&ds_warn, replay_server_mysql) || ds_warn.length) + { + dynstr_append_mem(ds, "Warnings:\n", 10); + dynstr_append_mem(ds, ds_warn.str, ds_warn.length); + } + dynstr_free(&ds_warn); + } + + DBUG_VOID_RETURN; +} + + /* Run query using MySQL C API @@ -8338,6 +8909,7 @@ void run_query_normal(struct st_connection *cn, struct st_command *command, MYSQL_RES *res= 0; MYSQL *mysql= cn->mysql; int err= 0, counter= 0; + my_bool replay_mode_active= FALSE; DBUG_ENTER("run_query_normal"); DBUG_PRINT("enter",("flags: %d", flags)); DBUG_PRINT("enter", ("query: '%-.60s'", query)); @@ -8374,6 +8946,48 @@ void run_query_normal(struct st_connection *cn, struct st_command *command, break; } + /* Consume the one-shot "disable_replay next_query" flag: it applies to + exactly one SQL query executed via this function, whether or not it is + an EXPLAIN. */ + { + my_bool skip_replay_this_query= disable_replay_next_query; + if (disable_replay_next_query) + { + verbose_msg("ReplayTest: replay disabled for this query (reason: %s)", + disable_replay_reason); + disable_replay_next_query= FALSE; + disable_replay_reason[0]= '\0'; + } + + /* ReplayTest mode: Set optimizer_record_context BEFORE sending EXPLAIN query */ + if (replay_test_mode && !skip_replay_this_query && + !disable_replay_testfile && + (flags & QUERY_SEND_FLAG) && (flags & QUERY_REAP_FLAG) && + is_explain_query(query, query_len)) + { + replay_mode_active= TRUE; + verbose_msg("ReplayTest: Detected EXPLAIN FORMAT=JSON query, activating replay mode"); + + /* Clear any previously-recorded context left over from an earlier query + (e.g. 
a prior EXPLAIN whose context must not leak into this one). */ + (void) mysql_real_query(mysql, "SET optimizer_record_context=0", 30); + + /* Step 1: Set optimizer_record_context=1 */ + if (mysql_real_query(mysql, "SET optimizer_record_context=1", 30)) + { + fprintf(stdout, "ReplayTest: Failed to set optimizer_record_context: %d %s\n", + mysql_errno(mysql), mysql_error(mysql)); + replay_mode_active= FALSE; + } + else + { + MYSQL_RES *tmp_res= mysql_store_result(mysql); + if (tmp_res) + mysql_free_result(tmp_res); + } + } + } + if (flags & QUERY_SEND_FLAG) { /* @@ -8428,10 +9042,72 @@ void run_query_normal(struct st_connection *cn, struct st_command *command, if (display_metadata) append_metadata(ds, fields, num_fields); - if (!display_result_vertically) + /* In replay mode, headings come from the replay server's result */ + if (!display_result_vertically && + !(replay_mode_active && counter == 0)) append_table_headings(ds, fields, num_fields); - append_result(ds, res); + /* ReplayTest mode: Replace EXPLAIN result with replay server output */ + if (replay_mode_active && counter == 0) + { + /* Free the EXPLAIN result - we won't use it */ + mysql_free_result(res); + res= 0; + + /* Step 2: Query optimizer_context */ + fprintf(stdout, "ReplayTest: Loading context \n"); + if (mysql_real_query(mysql, + "SELECT context FROM information_schema.optimizer_context", 56)) + { + fprintf(stdout, "ReplayTest: Failed to query optimizer_context: %d %s\n", + mysql_errno(mysql), mysql_error(mysql)); + } + else + { + MYSQL_RES *context_res= mysql_store_result(mysql); + my_bool have_context= FALSE; + if (context_res && mysql_num_rows(context_res) > 0) + { + MYSQL_ROW context_row= mysql_fetch_row(context_res); + if (context_row && context_row[0]) + { + const char *sql_script= context_row[0]; + have_context= TRUE; + + /* Step 3: Connect to replay server and execute queries */ + if (ensure_replay_server_connection() == 0) + { + execute_replay_queries(sql_script, ds); + } + else + { + 
fprintf(stdout, "ReplayTest: Failed to connect to replay server\n"); + } + } + } + if (context_res) + mysql_free_result(context_res); + + if (!have_context) + { + /* Empty context: fall back to running the EXPLAIN directly on the + replay server. */ + verbose_msg("ReplayTest: empty optimizer_context, running EXPLAIN " + "directly on replay server"); + run_explain_directly_on_replay(query, query_len, ds); + } + + /* Clear the test server's recorded context so it doesn't leak into + the next EXPLAIN. */ + (void) mysql_real_query(mysql, "SET optimizer_record_context=0", 30); + } + + replay_mode_active= FALSE; + } + else + { + append_result(ds, res); + } } /* @@ -9915,11 +10591,31 @@ void init_re(void) "^(" "[[:space:]]*SELECT[[:space:]])"; + /* + Filter: query starts with the EXPLAIN keyword. + */ + const char *explain_re_str = + "^[[:space:]]*EXPLAIN([[:space:]]|$)"; + + /* + Filter: EXPLAIN ... FOR CONNECTION ... (any EXPLAIN options between). + Matches forms like: + EXPLAIN FOR CONNECTION + EXPLAIN FORMAT=JSON FOR CONNECTION + EXPLAIN EXTENDED FOR CONNECTION + The query body of a real EXPLAIN never ends with "FOR CONNECTION", so a + plain substring-style match is safe in practice. 
+ */ + const char *explain_for_conn_re_str = + "^[[:space:]]*EXPLAIN[[:space:]](.*[[:space:]])?FOR[[:space:]]+CONNECTION([[:space:]]|$)"; + init_re_comp(&ps_re, ps_re_str); init_re_comp(&ps2_re, ps2_re_str); init_re_comp(&sp_re, sp_re_str); init_re_comp(&view_re, view_re_str); init_re_comp(&cursor_re, cursor_re_str); + init_re_comp(&explain_re, explain_re_str); + init_re_comp(&explain_for_conn_re, explain_for_conn_re_str); } @@ -9958,6 +10654,8 @@ void free_re(void) regfree(&sp_re); regfree(&view_re); regfree(&cursor_re); + regfree(&explain_re); + regfree(&explain_for_conn_re); } /****************************************************************************/ @@ -10302,6 +11000,39 @@ int main(int argc, char **argv) var_set_string("MYSQLTEST_FILE", cur_file->file_name); init_re(); + /* Check for ReplayTest mode */ + replay_server_socket= getenv("REPLAY_SERVER_SOCKET"); + if (replay_server_socket && replay_server_socket[0]) + { + replay_test_mode= TRUE; + verbose_msg("ReplayTest mode enabled, replay server socket: %s", + replay_server_socket); + + /* Initialize replay query log file */ + const char *vardir= getenv("MYSQLTEST_VARDIR"); + if (vardir) + { + size_t path_len= strlen(vardir) + 30; /* room for "/log/replay_queries.log" */ + char *log_path= (char*)my_malloc(PSI_NOT_INSTRUMENTED, path_len, MYF(0)); + if (log_path) + { + snprintf(log_path, path_len, "%s/log/replay_queries.log", vardir); + replay_log_path= log_path; + /* Use append mode - MTR cleans var directory on each run */ + replay_log_file= fopen(replay_log_path, "a"); + if (!replay_log_file) + { + fprintf(stderr, "Warning: Could not open replay log file: %s\n", + replay_log_path); + } + else + { + verbose_msg("ReplayTest: Logging queries to %s", replay_log_path); + } + } + } + } + /* Cursor protocol implies ps protocol */ if (cursor_protocol) ps_protocol= 1; @@ -10691,6 +11422,9 @@ int main(int argc, char **argv) case Q_OPTIMIZER_TRACE: enable_optimizer_trace(cur_con); break; + case Q_DISABLE_REPLAY: + 
do_disable_replay(command); + break; case Q_SEND_SHUTDOWN: handle_command_error(command, mysql_shutdown(cur_con->mysql, diff --git a/mariadb-plugin-columnstore.install.generated b/mariadb-plugin-columnstore.install.generated index d987525f2a671..2b1434e58b9a1 100644 --- a/mariadb-plugin-columnstore.install.generated +++ b/mariadb-plugin-columnstore.install.generated @@ -1 +1,3 @@ #File is generated by ColumnstoreLibrary.cmake, do not edit +etc/mysql/columnstore.cnf # added in dbcon/mysql/CMakeLists.txt +usr/local/mysql/lib/plugin/ha_columnstore.so # added in dbcon/mysql/CMakeLists.txt diff --git a/mysql-test/EXTRA_SERVER_QUICKSTART.txt b/mysql-test/EXTRA_SERVER_QUICKSTART.txt new file mode 100644 index 0000000000000..8e1000438d1e0 --- /dev/null +++ b/mysql-test/EXTRA_SERVER_QUICKSTART.txt @@ -0,0 +1,89 @@ +================================================================================ +EXTRA SERVER QUICK START GUIDE +================================================================================ + +This feature allows you to start additional MariaDB server instances during +test execution while mysql-test-run is already running. + +BASIC USAGE +----------- + +1. In your test file (.test): + + # Start extra server + --let $extra_server_num= 1 + --source include/start_extra_server.inc + + # Connect to it + --connect (extra1, 127.0.0.1, root, , test, $EXTRA_SERVER_PORT) + CREATE TABLE t1 (id INT); + SELECT * FROM t1; + + # Stop it + --disconnect extra1 + --let $extra_server_num= 1 + --source include/stop_extra_server.inc + +2. Run the test: + + cd mysql-test + ./mysql-test-run main.extra_server_example + +WHAT IT DOES +------------ + +✓ Creates new data directory: var/extra_server_N/data (copied from install.db) +✓ Picks non-conflicting port: MASTER_MYPORT + 10 + N +✓ Creates unique socket: var/tmp/extra_server_N.sock +✓ Starts mysqld with --skip-grant-tables (no password needed) +✓ Exports connection info: $EXTRA_SERVER_PORT, $EXTRA_SERVER_SOCKET, etc. 
+ +AVAILABLE VARIABLES AFTER START +-------------------------------- + +$EXTRA_SERVER_PORT - Port number (e.g., 10011 for server 1) +$EXTRA_SERVER_SOCKET - Socket path +$EXTRA_SERVER_DATADIR - Data directory path +$EXTRA_SERVER_PID - Process ID + +CUSTOM PORT/SOCKET +------------------ + +--let $extra_server_num= 2 +--let $extra_server_port= 15000 +--let $extra_server_socket= /tmp/my_custom.sock +--source include/start_extra_server.inc + +MULTIPLE SERVERS +---------------- + +--let $extra_server_num= 1 +--source include/start_extra_server.inc + +--let $extra_server_num= 2 +--source include/start_extra_server.inc + +# Now you have two extra servers running! + +FILES CREATED +------------- + +lib/start_extra_server.pl - Perl script (main implementation) +include/start_extra_server.inc - Test include to start server +include/stop_extra_server.inc - Test include to stop server +main/extra_server_example.test - Example test +lib/EXTRA_SERVER_README.md - Full documentation + +TROUBLESHOOTING +--------------- + +If server fails to start, check: + var/log/extra_server_N.err + +Connection info is stored in: + var/tmp/extra_server_N.info + +Direct invocation (from Perl or shell): + perl lib/start_extra_server.pl [port] [socket] + +================================================================================ diff --git a/mysql-test/include/get_rec_idx_ranges_from_opt_ctx.inc b/mysql-test/include/get_rec_idx_ranges_from_opt_ctx.inc index 2f3bffa1d1c49..7e0a363c8af34 100644 --- a/mysql-test/include/get_rec_idx_ranges_from_opt_ctx.inc +++ b/mysql-test/include/get_rec_idx_ranges_from_opt_ctx.inc @@ -4,9 +4,9 @@ set @opt_context= '(?<=set @opt_context=\')([\n\r].*)*(?=\'\;#opt_context_ends)') from information_schema.optimizer_context); set @records= - (select JSON_DETAILED(JSON_EXTRACT(@opt_context, '$**.num_of_records'))); + (select JSON_DETAILED(JSON_EXTRACT(@opt_context, '$**.file_stat_records'))); select *from json_table(@records, - '$[*]' columns(num_of_records text path 
'$')) as jt; + '$[*]' columns(file_stat_records text path '$')) as jt; set @file_stat_records= (select JSON_DETAILED(JSON_EXTRACT(@opt_context, '$**.file_stat_records'))); select *from json_table(@file_stat_records, diff --git a/mysql-test/include/opt_context_schema.inc b/mysql-test/include/opt_context_schema.inc index c7731e69cfe66..855f988eb5aa4 100644 --- a/mysql-test/include/opt_context_schema.inc +++ b/mysql-test/include/opt_context_schema.inc @@ -14,7 +14,7 @@ let $opt_context_schema= "type": "string", "minLength": 1 }, - "num_of_records": { + "file_stat_records": { "type": "number" }, "file_stat_records": { diff --git a/mysql-test/include/start_extra_server.inc b/mysql-test/include/start_extra_server.inc new file mode 100644 index 0000000000000..e768056501a10 --- /dev/null +++ b/mysql-test/include/start_extra_server.inc @@ -0,0 +1,105 @@ +# ==== Purpose ==== +# +# Start an additional mysqld server instance while mysql-test-run is running. +# This creates a new data directory (copied from var/install.db), picks a +# non-conflicting port and socket, and starts the server. +# +# ==== Usage ==== +# +# --let $extra_server_num= 1 +# [--let $extra_server_port= 13307] +# [--let $extra_server_socket= /path/to/socket] +# --source include/start_extra_server.inc +# +# After sourcing this file, the following variables will be set: +# $EXTRA_SERVER_PORT - Port number of the extra server +# $EXTRA_SERVER_SOCKET - Socket path of the extra server +# $EXTRA_SERVER_DATADIR - Data directory of the extra server +# $EXTRA_SERVER_PID - Process ID of the extra server +# +# You can then connect to the server using: +# --connect (conn_name, 127.0.0.1, root, , test, $EXTRA_SERVER_PORT) +# +# ==== Parameters ==== +# +# $extra_server_num +# Required. A unique number identifying this extra server instance. +# Must be unique across all extra servers started in the same test. +# +# $extra_server_port +# Optional. Custom port number for the server. 
If not specified, +# the port will be auto-calculated as MASTER_MYPORT + 10 + server_num. +# +# $extra_server_socket +# Optional. Custom socket path. If not specified, will use +# $MYSQLTEST_VARDIR/tmp/extra_server_N.sock +# + +if (!$extra_server_num) +{ + --die extra_server_num must be set before sourcing start_extra_server.inc +} + +--let $include_filename= start_extra_server.inc [server $extra_server_num] +--source include/begin_include_file.inc + +# Export mysqltest variables to environment for Perl +--let extra_server_num_env= $extra_server_num +--let extra_server_port_env= $extra_server_port +--let extra_server_socket_env= $extra_server_socket + +--perl + use strict; + use warnings; + + my $server_num = $ENV{extra_server_num_env} or die "extra_server_num not set"; + my $port = $ENV{extra_server_port_env} || ""; + my $socket = $ENV{extra_server_socket_env} || ""; + + my $script = "$ENV{MYSQL_TEST_DIR}/lib/start_extra_server.pl"; + die "Script not found: $script\n" unless -f $script; + + my $cmd = "perl $script $server_num"; + $cmd .= " $port" if $port; + $cmd .= " $socket" if $socket; + + print "Executing: $cmd\n"; + my $result = system($cmd); + if ($result != 0) { + die "Failed to start extra server $server_num (exit code: $result)\n"; + } +EOF + +# Read connection info from the info file +--let $extra_server_info_file= $MYSQLTEST_VARDIR/tmp/extra_server_$extra_server_num.info + +--perl + use strict; + use warnings; + + my $file = $ENV{extra_server_info_file}; + die "Info file not found: $file\n" unless -f $file; + + open my $fh, '<', $file or die "Cannot read $file: $!"; + my %info; + while (<$fh>) { + chomp; + if (/^(\w+)=(.+)/) { + $info{$1} = $2; + } + } + close $fh; + + # Export to environment for mysqltest + $ENV{EXTRA_SERVER_PORT} = $info{PORT} if $info{PORT}; + $ENV{EXTRA_SERVER_SOCKET} = $info{SOCKET} if $info{SOCKET}; + $ENV{EXTRA_SERVER_DATADIR} = $info{DATADIR} if $info{DATADIR}; + $ENV{EXTRA_SERVER_PID} = $info{PID} if $info{PID}; + + print 
"Extra server $ENV{extra_server_num_env} connection info:\n"; + print " PORT=$info{PORT}\n" if $info{PORT}; + print " SOCKET=$info{SOCKET}\n" if $info{SOCKET}; +EOF + +--let $include_filename= start_extra_server.inc [server $extra_server_num] +--source include/end_include_file.inc diff --git a/mysql-test/include/stop_extra_server.inc b/mysql-test/include/stop_extra_server.inc new file mode 100644 index 0000000000000..474686073b70f --- /dev/null +++ b/mysql-test/include/stop_extra_server.inc @@ -0,0 +1,98 @@ +# ==== Purpose ==== +# +# Stop an extra server instance that was started with start_extra_server.inc +# +# ==== Usage ==== +# +# --let $extra_server_num= 1 +# --source include/stop_extra_server.inc +# +# ==== Parameters ==== +# +# $extra_server_num +# Required. The number of the extra server to stop (same number used +# when starting it with start_extra_server.inc). +# + +if (!$extra_server_num) +{ + --die extra_server_num must be set before sourcing stop_extra_server.inc +} + +--let $include_filename= stop_extra_server.inc [server $extra_server_num] +--source include/begin_include_file.inc + +# Export mysqltest variable to environment for Perl +--let extra_server_num_env= $extra_server_num + +--perl + use strict; + use warnings; + use POSIX ":sys_wait_h"; + + my $server_num = $ENV{extra_server_num_env} or die "extra_server_num not set"; + my $vardir = $ENV{MYSQLTEST_VARDIR} or die "MYSQLTEST_VARDIR not set"; + my $info_file = "$vardir/tmp/extra_server_$server_num.info"; + + unless (-f $info_file) { + print "Warning: Info file not found: $info_file\n"; + print "Server may not be running or already stopped.\n"; + exit 0; + } + + # Read PID and other info + open my $fh, '<', $info_file or die "Cannot read $info_file: $!"; + my ($pid, $pid_file, $socket); + while (<$fh>) { + chomp; + $pid = $1 if /^PID=(.+)/; + $pid_file = $1 if /^PID_FILE=(.+)/; + $socket = $1 if /^SOCKET=(.+)/; + } + close $fh; + + unless ($pid) { + print "Warning: No PID found in $info_file\n"; 
+ unlink $info_file; + exit 0; + } + + print "Stopping extra server $server_num (PID: $pid)...\n"; + + # Send SIGTERM to server + if (kill 0, $pid) { + kill 'TERM', $pid; + + # Wait for process to exit (up to 10 seconds) + my $max_wait = 10; + my $waited = 0; + while ($waited < $max_wait) { + # waitpid() cannot reap a non-child process; probe liveness with kill 0 + unless (kill 0, $pid) { + print "Server process $pid has exited\n"; + last; + } + sleep 1; + $waited++; + } + + # If still running, send SIGKILL + if ($waited >= $max_wait && kill 0, $pid) { + print "Server did not stop gracefully, sending SIGKILL...\n"; + kill 'KILL', $pid; + sleep 1; + } + } else { + print "Process $pid is not running\n"; + } + + # Cleanup files + unlink $info_file if -f $info_file; + unlink $pid_file if $pid_file && -f $pid_file; + unlink $socket if $socket && -S $socket; + + print "Extra server $server_num stopped\n"; +EOF + +--let $include_filename= stop_extra_server.inc [server $extra_server_num] +--source include/end_include_file.inc diff --git a/mysql-test/lib/EXTRA_SERVER_README.md b/mysql-test/lib/EXTRA_SERVER_README.md new file mode 100644 index 0000000000000..d40508c965329 --- /dev/null +++ b/mysql-test/lib/EXTRA_SERVER_README.md @@ -0,0 +1,136 @@ +# Extra Server Script for MySQL Test Framework + +## Overview + +This script allows you to dynamically start additional MariaDB server instances during test execution while `mysql-test-run` is already running. This is useful for testing scenarios that require multiple independent server instances.
+ +## Features + +- **Dynamic server creation**: Start servers on-demand during test execution +- **Automatic port allocation**: Non-conflicting ports (base_port + 10 + N) +- **Automatic socket allocation**: Unique socket paths per server +- **Data directory management**: Copies from existing `var/install.db` +- **Connection info export**: Provides host, port, socket, datadir, PID + +## Files + +- `lib/start_extra_server.pl` - Perl script that starts the extra server +- `include/start_extra_server.inc` - Test include file to start server +- `include/stop_extra_server.inc` - Test include file to stop server +- `main/extra_server_example.test` - Example test demonstrating usage + +## Usage + +### Starting an Extra Server + +```sql +# Set the server number (must be unique) +--let $extra_server_num= 1 + +# Optional: specify custom port +# --let $extra_server_port= 13307 + +# Optional: specify custom socket +# --let $extra_server_socket= /path/to/socket + +# Start the server +--source include/start_extra_server.inc +``` + +After starting, the following variables are available: +- `$EXTRA_SERVER_PORT` - Port number +- `$EXTRA_SERVER_SOCKET` - Socket path +- `$EXTRA_SERVER_DATADIR` - Data directory +- `$EXTRA_SERVER_PID` - Process ID + +### Connecting to the Extra Server + +```sql +--connect (conn_name, 127.0.0.1, root, , test, $EXTRA_SERVER_PORT) +SELECT "Connected!" AS status; +# ... perform operations ... +--disconnect conn_name +``` + +### Stopping the Extra Server + +```sql +--let $extra_server_num= 1 +--source include/stop_extra_server.inc +``` + +## Port Allocation + +Ports are automatically calculated to avoid conflicts: +- Master servers: `base_port + 0`, `base_port + 1` +- Slave servers: `base_port + 2`, `base_port + 3`, `base_port + 4` +- Extra servers: `base_port + 10 + server_num` + +Default `base_port` is typically 10000 (or `MASTER_MYPORT` if set). 
+ +## Data Directory + +The script copies `var/install.db` to `var/extra_server_N/data`, so: +- No bootstrap needed (system tables already exist) +- Fast startup +- Clean slate for each server + +## Server Configuration + +The extra server is started with: +- `--skip-grant-tables` (for easy test access) +- `--default-storage-engine=myisam` +- `--loose-skip-innodb` (for faster startup) +- Minimal memory settings + +## Example Test + +See `main/extra_server_example.test` for a complete working example. + +## Invocation from mysqltest.cc + +The script can be invoked using the `--exec` command in test files: + +```sql +--exec perl $MYSQL_TEST_DIR/lib/start_extra_server.pl 1 +``` + +Or more conveniently via the include files as shown above. + +## Troubleshooting + +### Server fails to start + +Check the log file: `var/log/extra_server_N.err` + +### Port conflicts + +Specify a custom port: +```sql +--let $extra_server_port= 15000 +``` + +### Connection issues + +Verify the server is running: +```sql +--exec ps aux | grep extra_server +``` + +Check the info file: +```sql +--exec cat $MYSQLTEST_VARDIR/tmp/extra_server_1.info +``` + +## Limitations + +- Servers run with `--skip-grant-tables` (no authentication) +- InnoDB is disabled by default (use `--loose-innodb` if needed) +- No automatic cleanup on test failure (use `--force` in mysql-test-run) + +## Future Enhancements + +- Support for custom mysqld options +- Better error handling and diagnostics +- Automatic cleanup on test failure +- Support for replication setup between extra servers diff --git a/mysql-test/lib/start_extra_server.pl b/mysql-test/lib/start_extra_server.pl new file mode 100755 index 0000000000000..ab9ac4b43b449 --- /dev/null +++ b/mysql-test/lib/start_extra_server.pl @@ -0,0 +1,155 @@ +#!/usr/bin/env perl +# Copyright (c) 2026, MariaDB Corporation. 
+# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; version 2 of the License. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1335 USA + +use strict; +use warnings; +use File::Path qw(make_path remove_tree); +use File::Basename; +use File::Copy; +use POSIX ":sys_wait_h"; + +# Parse arguments +my $vardir = $ENV{MYSQLTEST_VARDIR} or die "MYSQLTEST_VARDIR not set\n"; +my $server_num = shift @ARGV or die "Usage: $0 <server_num> [port] [socket]\n"; +my $custom_port = shift @ARGV; +my $custom_socket = shift @ARGV; + +# Calculate port (base + 10 + server_num if not custom) +my $base_port = $ENV{MASTER_MYPORT} || 10000; +my $port = $custom_port || ($base_port + 10 + $server_num); +my $socket = $custom_socket || "$vardir/tmp/extra_server_$server_num.sock"; + +# Create data directory +my $datadir = "$vardir/extra_server_$server_num/data"; +my $install_db = "$vardir/install.db"; + +die "install.db not found at $install_db\n" unless -d $install_db; + +# Create parent directory if needed +my $server_dir = "$vardir/extra_server_$server_num"; +make_path($server_dir) unless -d $server_dir; + +# Copy install.db to new datadir +if (-d $datadir) { + print "Removing existing datadir: $datadir\n"; + remove_tree($datadir); +} + +print "Copying $install_db to $datadir...\n"; +# Use cp -a to preserve permissions and attributes +system("cp", "-a", $install_db, $datadir) == 0 + or die "Failed to copy $install_db to $datadir: $!\n"; + +# Ensure proper permissions on the datadir
+system("chmod", "-R", "u+rwX", $datadir) == 0 + or warn "Warning: Failed to set permissions on $datadir\n"; + +# Start mysqld +my $mysqld = $ENV{MYSQLD} or die "MYSQLD environment variable not set\n"; +die "mysqld binary not found at $mysqld\n" unless -x $mysqld; + +my $pid_file = "$server_dir/mysqld.pid"; +my $log_file = "$vardir/log/extra_server_$server_num.err"; +my $general_log_file = "$vardir/log/extra_server_$server_num.log"; + +# Ensure log directory exists +make_path("$vardir/log") unless -d "$vardir/log"; + +my @mysqld_args = ( + $mysqld, + "--no-defaults", + "--datadir=$datadir", + "--port=$port", + "--socket=$socket", + "--pid-file=$pid_file", + "--log-error=$log_file", + "--general-log=1", + "--general-log-file=$general_log_file", + "--skip-networking=0", + "--skip-grant-tables", + "--key-buffer-size=1M", + "--sort-buffer-size=256K", + "--max-heap-table-size=1M", + "--gdb", +); + +print "Starting mysqld on port $port with socket $socket...\n"; +print "Command: " . join(" ", @mysqld_args) . "\n"; + +# Fork and start server +my $pid = fork(); +die "Fork failed: $!\n" unless defined $pid; + +if ($pid == 0) { + # Child process - start server + # Redirect stdout/stderr to log file + open STDOUT, '>>', $log_file or die "Cannot redirect STDOUT: $!\n"; + open STDERR, '>>', $log_file or die "Cannot redirect STDERR: $!\n"; + exec(@mysqld_args) or die "Failed to exec mysqld: $!\n"; +} + +# Parent - wait for server to be ready +print "Server process started with PID $pid\n"; +print "Waiting for server to be ready...\n"; + +# Wait for socket file to appear (up to 30 seconds) +my $max_wait = 30; +my $waited = 0; +while ($waited < $max_wait) { + if (-S $socket) { + print "Socket file created: $socket\n"; + last; + } + sleep 1; + $waited++; + + # Check if process is still alive + my $result = waitpid($pid, WNOHANG); + if ($result == $pid) { + die "Server process died during startup. 
Check $log_file for errors.\n"; + } +} + +if ($waited >= $max_wait) { + kill 'TERM', $pid; + die "Timeout waiting for server to start. Check $log_file for errors.\n"; +} + +# Additional wait for server to be fully ready +sleep 2; + +# Write connection info to file +my $info_file = "$vardir/tmp/extra_server_$server_num.info"; +open my $fh, '>', $info_file or die "Cannot write $info_file: $!\n"; +print $fh "HOST=127.0.0.1\n"; +print $fh "PORT=$port\n"; +print $fh "SOCKET=$socket\n"; +print $fh "DATADIR=$datadir\n"; +print $fh "PID=$pid\n"; +print $fh "PID_FILE=$pid_file\n"; +print $fh "LOG_FILE=$log_file\n"; +print $fh "GENERAL_LOG_FILE=$general_log_file\n"; +close $fh; + +print "Extra server $server_num started successfully\n"; +print "Connection info written to $info_file\n"; +print " Host: 127.0.0.1\n"; +print " Port: $port\n"; +print " Socket: $socket\n"; +print " Datadir: $datadir\n"; +print " General log: $general_log_file\n"; + +exit 0; diff --git a/mysql-test/main/create.test b/mysql-test/main/create.test index 42bf890cb7510..d088fe2b50128 100644 --- a/mysql-test/main/create.test +++ b/mysql-test/main/create.test @@ -398,6 +398,7 @@ select ifnull(h,cast('yet another binary data' as binary)) as h, addtime(cast('1:0:0' as time),cast('1:0:0' as time)) as dd from t1; +--disable_replay next_query This is EXPLAIN TABLE. 
explain t2; select * from t2; drop table t1, t2; diff --git a/mysql-test/main/cte_recursive.test b/mysql-test/main/cte_recursive.test index 30f94667b2d02..768eb06fae5a6 100644 --- a/mysql-test/main/cte_recursive.test +++ b/mysql-test/main/cte_recursive.test @@ -2640,7 +2640,9 @@ where (dt.a1) in (with recursive cte as (select a2 from t2 where a2='2' select a2 from cte); eval $q; +--disable_replay next_query Dont support SPs eval explain $q; +--disable_replay next_query Dont support SPs --source include/analyze-format.inc eval analyze format=json $q; diff --git a/mysql-test/main/custom_aggregates_i_s.test b/mysql-test/main/custom_aggregates_i_s.test index dd5eb50d82ce6..15577d059f638 100644 --- a/mysql-test/main/custom_aggregates_i_s.test +++ b/mysql-test/main/custom_aggregates_i_s.test @@ -3,6 +3,7 @@ --source include/default_optimizer_switch.inc +--disable_replay testfile Don't support SPs. --disable_ps2_protocol flush status; diff --git a/mysql-test/main/derived.test b/mysql-test/main/derived.test index c5c27c940c840..cc4a75eab2ee8 100644 --- a/mysql-test/main/derived.test +++ b/mysql-test/main/derived.test @@ -1953,6 +1953,7 @@ explain format=json select * from v1 where if( a1 regexp 'def', 'foo', a2 ) create function f1(a int) returns int DETERMINISTIC return (a+1); create view v9 as select * from (select c1, f1(c2), sum(c3) from t1 group by c1, f1(c2)) as f (c1, c2, c3); +--disable_replay next_query Don't support SPs. 
explain format=json select * from v9 where (c3+1) > 10 and c1 > 1 and c2 > 123; drop function f1; diff --git a/mysql-test/main/derived_cond_pushdown.test b/mysql-test/main/derived_cond_pushdown.test index 70d5582e62c5a..102041f7ba8fd 100644 --- a/mysql-test/main/derived_cond_pushdown.test +++ b/mysql-test/main/derived_cond_pushdown.test @@ -2410,10 +2410,12 @@ select col2, col1 from v1 union all select col2, col1 from v2; +--disable_replay next_query Don't support SPs explain select * from v3 where col1=123; --echo # This must use ref accesses for reading table t1, not full scans: --source include/explain-no-costs.inc +--disable_replay next_query Don't support SPs explain format=json select * from v3 where col1=123 and col2=321; @@ -2438,6 +2440,7 @@ select a, f1(a), sum(b) from t1 group by a, f1(a); --echo # "a2>123" will be pushed into HAVING (as it refers to an SP call which --echo # prevents pushing it to the WHERE) --source include/explain-no-costs.inc +--disable_replay next_query Don't support SPs explain format=json select * from v2 where (s+1) > 10 AND a > 1 and a2>123; @@ -2451,6 +2454,7 @@ insert into t4 select a,a,a from t1; --echo # "attached_condition": "t1.a + 1 > 10", --echo # "having_condition": "`f1(a)` > 1 and `sum(b)` > 123", --source include/explain-no-costs.inc +--disable_replay next_query Don't support SPs explain format=json select * from t4 diff --git a/mysql-test/main/desc_index_range.test b/mysql-test/main/desc_index_range.test index 36f4c406dc39c..b158353bee65b 100644 --- a/mysql-test/main/desc_index_range.test +++ b/mysql-test/main/desc_index_range.test @@ -14,6 +14,7 @@ create table t1 ( insert into t1 select seq from seq_1_to_1000; set optimizer_trace=1; +--disable_replay next_query Need to preserve optimizer trace explain select * from t1 force index(a) where a in (2, 4, 6); #Enable after fix MDEV-32034 @@ -39,6 +40,7 @@ create table t1 ( insert into t1 select A.seq, B.seq*10 from seq_1_to_10 A, seq_1_to_10 B; set optimizer_trace=1; 
+--disable_replay next_query Need to preserve optimizer trace explain select * from t1 force index(ab) where a>=8 and b>=50; #Enable after fix MDEV-32034 --disable_view_protocol @@ -46,6 +48,7 @@ select json_detailed(json_extract(trace, '$**.range_access_plan.ranges')) as jd from information_schema.optimizer_trace; --enable_view_protocol +--disable_replay next_query Need to preserve optimizer trace explain select * from t1 force index(ab) where a>=8 and b<=50; #Enable after fix MDEV-32034 --disable_view_protocol @@ -56,6 +59,7 @@ from information_schema.optimizer_trace; select * from t1 force index(ab) where a>=8 and b<=50; select * from t1 ignore index(ab) where a>=8 and b<=50 order by a, b desc; +--disable_replay next_query Need to preserve optimizer trace explain select * from t1 where a between 2 and 4 and b between 50 and 80; #Enable after fix MDEV-32034 @@ -75,6 +79,7 @@ create table t2 ( ); insert into t2 select A.seq, B.seq*10 from seq_1_to_10 A, seq_1_to_10 B; +--disable_replay next_query Need to preserve optimizer trace explain select * from t2 where a between 2 and 4; #Enable after fix MDEV-32034 @@ -83,6 +88,7 @@ select json_detailed(json_extract(trace, '$**.range_access_plan.ranges')) as jd from information_schema.optimizer_trace; --enable_view_protocol +--disable_replay next_query Need to preserve optimizer trace explain select * from t2 where a between 2 and 4 and b between 50 and 80; #Enable after fix MDEV-32034 @@ -99,6 +105,7 @@ drop table t2; --echo # CREATE TABLE t1 (p int NOT NULL, a int NOT NULL, PRIMARY KEY (p,a desc)); insert into t1 select 2,seq from seq_0_to_1000; +--disable_replay next_query Need to preserve optimizer trace EXPLAIN select MIN(a) from t1 where p = 2 group by p; #Enable after fix MDEV-32034 --disable_view_protocol diff --git a/mysql-test/main/distinct_notembedded.test b/mysql-test/main/distinct_notembedded.test index 84d39f975a6e0..f4125ca204410 100644 --- a/mysql-test/main/distinct_notembedded.test +++ 
b/mysql-test/main/distinct_notembedded.test @@ -45,6 +45,7 @@ eval $trace; SELECT AVG(DISTINCT b / a) FROM t1; eval $trace; +--disable_replay next_query Need to preserve optimizer trace EXPLAIN SELECT COUNT(DISTINCT (SELECT a)) FROM t1; eval $trace; diff --git a/mysql-test/main/explain_json.test b/mysql-test/main/explain_json.test index 0d34b95070426..56ecf3c7d3f27 100644 --- a/mysql-test/main/explain_json.test +++ b/mysql-test/main/explain_json.test @@ -141,6 +141,8 @@ select * from t1 where a in (select max(a) from t1 group by b); create table t2 like t1; insert into t2 select * from t1; --source include/explain-no-costs.inc + +--disable_replay next_query Will try to create table for sequence engine explain format=json select * from t1,t2 where t1.a in ( select seq+0 from seq_1_to_100); diff --git a/mysql-test/main/extra_server_example.result b/mysql-test/main/extra_server_example.result new file mode 100644 index 0000000000000..0c6c39cb46a9e --- /dev/null +++ b/mysql-test/main/extra_server_example.result @@ -0,0 +1,34 @@ +include/start_extra_server.inc [server 1] +Executing: perl MYSQL_TEST_DIR/lib/start_extra_server.pl 1 +Copying MYSQLTEST_VARDIR/install.db to MYSQLTEST_VARDIR/extra_server_1/data... +Starting mysqld on port PORT with socket SOCKET... +Command: MYSQLD --no-defaults --datadir=MYSQLTEST_VARDIR/extra_server_1/data --port=PORT --socket=SOCKET --pid-file=MYSQLTEST_VARDIR/extra_server_1/mysqld.pid --log-error=MYSQLTEST_VARDIR/log/extra_server_1.err --skip-networking=0 --skip-grant-tables --default-storage-engine=myisam --loose-skip-innodb --key-buffer-size=1M --sort-buffer-size=256K --max-heap-table-size=1M +Server process started with PID PID +Waiting for server to be ready... 
+Socket file created: SOCKET +Extra server 1 started successfully +Connection info written to MYSQLTEST_VARDIR/tmp/extra_server_1.info + Host: 127.0.0.1 + Port: PORT + Socket: SOCKET + Datadir: MYSQLTEST_VARDIR/extra_server_1/data +Extra server 1 connection info: + PORT=PORT + SOCKET=SOCKET +include/start_extra_server.inc [server 1] +status +Connected to extra server +CREATE TABLE t1 (id INT); +INSERT INTO t1 VALUES (1), (2), (3); +SELECT * FROM t1; +id +1 +2 +3 +status +On default server +include/stop_extra_server.inc [server 1] +Stopping extra server 1 (PID: PID)... +Server process PID has exited +Extra server 1 stopped +include/stop_extra_server.inc [server 1] diff --git a/mysql-test/main/extra_server_example.test.DISABLED b/mysql-test/main/extra_server_example.test.DISABLED new file mode 100644 index 0000000000000..e11be32432872 --- /dev/null +++ b/mysql-test/main/extra_server_example.test.DISABLED @@ -0,0 +1,22 @@ +# Test starting an extra server instance +--source include/not_embedded.inc + +# Start extra server +--let $extra_server_num= 1 +--source include/start_extra_server.inc + +# Connect to it +--connect (extra1, 127.0.0.1, root, , test, $EXTRA_SERVER_PORT) +SELECT "Connected to extra server" AS status; +CREATE TABLE t1 (id INT); +INSERT INTO t1 VALUES (1), (2), (3); +SELECT * FROM t1; + +# Back to default +--connection default +SELECT "On default server" AS status; + +# Stop extra server +--disconnect extra1 +--let $extra_server_num= 1 +--source include/stop_extra_server.inc diff --git a/mysql-test/main/greedy_optimizer.test b/mysql-test/main/greedy_optimizer.test index 34b5a571682b5..365058b01f07d 100644 --- a/mysql-test/main/greedy_optimizer.test +++ b/mysql-test/main/greedy_optimizer.test @@ -1,5 +1,7 @@ --source include/have_innodb.inc +--disable_replay testfile Uses Last_query_cost and counters for most queries. + # # A simple test of the greedy query optimization algorithm and the switches that # controls the optimization process. 
diff --git a/mysql-test/main/having.test b/mysql-test/main/having.test index 98b5ed851fbf7..32fdbed5cc0f2 100644 --- a/mysql-test/main/having.test +++ b/mysql-test/main/having.test @@ -898,6 +898,7 @@ CREATE TABLE t1 (t INT, u INT, KEY(t)); INSERT INTO t1 VALUES(10, 10), (11, 11), (12, 12), (12, 13),(14, 15), (15, 16), (16, 17), (17, 17); ANALYZE TABLE t1; +--disable_replay next_query Don't support SPs explain SELECT t, next_seq_value() r FROM t1 FORCE INDEX(t) GROUP BY t HAVING r = 1 ORDER BY t1.u; --disable_ps2_protocol SELECT t, next_seq_value() r FROM t1 FORCE INDEX(t) GROUP BY t HAVING r = 1 ORDER BY t1.u; diff --git a/mysql-test/main/having_cond_pushdown.test b/mysql-test/main/having_cond_pushdown.test index faf6e51192163..25da708bf00f5 100644 --- a/mysql-test/main/having_cond_pushdown.test +++ b/mysql-test/main/having_cond_pushdown.test @@ -255,6 +255,7 @@ GROUP BY t1.a HAVING (t1.a>1) AND (a=test.f1()); eval $no_pushdown $query; eval $query; +--disable_replay next_query Dont support SPs eval explain $query; --source include/explain-no-costs.inc eval explain format=json $query; @@ -264,6 +265,7 @@ FROM t1 WHERE (t1.a>1) GROUP BY t1.a HAVING (a=test.f1()); +--disable_replay next_query Dont support SPs --source include/explain-no-costs.inc eval $no_pushdown explain format=json $query; diff --git a/mysql-test/main/index_merge_innodb_notembedded.test b/mysql-test/main/index_merge_innodb_notembedded.test index eb38108562f35..78bb6471dd003 100644 --- a/mysql-test/main/index_merge_innodb_notembedded.test +++ b/mysql-test/main/index_merge_innodb_notembedded.test @@ -24,6 +24,7 @@ INSERT INTO t1(id,title,status,country_code) SELECT seq, CONCAT('abc', seq), seq%10, CONCAT('C', seq%5) FROM seq_1_to_500; --echo # This must not use index_merge: +--disable_replay next_query Need to preserve optimizer trace EXPLAIN SELECT * FROM t1 WHERE country_code ='C1' and `status` =1; set @trace= (select JSON_EXTRACT(trace, '$**.range_scan_alternatives[*]') @@ -50,6 +51,7 @@ INSERT 
INTO t1(id,title,status,country_code) SELECT seq, CONCAT('abc', seq), seq%10, CONCAT('C', seq%5) FROM seq_1_to_500; --echo # Must not use index_merge: +--disable_replay next_query Need to preserve optimizer trace EXPLAIN SELECT * FROM t1 WHERE country_code ='C1' and status = 1; @@ -75,6 +77,7 @@ INSERT INTO t1(id,title,status,country_code) SELECT seq, CONCAT('abc', seq), seq%10, CONCAT('C', seq%5) FROM seq_1_to_500; --echo # Must not use index_merge: +--disable_replay next_query Need to preserve optimizer trace EXPLAIN SELECT * FROM t1 WHERE country_code ='C1' and status = 1; diff --git a/mysql-test/main/join.test b/mysql-test/main/join.test index efdbf6724d963..1413d25aedca6 100644 --- a/mysql-test/main/join.test +++ b/mysql-test/main/join.test @@ -663,6 +663,7 @@ flush status; --disable_ps2_protocol select * from t1, t2, t3 where t3.a=t1.a and t2.a=t1.b; --enable_ps2_protocol +--disable_replay testfile Need to preserve counters explain select * from t1, t2, t3 where t3.a=t1.a and t2.a=t1.b; --echo We expect rnd_next=5, and read_key must be 0 because of short-cutting: show status like 'Handler_read%'; @@ -695,6 +696,7 @@ insert into t1 values (0),(1),(2),(3),(4),(5),(6),(7),(8),(9); create table t2 (a int, b int, primary key(a)); insert into t2 select @v:=A.a+10*B.a, @v from t1 A, t1 B; +--disable_replay testfile Need to preserve counters explain select * from t1; show status like 'Last_query_cost'; select 'The cost of accessing t1 (dont care if it changes' '^'; @@ -703,6 +705,7 @@ select 'vv: Following query must use ALL(t1), eq_ref(A), eq_ref(B): vv' Z; select @@myisam.optimizer_disk_read_ratio; set global myisam.optimizer_disk_read_ratio=0; +--disable_replay testfile Need to preserve counters explain select * from t1, t2 A, t2 B where A.a = t1.a and B.a=A.b; show status like 'Last_query_cost'; select '^^: The above should be ~= 40 + cost(select * from t1). 
Value less than 40 is an error' Z; diff --git a/mysql-test/main/join_cache_cardinality.test b/mysql-test/main/join_cache_cardinality.test index b178810b80764..9a0e9bb52fba8 100644 --- a/mysql-test/main/join_cache_cardinality.test +++ b/mysql-test/main/join_cache_cardinality.test @@ -1,5 +1,7 @@ --source include/have_sequence.inc +--disable_replay testfile Uses Optimizer Trace for most queries. + # Embedded doesn't have optimizer trace: --source include/not_embedded.inc diff --git a/mysql-test/main/join_outer.test b/mysql-test/main/join_outer.test index 6686d3e939ec9..e6d74e6be6dfe 100644 --- a/mysql-test/main/join_outer.test +++ b/mysql-test/main/join_outer.test @@ -1906,6 +1906,7 @@ SELECT t1.x, t2.x, IFNULL(t2.x,0), f(t2.x,0) FROM t t1 LEFT JOIN t t2 ON t1.x = t2.x WHERE IFNULL(t2.x,0)=0; +--disable_replay next_query Don't support SPs. explain extended SELECT t1.x, t2.x, IFNULL(t2.x,0), f(t2.x,0) FROM t t1 LEFT JOIN t t2 @@ -1915,6 +1916,7 @@ SELECT t1.x, t2.x, IFNULL(t2.x,0), f(t2.x,0) FROM t t1 LEFT JOIN t t2 ON t1.x = t2.x WHERE f(t2.x,0)=0; +--disable_replay next_query Don't support SPs. explain extended SELECT t1.x, t2.x, IFNULL(t2.x,0), f(t2.x,0) FROM t t1 LEFT JOIN t t2 @@ -1969,6 +1971,7 @@ SELECT t1.col1, t2.col1, t2.col3 FROM t1 LEFT OUTER JOIN t2 ON t1.col1 = t2.col2 WHERE f1(t2.col3,0) = 0; eval $q2; +--disable_replay next_query Don't support SPs. 
eval EXPLAIN EXTENDED $q2; DROP FUNCTION f1; diff --git a/mysql-test/main/opt_context_load_stats_basic.result b/mysql-test/main/opt_context_load_stats_basic.result index f6250af5faa45..054dc827ed7aa 100644 --- a/mysql-test/main/opt_context_load_stats_basic.result +++ b/mysql-test/main/opt_context_load_stats_basic.result @@ -32,10 +32,10 @@ context, '(?<=set @opt_context=\')([\n\r].*)*(?=\'\;#opt_context_ends)') from information_schema.optimizer_context); set @records= -(select JSON_DETAILED(JSON_EXTRACT(@opt_context, '$**.num_of_records'))); +(select JSON_DETAILED(JSON_EXTRACT(@opt_context, '$**.file_stat_records'))); select *from json_table(@records, -'$[*]' columns(num_of_records text path '$')) as jt; -num_of_records +'$[*]' columns(file_stat_records text path '$')) as jt; +file_stat_records 20 set @file_stat_records= (select JSON_DETAILED(JSON_EXTRACT(@opt_context, '$**.file_stat_records'))); @@ -129,7 +129,7 @@ from information_schema.optimizer_context); select JSON_EQUALS(@saved_opt_context_1, @opt_context); JSON_EQUALS(@saved_opt_context_1, @opt_context) 0 -set @records= (select JSON_DETAILED(JSON_EXTRACT(@opt_context, '$**.num_of_records'))); +set @records= (select JSON_DETAILED(JSON_EXTRACT(@opt_context, '$**.file_stat_records'))); set @indexes=(select JSON_DETAILED(JSON_EXTRACT(@opt_context, '$**.indexes'))); set @list_ranges= (select JSON_DETAILED(JSON_EXTRACT(@opt_context, '$**.list_ranges'))); set @saved_records_2 = @records; @@ -138,8 +138,8 @@ set @saved_list_ranges_2 = @list_ranges; select JSON_EQUALS(@saved_records_2, @saved_records_1); JSON_EQUALS(@saved_records_2, @saved_records_1) 0 -select * from json_table(@records, '$[*]' columns(num_of_records text path '$')) as jt; -num_of_records +select * from json_table(@records, '$[*]' columns(file_stat_records text path '$')) as jt; +file_stat_records 40 select JSON_EQUALS(@saved_indexes_2, @saved_indexes_1); JSON_EQUALS(@saved_indexes_2, @saved_indexes_1) @@ -240,7 +240,7 @@ from 
information_schema.optimizer_context); select JSON_EQUALS(@saved_opt_context_1, @opt_context); JSON_EQUALS(@saved_opt_context_1, @opt_context) 0 -set @records= (select JSON_DETAILED(JSON_EXTRACT(@opt_context, '$**.num_of_records'))); +set @records= (select JSON_DETAILED(JSON_EXTRACT(@opt_context, '$**.file_stat_records'))); set @indexes=(select JSON_DETAILED(JSON_EXTRACT(@opt_context, '$**.indexes'))); set @list_ranges= (select JSON_DETAILED(JSON_EXTRACT(@opt_context, '$**.list_ranges'))); select JSON_EQUALS(@saved_records_2, @records); @@ -273,7 +273,7 @@ set @opt_context= context, '(?<=set @opt_context=\')([\n\r].*)*(?=\'\;#opt_context_ends)') from information_schema.optimizer_context); -set @records= (select JSON_DETAILED(JSON_EXTRACT(@opt_context, '$**.num_of_records'))); +set @records= (select JSON_DETAILED(JSON_EXTRACT(@opt_context, '$**.file_stat_records'))); set @indexes=(select JSON_DETAILED(JSON_EXTRACT(@opt_context, '$**.indexes'))); set @list_ranges= (select JSON_DETAILED(JSON_EXTRACT(@opt_context, '$**.list_ranges'))); select JSON_EQUALS(@saved_records_2, @records); @@ -343,63 +343,63 @@ set @opt_context=json_remove(@saved_opt_context_1, '$.list_contexts[0].name'); select * from t1 where a > 10; a b Warnings: -Warning 4253 Failed to parse saved optimizer context: "name" element not present at offset 1409. +Warning 4253 Failed to parse saved optimizer context: "name" element not present at offset 1387. set @opt_context=json_remove(@saved_opt_context_1, '$.list_contexts[0].ddl'); select * from t1 where a > 10; a b Warnings: Warning 4254 Failed to match the stats from replay context with the optimizer stats: the given list of ranges i.e. [(10) < (a), ] doesn't exist in the list of ranges for table_name db1.t1 and index_name t1_idx_a Warning 4254 Failed to match the stats from replay context with the optimizer stats: the given list of ranges i.e. 
[(10) < (a), ] doesn't exist in the list of ranges for table_name db1.t1 and index_name t1_idx_ab -set @opt_context=json_remove(@saved_opt_context_1, '$.list_contexts[0].num_of_records'); +set @opt_context=json_remove(@saved_opt_context_1, '$.list_contexts[0].file_stat_records'); select * from t1 where a > 10; a b Warnings: -Warning 4253 Failed to parse saved optimizer context: "num_of_records" element not present at offset 1405. +Warning 4253 Failed to parse saved optimizer context: "file_stat_records" element not present at offset 1380. set @opt_context=json_remove(@saved_opt_context_1, '$.list_contexts[0].file_stat_records'); select * from t1 where a > 10; a b Warnings: -Warning 4253 Failed to parse saved optimizer context: "file_stat_records" element not present at offset 1402. +Warning 4253 Failed to parse saved optimizer context: "file_stat_records" element not present at offset 1380. set @opt_context=json_remove(@saved_opt_context_1, '$.list_contexts[0].indexes[0].index_name'); select * from t1 where a > 10; a b Warnings: -Warning 4253 Failed to parse saved optimizer context: "index_name" element not present at offset 166. +Warning 4253 Failed to parse saved optimizer context: "index_name" element not present at offset 144. set @opt_context=json_remove(@saved_opt_context_1, '$.list_contexts[0].indexes[0].rec_per_key'); select * from t1 where a > 10; a b Warnings: -Warning 4253 Failed to parse saved optimizer context: "rec_per_key" element not present at offset 170. +Warning 4253 Failed to parse saved optimizer context: "rec_per_key" element not present at offset 148. set @opt_context=json_remove(@saved_opt_context_1, '$.list_contexts[0].list_ranges[0].index_name'); select * from t1 where a > 10; a b Warnings: -Warning 4253 Failed to parse saved optimizer context: "index_name" element not present at offset 643. +Warning 4253 Failed to parse saved optimizer context: "index_name" element not present at offset 621. 
set @opt_context=json_remove(@saved_opt_context_1, '$.list_contexts[0].list_ranges[0].ranges'); select * from t1 where a > 10; a b Warnings: -Warning 4253 Failed to parse saved optimizer context: "ranges" element not present at offset 635. +Warning 4253 Failed to parse saved optimizer context: "ranges" element not present at offset 613. set @opt_context=json_remove(@saved_opt_context_1, '$.list_contexts[0].list_ranges[0].num_rows'); select * from t1 where a > 10; a b Warnings: -Warning 4253 Failed to parse saved optimizer context: "num_rows" element not present at offset 653. +Warning 4253 Failed to parse saved optimizer context: "num_rows" element not present at offset 631. set @opt_context=json_remove(@saved_opt_context_1, '$.list_contexts[0].list_ranges[0].cost'); select * from t1 where a > 10; a b Warnings: -Warning 4253 Failed to parse saved optimizer context: "cost" element not present at offset 437. +Warning 4253 Failed to parse saved optimizer context: "cost" element not present at offset 415. set @opt_context=json_remove(@saved_opt_context_1, '$.list_contexts[0].list_ranges[0].max_index_blocks'); select * from t1 where a > 10; a b Warnings: -Warning 4253 Failed to parse saved optimizer context: "max_index_blocks" element not present at offset 646. +Warning 4253 Failed to parse saved optimizer context: "max_index_blocks" element not present at offset 624. set @opt_context=json_remove(@saved_opt_context_1, '$.list_contexts[0].list_ranges[0].max_row_blocks'); select * from t1 where a > 10; a b Warnings: -Warning 4253 Failed to parse saved optimizer context: "max_row_blocks" element not present at offset 648. +Warning 4253 Failed to parse saved optimizer context: "max_row_blocks" element not present at offset 626. 
set @opt_context=json_remove(@saved_opt_context_1, '$.list_contexts[0].indexes[0]'); select * from t1 where a > 10; a b @@ -418,6 +418,9 @@ select * from t1 where a > 10; a b Warnings: Warning 4254 Failed to match the stats from replay context with the optimizer stats: db1.t1 doesn't exist in list of table contexts +Warning 4254 Failed to match the stats from replay context with the optimizer stats: db1.t1.t1_idx_a doesn't exist in list of index contexts +Warning 4254 Failed to match the stats from replay context with the optimizer stats: db1.t1.t1_idx_b doesn't exist in list of index contexts +Warning 4254 Failed to match the stats from replay context with the optimizer stats: db1.t1.t1_idx_ab doesn't exist in list of index contexts Warning 4254 Failed to match the stats from replay context with the optimizer stats: db1.t1 doesn't exist in list of table contexts Warning 4254 Failed to match the stats from replay context with the optimizer stats: db1.t1.t1_idx_a doesn't exist in list of range contexts Warning 4254 Failed to match the stats from replay context with the optimizer stats: db1.t1.t1_idx_ab doesn't exist in list of range contexts @@ -438,51 +441,51 @@ set @opt_context=json_remove(@saved_opt_context_1, '$.list_contexts[0].list_inde select * from t1 where a > 10; a b Warnings: -Warning 4253 Failed to parse saved optimizer context: "key_number" element not present at offset 535. +Warning 4253 Failed to parse saved optimizer context: "key_number" element not present at offset 513. set @opt_context=json_remove(@saved_opt_context_1, '$.list_contexts[0].list_index_read_costs[0].num_records'); select * from t1 where a > 10; a b Warnings: -Warning 4253 Failed to parse saved optimizer context: "num_records" element not present at offset 534. +Warning 4253 Failed to parse saved optimizer context: "num_records" element not present at offset 512. 
set @opt_context=json_remove(@saved_opt_context_1, '$.list_contexts[0].list_index_read_costs[0].eq_ref'); select * from t1 where a > 10; a b Warnings: -Warning 4253 Failed to parse saved optimizer context: "eq_ref" element not present at offset 539. +Warning 4253 Failed to parse saved optimizer context: "eq_ref" element not present at offset 517. set @opt_context=json_remove(@saved_opt_context_1, '$.list_contexts[0].list_index_read_costs[0].index_cost_io'); select * from t1 where a > 10; a b Warnings: -Warning 4253 Failed to parse saved optimizer context: "index_cost_io" element not present at offset 532. +Warning 4253 Failed to parse saved optimizer context: "index_cost_io" element not present at offset 510. set @opt_context=json_remove(@saved_opt_context_1, '$.list_contexts[0].list_index_read_costs[0].index_cost_cpu'); select * from t1 where a > 10; a b Warnings: -Warning 4253 Failed to parse saved optimizer context: "index_cost_cpu" element not present at offset 521. +Warning 4253 Failed to parse saved optimizer context: "index_cost_cpu" element not present at offset 499. set @opt_context=json_remove(@saved_opt_context_1, '$.list_contexts[0].list_index_read_costs[0].row_cost_io'); select * from t1 where a > 10; a b Warnings: -Warning 4253 Failed to parse saved optimizer context: "row_cost_io" element not present at offset 534. +Warning 4253 Failed to parse saved optimizer context: "row_cost_io" element not present at offset 512. set @opt_context=json_remove(@saved_opt_context_1, '$.list_contexts[0].list_index_read_costs[0].row_cost_cpu'); select * from t1 where a > 10; a b Warnings: -Warning 4253 Failed to parse saved optimizer context: "row_cost_cpu" element not present at offset 523. +Warning 4253 Failed to parse saved optimizer context: "row_cost_cpu" element not present at offset 501. 
set @opt_context=json_remove(@saved_opt_context_1, '$.list_contexts[0].list_index_read_costs[0].max_index_blocks'); select * from t1 where a > 10; a b Warnings: -Warning 4253 Failed to parse saved optimizer context: "max_index_blocks" element not present at offset 529. +Warning 4253 Failed to parse saved optimizer context: "max_index_blocks" element not present at offset 507. set @opt_context=json_remove(@saved_opt_context_1, '$.list_contexts[0].list_index_read_costs[0].max_row_blocks'); select * from t1 where a > 10; a b Warnings: -Warning 4253 Failed to parse saved optimizer context: "max_row_blocks" element not present at offset 531. +Warning 4253 Failed to parse saved optimizer context: "max_row_blocks" element not present at offset 509. set @opt_context=json_remove(@saved_opt_context_1, '$.list_contexts[0].list_index_read_costs[0].copy_cost'); select * from t1 where a > 10; a b Warnings: -Warning 4253 Failed to parse saved optimizer context: "copy_cost" element not present at offset 536. +Warning 4253 Failed to parse saved optimizer context: "copy_cost" element not present at offset 514. 
drop table t1; drop database db1; diff --git a/mysql-test/main/opt_context_load_stats_basic.test b/mysql-test/main/opt_context_load_stats_basic.test index 2233e2b7c9f8a..323648fe625cd 100644 --- a/mysql-test/main/opt_context_load_stats_basic.test +++ b/mysql-test/main/opt_context_load_stats_basic.test @@ -1,6 +1,9 @@ --source include/not_embedded.inc --source include/have_sequence.inc --echo #enable optimizer_record_context + +--disable_replay testfile Don't replay a replay test + set optimizer_record_context=ON; set @saved_opt_context_var_name_1= 'saved_opt_context_1'; set @saved_opt_context_var_name_2= 'saved_opt_context_2'; @@ -76,7 +79,7 @@ select * from t1 where a < 3 and b > 6; select JSON_EQUALS(@saved_opt_context_1, @opt_context); -set @records= (select JSON_DETAILED(JSON_EXTRACT(@opt_context, '$**.num_of_records'))); +set @records= (select JSON_DETAILED(JSON_EXTRACT(@opt_context, '$**.file_stat_records'))); set @indexes=(select JSON_DETAILED(JSON_EXTRACT(@opt_context, '$**.indexes'))); set @list_ranges= (select JSON_DETAILED(JSON_EXTRACT(@opt_context, '$**.list_ranges'))); @@ -85,7 +88,7 @@ set @saved_indexes_2 = @indexes; set @saved_list_ranges_2 = @list_ranges; select JSON_EQUALS(@saved_records_2, @saved_records_1); -select * from json_table(@records, '$[*]' columns(num_of_records text path '$')) as jt; +select * from json_table(@records, '$[*]' columns(file_stat_records text path '$')) as jt; select JSON_EQUALS(@saved_indexes_2, @saved_indexes_1); select JSON_EQUALS(@saved_list_ranges_2, @saved_list_ranges_1); select * from json_table( @@ -150,7 +153,7 @@ select * from t1 where a < 3 and b > 6; select JSON_EQUALS(@saved_opt_context_1, @opt_context); -set @records= (select JSON_DETAILED(JSON_EXTRACT(@opt_context, '$**.num_of_records'))); +set @records= (select JSON_DETAILED(JSON_EXTRACT(@opt_context, '$**.file_stat_records'))); set @indexes=(select JSON_DETAILED(JSON_EXTRACT(@opt_context, '$**.indexes'))); set @list_ranges= (select 
JSON_DETAILED(JSON_EXTRACT(@opt_context, '$**.list_ranges'))); @@ -171,7 +174,7 @@ select * from t1 where a < 3 and b > 6; --source include/get_opt_context.inc -set @records= (select JSON_DETAILED(JSON_EXTRACT(@opt_context, '$**.num_of_records'))); +set @records= (select JSON_DETAILED(JSON_EXTRACT(@opt_context, '$**.file_stat_records'))); set @indexes=(select JSON_DETAILED(JSON_EXTRACT(@opt_context, '$**.indexes'))); set @list_ranges= (select JSON_DETAILED(JSON_EXTRACT(@opt_context, '$**.list_ranges'))); @@ -220,7 +223,7 @@ select * from t1 where a > 10; set @opt_context=json_remove(@saved_opt_context_1, '$.list_contexts[0].ddl'); select * from t1 where a > 10; -set @opt_context=json_remove(@saved_opt_context_1, '$.list_contexts[0].num_of_records'); +set @opt_context=json_remove(@saved_opt_context_1, '$.list_contexts[0].file_stat_records'); select * from t1 where a > 10; set @opt_context=json_remove(@saved_opt_context_1, '$.list_contexts[0].file_stat_records'); diff --git a/mysql-test/main/opt_context_replay_basic.result b/mysql-test/main/opt_context_replay_basic.result index 29107d5a98cbb..f344e36364330 100644 --- a/mysql-test/main/opt_context_replay_basic.result +++ b/mysql-test/main/opt_context_replay_basic.result @@ -121,6 +121,9 @@ CREATE DATABASE IF NOT EXISTS db1; Warnings: Note 1007 Can't create database 'db1'; database exists USE db1; +DROP TABLE IF EXISTS db1.t1; +Warnings: +Note 1051 Unknown table 'db1.t1' CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` int(11) DEFAULT NULL, @@ -133,7 +136,6 @@ set @opt_context=' "list_contexts": [ { "name": "db1.t1", - "num_of_records": 0, "file_stat_records": 20, "read_cost_io": 0, "read_cost_cpu": 0.0100356, @@ -156,6 +158,8 @@ set @opt_context=' } '; set optimizer_replay_context='opt_context'; +SET character_set_client=latin1; +SET NAMES latin1 COLLATE latin1_swedish_ci; select count(*) from t1; count(*) 0 @@ -231,5 +235,72 @@ EXPLAIN ] } } +set optimizer_replay_context=''; +drop table t1; +# +# MDEV-39435: Server 
crash : Assertion `table_records || !head->file->stats.records' failed +# +CREATE TABLE t1 (a INT, PRIMARY KEY(a)); +INSERT INTO t1 VALUES (1),(2),(3); +set optimizer_record_context=ON; +EXPLAIN SELECT * FROM t1 WHERE a IN +((SELECT MAX(a) FROM t1), (SELECT MAX(a) FROM t1)); +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY t1 range PRIMARY PRIMARY 4 NULL 1 Using where; Using index +3 SUBQUERY NULL NULL NULL NULL NULL NULL NULL Select tables optimized away +2 SUBQUERY NULL NULL NULL NULL NULL NULL NULL Select tables optimized away +select context into dumpfile "../../tmp/dump1.sql" +from information_schema.optimizer_context; +set optimizer_record_context=OFF; +drop table t1; +drop table t1; +# +# MDEV-39409: Context replay doesnt handle MIN/MAX optimization +# +CREATE TABLE t1 (a int PRIMARY KEY, b int); +INSERT INTO t1 VALUES (2,20), (3,10), (1,10), (0,30), (5,10); +set optimizer_record_context=1; +EXPLAIN SELECT MAX(a) FROM t1; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE NULL NULL NULL NULL NULL NULL NULL Select tables optimized away +select context into dumpfile "../../tmp/dump1.sql" +from information_schema.optimizer_context; +set optimizer_record_context=0; +drop table t1; +set optimizer_replay_context='opt_context'; +# Same query as above, must have same explain: +EXPLAIN SELECT MAX(a) FROM t1; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE NULL NULL NULL NULL NULL NULL NULL Select tables optimized away +set optimizer_replay_context=''; +drop table t1; +# +# MDEV-39412: Failed to parse saved optimizer context: error reading ranges value +# +set optimizer_record_context=0; +CREATE TABLE t1( +a VARCHAR(8), +b VARCHAR(8), +KEY(A), +KEY(B) +); +INSERT INTO t1 SELECT REPEAT('a',8), REPEAT('b',8) FROM seq_1_to_10; +set optimizer_record_context=1; +EXPLAIN +SELECT * FROM t1 FORCE INDEX(a,b) WHERE a LIKE 'a%' OR b LIKE 'b%' +ORDER BY a,b; +id select_type table type 
possible_keys key key_len ref rows Extra +1 SIMPLE t1 index_merge a,b a,b 35,35 NULL 10 Using sort_union(a,b); Using where; Using filesort +select context into dumpfile "../../tmp/dump1.sql" +from information_schema.optimizer_context; +drop table t1; +set optimizer_replay_context='opt_context'; +# Same query as above, must have same explain: +EXPLAIN +SELECT * FROM t1 FORCE INDEX(a,b) WHERE a LIKE 'a%' OR b LIKE 'b%' +ORDER BY a,b; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 index_merge a,b a,b 35,35 NULL 10 Using sort_union(a,b); Using where; Using filesort +set optimizer_replay_context=''; drop table t1; drop database db1; diff --git a/mysql-test/main/opt_context_replay_basic.test b/mysql-test/main/opt_context_replay_basic.test index fa1d6fba3c8c0..2575bd4f92802 100644 --- a/mysql-test/main/opt_context_replay_basic.test +++ b/mysql-test/main/opt_context_replay_basic.test @@ -4,6 +4,8 @@ --echo #enable optimizer_record_context set optimizer_record_context=ON; +--disable_replay testfile Dont replay a replay test + create database db1; use db1; @@ -66,5 +68,93 @@ set optimizer_replay_context='opt_context'; --echo # Same query as above, must have same explain: EXPLAIN FORMAT=JSON SELECT * FROM t1 WHERE a = 1; +--remove_file "$MYSQLTEST_VARDIR/tmp/dump1.sql" +set optimizer_replay_context=''; +drop table t1; + +--echo # +--echo # MDEV-39435: Server crash : Assertion `table_records || !head->file->stats.records' failed +--echo # +CREATE TABLE t1 (a INT, PRIMARY KEY(a)); +INSERT INTO t1 VALUES (1),(2),(3); + +set optimizer_record_context=ON; +EXPLAIN SELECT * FROM t1 WHERE a IN + ((SELECT MAX(a) FROM t1), (SELECT MAX(a) FROM t1)); +select context into dumpfile "../../tmp/dump1.sql" +from information_schema.optimizer_context; +set optimizer_record_context=OFF; +drop table t1; + +--disable_query_log +--disable_result_log +--source "$MYSQLTEST_VARDIR/tmp/dump1.sql" +--enable_query_log +--enable_result_log + +--remove_file 
"$MYSQLTEST_VARDIR/tmp/dump1.sql" +drop table t1; + +--echo # +--echo # MDEV-39409: Context replay doesnt handle MIN/MAX optimization +--echo # +CREATE TABLE t1 (a int PRIMARY KEY, b int); +INSERT INTO t1 VALUES (2,20), (3,10), (1,10), (0,30), (5,10); + +set optimizer_record_context=1; +EXPLAIN SELECT MAX(a) FROM t1; +select context into dumpfile "../../tmp/dump1.sql" +from information_schema.optimizer_context; +set optimizer_record_context=0; +drop table t1; +--disable_query_log +--disable_result_log +--source "$MYSQLTEST_VARDIR/tmp/dump1.sql" +--enable_query_log +--enable_result_log +set optimizer_replay_context='opt_context'; +--echo # Same query as above, must have same explain: +EXPLAIN SELECT MAX(a) FROM t1; + +set optimizer_replay_context=''; +--remove_file "$MYSQLTEST_VARDIR/tmp/dump1.sql" drop table t1; + +--echo # +--echo # MDEV-39412: Failed to parse saved optimizer context: error reading ranges value +--echo # +set optimizer_record_context=0; + +CREATE TABLE t1( + a VARCHAR(8), + b VARCHAR(8), + KEY(A), + KEY(B) +); +INSERT INTO t1 SELECT REPEAT('a',8), REPEAT('b',8) FROM seq_1_to_10; + +set optimizer_record_context=1; +EXPLAIN +SELECT * FROM t1 FORCE INDEX(a,b) WHERE a LIKE 'a%' OR b LIKE 'b%' +ORDER BY a,b; + +select context into dumpfile "../../tmp/dump1.sql" +from information_schema.optimizer_context; +drop table t1; + +--disable_query_log +--disable_result_log +--source "$MYSQLTEST_VARDIR/tmp/dump1.sql" +--enable_query_log +--enable_result_log +set optimizer_replay_context='opt_context'; +--echo # Same query as above, must have same explain: +EXPLAIN +SELECT * FROM t1 FORCE INDEX(a,b) WHERE a LIKE 'a%' OR b LIKE 'b%' +ORDER BY a,b; + +set optimizer_replay_context=''; +--remove_file "$MYSQLTEST_VARDIR/tmp/dump1.sql" +drop table t1; + drop database db1; diff --git a/mysql-test/main/opt_context_replay_innodb_comp.result b/mysql-test/main/opt_context_replay_innodb_comp.result index fa52d874df565..07bd83d0484c7 100644 --- 
a/mysql-test/main/opt_context_replay_innodb_comp.result +++ b/mysql-test/main/opt_context_replay_innodb_comp.result @@ -106,6 +106,8 @@ Warnings: Warning 4200 The setting 'optimizer_adjust_secondary_key_costs' is ignored. It only exists for compatibility with old installations and will be removed in a future release Warnings: Note 1007 Can't create database 'db1'; database exists +Warnings: +Note 1051 Unknown table 'db1.t1' EXPLAIN { "query_block": { @@ -228,6 +230,8 @@ Warnings: Warning 4200 The setting 'optimizer_adjust_secondary_key_costs' is ignored. It only exists for compatibility with old installations and will be removed in a future release Warnings: Note 1007 Can't create database 'db1'; database exists +Warnings: +Note 1051 Unknown table 'db1.t1' EXPLAIN { "query_block": { @@ -345,6 +349,8 @@ Warnings: Warning 4200 The setting 'optimizer_adjust_secondary_key_costs' is ignored. It only exists for compatibility with old installations and will be removed in a future release Warnings: Note 1007 Can't create database 'db1'; database exists +Warnings: +Note 1051 Unknown table 'db1.t1' EXPLAIN { "query_block": { @@ -466,6 +472,8 @@ Warnings: Warning 4200 The setting 'optimizer_adjust_secondary_key_costs' is ignored. It only exists for compatibility with old installations and will be removed in a future release Warnings: Note 1007 Can't create database 'db1'; database exists +Warnings: +Note 1051 Unknown table 'db1.t1' EXPLAIN { "query_block": { @@ -591,6 +599,8 @@ Warnings: Warning 4200 The setting 'optimizer_adjust_secondary_key_costs' is ignored. It only exists for compatibility with old installations and will be removed in a future release Warnings: Note 1007 Can't create database 'db1'; database exists +Warnings: +Note 1051 Unknown table 'db1.t1' EXPLAIN { "query_block": { @@ -712,6 +722,8 @@ Warnings: Warning 4200 The setting 'optimizer_adjust_secondary_key_costs' is ignored. 
It only exists for compatibility with old installations and will be removed in a future release Warnings: Note 1007 Can't create database 'db1'; database exists +Warnings: +Note 1051 Unknown table 'db1.t1' EXPLAIN { "query_block": { @@ -818,6 +830,8 @@ Warnings: Warning 4200 The setting 'optimizer_adjust_secondary_key_costs' is ignored. It only exists for compatibility with old installations and will be removed in a future release Warnings: Note 1007 Can't create database 'db1'; database exists +Warnings: +Note 1051 Unknown table 'db1.t1' EXPLAIN { "query_block": { @@ -914,6 +928,8 @@ Warnings: Warning 4200 The setting 'optimizer_adjust_secondary_key_costs' is ignored. It only exists for compatibility with old installations and will be removed in a future release Warnings: Note 1007 Can't create database 'db1'; database exists +Warnings: +Note 1051 Unknown table 'db1.t1' EXPLAIN { "query_block": { @@ -992,6 +1008,8 @@ Warnings: Warning 4200 The setting 'optimizer_adjust_secondary_key_costs' is ignored. It only exists for compatibility with old installations and will be removed in a future release Warnings: Note 1007 Can't create database 'db1'; database exists +Warnings: +Note 1051 Unknown table 'db1.t1' EXPLAIN { "query_block": { @@ -1079,6 +1097,8 @@ Warnings: Warning 4200 The setting 'optimizer_adjust_secondary_key_costs' is ignored. It only exists for compatibility with old installations and will be removed in a future release Warnings: Note 1007 Can't create database 'db1'; database exists +Warnings: +Note 1051 Unknown table 'db1.t1' EXPLAIN { "query_block": { @@ -1164,6 +1184,8 @@ Warnings: Warning 4200 The setting 'optimizer_adjust_secondary_key_costs' is ignored. 
It only exists for compatibility with old installations and will be removed in a future release Warnings: Note 1007 Can't create database 'db1'; database exists +Warnings: +Note 1051 Unknown table 'db1.t1' EXPLAIN { "query_block": { @@ -1248,6 +1270,8 @@ Warnings: Warning 4200 The setting 'optimizer_adjust_secondary_key_costs' is ignored. It only exists for compatibility with old installations and will be removed in a future release Warnings: Note 1007 Can't create database 'db1'; database exists +Warnings: +Note 1051 Unknown table 'db1.t1' EXPLAIN { "query_block": { @@ -1357,6 +1381,8 @@ Warnings: Warning 4200 The setting 'optimizer_adjust_secondary_key_costs' is ignored. It only exists for compatibility with old installations and will be removed in a future release Warnings: Note 1007 Can't create database 'db1'; database exists +Warnings: +Note 1051 Unknown table 'db1.t1' EXPLAIN { "query_block": { @@ -1491,6 +1517,8 @@ Warnings: Warning 4200 The setting 'optimizer_adjust_secondary_key_costs' is ignored. It only exists for compatibility with old installations and will be removed in a future release Warnings: Note 1007 Can't create database 'db1'; database exists +Warnings: +Note 1051 Unknown table 'db1.t1' EXPLAIN { "query_block": { diff --git a/mysql-test/main/opt_context_replay_innodb_pref.result b/mysql-test/main/opt_context_replay_innodb_pref.result index dd5d733f2ab7d..cd0f952e75464 100644 --- a/mysql-test/main/opt_context_replay_innodb_pref.result +++ b/mysql-test/main/opt_context_replay_innodb_pref.result @@ -106,6 +106,8 @@ Warnings: Warning 4200 The setting 'optimizer_adjust_secondary_key_costs' is ignored. It only exists for compatibility with old installations and will be removed in a future release Warnings: Note 1007 Can't create database 'db1'; database exists +Warnings: +Note 1051 Unknown table 'db1.t1' EXPLAIN { "query_block": { @@ -228,6 +230,8 @@ Warnings: Warning 4200 The setting 'optimizer_adjust_secondary_key_costs' is ignored. 
It only exists for compatibility with old installations and will be removed in a future release Warnings: Note 1007 Can't create database 'db1'; database exists +Warnings: +Note 1051 Unknown table 'db1.t1' EXPLAIN { "query_block": { @@ -345,6 +349,8 @@ Warnings: Warning 4200 The setting 'optimizer_adjust_secondary_key_costs' is ignored. It only exists for compatibility with old installations and will be removed in a future release Warnings: Note 1007 Can't create database 'db1'; database exists +Warnings: +Note 1051 Unknown table 'db1.t1' EXPLAIN { "query_block": { @@ -466,6 +472,8 @@ Warnings: Warning 4200 The setting 'optimizer_adjust_secondary_key_costs' is ignored. It only exists for compatibility with old installations and will be removed in a future release Warnings: Note 1007 Can't create database 'db1'; database exists +Warnings: +Note 1051 Unknown table 'db1.t1' EXPLAIN { "query_block": { @@ -591,6 +599,8 @@ Warnings: Warning 4200 The setting 'optimizer_adjust_secondary_key_costs' is ignored. It only exists for compatibility with old installations and will be removed in a future release Warnings: Note 1007 Can't create database 'db1'; database exists +Warnings: +Note 1051 Unknown table 'db1.t1' EXPLAIN { "query_block": { @@ -712,6 +722,8 @@ Warnings: Warning 4200 The setting 'optimizer_adjust_secondary_key_costs' is ignored. It only exists for compatibility with old installations and will be removed in a future release Warnings: Note 1007 Can't create database 'db1'; database exists +Warnings: +Note 1051 Unknown table 'db1.t1' EXPLAIN { "query_block": { @@ -818,6 +830,8 @@ Warnings: Warning 4200 The setting 'optimizer_adjust_secondary_key_costs' is ignored. 
It only exists for compatibility with old installations and will be removed in a future release Warnings: Note 1007 Can't create database 'db1'; database exists +Warnings: +Note 1051 Unknown table 'db1.t1' EXPLAIN { "query_block": { @@ -914,6 +928,8 @@ Warnings: Warning 4200 The setting 'optimizer_adjust_secondary_key_costs' is ignored. It only exists for compatibility with old installations and will be removed in a future release Warnings: Note 1007 Can't create database 'db1'; database exists +Warnings: +Note 1051 Unknown table 'db1.t1' EXPLAIN { "query_block": { @@ -992,6 +1008,8 @@ Warnings: Warning 4200 The setting 'optimizer_adjust_secondary_key_costs' is ignored. It only exists for compatibility with old installations and will be removed in a future release Warnings: Note 1007 Can't create database 'db1'; database exists +Warnings: +Note 1051 Unknown table 'db1.t1' EXPLAIN { "query_block": { @@ -1079,6 +1097,8 @@ Warnings: Warning 4200 The setting 'optimizer_adjust_secondary_key_costs' is ignored. It only exists for compatibility with old installations and will be removed in a future release Warnings: Note 1007 Can't create database 'db1'; database exists +Warnings: +Note 1051 Unknown table 'db1.t1' EXPLAIN { "query_block": { @@ -1164,6 +1184,8 @@ Warnings: Warning 4200 The setting 'optimizer_adjust_secondary_key_costs' is ignored. It only exists for compatibility with old installations and will be removed in a future release Warnings: Note 1007 Can't create database 'db1'; database exists +Warnings: +Note 1051 Unknown table 'db1.t1' EXPLAIN { "query_block": { @@ -1248,6 +1270,8 @@ Warnings: Warning 4200 The setting 'optimizer_adjust_secondary_key_costs' is ignored. 
It only exists for compatibility with old installations and will be removed in a future release Warnings: Note 1007 Can't create database 'db1'; database exists +Warnings: +Note 1051 Unknown table 'db1.t1' EXPLAIN { "query_block": { @@ -1357,6 +1381,8 @@ Warnings: Warning 4200 The setting 'optimizer_adjust_secondary_key_costs' is ignored. It only exists for compatibility with old installations and will be removed in a future release Warnings: Note 1007 Can't create database 'db1'; database exists +Warnings: +Note 1051 Unknown table 'db1.t1' EXPLAIN { "query_block": { @@ -1491,6 +1517,8 @@ Warnings: Warning 4200 The setting 'optimizer_adjust_secondary_key_costs' is ignored. It only exists for compatibility with old installations and will be removed in a future release Warnings: Note 1007 Can't create database 'db1'; database exists +Warnings: +Note 1051 Unknown table 'db1.t1' EXPLAIN { "query_block": { diff --git a/mysql-test/main/opt_context_store_ddls.result b/mysql-test/main/opt_context_store_ddls.result index d005edb966128..ebc2ead308b29 100644 --- a/mysql-test/main/opt_context_store_ddls.result +++ b/mysql-test/main/opt_context_store_ddls.result @@ -145,11 +145,13 @@ CREATE TABLE `t2` ( `a` int(11) DEFAULT NULL ) ENGINE=MyISAM DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_uca1400_ai_ci; +DROP TABLE IF EXISTS db1.t1; CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` int(11) DEFAULT NULL ) ENGINE=MyISAM DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_uca1400_ai_ci; +DROP VIEW IF EXISTS db1.view1; CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW db1.view1 AS (select `db1`.`t1`.`a` AS `a`,`db1`.`t1`.`b` AS `b`,`db1`.`t2`.`a` AS `c` from (`db1`.`t1` join `db1`.`t2`) where `db1`.`t1`.`a` = `db1`.`t2`.`a`); @@ -180,6 +182,7 @@ CREATE TABLE `t2` ( `a` int(11) DEFAULT NULL ) ENGINE=MyISAM DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_uca1400_ai_ci; +DROP TABLE IF EXISTS db1.temp1; CREATE TEMPORARY TABLE `temp1` ( `col1` int(11) DEFAULT NULL ) ENGINE=MyISAM 
DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_uca1400_ai_ci; @@ -241,11 +244,13 @@ CREATE TABLE `t2` ( `a` int(11) DEFAULT NULL ) ENGINE=MyISAM DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_uca1400_ai_ci; +DROP TABLE IF EXISTS db1.t1; CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` int(11) DEFAULT NULL ) ENGINE=MyISAM DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_uca1400_ai_ci; +DROP VIEW IF EXISTS db1.view1; CREATE ALGORITHM=UNDEFINED DEFINER=`root`@`localhost` SQL SECURITY DEFINER VIEW db1.view1 AS (select `db1`.`t1`.`a` AS `a`,`db1`.`t1`.`b` AS `b`,`db1`.`t2`.`a` AS `c` from (`db1`.`t1` join `db1`.`t2`) where `db1`.`t1`.`a` = `db1`.`t2`.`a`); @@ -350,6 +355,7 @@ CREATE TABLE `t2` ( `a` int(11) DEFAULT NULL ) ENGINE=MyISAM DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_uca1400_ai_ci; +DROP TABLE IF EXISTS db1.t1; CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` int(11) DEFAULT NULL @@ -391,6 +397,7 @@ CREATE TABLE `db2`.`t1` ( `a` int(11) DEFAULT NULL ) ENGINE=MyISAM DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_uca1400_ai_ci; +DROP TABLE IF EXISTS db1.t1; CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` int(11) DEFAULT NULL @@ -430,6 +437,7 @@ CREATE TABLE `t1` ( CREATE DATABASE IF NOT EXISTS db1; +DROP TABLE IF EXISTS db1.t1; CREATE TABLE `db1`.`t1` ( `a` int(11) DEFAULT NULL, `b` int(11) DEFAULT NULL @@ -507,8 +515,10 @@ CREATE TABLE `t2` ( PRIMARY KEY (`a`) ) ENGINE=MyISAM DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_uca1400_ai_ci; +SET STATEMENT sql_mode=REPLACE(REPLACE(@@sql_mode,'STRICT_ALL_TABLES',''),'STRICT_TRANS_TABLES','') FOR REPLACE INTO db1.t2(a, b) VALUES (1, 1); +DROP TABLE IF EXISTS db1.t1; CREATE TABLE `t1` ( `a` int(11) DEFAULT NULL, `b` int(11) DEFAULT NULL @@ -670,6 +680,7 @@ CREATE TABLE `t2` ( KEY `fk_id` (`id`) ) ENGINE=MyISAM DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_uca1400_ai_ci; +DROP TABLE IF EXISTS db1.t1; CREATE TABLE `t1` ( `id` int(11) NOT NULL AUTO_INCREMENT, `name` varchar(10) DEFAULT NULL, @@ -709,6 +720,7 @@ CREATE TABLE `t2` ( `id2` int(11) NOT NULL ) 
ENGINE=MyISAM DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_uca1400_ai_ci; +DROP TABLE IF EXISTS db1.t1; CREATE TABLE `t1` ( `id1` int(11) NOT NULL AUTO_INCREMENT, PRIMARY KEY (`id1`) @@ -740,6 +752,7 @@ CREATE TABLE `t2` ( `id2` int(11) NOT NULL ) ENGINE=MyISAM DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_uca1400_ai_ci; +DROP TABLE IF EXISTS db1.t1; CREATE TABLE `t1` ( `id1` int(11) NOT NULL AUTO_INCREMENT, PRIMARY KEY (`id1`) diff --git a/mysql-test/main/opt_context_store_ddls.test b/mysql-test/main/opt_context_store_ddls.test index 0e393c8b72d0c..1b1eb351a0194 100644 --- a/mysql-test/main/opt_context_store_ddls.test +++ b/mysql-test/main/opt_context_store_ddls.test @@ -3,6 +3,8 @@ --source include/have_partition.inc --source include/no_view_protocol.inc +--disable_replay testfile Don't replay a replay test + set optimizer_record_context=ON; show variables like 'optimizer_record_context'; diff --git a/mysql-test/main/opt_context_store_stats.result b/mysql-test/main/opt_context_store_stats.result index 569e74319629b..e1db299dadf42 100644 --- a/mysql-test/main/opt_context_store_stats.result +++ b/mysql-test/main/opt_context_store_stats.result @@ -40,11 +40,11 @@ context, '(?<=set @opt_context=\')([\n\r].*)*(?=\'\;#opt_context_ends)') from information_schema.optimizer_context); set @records= -(select JSON_DETAILED(JSON_EXTRACT(@opt_context, '$**.num_of_records'))); +(select JSON_DETAILED(JSON_EXTRACT(@opt_context, '$**.file_stat_records'))); select *from json_table(@records, -'$[*]' columns(num_of_records text path '$')) as jt; -num_of_records -0 +'$[*]' columns(file_stat_records text path '$')) as jt; +file_stat_records +20 set @file_stat_records= (select JSON_DETAILED(JSON_EXTRACT(@opt_context, '$**.file_stat_records'))); select *from json_table(@file_stat_records, @@ -85,10 +85,10 @@ context, '(?<=set @opt_context=\')([\n\r].*)*(?=\'\;#opt_context_ends)') from information_schema.optimizer_context); set @records= -(select JSON_DETAILED(JSON_EXTRACT(@opt_context, 
'$**.num_of_records'))); +(select JSON_DETAILED(JSON_EXTRACT(@opt_context, '$**.file_stat_records'))); select *from json_table(@records, -'$[*]' columns(num_of_records text path '$')) as jt; -num_of_records +'$[*]' columns(file_stat_records text path '$')) as jt; +file_stat_records 30 20 set @file_stat_records= @@ -136,10 +136,10 @@ context, '(?<=set @opt_context=\')([\n\r].*)*(?=\'\;#opt_context_ends)') from information_schema.optimizer_context); set @records= -(select JSON_DETAILED(JSON_EXTRACT(@opt_context, '$**.num_of_records'))); +(select JSON_DETAILED(JSON_EXTRACT(@opt_context, '$**.file_stat_records'))); select *from json_table(@records, -'$[*]' columns(num_of_records text path '$')) as jt; -num_of_records +'$[*]' columns(file_stat_records text path '$')) as jt; +file_stat_records set @file_stat_records= (select JSON_DETAILED(JSON_EXTRACT(@opt_context, '$**.file_stat_records'))); select *from json_table(@file_stat_records, @@ -173,10 +173,10 @@ context, '(?<=set @opt_context=\')([\n\r].*)*(?=\'\;#opt_context_ends)') from information_schema.optimizer_context); set @records= -(select JSON_DETAILED(JSON_EXTRACT(@opt_context, '$**.num_of_records'))); +(select JSON_DETAILED(JSON_EXTRACT(@opt_context, '$**.file_stat_records'))); select *from json_table(@records, -'$[*]' columns(num_of_records text path '$')) as jt; -num_of_records +'$[*]' columns(file_stat_records text path '$')) as jt; +file_stat_records 30 20 set @file_stat_records= @@ -225,10 +225,10 @@ context, '(?<=set @opt_context=\')([\n\r].*)*(?=\'\;#opt_context_ends)') from information_schema.optimizer_context); set @records= -(select JSON_DETAILED(JSON_EXTRACT(@opt_context, '$**.num_of_records'))); +(select JSON_DETAILED(JSON_EXTRACT(@opt_context, '$**.file_stat_records'))); select *from json_table(@records, -'$[*]' columns(num_of_records text path '$')) as jt; -num_of_records +'$[*]' columns(file_stat_records text path '$')) as jt; +file_stat_records 20 set @file_stat_records= (select 
JSON_DETAILED(JSON_EXTRACT(@opt_context, '$**.file_stat_records'))); @@ -268,10 +268,10 @@ context, '(?<=set @opt_context=\')([\n\r].*)*(?=\'\;#opt_context_ends)') from information_schema.optimizer_context); set @records= -(select JSON_DETAILED(JSON_EXTRACT(@opt_context, '$**.num_of_records'))); +(select JSON_DETAILED(JSON_EXTRACT(@opt_context, '$**.file_stat_records'))); select *from json_table(@records, -'$[*]' columns(num_of_records text path '$')) as jt; -num_of_records +'$[*]' columns(file_stat_records text path '$')) as jt; +file_stat_records 30 20 set @file_stat_records= @@ -323,10 +323,10 @@ context, '(?<=set @opt_context=\')([\n\r].*)*(?=\'\;#opt_context_ends)') from information_schema.optimizer_context); set @records= -(select JSON_DETAILED(JSON_EXTRACT(@opt_context, '$**.num_of_records'))); +(select JSON_DETAILED(JSON_EXTRACT(@opt_context, '$**.file_stat_records'))); select *from json_table(@records, -'$[*]' columns(num_of_records text path '$')) as jt; -num_of_records +'$[*]' columns(file_stat_records text path '$')) as jt; +file_stat_records 50 set @file_stat_records= (select JSON_DETAILED(JSON_EXTRACT(@opt_context, '$**.file_stat_records'))); @@ -371,10 +371,10 @@ context, '(?<=set @opt_context=\')([\n\r].*)*(?=\'\;#opt_context_ends)') from information_schema.optimizer_context); set @records= -(select JSON_DETAILED(JSON_EXTRACT(@opt_context, '$**.num_of_records'))); +(select JSON_DETAILED(JSON_EXTRACT(@opt_context, '$**.file_stat_records'))); select *from json_table(@records, -'$[*]' columns(num_of_records text path '$')) as jt; -num_of_records +'$[*]' columns(file_stat_records text path '$')) as jt; +file_stat_records 50 set @file_stat_records= (select JSON_DETAILED(JSON_EXTRACT(@opt_context, '$**.file_stat_records'))); @@ -425,10 +425,10 @@ context, '(?<=set @opt_context=\')([\n\r].*)*(?=\'\;#opt_context_ends)') from information_schema.optimizer_context); set @records= -(select JSON_DETAILED(JSON_EXTRACT(@opt_context, '$**.num_of_records'))); 
+(select JSON_DETAILED(JSON_EXTRACT(@opt_context, '$**.file_stat_records'))); select *from json_table(@records, -'$[*]' columns(num_of_records text path '$')) as jt; -num_of_records +'$[*]' columns(file_stat_records text path '$')) as jt; +file_stat_records set @file_stat_records= (select JSON_DETAILED(JSON_EXTRACT(@opt_context, '$**.file_stat_records'))); select *from json_table(@file_stat_records, @@ -464,10 +464,10 @@ context, '(?<=set @opt_context=\')([\n\r].*)*(?=\'\;#opt_context_ends)') from information_schema.optimizer_context); set @records= -(select JSON_DETAILED(JSON_EXTRACT(@opt_context, '$**.num_of_records'))); +(select JSON_DETAILED(JSON_EXTRACT(@opt_context, '$**.file_stat_records'))); select *from json_table(@records, -'$[*]' columns(num_of_records text path '$')) as jt; -num_of_records +'$[*]' columns(file_stat_records text path '$')) as jt; +file_stat_records 50 set @file_stat_records= (select JSON_DETAILED(JSON_EXTRACT(@opt_context, '$**.file_stat_records'))); @@ -534,6 +534,7 @@ select @const_table_inserts; @const_table_inserts REPLACE INTO db1.t1(a, b) VALUES (5, 0); +SET STATEMENT sql_mode=REPLACE(REPLACE(@@sql_mode,'STRICT_ALL_TABLES',''),'STRICT_TRANS_TABLES','') FOR REPLACE INTO db1.t1(a, b) VALUES (4, 4); REPLACE INTO mysql.table_stats VALUES ('db1', 't1', 20); diff --git a/mysql-test/main/opt_context_store_stats.test b/mysql-test/main/opt_context_store_stats.test index 4c25c0f95a44d..f34af00f11cf4 100644 --- a/mysql-test/main/opt_context_store_stats.test +++ b/mysql-test/main/opt_context_store_stats.test @@ -1,6 +1,9 @@ --source include/not_embedded.inc --source include/have_sequence.inc --echo #enable optimizer_record_context + +--disable_replay testfile Don't replay a replay test + set optimizer_record_context=ON; create database db1; diff --git a/mysql-test/main/opt_trace.test b/mysql-test/main/opt_trace.test index 4c8fc52ae2965..6818086e6b468 100644 --- a/mysql-test/main/opt_trace.test +++ b/mysql-test/main/opt_trace.test @@ -1,6 +1,8 
@@ --source include/not_embedded.inc --source include/have_sequence.inc +--disable_replay testfile Need to preserve optimizer trace + SELECT table_name, column_name FROM information_schema.columns where table_name="OPTIMIZER_TRACE"; set optimizer_trace="enabled=on"; show variables like 'optimizer_trace'; diff --git a/mysql-test/main/opt_trace_index_merge.test b/mysql-test/main/opt_trace_index_merge.test index 3cc54f5342867..d2b05cd9bf2d7 100644 --- a/mysql-test/main/opt_trace_index_merge.test +++ b/mysql-test/main/opt_trace_index_merge.test @@ -1,4 +1,5 @@ --source include/not_embedded.inc +--disable_replay testfile Need to preserve optimizer trace set @tmp_opt_switch= @@optimizer_switch; set optimizer_switch='index_merge_sort_intersection=on'; set optimizer_trace='enabled=on'; diff --git a/mysql-test/main/opt_trace_index_merge_innodb.test b/mysql-test/main/opt_trace_index_merge_innodb.test index 3b8e60f687e39..9d841eb74342d 100644 --- a/mysql-test/main/opt_trace_index_merge_innodb.test +++ b/mysql-test/main/opt_trace_index_merge_innodb.test @@ -32,6 +32,7 @@ analyze table t1; set optimizer_trace="enabled=on"; set @tmp_index_merge_ror_cpk=@@optimizer_switch; set optimizer_switch='extended_keys=off'; +--disable_replay next_query Need to preserve optimizer trace explain select * from t1 where pk1 != 0 and key1 = 1; --disable_view_protocol select * from information_schema.OPTIMIZER_TRACE; diff --git a/mysql-test/main/opt_trace_ucs2.test b/mysql-test/main/opt_trace_ucs2.test index 7a26eefc4d6de..0b6b9b142c26c 100644 --- a/mysql-test/main/opt_trace_ucs2.test +++ b/mysql-test/main/opt_trace_ucs2.test @@ -1,6 +1,8 @@ --source include/not_embedded.inc --source include/have_ucs2.inc +--disable_replay testfile Need to preserve optimizer trace + create or replace table t1 (col1 char(10) character set ucs2, filler char(100), key(col1)) ; insert into t1 values ('a', 'a'); insert into t1 values ('a', 'a'); diff --git a/mysql-test/main/optimizer_costs.test 
b/mysql-test/main/optimizer_costs.test index bd7e89a58cc01..39d5ddc9af82c 100644 --- a/mysql-test/main/optimizer_costs.test +++ b/mysql-test/main/optimizer_costs.test @@ -147,12 +147,14 @@ create table t1 (l_orderkey int(11) NOT NULL, l_suppkey int(11) DEFAULT NULL, PRIMARY KEY (l_orderkey)) engine=aria; insert into t1 select seq,seq,seq from seq_1_to_1000; +--disable_replay next_query Will try to create table for sequence engine explain select straight_join count(*) from seq_1_to_10000,t1 where seq=l_orderkey; show status like "last_query_cost"; set @org_cost=@@aria.optimizer_key_next_find_cost; # Set cost for t1 high so that we cannot use it for index scans set global aria.optimizer_key_next_find_cost=1000; flush tables; +--disable_replay next_query Will try to create table for sequence engine explain select count(*) from seq_1_to_10000,t1 where seq=l_orderkey; show status like "last_query_cost"; set global aria.optimizer_key_next_find_cost=@org_cost; diff --git a/mysql-test/main/order_by_sortkey.test b/mysql-test/main/order_by_sortkey.test index 1fb18aa0e0111..0cac888f420f0 100644 --- a/mysql-test/main/order_by_sortkey.test +++ b/mysql-test/main/order_by_sortkey.test @@ -59,6 +59,7 @@ FLUSH STATUS; SHOW SESSION STATUS LIKE 'Sort%'; --disable_ps2_protocol +--disable_replay next_query Needs to preserve counters explain SELECT * FROM t1 ORDER BY f2 LIMIT 100; SELECT * FROM t1 ORDER BY f2 LIMIT 100; --enable_ps2_protocol diff --git a/mysql-test/main/partition_explicit_prune.test b/mysql-test/main/partition_explicit_prune.test index bd42af70aa2a0..300b235dc79c4 100644 --- a/mysql-test/main/partition_explicit_prune.test +++ b/mysql-test/main/partition_explicit_prune.test @@ -258,6 +258,7 @@ eval $get_handler_status_counts; EXPLAIN PARTITIONS SELECT * FROM t1 PARTITION (pNonexistent); EXPLAIN PARTITIONS SELECT * FROM t1 PARTITION (subp2); FLUSH STATUS; +--disable_replay testfile Need to preserve counters EXPLAIN PARTITIONS SELECT * FROM t1 PARTITION (subp2,pNeg) AS 
TableAlias; eval $get_handler_status_counts; --echo # 8 locks (1 ha_partition + 3 ha_innobase) x 2 (lock/unlock) diff --git a/mysql-test/main/ps.test b/mysql-test/main/ps.test index ed213f1da7761..6b803c0b5202e 100644 --- a/mysql-test/main/ps.test +++ b/mysql-test/main/ps.test @@ -4989,6 +4989,7 @@ CREATE TABLE t1 (c int); CREATE TABLE t2 (d int); --echo # EXPLAIN EXTENDED in regular way (not PS mode) +--disable_replay testfile Need to preserve warnings EXPLAIN EXTENDED SELECT (SELECT 1 FROM t2 WHERE d = c) FROM t1; SHOW WARNINGS; diff --git a/mysql-test/main/range_notembedded.test b/mysql-test/main/range_notembedded.test index a13bb1d95c79a..7bb9abd01079e 100644 --- a/mysql-test/main/range_notembedded.test +++ b/mysql-test/main/range_notembedded.test @@ -8,6 +8,8 @@ drop table if exists t1,t2; --enable_warnings +--disable_replay testfile Need to preserve optimizer trace + --echo # --echo # MDEV-21958: Query having many NOT-IN clauses running forever --echo # diff --git a/mysql-test/main/replay_server_test.result b/mysql-test/main/replay_server_test.result new file mode 100644 index 0000000000000..75b113ee7c708 --- /dev/null +++ b/mysql-test/main/replay_server_test.result @@ -0,0 +1,89 @@ +REPLAY_SERVER_SOCKET is NOT set +CREATE TABLE t1 (a INT, b INT, KEY(a)); +INSERT INTO t1 VALUES (1,1), (2,2), (3,3); +analyze table t1; +Table Op Msg_type Msg_text +test.t1 analyze status Engine-independent statistics collected +test.t1 analyze status OK +EXPLAIN FORMAT=JSON SELECT * FROM t1 WHERE a = 1; +EXPLAIN +{ + "query_block": { + "select_id": 1, + "cost": 0.002024411, + "nested_loop": [ + { + "table": { + "table_name": "t1", + "access_type": "ref", + "possible_keys": ["a"], + "key": "a", + "key_length": "5", + "used_key_parts": ["a"], + "ref": ["const"], + "loops": 1, + "rows": 1, + "cost": 0.002024411, + "filtered": 100 + } + } + ] + } +} +EXPLAIN FORMAT=JSON +SELECT * FROM t1 WHERE a < 100; +EXPLAIN +{ + "query_block": { + "select_id": 1, + "cost": 0.005042291, + 
"nested_loop": [ + { + "table": { + "table_name": "t1", + "access_type": "range", + "possible_keys": ["a"], + "key": "a", + "key_length": "5", + "used_key_parts": ["a"], + "loops": 1, + "rows": 3, + "cost": 0.005042291, + "filtered": 100, + "index_condition": "t1.a < 100" + } + } + ] + } +} +SELECT * FROM t1 WHERE a < 100; +a b +1 1 +2 2 +3 3 +explain +SELECT * FROM t1 WHERE a < 22; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 range a a 5 NULL 3 Using index condition +explain extended +SELECT * FROM t1 WHERE a < 22; +id select_type table type possible_keys key key_len ref rows filtered Extra +1 SIMPLE t1 range a a 5 NULL 3 100.00 Using index condition +Warnings: +Note 1003 select `test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b` from `test`.`t1` where `test`.`t1`.`a` < 22 +explain +SELECT * FROM t1 WHERE b < 22; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL 3 Using where +explain extended select LENGTH(CONCAT('aa','bbb')); +id select_type table type possible_keys key key_len ref rows filtered Extra +1 SIMPLE NULL NULL NULL NULL NULL NULL NULL NULL No tables used +Warnings: +Note 1003 select octet_length(concat('aa','bbb')) AS `LENGTH(CONCAT('aa','bbb'))` +create function add1(i int) returns int deterministic +return i+1; +explain select * from t1 where b< add1(b); +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL 3 Using where +drop function add1; +DROP TABLE t1; diff --git a/mysql-test/main/replay_server_test.test b/mysql-test/main/replay_server_test.test new file mode 100644 index 0000000000000..41163cc332940 --- /dev/null +++ b/mysql-test/main/replay_server_test.test @@ -0,0 +1,77 @@ +# Test that replay server is started and REPLAY_SERVER_SOCKET is set +--source include/not_embedded.inc + +--perl +if ($ENV{REPLAY_SERVER_SOCKET}) { + print "REPLAY_SERVER_SOCKET is set: $ENV{REPLAY_SERVER_SOCKET}\n"; +} else { + print 
"REPLAY_SERVER_SOCKET is NOT set\n"; +} +EOF + +# Try to connect to the replay server if socket is set +--perl +if ($ENV{REPLAY_SERVER_SOCKET}) { + my $socket = $ENV{REPLAY_SERVER_SOCKET}; + print "Checking socket: $socket\n"; + print "Socket exists: ", (-e $socket ? "YES" : "NO"), "\n"; + print "Is socket: ", (-S $socket ? "YES" : "NO"), "\n"; + + # Check if PID is set and process is running + if ($ENV{REPLAY_SERVER_PID}) { + my $pid = $ENV{REPLAY_SERVER_PID}; + print "Replay server PID: $pid\n"; + my $running = kill 0, $pid; + print "Process running: ", ($running ? "YES" : "NO"), "\n"; + } + + # List files in the socket directory + my $socket_dir = $socket; + $socket_dir =~ s{/[^/]+$}{}; + print "Socket directory: $socket_dir\n"; + if (-d $socket_dir) { + opendir(my $dh, $socket_dir); + my @files = grep { !/^\./ } readdir($dh); + closedir($dh); + print "Files in socket dir: ", join(", ", @files), "\n"; + } +} +EOF + +# Test ReplayTest mode with EXPLAIN FORMAT=JSON +CREATE TABLE t1 (a INT, b INT, KEY(a)); +INSERT INTO t1 VALUES (1,1), (2,2), (3,3); +analyze table t1; + +# This should trigger ReplayTest mode if REPLAY_SERVER_SOCKET is set +EXPLAIN FORMAT=JSON SELECT * FROM t1 WHERE a = 1; + +EXPLAIN FORMAT=JSON +SELECT * FROM t1 WHERE a < 100; + +SELECT * FROM t1 WHERE a < 100; + +explain +SELECT * FROM t1 WHERE a < 22; + +explain extended +SELECT * FROM t1 WHERE a < 22; + +explain +SELECT * FROM t1 WHERE b < 22; + +explain extended select LENGTH(CONCAT('aa','bbb')); + +# +# Check if disable replay works +# +create function add1(i int) returns int deterministic + return i+1; + +# The following would give this error: +# ReplayTest: Query error: FUNCTION test.add1 does not exist +--disable_replay next_query Don't support SPs. 
+explain select * from t1 where b< add1(b); + +drop function add1; +DROP TABLE t1; diff --git a/mysql-test/main/sargable_casefold_notembedded.test b/mysql-test/main/sargable_casefold_notembedded.test index b04e25f1cac22..6502c5bb6e8ec 100644 --- a/mysql-test/main/sargable_casefold_notembedded.test +++ b/mysql-test/main/sargable_casefold_notembedded.test @@ -6,6 +6,7 @@ --source include/not_embedded.inc --source include/have_sequence.inc +--disable_replay testfile Need to preserve optimizer trace create table t1 ( col1 varchar(32), diff --git a/mysql-test/main/select.test b/mysql-test/main/select.test index ca248b26b9109..2013bfa3516f1 100644 --- a/mysql-test/main/select.test +++ b/mysql-test/main/select.test @@ -4144,6 +4144,7 @@ SET @cnt := 0; SELECT * FROM t1 WHERE a = f1(); --enable_ps2_protocol SELECT @cnt; +--disable_replay next_query Don't support SPs EXPLAIN EXTENDED SELECT * FROM t1 WHERE a = f1(); DROP TABLE t1, t2; DROP FUNCTION f1; diff --git a/mysql-test/main/selectivity_notembedded.test b/mysql-test/main/selectivity_notembedded.test index ed2e25b8028fe..8ac0c2a1f421b 100644 --- a/mysql-test/main/selectivity_notembedded.test +++ b/mysql-test/main/selectivity_notembedded.test @@ -96,6 +96,7 @@ flush tables; set @tmp=@@optimizer_trace; set optimizer_trace=1; +--disable_replay next_query Need to preserve optimizer trace explain select * from t10 where a in (91303); #Enable after fix MDEV-32034 @@ -240,6 +241,7 @@ from analyze table t1; set optimizer_trace=1; +--disable_replay next_query Need to preserve optimizer trace explain select * from t1 where pk in (1,2,3,4,5) and diff --git a/mysql-test/main/sp.test b/mysql-test/main/sp.test index 773f44d5dfa5f..c178ff8b2988d 100644 --- a/mysql-test/main/sp.test +++ b/mysql-test/main/sp.test @@ -2,6 +2,8 @@ --source include/have_partition.inc --source include/have_sequence.inc +--disable_replay testfile Don't support SPs. 
+ # # Basic stored PROCEDURE tests # diff --git a/mysql-test/main/spatial_utility_function_geohash.test b/mysql-test/main/spatial_utility_function_geohash.test index 4d40e6302eedc..6d5ace7101ccf 100644 --- a/mysql-test/main/spatial_utility_function_geohash.test +++ b/mysql-test/main/spatial_utility_function_geohash.test @@ -632,6 +632,7 @@ SELECT ST_ASTEXT(ST_POINTFROMGEOHASH(hash_value,0)) FROM geohashes; --echo # Test create table from SELECT statement CREATE TABLE t1 AS SELECT ST_POINTFROMGEOHASH("0123", 4326); +--disable_replay next_query This is EXPLAIN TABLE. EXPLAIN t1; DROP TABLE t1; diff --git a/mysql-test/main/status.test b/mysql-test/main/status.test index a3087d9c9c055..912b945e5e5fd 100644 --- a/mysql-test/main/status.test +++ b/mysql-test/main/status.test @@ -210,12 +210,14 @@ INSERT INTO t1 VALUES (1), (2); SELECT a FROM t1 LIMIT 1; SHOW SESSION STATUS LIKE 'Last_query_cost'; +--disable_replay next_query Needs to preserve counters EXPLAIN SELECT a FROM t1; SHOW SESSION STATUS LIKE 'Last_query_cost'; SELECT a FROM t1 UNION SELECT a FROM t1 ORDER BY a; SHOW SESSION STATUS LIKE 'Last_query_cost'; +--disable_replay next_query Needs to preserve counters EXPLAIN SELECT a FROM t1 UNION SELECT a FROM t1 ORDER BY a; SHOW SESSION STATUS LIKE 'Last_query_cost'; diff --git a/mysql-test/main/subselect4.test b/mysql-test/main/subselect4.test index a0fece967ca3e..668823a5a1970 100644 --- a/mysql-test/main/subselect4.test +++ b/mysql-test/main/subselect4.test @@ -2657,6 +2657,7 @@ create table t3 as select * from t1; analyze table t1,t2,t3; select @@expensive_subquery_limit < 200 as DEFAULTS_ARE_SUITABLE; flush status; +--disable_replay testfile Need to preserve counter explain select * from t1 where a<3 or (select max(a) from t2) in (select b from t3); --echo # Must show 0. 
If this shows 200, this means subquery was executed and you have a bug: show status like 'Handler_read_rnd_next%'; diff --git a/mysql-test/main/subselect_sj_mat.test b/mysql-test/main/subselect_sj_mat.test index 3dcef80b9ffe8..a4fa04fffd099 100644 --- a/mysql-test/main/subselect_sj_mat.test +++ b/mysql-test/main/subselect_sj_mat.test @@ -2323,6 +2323,7 @@ select * from t1 where f2(t1.id, '1980-01-01') in (select id from t2 where x=1); eval $q2; +--disable_replay next_query Don't support SPs eval explain extended $q2; let $q3= @@ -2332,6 +2333,7 @@ select t1.*, t4.*, where f2(t1.id, '1980-01-01') in (select id from t2 where x=1); eval $q3; +--disable_replay next_query Don't support SPs eval explain extended $q3; drop function f1; diff --git a/mysql-test/main/symlink-aria-11902.test b/mysql-test/main/symlink-aria-11902.test index a2a266cbb2544..35f95ec76367f 100644 --- a/mysql-test/main/symlink-aria-11902.test +++ b/mysql-test/main/symlink-aria-11902.test @@ -1,6 +1,7 @@ # # MDEV-11902 mi_open race condition # +--disable_replay testfile Don't support symlink tables source include/have_maria.inc; set default_storage_engine=Aria; source symlink-myisam-11902.test; diff --git a/mysql-test/main/symlink-myisam-11902.test b/mysql-test/main/symlink-myisam-11902.test index c782ef44ec54f..c1a2556078591 100644 --- a/mysql-test/main/symlink-myisam-11902.test +++ b/mysql-test/main/symlink-myisam-11902.test @@ -1,6 +1,7 @@ # # MDEV-11902 mi_open race condition # +--disable_replay testfile Don't support symlink tables source include/have_debug_sync.inc; source include/have_symlink.inc; source include/not_windows.inc; diff --git a/mysql-test/main/tmp_table_count-7586.test b/mysql-test/main/tmp_table_count-7586.test index ff93a25039f70..fdcbe931a7b58 100644 --- a/mysql-test/main/tmp_table_count-7586.test +++ b/mysql-test/main/tmp_table_count-7586.test @@ -3,6 +3,8 @@ --source include/have_perfschema.inc --source include/not_embedded.inc +--disable_replay testfile Need to preserve 
counters + # Tests will be skipped for the view protocol because the view protocol creates # an additional util connection and other statistics data -- source include/no_view_protocol.inc diff --git a/mysql-test/main/udf.test b/mysql-test/main/udf.test index 7d5712beb9fe5..95767a508dd8e 100644 --- a/mysql-test/main/udf.test +++ b/mysql-test/main/udf.test @@ -5,6 +5,7 @@ # the library are. # +--disable_replay testfile Don't support UDFs --disable_warnings drop table if exists t1; diff --git a/mysql-test/main/user_var.test b/mysql-test/main/user_var.test index 3141490c7a17c..add31bb693f8a 100644 --- a/mysql-test/main/user_var.test +++ b/mysql-test/main/user_var.test @@ -519,6 +519,7 @@ eval select $tmp < $tmp2; # # MDEV-13897 SELECT @a := MAX(col) FROM t requires full index scan # +--disable_replay next_query Will try to create table for sequence engine explain select @a:=max(seq) from seq_1_to_1000000; # End of 10.1 tests diff --git a/mysql-test/mariadb-test-run.pl b/mysql-test/mariadb-test-run.pl index c3816654d08fe..e1988279b968e 100755 --- a/mysql-test/mariadb-test-run.pl +++ b/mysql-test/mariadb-test-run.pl @@ -150,6 +150,11 @@ BEGIN our @global_suppressions; +# Forward declarations for variables referenced in END block +our $opt_replay_server; +our $opt_replay_server_manual; +our $replay_server_parent_pid; # PID of process that started the replay server + END { if ( defined $opt_tmpdir_pid and $opt_tmpdir_pid == $$ ) { @@ -164,6 +169,15 @@ END mtr_warning("tmpdir $opt_tmpdir should be removed after the server has finished"); } } + + # Ensure replay server is stopped on any exit path (success or failure). + # Only run in the parent process that started it; safe to call even if + # already stopped (stop_replay_server clears REPLAY_SERVER_PID). + if (defined $replay_server_parent_pid and $replay_server_parent_pid == $$ + and ($opt_replay_server || $opt_replay_server_manual)) + { + eval { stop_replay_server(); }; + } } sub env_or_val($$) { defined $ENV{$_[0]} ? 
$ENV{$_[0]} : $_[1] } @@ -281,6 +295,9 @@ END our $opt_gprof; our %gprof_dirs; +# $opt_replay_server and $opt_replay_server_manual are declared earlier in the +# file for the END block. See the "Forward declarations" comment near the top. + my $config; # The currently running config my $current_config_name; # The currently running config file template @@ -425,6 +442,30 @@ sub main { { mysql_install_db(default_mysqld(), "$opt_vardir/install.db"); make_readonly("$opt_vardir/install.db"); + + # Start replay server if --replay-server option is specified. + # Refuse if --parallel > 1 was explicitly requested; the replay server is + # a single shared instance and cannot serve multiple concurrent workers. + # (The "auto" case is resolved later and re-checked below.) + if ( $opt_replay_server || $opt_replay_server_manual ) + { + if ($opt_parallel ne "auto" && $opt_parallel > 1) + { + mtr_error("--replay-server / --replay-server-manual cannot be used " . + "together with --parallel > 1 (parallel=$opt_parallel). " . + "The replay server is a single shared instance and cannot " . + "serve multiple concurrent workers. " . + "Re-run with --parallel=1."); + } + } + if ( $opt_replay_server ) + { + start_replay_server(); + } + elsif ( $opt_replay_server_manual ) + { + start_replay_server_manual(); + } } if ($opt_dry_run) { @@ -469,6 +510,14 @@ sub main { $opt_parallel= 1; } + if ($opt_parallel > 1 && ($opt_replay_server || $opt_replay_server_manual)) { + mtr_error("--replay-server and --replay-server-manual cannot be used " . + "together with --parallel > 1 (parallel=$opt_parallel). " . + "The replay server is a single shared instance and cannot " . + "serve multiple concurrent workers. " . 
+ "Re-run with --parallel=1."); + } + # Create server socket on any free port my $server = new IO::Socket::INET ( @@ -596,6 +645,9 @@ sub main { remove_vardir_subs() if $opt_clean_vardir; + # Stop replay server if it was started + stop_replay_server() if ($opt_replay_server || $opt_replay_server_manual); + exit(0); } @@ -1283,6 +1335,8 @@ sub command_line_setup { 'skip-test-list=s' => \@opt_skip_test_list, 'xml-report=s' => \$opt_xml_report, 'open-files-limit=i', => \$opt_open_files_limit, + 'replay-server' => \$opt_replay_server, + 'replay-server-manual' => \$opt_replay_server_manual, My::Debugger::options(), My::CoreDump::options(), @@ -3031,6 +3085,445 @@ sub initialize_servers { } +sub _install_replay_server_signal_handlers { + # Ensure END block (which stops the replay server) runs on common + # termination signals. Perl END blocks don't run on uncaught signals; + # installing handlers that call exit() lets them run. + for my $sig (qw(INT TERM HUP)) { + $SIG{$sig} = sub { exit(1); }; + } +} + + +# +# Shared PID file so worker-process restarts of the replay server are visible +# to the parent's stop logic (END block). +# +sub _replay_pid_file { + return "$opt_vardir/tmp/replay_server.current_pid"; +} + +sub _write_replay_pid_file { + my ($pid) = @_; + return unless defined $pid; + my $path = _replay_pid_file(); + if (open my $fh, '>', $path) { + print $fh "$pid\n"; + close $fh; + } else { + mtr_warning("Could not write replay pid file $path: $!"); + } +} + +sub _read_replay_pid_file { + my $path = _replay_pid_file(); + return undef unless -f $path; + open my $fh, '<', $path or return undef; + my $pid = <$fh>; + close $fh; + return undef unless defined $pid; + chomp $pid; + return ($pid =~ /^\d+$/) ? $pid : undef; +} + +# +# Ping the replay server with SELECT '' AS next_testcase, enforcing +# a 5-second timeout. Returns 1 on success, 0 on failure/timeout. 
+# +sub _ping_replay_server { + my ($test_name) = @_; + my $sock = $ENV{REPLAY_SERVER_SOCKET}; + return 0 unless $sock && -S $sock; + + my $mysql_exe = mtr_exe_maybe_exists("$path_client_bindir/mariadb"); + return 0 unless $mysql_exe && -x $mysql_exe; + + # Escape single quotes in test name for SQL. + my $escaped = $test_name; + $escaped =~ s/'/''/g; + my $sql = "SELECT '$escaped' AS next_testcase"; + + my $pid = fork(); + if (!defined $pid) { + mtr_warning("fork() failed in _ping_replay_server: $!"); + return 0; + } + if ($pid == 0) { + # Child: run the client, redirect output to /dev/null, exec. + open(STDIN, '<', '/dev/null'); + open(STDOUT, '>', '/dev/null'); + open(STDERR, '>', '/dev/null'); + exec($mysql_exe, + "--no-defaults", + "--protocol=socket", + "--socket=$sock", + "--user=root", + "--connect-timeout=3", + "-N", "-B", + "-e", $sql) + or POSIX::_exit(127); + } + + # Parent: wait up to 5 seconds. + my $status; + my $timed_out = 0; + eval { + local $SIG{ALRM} = sub { die "timeout\n" }; + alarm(5); + waitpid($pid, 0); + $status = $?; + alarm(0); + }; + if ($@) { + # Timeout. + alarm(0); + $timed_out = 1; + kill 'KILL', $pid; + waitpid($pid, 0); + } + return 0 if $timed_out; + return ($status == 0) ? 1 : 0; +} + +# +# Before each test, verify that the replay server is alive and responsive. +# If not, kill the stale process (if any) and restart the server (for +# --replay-server) or wait for the user to restart it (--replay-server-manual). +# +sub check_replay_server { + my ($test_name) = @_; + return unless ($opt_replay_server || $opt_replay_server_manual); + return unless $ENV{REPLAY_SERVER_SOCKET}; + + return if _ping_replay_server($test_name); + + print STDERR "mysql-test-run: *** WARNING: Replay server unresponsive " . + "before test '$test_name'\n"; + + # Kill any stale process from the old PID. 
+ my $old_pid = _read_replay_pid_file() // $ENV{REPLAY_SERVER_PID}; + if (defined $old_pid && $old_pid =~ /^\d+$/ && kill(0, $old_pid)) { + print STDERR "mysql-test-run: killing stale replay server (pid $old_pid)\n"; + kill 'TERM', $old_pid; + my $waited = 0; + while ($waited < 5 && kill(0, $old_pid)) { + sleep 1; + $waited++; + } + if (kill 0, $old_pid) { + kill 'KILL', $old_pid; + sleep 1; + } + } + + # Remove stale socket / pid / info so restart can succeed. + my $dir = "$opt_vardir/extra_server_1"; + unlink "$dir/mysqld.sock" if -e "$dir/mysqld.sock"; + unlink "$dir/mysqld.pid" if -e "$dir/mysqld.pid"; + unlink "$opt_vardir/tmp/extra_server_1.info" + if -f "$opt_vardir/tmp/extra_server_1.info"; + + if ($opt_replay_server) { + print STDERR "mysql-test-run: restarting replay server...\n"; + start_replay_server(); + _write_replay_pid_file($ENV{REPLAY_SERVER_PID}); + print STDERR "mysql-test-run: replay server restarted (pid " . + ($ENV{REPLAY_SERVER_PID} // "?") . ")\n"; + } else { + # --replay-server-manual: can't auto-restart. Wait for the user. + print STDERR "mysql-test-run: --replay-server-manual is set; " . + "waiting for you to restart the replay server on socket " . + "$ENV{REPLAY_SERVER_SOCKET} ...\n"; + while (!_ping_replay_server($test_name)) { + sleep 2; + } + # Try to refresh the PID from the pid file written by the user's server. + my $pid_file = "$opt_vardir/extra_server_1/mysqld.pid"; + if (-f $pid_file && open my $fh, '<', $pid_file) { + my $new_pid = <$fh>; + close $fh; + chomp $new_pid if defined $new_pid; + if (defined $new_pid && $new_pid =~ /^\d+$/) { + $ENV{REPLAY_SERVER_PID} = $new_pid; + _write_replay_pid_file($new_pid); + } + } + print STDERR "mysql-test-run: replay server is responsive again, " . 
+ "continuing with test '$test_name'\n"; + } +} + + +sub start_replay_server { + mtr_report("Starting replay server..."); + $replay_server_parent_pid = $$; + _install_replay_server_signal_handlers(); + + my $replay_server_num = 1; + my $script = "$glob_mysql_test_dir/lib/start_extra_server.pl"; + + unless (-f $script) { + mtr_error("Replay server script not found: $script"); + } + + # Set required environment variables for the script + # (environment_setup() has already been called, so most are already set) + $ENV{MYSQLTEST_VARDIR} = $opt_vardir unless $ENV{MYSQLTEST_VARDIR}; + $ENV{MASTER_MYPORT} = $baseport unless $ENV{MASTER_MYPORT}; + $ENV{MYSQL_TEST_DIR} = $glob_mysql_test_dir unless $ENV{MYSQL_TEST_DIR}; + # MYSQLD should already be set by environment_setup(), but set it if not + $ENV{MYSQLD} = find_mysqld($basedir) unless $ENV{MYSQLD}; + + # Use a custom socket path in the replay server directory to avoid cleanup + my $replay_socket = "$opt_vardir/extra_server_$replay_server_num/mysqld.sock"; + + # Call the start_extra_server.pl script with custom socket + my $cmd = "perl $script $replay_server_num '' $replay_socket"; + my $result = system($cmd); + + if ($result != 0) { + mtr_error("Failed to start replay server (exit code: $result)"); + } + + # Read connection info + my $info_file = "$opt_vardir/tmp/extra_server_$replay_server_num.info"; + unless (-f $info_file) { + mtr_error("Replay server info file not found: $info_file"); + } + + open my $fh, '<', $info_file or mtr_error("Cannot read $info_file: $!"); + my %info; + while (<$fh>) { + chomp; + if (/^(\w+)=(.+)/) { + $info{$1} = $2; + } + } + close $fh; + + # Store for cleanup and export to environment + $ENV{REPLAY_SERVER_SOCKET} = $info{SOCKET}; + $ENV{REPLAY_SERVER_PID} = $info{PID}; + _write_replay_pid_file($info{PID}); + + mtr_report("Replay server started on socket: $info{SOCKET}"); +} + + +sub start_replay_server_manual { + mtr_report("Starting replay server in manual mode..."); + 
$replay_server_parent_pid = $$; + _install_replay_server_signal_handlers(); + + my $replay_server_num = 1; + + # Set required environment variables + $ENV{MYSQLTEST_VARDIR} = $opt_vardir unless $ENV{MYSQLTEST_VARDIR}; + $ENV{MASTER_MYPORT} = $baseport unless $ENV{MASTER_MYPORT}; + $ENV{MYSQL_TEST_DIR} = $glob_mysql_test_dir unless $ENV{MYSQL_TEST_DIR}; + $ENV{MYSQLD} = find_mysqld($basedir) unless $ENV{MYSQLD}; + + my $mysqld = $ENV{MYSQLD}; + die "mysqld binary not found at $mysqld\n" unless -x $mysqld; + + # Calculate paths + my $base_port = $ENV{MASTER_MYPORT} || 10000; + my $port = $base_port + 10 + $replay_server_num; + my $socket = "$opt_vardir/extra_server_$replay_server_num/mysqld.sock"; + my $server_dir = "$opt_vardir/extra_server_$replay_server_num"; + my $datadir = "$server_dir/data"; + my $install_db = "$opt_vardir/install.db"; + my $pid_file = "$server_dir/mysqld.pid"; + my $log_file = "$opt_vardir/log/extra_server_$replay_server_num.err"; + my $general_log_file = "$opt_vardir/log/extra_server_$replay_server_num.log"; + + die "install.db not found at $install_db\n" unless -d $install_db; + + # Create server directory + use File::Path qw(make_path remove_tree); + make_path($server_dir) unless -d $server_dir; + make_path("$opt_vardir/log") unless -d "$opt_vardir/log"; + + # Prepare datadir + if (-d $datadir) { + mtr_report("Removing existing datadir: $datadir"); + remove_tree($datadir); + } + + mtr_report("Copying $install_db to $datadir..."); + system("cp", "-a", $install_db, $datadir) == 0 + or die "Failed to copy $install_db to $datadir: $!\n"; + + # Ensure proper permissions + system("chmod", "-R", "u+rwX", $datadir) == 0 + or warn "Warning: Failed to set permissions on $datadir\n"; + + # Build command line + my @mysqld_args = ( + $mysqld, + "--no-defaults", + "--datadir=$datadir", + "--port=$port", + "--socket=$socket", + "--pid-file=$pid_file", + "--log-error=$log_file", + "--general-log=1", + "--general-log-file=$general_log_file", + 
"--skip-networking=0", + "--skip-grant-tables", + "--key-buffer-size=1M", + "--sort-buffer-size=256K", + "--max-heap-table-size=1M", + "--gdb", + ); + + # Write a gdb init file so the user can run: + # gdb -x var/tmp/gdbinit-replay + # The file contains a single "set args ..." line with all mysqld arguments + # (excluding the mysqld binary itself, which gdb takes separately). + my $gdbinit_file = "$opt_vardir/tmp/gdbinit-replay"; + mtr_tofile($gdbinit_file, + "set args " . join(" ", @mysqld_args[1 .. $#mysqld_args]) . "\n"); + + # Print command line for user + mtr_report("=" x 70); + mtr_report("REPLAY SERVER MANUAL MODE"); + mtr_report("=" x 70); + mtr_report(""); + mtr_report("Please start the replay server with the following command:"); + mtr_report(""); + mtr_report(join(" \\\n ", @mysqld_args)); + mtr_report(""); + mtr_report("Or run under gdb:"); + mtr_report("gdb --args " . join(" \\\n ", @mysqld_args)); + mtr_report(""); + mtr_report("gdb init file written to: $gdbinit_file"); + mtr_report(" gdb -x $gdbinit_file $mysqld"); + mtr_report(""); + mtr_report("Waiting for socket file to appear: $socket"); + mtr_report("(Timeout: 300 seconds)"); + mtr_report("=" x 70); + + # Wait for socket file to appear + my $max_wait = 300; # 5 minutes + my $waited = 0; + my $last_msg = 0; + + while ($waited < $max_wait) { + if (-S $socket) { + mtr_report("Socket file detected: $socket"); + last; + } + + sleep 1; + $waited++; + + # Print progress every 10 seconds + if ($waited - $last_msg >= 10) { + mtr_report("Still waiting for socket... 
($waited seconds elapsed)"); + $last_msg = $waited; + } + } + + if ($waited >= $max_wait) { + die "Timeout waiting for replay server socket to appear: $socket\n"; + } + + # Give server a moment to be fully ready + sleep 2; + + # Detect PID + my $pid; + + # Try to read from pid file + if (-f $pid_file) { + open my $fh, '<', $pid_file or warn "Cannot read $pid_file: $!\n"; + if ($fh) { + $pid = <$fh>; + chomp $pid if defined $pid; + close $fh; + } + } + + # Fallback: use lsof to find process using the socket + if (!$pid || !kill(0, $pid)) { + my $lsof_output = `lsof -t $socket 2>/dev/null`; + chomp $lsof_output if $lsof_output; + $pid = $lsof_output if $lsof_output && $lsof_output =~ /^\d+$/; + } + + # Fallback: use fuser + if (!$pid || !kill(0, $pid)) { + my $fuser_output = `fuser $socket 2>/dev/null`; + if ($fuser_output && $fuser_output =~ /(\d+)/) { + $pid = $1; + } + } + + if (!$pid || !kill(0, $pid)) { + die "Could not detect PID of replay server. Please check if it's running.\n"; + } + + # Store for cleanup and export to environment + $ENV{REPLAY_SERVER_SOCKET} = $socket; + $ENV{REPLAY_SERVER_PID} = $pid; + _write_replay_pid_file($pid); + + mtr_report("Replay server detected with PID: $pid"); + mtr_report("Socket: $socket"); + mtr_report("Replay server is ready!"); +} + + +sub stop_replay_server { + return unless ($opt_replay_server || $opt_replay_server_manual); + # Prefer the PID from the shared file (it may be fresher than our env var + # if a worker restarted the server). + my $pid = _read_replay_pid_file() // $ENV{REPLAY_SERVER_PID}; + return unless $pid; + + mtr_report("Stopping replay server..."); + + # Send SIGTERM. The replay server is NOT a direct child of this process + # (start_extra_server.pl is an intermediate), so waitpid() cannot be used + # to poll its exit status. Use `kill 0, $pid` (signal 0 - existence check) + # instead. 
+ if (kill 0, $pid) { + kill 'TERM', $pid; + + # Wait for process to exit (up to 10 seconds) + my $max_wait = 10; + my $waited = 0; + while ($waited < $max_wait && kill(0, $pid)) { + sleep 1; + $waited++; + } + + # Force kill if still running + if (kill 0, $pid) { + kill 'KILL', $pid; + # Wait briefly for KILL to take effect + my $kill_waited = 0; + while ($kill_waited < 3 && kill(0, $pid)) { + sleep 1; + $kill_waited++; + } + } + } + + # Cleanup info file and shared pid file + my $info_file = "$opt_vardir/tmp/extra_server_1.info"; + unlink $info_file if -f $info_file; + my $pid_file = _replay_pid_file(); + unlink $pid_file if -f $pid_file; + + # Mark as stopped so subsequent calls (e.g. from END block) are no-ops + delete $ENV{REPLAY_SERVER_PID}; + + mtr_report("Replay server stopped"); +} + + # # Remove all newline characters expect after semicolon # @@ -3842,6 +4335,9 @@ ($$) $ENV{'MTR_TEST_NAME'} = $tinfo->{name}; resfile_report_test($tinfo) if $opt_resfile; + # Verify the replay server is alive before running the test. + check_replay_server($tinfo->{name}); + for my $key (grep { /^MTR_COMBINATION/ } keys %ENV) { delete $ENV{$key}; @@ -6026,6 +6522,11 @@ ($) timer Show test case execution time. verbose More verbose output(use multiple times for even more) verbose-restart Write when and why servers are restarted + replay-server Start an extra server instance before running tests. + Socket path available via REPLAY_SERVER_SOCKET env var. + replay-server-manual Print replay server command line and wait for user to + start it manually. Useful for running under debugger. + MTR will wait for socket and manage server lifecycle. 
start Only initialize and start the servers, using the startup settings for the first specified test case Example: diff --git a/mysql-test/suite/merge/merge.test b/mysql-test/suite/merge/merge.test index 42f510d4d1ac0..e79ac62fde212 100644 --- a/mysql-test/suite/merge/merge.test +++ b/mysql-test/suite/merge/merge.test @@ -314,6 +314,7 @@ EXPLAIN SELECT * FROM t2 WHERE fileset_id = 2 AND file_code BETWEEN '0000000115' AND '0000000120' LIMIT 1; EXPLAIN SELECT * FROM t1 WHERE fileset_id = 2 AND file_code BETWEEN '0000000115' AND '0000000120' LIMIT 1; +--disable_replay next_query With MRG_MyISAM tables, one needs to know to INSERT into different table. EXPLAIN SELECT * FROM t2 WHERE fileset_id = 2 AND file_code = '0000000115' LIMIT 1; DROP TABLE t2, t1; diff --git a/sql/opt_context_store_replay.cc b/sql/opt_context_store_replay.cc index 9b4f40a67526a..7355b7d1f4a50 100644 --- a/sql/opt_context_store_replay.cc +++ b/sql/opt_context_store_replay.cc @@ -55,7 +55,6 @@ using namespace json_reader; "list_contexts": [ { "name": "table_name", - "num_of_records": n, "file_stat_records": n, "read_cost_io": n, "read_cost_cpu": n, @@ -145,7 +145,7 @@ class records_in_range_call_record : public Sql_alloc class table_context_for_store : public Sql_alloc { public: - /* full name of the table or view i.e db_name.{table|view}_name */ + /* Full name of the table or view i.e db_name.{table|view}_name */ char *name; size_t name_len; List mrr_list; @@ -165,18 +165,20 @@ ST_FIELD_INFO optimizer_context_capture_info[]= { static void append_full_table_name(const TABLE_LIST *tbl, String *buf); static int parse_check_obj_start_in_array(json_engine_t *je, String *err_buf, const char *err_msg); -static int parse_table_context(THD *thd, json_engine_t *je, String *err_buf, +static int parse_table_context(MEM_ROOT *mem_root, json_engine_t *je, + String *err_buf, table_context_for_replay *table_ctx); -static int parse_index_context(THD *thd, json_engine_t *je, String 
*err_buf, +static int parse_index_context(MEM_ROOT *mem_root, json_engine_t *je, + String *err_buf, index_context_for_replay *index_ctx); -static int parse_range_context(THD *thd, json_engine_t *je, String *err_buf, +static int parse_range_context(MEM_ROOT *mem_root, json_engine_t *je, String *err_buf, Multi_range_read_const_call_record *range_ctx); -static int parse_index_read_cost_context(THD *thd, json_engine_t *je, +static int parse_index_read_cost_context(MEM_ROOT*, json_engine_t *je, String *err_buf, cost_index_read_call_record *out); -static bool parse_range_cost_estimate(THD *thd, json_engine_t *je, +static bool parse_range_cost_estimate(MEM_ROOT*, json_engine_t *je, String *err_buf, Cost_estimate *cost); -static int parse_records_in_range_context(THD *thd, json_engine_t *je, +static int parse_records_in_range_context(MEM_ROOT *mem_root, json_engine_t *je, String *err_buf, records_in_range_call_record *rir_ctx); @@ -243,11 +245,12 @@ static bool is_base_table(const TABLE_LIST *tbl) tbl->table->s->tmp_table != SYSTEM_TMP_TABLE); } -static void dump_range_stats(THD *thd, table_context_for_store *context, - Json_writer *ctx_writer) +static +void dump_mrr_info_calls(List *mrr_list, + Json_writer *ctx_writer) { Json_writer_array list_ranges_wrapper(ctx_writer, "list_ranges"); - List_iterator irc_li(context->mrr_list); + List_iterator irc_li(*mrr_list); while (Multi_range_read_const_call_record *irc= irc_li++) { Json_writer_object irc_wrapper(ctx_writer); @@ -256,23 +259,28 @@ static void dump_range_stats(THD *thd, table_context_for_store *context, Json_writer_array ranges_wrapper(ctx_writer, "ranges"); while (const char *range_str= rc_li++) { - ranges_wrapper.add(range_str, strlen(range_str)); + const String range_info(range_str, strlen(range_str), + system_charset_info); + StringBuffer<128> escaped_range_info; + json_escape_to_string(&range_info, &escaped_range_info); + ranges_wrapper.add(escaped_range_info.c_ptr_safe(), + escaped_range_info.length()); } 
ranges_wrapper.end(); irc_wrapper.add("num_rows", irc->rows); { - Json_writer_object cost_wrapper(ctx_writer, "cost"); - cost_wrapper.add("avg_io_cost", irc->cost.avg_io_cost); - cost_wrapper.add("cpu_cost", irc->cost.cpu_cost); - cost_wrapper.add("comp_cost", irc->cost.comp_cost); - cost_wrapper.add("copy_cost", irc->cost.copy_cost); - cost_wrapper.add("limit_cost", irc->cost.limit_cost); - cost_wrapper.add("setup_cost", irc->cost.setup_cost); - cost_wrapper.add("index_cost_io", irc->cost.index_cost.io); - cost_wrapper.add("index_cost_cpu", irc->cost.index_cost.cpu); - cost_wrapper.add("row_cost_io", irc->cost.row_cost.io); - cost_wrapper.add("row_cost_cpu", irc->cost.row_cost.cpu); + Json_writer_object obj(ctx_writer, "cost"); + obj.add("avg_io_cost", irc->cost.avg_io_cost); + obj.add("cpu_cost", irc->cost.cpu_cost); + obj.add("comp_cost", irc->cost.comp_cost); + obj.add("copy_cost", irc->cost.copy_cost); + obj.add("limit_cost", irc->cost.limit_cost); + obj.add("setup_cost", irc->cost.setup_cost); + obj.add("index_cost_io", irc->cost.index_cost.io); + obj.add("index_cost_cpu", irc->cost.index_cost.cpu); + obj.add("row_cost_io", irc->cost.row_cost.io); + obj.add("row_cost_cpu", irc->cost.row_cost.cpu); } irc_wrapper.add("max_index_blocks", irc->max_index_blocks); @@ -280,46 +288,48 @@ static void dump_range_stats(THD *thd, table_context_for_store *context, } } -static void dump_index_read_cost(THD *thd, table_context_for_store *context, - Json_writer *ctx_writer) +static void dump_index_read_calls(List *irc_list, + Json_writer *ctx_writer) { Json_writer_array list_irc_wrapper(ctx_writer, "list_index_read_costs"); - List_iterator irc_li(context->irc_list); + List_iterator irc_li(*irc_list); while (cost_index_read_call_record *irc= irc_li++) { - Json_writer_object irc_wrapper(ctx_writer); - irc_wrapper.add("key_number", irc->key); - irc_wrapper.add("num_records", irc->records); - irc_wrapper.add("eq_ref", irc->eq_ref ? 
1 : 0); - irc_wrapper.add("index_cost_io", irc->cost.index_cost.io); - irc_wrapper.add("index_cost_cpu", irc->cost.index_cost.cpu); - irc_wrapper.add("row_cost_io", irc->cost.row_cost.io); - irc_wrapper.add("row_cost_cpu", irc->cost.row_cost.cpu); - irc_wrapper.add("max_index_blocks", irc->cost.max_index_blocks); - irc_wrapper.add("max_row_blocks", irc->cost.max_row_blocks); - irc_wrapper.add("copy_cost", irc->cost.copy_cost); + Json_writer_object obj(ctx_writer); + obj.add("key_number", irc->key); + obj.add("num_records", irc->records); + obj.add("eq_ref", irc->eq_ref ? 1 : 0); + obj.add("index_cost_io", irc->cost.index_cost.io); + obj.add("index_cost_cpu", irc->cost.index_cost.cpu); + obj.add("row_cost_io", irc->cost.row_cost.io); + obj.add("row_cost_cpu", irc->cost.row_cost.cpu); + obj.add("max_index_blocks", irc->cost.max_index_blocks); + obj.add("max_row_blocks", irc->cost.max_row_blocks); + obj.add("copy_cost", irc->cost.copy_cost); } } -static void dump_records_in_range(THD *thd, table_context_for_store *context, - Json_writer *ctx_writer) +static +void dump_records_in_range_calls(List *rir_list, + Json_writer *ctx_writer) { Json_writer_array list_irc_wrapper(ctx_writer, "list_records_in_range"); - List_iterator rir_li(context->rir_list); + List_iterator rir_li(*rir_list); while (records_in_range_call_record *rir= rir_li++) { - Json_writer_object rir_wrapper(ctx_writer); - rir_wrapper.add("key_number", rir->keynr); - rir_wrapper.add("min_key", rir->min_key); - rir_wrapper.add("max_key", rir->max_key); - rir_wrapper.add("num_records", rir->records); + Json_writer_object obj(ctx_writer); + obj.add("key_number", rir->keynr); + obj.add("min_key", rir->min_key); + obj.add("max_key", rir->max_key); + obj.add("num_records", rir->records); } } -static void dump_index_stats(THD *thd, uchar *tbl_name, size_t tbl_name_len, - Json_writer *ctx_writer) +static +void dump_recorded_table_calls(THD *thd, uchar *tbl_name, size_t tbl_name_len, + Json_writer *ctx_writer) { 
table_context_for_store *table_context= thd->opt_ctx_recorder->search(tbl_name, tbl_name_len); @@ -327,9 +337,9 @@ static void dump_index_stats(THD *thd, uchar *tbl_name, size_t tbl_name_len, if (!table_context) return; - dump_range_stats(thd, table_context, ctx_writer); - dump_index_read_cost(thd, table_context, ctx_writer); - dump_records_in_range(thd, table_context, ctx_writer); + dump_mrr_info_calls(&table_context->mrr_list, ctx_writer); + dump_index_read_calls(&table_context->irc_list, ctx_writer); + dump_records_in_range_calls(&table_context->rir_list, ctx_writer); } /* @@ -349,7 +359,6 @@ static void dump_table_stats(THD *thd, TABLE_LIST *tbl, uchar *tbl_name, ha_rows records= table->stat_records(); IO_AND_CPU_COST cost= table->file->ha_scan_time(records); ctx_wrapper.add("name", (char *) tbl_name, tbl_name_len); - ctx_wrapper.add("num_of_records", records); ctx_wrapper.add("file_stat_records", table->file->stats.records); ctx_wrapper.add("read_cost_io", cost.io); ctx_wrapper.add("read_cost_cpu", cost.cpu); @@ -365,13 +374,11 @@ static void dump_table_stats(THD *thd, TABLE_LIST *tbl, uchar *tbl_name, index_wrapper.add("index_name", key->name); Json_writer_array rpk_wrapper(ctx_writer, "rec_per_key"); for (uint i= 0; i < num_key_parts; i++) - { rpk_wrapper.add(key->actual_rec_per_key(i)); - } rpk_wrapper.end(); } indexes_wrapper.end(); - dump_index_stats(thd, tbl_name, tbl_name_len, ctx_writer); + dump_recorded_table_calls(thd, tbl_name, tbl_name_len, ctx_writer); } static void create_view_def(THD *thd, TABLE_LIST *table, String *name, @@ -613,9 +620,23 @@ bool store_optimizer_context(THD *thd) } if (tbl->is_view()) + { + StringBuffer<64> drop; + drop.append(STRING_WITH_LEN("DROP VIEW IF EXISTS ")); + drop.append(full_tbl_name); + drop.append(STRING_WITH_LEN(";\n")); + sql_script.append(drop); + create_view_def(thd, tbl, &full_tbl_name, &ddl); + } else { + StringBuffer<64> drop; + drop.append(STRING_WITH_LEN("DROP TABLE IF EXISTS ")); + 
drop.append(full_tbl_name); + drop.append(STRING_WITH_LEN(";\n")); + sql_script.append(drop); + if (show_create_table(thd, tbl, &ddl, NULL, WITH_DB_NAME)) { res= true; @@ -678,6 +699,17 @@ bool store_optimizer_context(THD *thd) sql_script.append(STRING_WITH_LEN("\n\';#opt_context_ends\n\n")); sql_script.append(SET_REPLAY_CONTEXT_VAR, strlen(SET_REPLAY_CONTEXT_VAR)); sql_script.append(STRING_WITH_LEN(";\n\n")); + + sql_script.append(STRING_WITH_LEN("SET character_set_client=")); + sql_script.append(thd->variables.character_set_client->cs_name); + sql_script.append(STRING_WITH_LEN(";\n")); + + sql_script.append(STRING_WITH_LEN("SET NAMES ")); + sql_script.append(thd->variables.collation_connection->cs_name); + sql_script.append(STRING_WITH_LEN(" COLLATE ")); + sql_script.append(thd->variables.collation_connection->coll_name); + sql_script.append(STRING_WITH_LEN(";\n")); + sql_script.append(thd->query(), thd->query_length()); sql_script.append(STRING_WITH_LEN(";\n\n")); sql_script.append(STRING_WITH_LEN("set optimizer_replay_context='';\n\n")); @@ -756,7 +788,10 @@ void Optimizer_context_recorder::record_multi_range_read_info_const( ha_rows max_index_blocks, ha_rows max_row_blocks) { - /* Do not record calls made by "Range checked for each record" */ + /* + Do not record calls that are made at execution phase by "Range checked + for each record" + */ if (current_thd->lex->explain->is_query_plan_ready()) return; @@ -784,12 +819,7 @@ void Optimizer_context_recorder::record_multi_range_read_info_const( range_ctx->range_list.push_back(range_str, mem_root); } - /* - Store the ranges of every index of the table into the - table context. 
- */ table_context_for_store *table_ctx= get_table_context(tbl); - if (unlikely(!table_ctx)) return; // OOM @@ -870,12 +900,23 @@ void Optimizer_context_recorder::record_records_in_range( table_ctx->rir_list.push_back(rec_in_range_ctx, mem_root); } -void Optimizer_context_recorder::record_const_table_row(TABLE *tbl) +void Optimizer_context_recorder::record_table_row(TABLE *tbl, int row_index) { StringBuffer<512> output(&my_charset_utf8mb4_bin); + + /* + The table could have fields that do not have a default value + but are not in the table->read_set. + The record doesn't have values for those. + Use a relaxed sql_mode setting so that REPLACE INTO doesn't fail. + */ + output.append( + STRING_WITH_LEN("SET STATEMENT sql_mode=" + "REPLACE(REPLACE(@@sql_mode,'STRICT_ALL_TABLES','')," + "'STRICT_TRANS_TABLES','') FOR\n")); output.append(STRING_WITH_LEN("REPLACE INTO ")); append_full_table_name(tbl->pos_in_table_list, &output); - format_and_store_row(tbl, tbl->record[1], true, " VALUES ", false, output); + format_and_store_row(tbl, tbl->record[row_index], true, " VALUES ", false, output); table_context_for_store *table_ctx= get_table_context(tbl->pos_in_table_list); @@ -921,10 +962,7 @@ class index_context_for_replay : public Sql_alloc class table_context_for_replay : public Sql_alloc { public: - /* - full name of the table or view - i.e db_name.[table/view]_name - */ + /* Full name of the table or view i.e db_name.{table|view}_name */ char *name; ha_rows total_rows; ha_rows file_stat_records; @@ -943,7 +981,7 @@ class table_context_for_replay : public Sql_alloc They are restored once the query that used replay json stats is done execution. */ -class Saved_Index_stats : public Sql_alloc +class Saved_index_stats : public Sql_alloc { public: KEY *key_info; @@ -958,120 +996,93 @@ class Saved_Index_stats : public Sql_alloc They are restored once the query that used replay json stats is done execution. 
*/ -class Saved_Table_stats : public Sql_alloc +class Saved_table_stats : public Sql_alloc { public: TABLE *table; - ha_rows original_rows; // this is table->used_stat_records - /* saved table->file->stats.records */ - ha_rows original_file_stats_records; - List saved_indexstats_list; -}; - -/* - Extends the Read_value interface to read a container of values, - for eg: array of numbers or strings, an object with several fields, etc... -*/ -class Read_container_value : public Read_value -{ -private: - int before_read(json_engine_t *je, const char *value_name, String *err_buf) - { - if (json_scan_next(je) || je->state != JST_ARRAY_START) - { - err_buf->append(STRING_WITH_LEN("error reading ")); - err_buf->append(value_name, strlen(value_name)); - err_buf->append(STRING_WITH_LEN(" value")); - return 1; - } - return 0; - } - - int after_read(int rc) { return rc > 0; } - -public: - bool read_value(json_engine_t *je, const char *value_name, - String *err_buf) override - { - int rc= before_read(je, value_name, err_buf); - if (rc <= 0) - { - rc= read_container(je, err_buf); - } - return after_read(rc); - } - virtual int read_container(json_engine_t *je, String *err_buf)= 0; + /* + We do not restore table->file->stats.records, they are read from the + storage engine for every query anyway. 
+ */ + List saved_index_stats; }; +// psergey: Reads a JSON object class Read_range_cost_estimate : public Read_value { - THD *thd; + MEM_ROOT *mem_root; Cost_estimate *ptr; public: - Read_range_cost_estimate(THD *thd_arg, Cost_estimate *ptr_arg) - : thd(thd_arg), ptr(ptr_arg) - { - } + Read_range_cost_estimate(MEM_ROOT *mem_root_arg, Cost_estimate *ptr_arg) + : mem_root(mem_root_arg), ptr(ptr_arg) + {} bool read_value(json_engine_t *je, const char *value_name, String *err_buf) override { - return parse_range_cost_estimate(thd, je, err_buf, ptr); + return parse_range_cost_estimate(mem_root, je, err_buf, ptr); } }; -class Read_list_of_ha_rows : public Read_container_value +// psergey: reads an array of integers.. +class Read_list_of_ha_rows : public Read_array { - THD *thd; + MEM_ROOT *mem_root; List *list_values; public: - Read_list_of_ha_rows(THD *thd_arg, List *list_values_arg) - : thd(thd_arg), list_values(list_values_arg) + Read_list_of_ha_rows(MEM_ROOT *mem_root_arg, List *list_values_arg) + : mem_root(mem_root_arg), list_values(list_values_arg) { } - int read_container(json_engine_t *je, String *err_buf) override + int read_container(json_engine_t *je, const char *name, String *err_buf) + override { while (je->state != JST_ARRAY_END) { using json_reader::read_ha_rows_and_check_limit; ha_rows temp_value; - if (read_ha_rows_and_check_limit(je, "rec_per_key", err_buf, temp_value, + if (read_ha_rows_and_check_limit(je, name, err_buf, temp_value, ULONGLONG_MAX, "unsigned longlong", true)) { return 1; } - ha_rows *records_ptr= (ha_rows *) thd->alloc(sizeof(ha_rows)); + ha_rows *records_ptr= (ha_rows *) alloc_root(mem_root, sizeof(ha_rows)); if (unlikely(!records_ptr)) return 1; // OOM *records_ptr= temp_value; - if (list_values->push_back(records_ptr) || json_scan_next(je)) + if (list_values->push_back(records_ptr, mem_root) || json_scan_next(je)) return 1; } return 0; } }; -template class Read_list_of_context : public Read_container_value +/* + Read an array of 
JSON objects representing object T. + Create instances of T and collect them in a List +*/ +template class Read_array_into_list : public Read_array { - THD *thd; + MEM_ROOT *mem_root; List *list_ctx; - int (*parse_context_fn)(THD *, json_engine_t *, String *, T *); + int (*parse_context_fn)(MEM_ROOT *, json_engine_t *, String *, T *); public: - Read_list_of_context(THD *thd_arg, List *list_ctx_arg, - int (*parse_context_fn_arg)(THD *, json_engine_t *, + Read_array_into_list(MEM_ROOT *mem_root_arg, List *list_ctx_arg, + int (*parse_context_fn_arg)(MEM_ROOT *, json_engine_t *, String *, T *)) - : thd(thd_arg), list_ctx(list_ctx_arg), + : mem_root(mem_root_arg), list_ctx(list_ctx_arg), parse_context_fn(parse_context_fn_arg) { } - int read_container(json_engine_t *je, String *err_buf) override + int read_container(json_engine_t *je, const char *name, String *err_buf) + override { int rc; @@ -1082,50 +1093,16 @@ template class Read_list_of_context : public Read_container_value if (unlikely(!ctx)) return 1; // OOM - rc= parse_context_fn(thd, je, err_buf, ctx); + if ((rc= parse_context_fn(mem_root, je, err_buf, ctx))) + break; // Parse error - if (rc == 0) - { - if (list_ctx->push_back(ctx)) - return 1; // OOM - } - else - break; + if (list_ctx->push_back(ctx, mem_root)) + return 1; // OOM } - return rc; } }; -class Read_list_of_ranges : public Read_container_value -{ - THD *thd; - List *list_ranges; - -public: - Read_list_of_ranges(THD *thd_arg, List *list_ranges_arg) - : thd(thd_arg), list_ranges(list_ranges_arg) - { - } - int read_container(json_engine_t *je, String *err_buf) override - { - if (json_scan_next(je)) - return 1; - - while (je->state != JST_ARRAY_END) - { - char *value; - if (read_string(thd, je, "ranges", err_buf, value)) - return 1; - - list_ranges->push_back(value); - if (json_scan_next(je)) - return 1; - } - - return 0; - } -}; /* check if the next element being parsed is an object within an array. 
@@ -1193,36 +1170,34 @@ static int parse_context_obj_from_json_array(json_engine_t *je, 1 Parse Error -1 EOF */ -static int parse_table_context(THD *thd, json_engine_t *je, String *err_buf, +static int parse_table_context(MEM_ROOT *mem_root, json_engine_t *je, + String *err_buf, table_context_for_replay *table_ctx) { const char *err_msg= "Expected an object in the list_contexts array"; Read_named_member array[]= { - {"name", Read_string(thd, &table_ctx->name), false}, - {"num_of_records", - Read_non_neg_integer(&table_ctx->total_rows), - false}, + {"name", Read_string(mem_root, &table_ctx->name), false}, {"file_stat_records", Read_non_neg_integer(&table_ctx->file_stat_records), false}, {"read_cost_io", Read_double(&table_ctx->read_cost_io), false}, {"read_cost_cpu", Read_double(&table_ctx->read_cost_cpu), false}, {"indexes", - Read_list_of_context( - thd, &table_ctx->index_list, parse_index_context), + Read_array_into_list( + mem_root, &table_ctx->index_list, parse_index_context), true}, {"list_ranges", - Read_list_of_context( - thd, &table_ctx->ranges_list, parse_range_context), + Read_array_into_list( + mem_root, &table_ctx->ranges_list, parse_range_context), true}, {"list_index_read_costs", - Read_list_of_context( - thd, &table_ctx->irc_list, parse_index_read_cost_context), + Read_array_into_list( + mem_root, &table_ctx->irc_list, parse_index_read_cost_context), true}, {"list_records_in_range", - Read_list_of_context( - thd, &table_ctx->rir_list, parse_records_in_range_context), + Read_array_into_list( + mem_root, &table_ctx->rir_list, parse_records_in_range_context), true}, {NULL, Read_double(NULL), true}}; @@ -1242,14 +1217,15 @@ static int parse_table_context(THD *thd, json_engine_t *je, String *err_buf, 1 Parse Error -1 EOF */ -static int parse_index_context(THD *thd, json_engine_t *je, String *err_buf, +static int parse_index_context(MEM_ROOT *mem_root, json_engine_t *je, + String *err_buf, index_context_for_replay *index_ctx) { const char *err_msg= 
"Expected an object in the indexes array"; Read_named_member array[]= { - {"index_name", Read_string(thd, &index_ctx->idx_name), false}, - {"rec_per_key", Read_list_of_ha_rows(thd, &index_ctx->list_rec_per_key), + {"index_name", Read_string(mem_root, &index_ctx->idx_name), false}, + {"rec_per_key", Read_list_of_ha_rows(mem_root, &index_ctx->list_rec_per_key), false}, {NULL, Read_double(NULL), true}}; @@ -1269,18 +1245,18 @@ static int parse_index_context(THD *thd, json_engine_t *je, String *err_buf, 1 Parse Error -1 EOF */ -static int parse_range_context(THD *thd, json_engine_t *je, String *err_buf, +static int parse_range_context(MEM_ROOT *mem_root, json_engine_t *je, String *err_buf, Multi_range_read_const_call_record *out) { const char *err_msg= "Expected an object in the list_ranges array"; Read_named_member array[]= { - {"index_name", Read_string(thd, &out->idx_name), false}, - {"ranges", Read_list_of_ranges(thd, &out->range_list), false}, + {"index_name", Read_string(mem_root, &out->idx_name), false}, + {"ranges", Read_array_of_strings(mem_root, &out->range_list), false}, {"num_rows", Read_non_neg_integer(&out->rows), false}, - {"cost", Read_range_cost_estimate(thd, &out->cost), false}, + {"cost", Read_range_cost_estimate(mem_root, &out->cost), false}, {"max_index_blocks", Read_non_neg_integer(&out->max_index_blocks), false}, @@ -1303,7 +1279,7 @@ static int parse_range_context(THD *thd, json_engine_t *je, String *err_buf, 1 Parse Error -1 EOF */ -static bool parse_range_cost_estimate(THD *thd, json_engine_t *je, +static bool parse_range_cost_estimate(MEM_ROOT*, json_engine_t *je, String *err_buf, Cost_estimate *cost) { if (json_scan_next(je) || je->state != JST_OBJ_START) @@ -1342,7 +1318,7 @@ static bool parse_range_cost_estimate(THD *thd, json_engine_t *je, 1 Parse Error -1 EOF */ -static int parse_index_read_cost_context(THD *thd, json_engine_t *je, +static int parse_index_read_cost_context(MEM_ROOT* , json_engine_t *je, String *err_buf, 
cost_index_read_call_record *out) { @@ -1385,7 +1361,8 @@ static int parse_index_read_cost_context(THD *thd, json_engine_t *je, 1 Parse Error -1 EOF */ -static int parse_records_in_range_context(THD *thd, json_engine_t *je, +static int parse_records_in_range_context(MEM_ROOT *mem_root, + json_engine_t *je, String *err_buf, records_in_range_call_record *out) { @@ -1394,8 +1371,8 @@ static int parse_records_in_range_context(THD *thd, json_engine_t *je, Read_named_member array[]= { {"key_number", Read_non_neg_integer(&out->keynr), false}, - {"min_key", Read_string(thd, &out->min_key), false}, - {"max_key", Read_string(thd, &out->max_key), false}, + {"min_key", Read_string(mem_root, &out->min_key), false}, + {"max_key", Read_string(mem_root, &out->max_key), false}, {"num_records", Read_non_neg_integer(&out->records), false}, {NULL, Read_double(NULL), true}}; @@ -1632,80 +1609,79 @@ bool Optimizer_context_replay::infuse_index_read_cost(const TABLE *tbl, return true; } + /* @brief - Save the current stats of the table and its associated table. + Infuse saved table statistics for a given table. + Current table statistics are saved away to be restored later. + #records is not handled by this function, see infuse_table_rows(). 
*/ void Optimizer_context_replay::infuse_table_stats(TABLE *table) { if (!has_records() || !is_base_table(table->pos_in_table_list)) return; - Saved_Table_stats *saved_ts= new Saved_Table_stats(); + Saved_table_stats *saved_ts= new Saved_table_stats(); if (unlikely(!saved_ts)) return; // OOM saved_ts->table= table; - saved_ts->original_rows= table->used_stat_records; - saved_ts->original_file_stats_records= table->file->stats.records; if (saved_table_stats.push_back(saved_ts)) return; - if (!infuse_table_rows(table)) + KEY *key_info, *key_info_end; + for (key_info= table->key_info, key_info_end= key_info + table->s->keys; + key_info < key_info_end; key_info++) { - KEY *key_info, *key_info_end; - for (key_info= table->key_info, key_info_end= key_info + table->s->keys; - key_info < key_info_end; key_info++) - { - List *index_freq_list= - get_index_rec_per_key_list(table, key_info->name.str); + List *index_freq_list= + get_index_rec_per_key_list(table, key_info->name.str); - if (index_freq_list && !index_freq_list->is_empty()) - { - Saved_Index_stats *saved_is= new Saved_Index_stats(); + if (!index_freq_list || index_freq_list->is_empty()) + continue; - if (unlikely(!saved_is)) - return; // OOM + Saved_index_stats *saved_is= new Saved_index_stats(); - uint i= 0; - uint num_key_parts= key_info->user_defined_key_parts; - Index_statistics *original_read_stats= key_info->read_stats; - bool original_is_statistics_from_stat_tables= - key_info->is_statistics_from_stat_tables; - Index_statistics *new_read_stats= new Index_statistics(); + if (unlikely(!saved_is)) + return; // OOM - if (unlikely(!new_read_stats)) - return; // OOM + uint i= 0; + uint num_key_parts= key_info->user_defined_key_parts; + Index_statistics *original_read_stats= key_info->read_stats; + bool original_is_statistics_from_stat_tables= + key_info->is_statistics_from_stat_tables; + Index_statistics *new_read_stats= new Index_statistics(); - ulonglong *frequencies= - (ulonglong *) thd->alloc(sizeof(ulonglong) 
* num_key_parts); + if (unlikely(!new_read_stats)) + return; // OOM - if (unlikely(!frequencies)) - return; // OOM + ulonglong *frequencies= + (ulonglong *) thd->alloc(sizeof(ulonglong) * num_key_parts); - new_read_stats->init_avg_frequency(frequencies); - List_iterator li(*index_freq_list); - ha_rows *freq= li++; - key_info->read_stats= new_read_stats; + if (unlikely(!frequencies)) + return; // OOM - while (freq && i < num_key_parts) - { - DBUG_ASSERT(*freq > 0); - key_info->read_stats->set_avg_frequency(i, (double) *freq); - freq= li++; - i++; - } + new_read_stats->init_avg_frequency(frequencies); + List_iterator li(*index_freq_list); + ha_rows *freq= li++; + key_info->read_stats= new_read_stats; - key_info->is_statistics_from_stat_tables= true; - saved_is->key_info= key_info; - saved_is->original_is_statistics_from_stat_tables= - original_is_statistics_from_stat_tables; - saved_is->original_read_stats= original_read_stats; - saved_ts->saved_indexstats_list.push_back(saved_is); - } + while (freq && i < num_key_parts) + { + // Apparently this can be=0 for prefix indexes. 
+ //DBUG_ASSERT(*freq > 0); + key_info->read_stats->set_avg_frequency(i, (double) *freq); + freq= li++; + i++; } + + key_info->is_statistics_from_stat_tables= true; + saved_is->key_info= key_info; + saved_is->original_is_statistics_from_stat_tables= + original_is_statistics_from_stat_tables; + saved_is->original_read_stats= original_read_stats; + saved_ts->saved_index_stats.push_back(saved_is); } } @@ -1762,14 +1738,11 @@ bool Optimizer_context_replay::infuse_records_in_range( */ void Optimizer_context_replay::restore_modified_table_stats() { - List_iterator table_li(saved_table_stats); - while (Saved_Table_stats *saved_ts= table_li++) + List_iterator table_li(saved_table_stats); + while (Saved_table_stats *saved_ts= table_li++) { - saved_ts->table->used_stat_records= saved_ts->original_rows; - saved_ts->table->file->stats.records= saved_ts->original_file_stats_records; - - List_iterator index_li(saved_ts->saved_indexstats_list); - while (Saved_Index_stats *saved_is= index_li++) + List_iterator index_li(saved_ts->saved_index_stats); + while (Saved_index_stats *saved_is= index_li++) { KEY *key= saved_is->key_info; key->is_statistics_from_stat_tables= @@ -1805,8 +1778,8 @@ bool Optimizer_context_replay::parse() LEX_CSTRING varname= {var_name, strlen(var_name)}; Read_named_member array[]= {{"list_contexts", - Read_list_of_context( - thd, &ctx_list, parse_table_context), + Read_array_into_list( + thd->mem_root, &ctx_list, parse_table_context), false}, {NULL, Read_double(NULL), true}}; @@ -1875,7 +1848,6 @@ void Optimizer_context_replay::dbug_print_read_stats() DBUG_PRINT("info", ("New Table Context")); DBUG_PRINT("info", ("-----------------")); DBUG_PRINT("info", ("name: %s", tbl_ctx->name)); - DBUG_PRINT("info", ("num_of_records: %llx", tbl_ctx->total_rows)); DBUG_PRINT("info", ("file_stat_records: %llx", tbl_ctx->file_stat_records)); @@ -1979,7 +1951,7 @@ bool Optimizer_context_replay::infuse_table_rows(TABLE *tbl) if (table_context_for_replay *tbl_ctx= 
find_table_context(tbl_name.c_ptr_safe())) { - tbl->used_stat_records= tbl_ctx->total_rows; + // Only infuse this one. table->used_stat_records are set by the SQL layer. tbl->file->stats.records= tbl_ctx->file_stat_records; return false; } diff --git a/sql/opt_context_store_replay.h b/sql/opt_context_store_replay.h index 8700e7e4c2b81..916e56356d17f 100644 --- a/sql/opt_context_store_replay.h +++ b/sql/opt_context_store_replay.h @@ -62,12 +62,22 @@ class Optimizer_context_recorder const KEY_PART_INFO *key_part, uint keynr, const key_range *min_range, const key_range *max_range, ha_rows records); - void record_const_table_row(TABLE *tbl); + void record_const_table_row(TABLE *tbl) + { + /* use table->record[1] */ + record_table_row(tbl, 1); + } + void record_current_table_row(TABLE *tbl) + { + /* use table->record[0] */ + record_table_row(tbl, 0); + } bool has_records(); table_context_for_store *search(uchar *tbl_name, size_t tbl_name_len); private: + void record_table_row(TABLE *tbl, int row_index); MEM_ROOT *mem_root; /* Hash table mapping "dbname.table_name" -> pointer to @@ -93,23 +103,29 @@ bool store_optimizer_context(THD *thd); class table_context_for_replay; class index_context_for_replay; -class Saved_Table_stats; +class Saved_table_stats; void init_optimizer_context_replay_if_needed(THD *thd); /* - This class stores the parsed optimizer context information - and then infuses read stats into the optimizer + Optimizer context that's loaded and can be used for replay. - Optimizer Context information that we've read from a JSON document. + - When this object is created, it will parse the context JSON document + from a user variable pointed by @@optimizer_replay_context. - The optimizer can use infuse_XXX() methods to get the saved values. + - The optimizer checks thd->opt_ctx_replay; if it is present, it will call + + thd->opt_ctx_replay->infuse_XXX() + + to "infuse" the statistics records from the context. 
*/ + class Optimizer_context_replay { public: Optimizer_context_replay(THD *thd); + bool infuse_table_rows(TABLE *tbl); /* Save table's statistics and replace it with data from the context. */ void infuse_table_stats(TABLE *table); /* Restore the saved statistics back (to be done at query end) */ @@ -132,25 +148,26 @@ class Optimizer_context_replay const key_range *max_range, ha_rows *records); private: + bool infuse_table_rows(const TABLE *tbl, ha_rows *rows); + THD *thd; /* Statistics that tables had before we've replaced them with values from the saved context. To be used to restore the original values. */ - List saved_table_stats; + List saved_table_stats; List ctx_list; bool parse(); bool has_records(); -#ifndef DBUG_OFF - void dbug_print_read_stats(); -#endif List *get_index_rec_per_key_list(const TABLE *tbl, const char *idx_name); void store_range_contexts(const TABLE *tbl, const char *idx_name, List *list); - bool infuse_table_rows(TABLE *tbl); table_context_for_replay *find_table_context(const char *name); +#ifndef DBUG_OFF + void dbug_print_read_stats(); +#endif }; /* diff --git a/sql/opt_sum.cc b/sql/opt_sum.cc index 6d5bf8c6f487f..9ab9dcfaabe09 100644 --- a/sql/opt_sum.cc +++ b/sql/opt_sum.cc @@ -52,6 +52,7 @@ #include "sql_priv.h" #include "key.h" // key_cmp_if_same #include "sql_select.h" +#include "opt_context_store_replay.h" static bool find_key_for_maxmin(bool max_fl, TABLE_REF *ref, Field* field, COND *cond, uint *range_fl, @@ -434,6 +435,12 @@ int opt_sum_query(THD *thd, reckey_in_range(is_max, &ref, item_field->field, conds, range_fl, prefix_len)) error= HA_ERR_KEY_NOT_FOUND; + + if (!error) + { + if (Optimizer_context_recorder *rec= thd->opt_ctx_recorder) + rec->record_current_table_row(table); + } if (!table->const_table) { table->file->ha_end_keyread(); diff --git a/sql/sql_json_lib.cc b/sql/sql_json_lib.cc index 952d7eed0e806..8648a17ce6042 100644 --- a/sql/sql_json_lib.cc +++ b/sql/sql_json_lib.cc @@ -12,9 +12,8 @@ Foundation, Inc., 51 
Franklin Street, Fifth Floor, Boston, MA 02110-1335 USA */ +#include "my_global.h" #include "sql_json_lib.h" -#include "mysql.h" -#include "sql_select.h" namespace json_reader { @@ -73,23 +72,13 @@ bool read_double(json_engine_t *je, const char *read_elem_key, String *err_buf, false OK true Parse Error */ -bool read_string(THD *thd, json_engine_t *je, const char *read_elem_key, +bool read_string(MEM_ROOT *mem_root, json_engine_t *je, const char *read_elem_key, String *err_buf, char *&value) { if (check_reading_of_elem_key(je, read_elem_key, err_buf)) return true; - StringBuffer<128> val_buf; - if (json_unescape_to_string((const char *) je->value, je->value_len, - &val_buf)) - { - err_buf->append(STRING_WITH_LEN("un-escaping error of ")); - err_buf->append(read_elem_key, strlen(read_elem_key)); - err_buf->append(STRING_WITH_LEN(" element")); - return true; - } - - value= strdup_root(thd->mem_root, val_buf.c_ptr_safe()); + value= strmake_root(mem_root, (const char *) je->value, je->value_len); return false; } diff --git a/sql/sql_json_lib.h b/sql/sql_json_lib.h index 2c8a7c1c2446a..817d74d1a5aae 100644 --- a/sql/sql_json_lib.h +++ b/sql/sql_json_lib.h @@ -15,10 +15,9 @@ #ifndef SQL_JSON_LIB #define SQL_JSON_LIB -#include "my_global.h" #include "json_lib.h" #include "sql_string.h" -#include "table.h" +#include "mysqld.h" /* system_charset_info */ /* A syntax sugar interface to json_string_t @@ -126,7 +125,7 @@ int json_read_object(json_engine_t *je, Read_named_member *members, namespace json_reader { /* Things to use with Read_named_member */ -bool read_string(THD *thd, json_engine_t *je, const char *read_elem_key, +bool read_string(MEM_ROOT *mem_root, json_engine_t *je, const char *read_elem_key, String *err_buf, char *&value); bool read_double(json_engine_t *je, const char *read_elem_key, String *err_buf, @@ -152,14 +151,16 @@ class Read_value class Read_string : public Read_value { char **ptr; - THD *thd; /* The string will be allocated on thd->mem_root */ + 
MEM_ROOT *mem_root; /* The string will be allocated on thd->mem_root */ public: - Read_string(THD *thd_arg, char **ptr_arg) : ptr(ptr_arg), thd(thd_arg) {} + Read_string(MEM_ROOT *mem_root_arg, char **ptr_arg) : + ptr(ptr_arg), mem_root(mem_root_arg) + {} bool read_value(json_engine_t *je, const char *value_name, String *err_buf) override { - return read_string(thd, je, value_name, err_buf, *ptr); + return read_string(mem_root, je, value_name, err_buf, *ptr); } }; @@ -230,6 +231,73 @@ class Read_non_neg_integer : public Read_value } }; +/* + Extends the Read_value interface to read an array of elements. + + This class will just start reading the JSON array. + Reading of array members is done by descendant classes in read_container(). +*/ +class Read_array : public Read_value +{ + int before_read(json_engine_t *je, const char *value_name, String *err_buf) + { + if (json_scan_next(je) || je->state != JST_ARRAY_START) + { + err_buf->append(STRING_WITH_LEN("error reading ")); + err_buf->append(value_name, strlen(value_name)); + err_buf->append(STRING_WITH_LEN(" value")); + return 1; + } + return 0; + } + + int after_read(int rc) { return rc > 0; } + +public: + bool read_value(json_engine_t *je, const char *value_name, + String *err_buf) override + { + int rc= before_read(je, value_name, err_buf); + if (rc <= 0) + rc= read_container(je, value_name, err_buf); + return after_read(rc); + } + virtual int read_container(json_engine_t *je, const char *name, + String *err_buf)= 0; +}; + +// +// psergey-todo: why cannot this use Read_array_into_list ? 
+class Read_array_of_strings : public Read_array +{ + MEM_ROOT *mem_root; + List *strings; + +public: + Read_array_of_strings(MEM_ROOT *mem_root_arg, List *list_ranges_arg) + : mem_root(mem_root_arg), strings(list_ranges_arg) + {} + int read_container(json_engine_t *je, const char *name, String *err_buf) + override + { + if (json_scan_next(je)) + return 1; + + while (je->state != JST_ARRAY_END) + { + char *value; + if (read_string(mem_root, je, name, err_buf, value)) + return 1; + + strings->push_back(value, mem_root); + if (json_scan_next(je)) + return 1; + } + + return 0; + } +}; + }; /* namespace json_reader */ #endif diff --git a/sql/sql_statistics.cc b/sql/sql_statistics.cc index 981c772d83925..a0088b5ae3779 100644 --- a/sql/sql_statistics.cc +++ b/sql/sql_statistics.cc @@ -4131,6 +4131,12 @@ void set_statistics_for_table(THD *thd, TABLE *table) TABLE_STATISTICS_CB *stats_cb= table->s->stats_cb; Table_statistics *read_stats= stats_cb ? stats_cb->table_stats : 0; + /* + Infuse the table->file->stats.records. + We will set table->used_stat_records right below. + */ + if (thd->opt_ctx_replay) + thd->opt_ctx_replay->infuse_table_rows(table); /* The MAX below is to ensure that we don't return 0 rows for a table if it not guaranteed to be empty. @@ -4188,6 +4194,10 @@ void set_statistics_for_table(THD *thd, TABLE *table) } } } + /* + TODO: infuse_table_rows() call above is done before the EITS-based + adjustments. Should this be moved up, too? 
+ */ if (thd->opt_ctx_replay) thd->opt_ctx_replay->infuse_table_stats(table); } diff --git a/unittest/sql/CMakeLists.txt b/unittest/sql/CMakeLists.txt index 224ce83bae1d2..e32becc781b77 100644 --- a/unittest/sql/CMakeLists.txt +++ b/unittest/sql/CMakeLists.txt @@ -34,3 +34,9 @@ MY_ADD_TEST(mf_iocache) ADD_EXECUTABLE(my_json_writer-t my_json_writer-t.cc dummy_builtins.cc) TARGET_LINK_LIBRARIES(my_json_writer-t sql mytap) MY_ADD_TEST(my_json_writer) + +# Json writer needs String which needs sql library +ADD_EXECUTABLE(json_reader-t json_reader-t.cc dummy_builtins.cc) +TARGET_LINK_LIBRARIES(json_reader-t sql mytap) +MY_ADD_TEST(jons_reader) + diff --git a/unittest/sql/json_reader-t.cc b/unittest/sql/json_reader-t.cc new file mode 100644 index 0000000000000..72e6d1670957b --- /dev/null +++ b/unittest/sql/json_reader-t.cc @@ -0,0 +1,78 @@ +/* + Copyright (c) 2026, MariaDB Corporation. + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
+ + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335 USA */ + +#include +#include +#include +#include +#include + +/* + Unit tests for json_read_object() +*/ + +#include "../sql/sql_json_lib.h" +#include "../sql/sql_json_lib.cc" + +using namespace json_reader; + +int main(int args, char **argv) +{ + MY_INIT(argv[0]); + + plan(NO_PLAN); + + diag("Testing json_read_object checks"); + MEM_ROOT alloc; + json_engine_t je; + int rc; + init_alloc_root(0, &alloc, 32768, 0, 0); + mem_root_dynamic_array_init(&alloc, 0, &je.stack, + sizeof(int), NULL, JSON_DEPTH_DEFAULT, + JSON_DEPTH_INC, MYF(0)); + system_charset_info= &my_charset_utf8mb3_bin; + const char *js_doc="{ \"str_val\": \"abc\", \"double_val\": 1234.5 }"; + json_scan_start(&je, &my_charset_utf8mb3_bin, (const uchar *) js_doc, + (const uchar *) js_doc + strlen(js_doc)); + + char *parsed_name; + double parsed_dbl; + Read_named_member array[]= { + {"str_val", Read_string(&alloc, &parsed_name), false}, + {"double_val", Read_double(&parsed_dbl), false}, + {NULL, Read_double(NULL), false } + }; + String err_buf; + + rc= json_read_object(&je, array, &err_buf); + ok(!rc, "Basic object read"); + free_root(&alloc, 0); +#if 0 + + { + Json_writer w; + w.start_object(); + w.add_member("foo"); + w.end_object(); + ok(w.invalid_json, "Started a name but didn't add a value"); + } + +#endif + diag("Done"); + + my_end(MY_CHECK_ERROR); + return exit_status(); +}