From 5abdd20ca99101b772f1628dbd399c99294cda8e Mon Sep 17 00:00:00 2001 From: Olivier Bertrand Date: Tue, 30 Jan 2018 15:43:20 +0100 Subject: [PATCH 001/139] - Use delayed load for the MongoDB C Drive on Windows modified: storage/connect/CMakeLists.txt modified: storage/connect/cmgoconn.cpp modified: storage/connect/ha_connect.cc - Add FORCE to the connect_type_conv enum values This will translate binary values to TYPE_STRING modified: storage/connect/checklvl.h modified: storage/connect/ha_connect.cc modified: storage/connect/odbconn.cpp - Change the connect_xtrace variable to from int to set modified: storage/connect/array.cpp modified: storage/connect/blkfil.cpp modified: storage/connect/block.h modified: storage/connect/cmgoconn.cpp modified: storage/connect/colblk.cpp modified: storage/connect/connect.cc modified: storage/connect/filamap.cpp modified: storage/connect/filamdbf.cpp modified: storage/connect/filamfix.cpp modified: storage/connect/filamgz.cpp modified: storage/connect/filamtxt.cpp modified: storage/connect/filamvct.cpp modified: storage/connect/filamzip.cpp modified: storage/connect/filter.cpp modified: storage/connect/global.h modified: storage/connect/ha_connect.cc modified: storage/connect/javaconn.cpp modified: storage/connect/jdbconn.cpp modified: storage/connect/jmgfam.cpp modified: storage/connect/jmgoconn.cpp modified: storage/connect/json.cpp modified: storage/connect/jsonudf.cpp modified: storage/connect/mongo.cpp modified: storage/connect/mycat.cc modified: storage/connect/myconn.cpp modified: storage/connect/odbconn.cpp modified: storage/connect/plgdbutl.cpp modified: storage/connect/plugutil.cpp modified: storage/connect/reldef.cpp modified: storage/connect/tabcol.cpp modified: storage/connect/tabdos.cpp modified: storage/connect/tabext.cpp modified: storage/connect/tabfix.cpp modified: storage/connect/tabfmt.cpp modified: storage/connect/tabjdbc.cpp modified: storage/connect/tabjson.cpp modified: storage/connect/table.cpp modified: 
storage/connect/tabmul.cpp modified: storage/connect/tabmysql.cpp modified: storage/connect/tabodbc.cpp modified: storage/connect/tabpivot.cpp modified: storage/connect/tabsys.cpp modified: storage/connect/tabtbl.cpp modified: storage/connect/tabutil.cpp modified: storage/connect/tabvct.cpp modified: storage/connect/tabwmi.cpp modified: storage/connect/tabxml.cpp modified: storage/connect/user_connect.cc modified: storage/connect/valblk.cpp modified: storage/connect/value.cpp modified: storage/connect/xindex.cpp - Restore connect_enable_mongo variable (but undocumented) modified: storage/connect/ha_connect.cc modified: storage/connect/mycat.cc modified: storage/connect/mysql-test/connect/r/json_java_2.result modified: storage/connect/mysql-test/connect/r/json_java_3.result modified: storage/connect/mysql-test/connect/r/json_mongo_c.result modified: storage/connect/mysql-test/connect/r/mongo_c.result modified: storage/connect/mysql-test/connect/r/mongo_java_2.result modified: storage/connect/mysql-test/connect/r/mongo_java_3.result modified: storage/connect/mysql-test/connect/r/tbl_thread.result modified: storage/connect/mysql-test/connect/t/mongo.inc modified: storage/connect/mysql-test/connect/t/mongo_test.inc modified: storage/connect/mysql-test/connect/t/tbl_thread.test --- storage/connect/CMakeLists.txt | 7 + storage/connect/array.cpp | 20 +- storage/connect/blkfil.cpp | 8 +- storage/connect/block.h | 4 +- storage/connect/checklvl.h | 3 +- storage/connect/cmgoconn.cpp | 18 +- storage/connect/colblk.cpp | 8 +- storage/connect/connect.cc | 42 ++-- storage/connect/filamap.cpp | 20 +- storage/connect/filamdbf.cpp | 12 +- storage/connect/filamfix.cpp | 68 +++---- storage/connect/filamgz.cpp | 12 +- storage/connect/filamtxt.cpp | 60 +++--- storage/connect/filamvct.cpp | 162 ++++++++-------- storage/connect/filamzip.cpp | 4 +- storage/connect/filter.cpp | 32 ++-- storage/connect/global.h | 5 +- storage/connect/ha_connect.cc | 179 ++++++++++-------- 
storage/connect/javaconn.cpp | 6 +- storage/connect/jdbconn.cpp | 20 +- storage/connect/jmgfam.cpp | 2 +- storage/connect/jmgoconn.cpp | 14 +- storage/connect/json.cpp | 6 +- storage/connect/jsonudf.cpp | 22 +-- storage/connect/mongo.cpp | 4 +- storage/connect/mycat.cc | 44 +++-- storage/connect/myconn.cpp | 8 +- .../mysql-test/connect/r/json_java_2.result | 2 + .../mysql-test/connect/r/json_java_3.result | 2 + .../mysql-test/connect/r/json_mongo_c.result | 2 + .../mysql-test/connect/r/mongo_c.result | 2 + .../mysql-test/connect/r/mongo_java_2.result | 2 + .../mysql-test/connect/r/mongo_java_3.result | 2 + .../mysql-test/connect/r/tbl_thread.result | 6 +- .../connect/mysql-test/connect/t/mongo.inc | 4 +- .../mysql-test/connect/t/mongo_test.inc | 8 +- .../mysql-test/connect/t/tbl_thread.test | 6 +- storage/connect/odbconn.cpp | 97 +++++----- storage/connect/plgdbutl.cpp | 141 +++++++------- storage/connect/plugutil.cpp | 26 +-- storage/connect/reldef.cpp | 2 +- storage/connect/tabcol.cpp | 8 +- storage/connect/tabdos.cpp | 50 ++--- storage/connect/tabext.cpp | 6 +- storage/connect/tabfix.cpp | 8 +- storage/connect/tabfmt.cpp | 30 +-- storage/connect/tabjdbc.cpp | 22 +-- storage/connect/tabjson.cpp | 12 +- storage/connect/table.cpp | 16 +- storage/connect/tabmul.cpp | 22 +-- storage/connect/tabmysql.cpp | 22 +-- storage/connect/tabodbc.cpp | 18 +- storage/connect/tabpivot.cpp | 2 +- storage/connect/tabsys.cpp | 12 +- storage/connect/tabtbl.cpp | 41 ++-- storage/connect/tabutil.cpp | 8 +- storage/connect/tabvct.cpp | 8 +- storage/connect/tabwmi.cpp | 4 +- storage/connect/tabxml.cpp | 24 +-- storage/connect/user_connect.cc | 2 +- storage/connect/valblk.cpp | 2 +- storage/connect/value.cpp | 46 ++--- storage/connect/xindex.cpp | 88 ++++----- 63 files changed, 798 insertions(+), 745 deletions(-) diff --git a/storage/connect/CMakeLists.txt b/storage/connect/CMakeLists.txt index 3a1585e3e8f..9cc9d16ba4d 100644 --- a/storage/connect/CMakeLists.txt +++ 
b/storage/connect/CMakeLists.txt @@ -343,6 +343,13 @@ IF(NOT TARGET connect) RETURN() ENDIF() +IF(WIN32) + IF (libmongoc-1.0_FOUND) + SET_TARGET_PROPERTIES(connect PROPERTIES LINK_FLAGS + "/DELAYLOAD:libbson-1.0.dll /DELAYLOAD:libmongoc-1.0.dll") + ENDIF(libmongoc-1.0_FOUND) +ENDIF(WIN32) + # Install some extra files that belong to connect engine IF(WIN32) # install ha_connect.lib diff --git a/storage/connect/array.cpp b/storage/connect/array.cpp index 639edf63a1a..c779fcef816 100644 --- a/storage/connect/array.cpp +++ b/storage/connect/array.cpp @@ -82,7 +82,7 @@ PARRAY MakeValueArray(PGLOBAL g, PPARM pp) if ((valtyp = pp->Type) != TYPE_STRING) len = 1; - if (trace) + if (trace(1)) htrc("valtyp=%d len=%d\n", valtyp, len); /*********************************************************************/ @@ -287,7 +287,7 @@ bool ARRAY::AddValue(PGLOBAL g, PSZ strp) return true; } // endif Type - if (trace) + if (trace(1)) htrc(" adding string(%d): '%s'\n", Nval, strp); //Value->SetValue_psz(strp); @@ -306,7 +306,7 @@ bool ARRAY::AddValue(PGLOBAL g, void *p) return true; } // endif Type - if (trace) + if (trace(1)) htrc(" adding pointer(%d): %p\n", Nval, p); Vblp->SetValue((PSZ)p, Nval++); @@ -323,7 +323,7 @@ bool ARRAY::AddValue(PGLOBAL g, short n) return true; } // endif Type - if (trace) + if (trace(1)) htrc(" adding SHORT(%d): %hd\n", Nval, n); //Value->SetValue(n); @@ -342,7 +342,7 @@ bool ARRAY::AddValue(PGLOBAL g, int n) return true; } // endif Type - if (trace) + if (trace(1)) htrc(" adding int(%d): %d\n", Nval, n); //Value->SetValue(n); @@ -361,7 +361,7 @@ bool ARRAY::AddValue(PGLOBAL g, double d) return true; } // endif Type - if (trace) + if (trace(1)) htrc(" adding float(%d): %lf\n", Nval, d); Value->SetValue(d); @@ -380,7 +380,7 @@ bool ARRAY::AddValue(PGLOBAL g, PXOB xp) return true; } // endif Type - if (trace) + if (trace(1)) htrc(" adding (%d) from xp=%p\n", Nval, xp); //AddValue(xp->GetValue()); @@ -399,7 +399,7 @@ bool ARRAY::AddValue(PGLOBAL g, PVAL vp) 
return true; } // endif Type - if (trace) + if (trace(1)) htrc(" adding (%d) from vp=%p\n", Nval, vp); Vblp->SetValue(vp, Nval++); @@ -990,7 +990,7 @@ PSZ ARRAY::MakeArrayList(PGLOBAL g) len += strlen(tp); } // enfor i - if (trace) + if (trace(1)) htrc("Arraylist: len=%d\n", len); p = (char *)PlugSubAlloc(g, NULL, len); @@ -1003,7 +1003,7 @@ PSZ ARRAY::MakeArrayList(PGLOBAL g) strcat(p, (++i == Nval) ? ")" : ","); } // enfor i - if (trace) + if (trace(1)) htrc("Arraylist: newlen=%d\n", strlen(p)); return p; diff --git a/storage/connect/blkfil.cpp b/storage/connect/blkfil.cpp index 802095f2f82..76c9d09ac93 100644 --- a/storage/connect/blkfil.cpp +++ b/storage/connect/blkfil.cpp @@ -241,7 +241,7 @@ int BLKFILARI::BlockEval(PGLOBAL) break; } // endswitch Opc - if (trace) + if (trace(1)) htrc("BlockEval: op=%d n=%d rc=%d\n", Opc, n, Result); return Result; @@ -338,7 +338,7 @@ int BLKFILAR2::BlockEval(PGLOBAL) break; } // endswitch Opc - if (trace) + if (trace(1)) htrc("BlockEval2: op=%d n=%d rc=%d\n", Opc, n, Result); return Result; @@ -474,7 +474,7 @@ int BLKFILMR2::BlockEval(PGLOBAL) break; } // endswitch Opc - if (trace) + if (trace(1)) htrc("BlockEval2: op=%d n=%d rc=%d\n", Opc, n, Result); return Result; @@ -567,7 +567,7 @@ int BLKSPCARI::BlockEval(PGLOBAL) break; } // endswitch Opc - if (trace) + if (trace(1)) htrc("BlockEval: op=%d n=%d rc=%d\n", Opc, n, Result); return Result; diff --git a/storage/connect/block.h b/storage/connect/block.h index 8ac7be80988..737c74c1293 100644 --- a/storage/connect/block.h +++ b/storage/connect/block.h @@ -38,8 +38,8 @@ typedef class BLOCK *PBLOCK; class DllExport BLOCK { public: void * operator new(size_t size, PGLOBAL g, void *p = NULL) { -// if (trace > 3) -// htrc("New BLOCK: size=%d g=%p p=%p\n", size, g, p); + if (trace(256)) + htrc("New BLOCK: size=%d g=%p p=%p\n", size, g, p); return (PlugSubAlloc(g, p, size)); } // end of new diff --git a/storage/connect/checklvl.h b/storage/connect/checklvl.h index 
0c234dfb8b8..9029e616bb6 100644 --- a/storage/connect/checklvl.h +++ b/storage/connect/checklvl.h @@ -45,6 +45,7 @@ enum USETEMP {TMP_NO = 0, /* Never */ /***********************************************************************/ enum TYPCONV {TPC_NO = 0, /* Never */ TPC_YES = 1, /* Always */ - TPC_SKIP = 2}; /* Skip TEXT columns */ + TPC_FORCE = 2, /* Also convert BLOBs */ + TPC_SKIP = 3}; /* Skip TEXT columns */ #endif // _CHKLVL_DEFINED_ diff --git a/storage/connect/cmgoconn.cpp b/storage/connect/cmgoconn.cpp index 44fac56137f..edee1874b97 100644 --- a/storage/connect/cmgoconn.cpp +++ b/storage/connect/cmgoconn.cpp @@ -280,7 +280,7 @@ bool CMgoConn::MakeCursor(PGLOBAL g) all = true; if (Pcg->Pipe) { - if (trace) + if (trace(1)) htrc("Pipeline: %s\n", options); p = strrchr(options, ']'); @@ -330,7 +330,7 @@ bool CMgoConn::MakeCursor(PGLOBAL g) *(char*)p = ']'; // Restore Colist for discovery p = s->GetStr(); - if (trace) + if (trace(33)) htrc("New Pipeline: %s\n", p); Query = bson_new_from_json((const uint8_t *)p, -1, &Error); @@ -350,7 +350,7 @@ bool CMgoConn::MakeCursor(PGLOBAL g) } else { if (Pcg->Filter || filp) { - if (trace) { + if (trace(1)) { if (Pcg->Filter) htrc("Filter: %s\n", Pcg->Filter); @@ -377,7 +377,7 @@ bool CMgoConn::MakeCursor(PGLOBAL g) tp->SetFilter(NULL); // Not needed anymore } // endif To_Filter - if (trace) + if (trace(33)) htrc("selector: %s\n", s->GetStr()); s->Resize(s->GetLength() + 1); @@ -393,7 +393,7 @@ bool CMgoConn::MakeCursor(PGLOBAL g) if (!all) { if (options && *options) { - if (trace) + if (trace(1)) htrc("options=%s\n", options); p = options; @@ -450,10 +450,10 @@ int CMgoConn::ReadNext(PGLOBAL g) if (!Cursor && MakeCursor(g)) { rc = RC_FX; } else if (mongoc_cursor_next(Cursor, &Document)) { - if (trace > 1) { + if (trace(512)) { bson_iter_t iter; ShowDocument(&iter, Document, ""); - } else if (trace == 1) + } else if (trace(1)) htrc("%s\n", GetDocument(g)); } else if (mongoc_cursor_error(Cursor, &Error)) { @@ -589,7 +589,7 
@@ int CMgoConn::Write(PGLOBAL g) if (DocWrite(g, Fpc)) return RC_FX; - if (trace) { + if (trace(2)) { char *str = bson_as_json(Fpc->Child, NULL); htrc("Inserting: %s\n", str); bson_free(str); @@ -623,7 +623,7 @@ int CMgoConn::Write(PGLOBAL g) } // endif iter if (b) { - if (trace) { + if (trace(2)) { char *str = bson_as_json(query, NULL); htrc("update query: %s\n", str); bson_free(str); diff --git a/storage/connect/colblk.cpp b/storage/connect/colblk.cpp index 2ffe51d2009..fa5c29aff74 100644 --- a/storage/connect/colblk.cpp +++ b/storage/connect/colblk.cpp @@ -76,7 +76,7 @@ COLBLK::COLBLK(PCOL col1, PTDB tdbp) //To_Orig = col1; To_Tdb = tdbp; - if (trace > 1) + if (trace(2)) htrc(" copying COLBLK %s from %p to %p\n", Name, col1, this); if (tdbp) @@ -115,7 +115,7 @@ bool COLBLK::SetFormat(PGLOBAL, FORMAT& fmt) { fmt = Format; - if (trace > 1) + if (trace(2)) htrc("COLBLK: %p format=%c(%d,%d)\n", this, *fmt.Type, fmt.Length, fmt.Prec); @@ -128,7 +128,7 @@ bool COLBLK::SetFormat(PGLOBAL, FORMAT& fmt) /***********************************************************************/ bool COLBLK::Eval(PGLOBAL g) { - if (trace > 1) + if (trace(2)) htrc("Col Eval: %s status=%.4X\n", Name, Status); if (!GetStatus(BUF_READ)) { @@ -165,7 +165,7 @@ bool COLBLK::InitValue(PGLOBAL g) AddStatus(BUF_READY); Value->SetNullable(Nullable); - if (trace > 1) + if (trace(2)) htrc(" colp=%p type=%d value=%p coluse=%.4X status=%.4X\n", this, Buf_Type, Value, ColUse, Status); diff --git a/storage/connect/connect.cc b/storage/connect/connect.cc index 6b7236a2f83..4ce382ca024 100644 --- a/storage/connect/connect.cc +++ b/storage/connect/connect.cc @@ -91,7 +91,7 @@ void CntEndDB(PGLOBAL g) free(dbuserp); - if (trace) + if (trace(1)) htrc("CntEndDB: Freeing Dup\n"); g->Activityp->Aptr = NULL; @@ -111,14 +111,14 @@ bool CntCheckDB(PGLOBAL g, PHC handler, const char *pathname) bool rc= false; PDBUSER dbuserp= PlgGetUser(g); - if (trace) { + if (trace(1)) { printf("CntCheckDB: dbuserp=%p\n", dbuserp); } 
// endif trace if (!dbuserp || !handler) return true; - if (trace) + if (trace(1)) printf("cat=%p oldhandler=%p newhandler=%p\n", dbuserp->Catalog, (dbuserp->Catalog) ? ((MYCAT*)dbuserp->Catalog)->GetHandler() : NULL, handler); @@ -149,7 +149,7 @@ bool CntCheckDB(PGLOBAL g, PHC handler, const char *pathname) /*********************************************************************/ sprintf(g->Message, MSG(DATABASE_LOADED), "???"); - if (trace) + if (trace(1)) printf("msg=%s\n", g->Message); return rc; @@ -197,7 +197,7 @@ PTDB CntGetTDB(PGLOBAL g, LPCSTR name, MODE mode, PHC h) PDBUSER dup = PlgGetUser(g); volatile PCATLG cat = (dup) ? dup->Catalog : NULL; // Safe over throw - if (trace) + if (trace(1)) printf("CntGetTDB: name=%s mode=%d cat=%p\n", name, mode, cat); if (!cat) @@ -207,7 +207,7 @@ PTDB CntGetTDB(PGLOBAL g, LPCSTR name, MODE mode, PHC h) // Get table object from the catalog tabp = new(g) XTAB(name); - if (trace) + if (trace(1)) printf("CntGetTDB: tabp=%p\n", tabp); // Perhaps this should be made thread safe @@ -217,13 +217,13 @@ PTDB CntGetTDB(PGLOBAL g, LPCSTR name, MODE mode, PHC h) printf("CntGetTDB: %s\n", g->Message); } catch (int n) { - if (trace) + if (trace(1)) htrc("Exception %d: %s\n", n, g->Message); } catch (const char *msg) { strcpy(g->Message, msg); } // end catch - if (trace) + if (trace(1)) printf("Returning tdbp=%p mode=%d\n", tdbp, mode); return tdbp; @@ -242,7 +242,7 @@ bool CntOpenTable(PGLOBAL g, PTDB tdbp, MODE mode, char *c1, char *c2, //PCOLUMN cp; PDBUSER dup= PlgGetUser(g); - if (trace) + if (trace(1)) printf("CntOpenTable: tdbp=%p mode=%d\n", tdbp, mode); if (!tdbp) { @@ -259,7 +259,7 @@ bool CntOpenTable(PGLOBAL g, PTDB tdbp, MODE mode, char *c1, char *c2, } else for (p = c1; *p; p += n) { // Allocate only used column blocks - if (trace) + if (trace(1)) printf("Allocating column %s\n", p); g->Message[0] = 0; // To check whether ColDB made an error message @@ -324,7 +324,7 @@ bool CntOpenTable(PGLOBAL g, PTDB tdbp, MODE mode, 
char *c1, char *c2, tdbp->SetSetCols(tdbp->GetColumns()); // Now do open the physical table - if (trace) + if (trace(1)) printf("Opening table %s in mode %d tdbp=%p\n", tdbp->GetName(), mode, tdbp); @@ -340,7 +340,7 @@ bool CntOpenTable(PGLOBAL g, PTDB tdbp, MODE mode, char *c1, char *c2, } // endif del - if (trace) + if (trace(1)) printf("About to open the table: tdbp=%p\n", tdbp); if (mode != MODE_ANY && mode != MODE_ALTER) { @@ -355,7 +355,7 @@ bool CntOpenTable(PGLOBAL g, PTDB tdbp, MODE mode, char *c1, char *c2, rcop = false; } catch (int n) { - if (trace) + if (trace(1)) htrc("Exception %d: %s\n", n, g->Message); } catch (const char *msg) { strcpy(g->Message, msg); @@ -398,7 +398,7 @@ RCODE EvalColumns(PGLOBAL g, PTDB tdbp, bool reset, bool mrr) } // endfor colp } catch (int n) { - if (trace) + if (trace(1)) printf("Error %d reading columns: %s\n", n, g->Message); rc = RC_FX; @@ -548,7 +548,7 @@ int CntCloseTable(PGLOBAL g, PTDB tdbp, bool nox, bool abort) return rc; } // endif !USE_OPEN - if (trace) + if (trace(1)) printf("CntCloseTable: tdbp=%p mode=%d nox=%d abort=%d\n", tdbp, tdbp->GetMode(), nox, abort); @@ -578,11 +578,11 @@ int CntCloseTable(PGLOBAL g, PTDB tdbp, bool nox, bool abort) tdbp->CloseDB(g); tdbp->SetAbort(false); - if (trace > 1) + if (trace(2)) printf("Table %s closed\n", tdbp->GetName()); if (!nox && tdbp->GetMode() != MODE_READ && tdbp->GetMode() != MODE_ANY) { - if (trace > 1) + if (trace(2)) printf("About to reset opt\n"); if (!tdbp->IsRemote()) { @@ -602,7 +602,7 @@ int CntCloseTable(PGLOBAL g, PTDB tdbp, bool nox, bool abort) rc = RC_FX; } // end catch - if (trace > 1) + if (trace(2)) htrc("Done rc=%d\n", rc); return (rc == RC_OK || rc == RC_INFO) ? 
0 : rc; @@ -921,7 +921,7 @@ int CntIndexRange(PGLOBAL g, PTDB ptdb, const uchar* *key, uint *len, valp->SetBinValue((void*)p); #endif // !WORDS_BIGENDIAN - if (trace) { + if (trace(1)) { char bf[32]; printf("i=%d n=%d key=%s\n", i, n, valp->GetCharString(bf)); } // endif trace @@ -943,7 +943,7 @@ int CntIndexRange(PGLOBAL g, PTDB ptdb, const uchar* *key, uint *len, xbp->SetNval(n); - if (trace) + if (trace(1)) printf("xbp=%p Nval=%d i=%d incl=%d\n", xbp, n, i, incl[i]); k[i]= xbp->Range(g, i + 1, incl[i]); @@ -952,7 +952,7 @@ int CntIndexRange(PGLOBAL g, PTDB ptdb, const uchar* *key, uint *len, } // endfor i - if (trace) + if (trace(1)) printf("k1=%d k0=%d\n", k[1], k[0]); return k[1] - k[0]; diff --git a/storage/connect/filamap.cpp b/storage/connect/filamap.cpp index 84dff422db7..956a70578f5 100644 --- a/storage/connect/filamap.cpp +++ b/storage/connect/filamap.cpp @@ -90,7 +90,7 @@ int MAPFAM::GetFileLength(PGLOBAL g) len = (To_Fb && To_Fb->Count) ? To_Fb->Length : TXTFAM::GetFileLength(g); - if (trace) + if (trace(1)) htrc("Mapped file length=%d\n", len); return len; @@ -128,7 +128,7 @@ bool MAPFAM::OpenTableFile(PGLOBAL g) && fp->Count && fp->Mode == mode) break; - if (trace) + if (trace(1)) htrc("Mapping file, fp=%p\n", fp); } else @@ -166,7 +166,7 @@ bool MAPFAM::OpenTableFile(PGLOBAL g) sprintf(g->Message, MSG(OPEN_MODE_ERROR), "map", (int) rc, filename); - if (trace) + if (trace(1)) htrc("CreateFileMap: %s\n", g->Message); return (mode == MODE_READ && rc == ENOENT) @@ -227,7 +227,7 @@ bool MAPFAM::OpenTableFile(PGLOBAL g) Fpos = Mempos = Memory; Top = Memory + len; - if (trace) + if (trace(1)) htrc("fp=%p count=%d MapView=%p len=%d Top=%p\n", fp, fp->Count, Memory, len, Top); @@ -407,7 +407,7 @@ int MAPFAM::DeleteRecords(PGLOBAL g, int irc) { int n; - if (trace) + if (trace(1)) htrc("MAP DeleteDB: irc=%d mempos=%p tobuf=%p Tpos=%p Spos=%p\n", irc, Mempos, To_Buf, Tpos, Spos); @@ -417,7 +417,7 @@ int MAPFAM::DeleteRecords(PGLOBAL g, int irc) 
/*******************************************************************/ Fpos = Top; - if (trace) + if (trace(1)) htrc("Fpos placed at file top=%p\n", Fpos); } // endif irc @@ -435,7 +435,7 @@ int MAPFAM::DeleteRecords(PGLOBAL g, int irc) memmove(Tpos, Spos, n); Tpos += n; - if (trace) + if (trace(1)) htrc("move %d bytes\n", n); } // endif n @@ -443,7 +443,7 @@ int MAPFAM::DeleteRecords(PGLOBAL g, int irc) if (irc == RC_OK) { Spos = Mempos; // New start position - if (trace) + if (trace(1)) htrc("after: Tpos=%p Spos=%p\n", Tpos, Spos); } else if (To_Fb) { // Can be NULL for deleted files @@ -473,7 +473,7 @@ int MAPFAM::DeleteRecords(PGLOBAL g, int irc) return RC_FX; } // endif - if (trace) + if (trace(1)) htrc("done, Tpos=%p newsize=%d drc=%d\n", Tpos, n, drc); if (!SetEndOfFile(fp->Handle)) { @@ -511,7 +511,7 @@ void MAPFAM::CloseTableFile(PGLOBAL g, bool) PlugCloseFile(g, To_Fb); //To_Fb = NULL; // To get correct file size in Cardinality - if (trace) + if (trace(1)) htrc("MAP Close: closing %s count=%d\n", To_File, (To_Fb) ? 
To_Fb->Count : 0); diff --git a/storage/connect/filamdbf.cpp b/storage/connect/filamdbf.cpp index 44abd962c56..893e3da0d46 100644 --- a/storage/connect/filamdbf.cpp +++ b/storage/connect/filamdbf.cpp @@ -203,7 +203,7 @@ PQRYRES DBFColumns(PGLOBAL g, PCSZ dp, PCSZ fn, bool info) PQRYRES qrp; PCOLRES crp; - if (trace) + if (trace(1)) htrc("DBFColumns: File %s\n", SVP(fn)); if (!info) { @@ -245,7 +245,7 @@ PQRYRES DBFColumns(PGLOBAL g, PCSZ dp, PCSZ fn, bool info) return qrp; } // endif info - if (trace) { + if (trace(1)) { htrc("Structure of %s\n", filename); htrc("headlen=%hd reclen=%hd degree=%d\n", mainhead.Headlen(), mainhead.Reclen(), fields); @@ -271,7 +271,7 @@ PQRYRES DBFColumns(PGLOBAL g, PCSZ dp, PCSZ fn, bool info) } else len = thisfield.Length; - if (trace) + if (trace(1)) htrc("%-11s %c %6ld %3d %2d %3d %3d\n", thisfield.Name, thisfield.Type, thisfield.Offset, len, thisfield.Decimals, thisfield.Setfield, thisfield.Mdxfield); @@ -522,14 +522,14 @@ bool DBFFAM::OpenTableFile(PGLOBAL g) PlugSetPath(filename, To_File, Tdbp->GetPath()); if (!(Stream = PlugOpenFile(g, filename, opmode))) { - if (trace) + if (trace(1)) htrc("%s\n", g->Message); return (mode == MODE_READ && errno == ENOENT) ? 
PushWarning(g, Tdbp) : true; } // endif Stream - if (trace) + if (trace(1)) htrc("File %s is open in mode %s\n", filename, opmode); To_Fb = dbuserp->Openlist; // Keep track of File block @@ -938,7 +938,7 @@ void DBFFAM::CloseTableFile(PGLOBAL g, bool abort) rc = PlugCloseFile(g, To_Fb); fin: - if (trace) + if (trace(1)) htrc("DBF CloseTableFile: closing %s mode=%d wrc=%d rc=%d\n", To_File, mode, wrc, rc); diff --git a/storage/connect/filamfix.cpp b/storage/connect/filamfix.cpp index 1d6194b154d..0a98ec5b54a 100644 --- a/storage/connect/filamfix.cpp +++ b/storage/connect/filamfix.cpp @@ -322,7 +322,7 @@ int FIXFAM::ReadBuffer(PGLOBAL g) return RC_FX; } // endif fseek - if (trace > 1) + if (trace(2)) htrc("File position is now %d\n", ftell(Stream)); if (Padded) @@ -344,7 +344,7 @@ int FIXFAM::ReadBuffer(PGLOBAL g) sprintf(g->Message, MSG(READ_ERROR), To_File, strerror(errno)); #endif - if (trace) + if (trace(1)) htrc("%s\n", g->Message); return RC_FX; @@ -361,7 +361,7 @@ int FIXFAM::ReadBuffer(PGLOBAL g) /***********************************************************************/ int FIXFAM::WriteBuffer(PGLOBAL g) { - if (trace > 1) + if (trace(2)) htrc("FIX WriteDB: Mode=%d buf=%p line=%p Nrec=%d Rbuf=%d CurNum=%d\n", Tdbp->GetMode(), To_Buf, Tdbp->GetLine(), Nrec, Rbuf, CurNum); @@ -374,7 +374,7 @@ int FIXFAM::WriteBuffer(PGLOBAL g) return RC_OK; // We write only full blocks } // endif CurNum - if (trace > 1) + if (trace(2)) htrc(" First line is '%.*s'\n", Lrecl - 2, To_Buf); // Now start the writing process. @@ -388,7 +388,7 @@ int FIXFAM::WriteBuffer(PGLOBAL g) CurNum = 0; Tdbp->SetLine(To_Buf); - if (trace > 1) + if (trace(2)) htrc("write done\n"); } else { // Mode == MODE_UPDATE @@ -431,7 +431,7 @@ int FIXFAM::DeleteRecords(PGLOBAL g, int irc) /* file, and at the end erase all trailing records. */ /* This will be experimented. 
*/ /*********************************************************************/ - if (trace > 1) + if (trace(2)) htrc("DOS DeleteDB: rc=%d UseTemp=%d Fpos=%d Tpos=%d Spos=%d\n", irc, UseTemp, Fpos, Tpos, Spos); @@ -441,7 +441,7 @@ int FIXFAM::DeleteRecords(PGLOBAL g, int irc) /*******************************************************************/ Fpos = Tdbp->Cardinality(g); - if (trace > 1) + if (trace(2)) htrc("Fpos placed at file end=%d\n", Fpos); } else // Fpos is the deleted line position @@ -491,7 +491,7 @@ int FIXFAM::DeleteRecords(PGLOBAL g, int irc) OldBlk = -2; // To force fseek to be executed on next block } // endif moved - if (trace > 1) + if (trace(2)) htrc("after: Tpos=%d Spos=%d\n", Tpos, Spos); } else { @@ -540,7 +540,7 @@ int FIXFAM::DeleteRecords(PGLOBAL g, int irc) close(h); - if (trace > 1) + if (trace(2)) htrc("done, h=%d irc=%d\n", h, irc); } // endif UseTemp @@ -572,7 +572,7 @@ bool FIXFAM::MoveIntermediateLines(PGLOBAL g, bool *b) req = (size_t)MY_MIN(n, Dbflen); len = fread(DelBuf, Lrecl, req, Stream); - if (trace > 1) + if (trace(2)) htrc("after read req=%d len=%d\n", req, len); if (len != req) { @@ -591,13 +591,13 @@ bool FIXFAM::MoveIntermediateLines(PGLOBAL g, bool *b) return true; } // endif - if (trace > 1) + if (trace(2)) htrc("after write pos=%d\n", ftell(Stream)); Tpos += (int)req; Spos += (int)req; - if (trace > 1) + if (trace(2)) htrc("loop: Tpos=%d Spos=%d\n", Tpos, Spos); *b = true; @@ -648,7 +648,7 @@ void FIXFAM::CloseTableFile(PGLOBAL g, bool abort) rc = PlugCloseFile(g, To_Fb); fin: - if (trace) + if (trace(1)) htrc("FIX CloseTableFile: closing %s mode=%d wrc=%d rc=%d\n", To_File, mode, wrc, rc); @@ -718,7 +718,7 @@ int BGXFAM::BigRead(PGLOBAL g __attribute__((unused)), DWORD nbr, drc, len = (DWORD)req; bool brc = ReadFile(h, inbuf, len, &nbr, NULL); - if (trace > 1) + if (trace(2)) htrc("after read req=%d brc=%d nbr=%d\n", req, brc, nbr); if (!brc) { @@ -730,7 +730,7 @@ int BGXFAM::BigRead(PGLOBAL g __attribute__((unused)), 
(LPTSTR)buf, sizeof(buf), NULL); sprintf(g->Message, MSG(READ_ERROR), To_File, buf); - if (trace > 1) + if (trace(2)) htrc("BIGREAD: %s\n", g->Message); rc = -1; @@ -757,7 +757,7 @@ bool BGXFAM::BigWrite(PGLOBAL g, HANDLE h, void *inbuf, int req) DWORD nbw, drc, len = (DWORD)req; bool brc = WriteFile(h, inbuf, len, &nbw, NULL); - if (trace > 1) + if (trace(2)) htrc("after write req=%d brc=%d nbw=%d\n", req, brc, nbw); if (!brc || nbw != len) { @@ -775,7 +775,7 @@ bool BGXFAM::BigWrite(PGLOBAL g, HANDLE h, void *inbuf, int req) sprintf(g->Message, MSG(WRITE_STRERROR), fn, buf); - if (trace > 1) + if (trace(2)) htrc("BIGWRITE: nbw=%d len=%d errno=%d %s\n", nbw, len, drc, g->Message); @@ -790,7 +790,7 @@ bool BGXFAM::BigWrite(PGLOBAL g, HANDLE h, void *inbuf, int req) sprintf(g->Message, MSG(WRITE_STRERROR), fn, strerror(errno)); - if (trace > 1) + if (trace(2)) htrc("BIGWRITE: nbw=%d len=%d errno=%d %s\n", nbw, len, errno, g->Message); @@ -828,7 +828,7 @@ bool BGXFAM::OpenTableFile(PGLOBAL g) PlugSetPath(filename, To_File, Tdbp->GetPath()); - if (trace) + if (trace(1)) htrc("OpenTableFile: filename=%s mode=%d\n", filename, mode); #if defined(__WIN__) @@ -888,7 +888,7 @@ bool BGXFAM::OpenTableFile(PGLOBAL g) } else rc = 0; - if (trace > 1) + if (trace(2)) htrc(" rc=%d access=%p share=%p creation=%d handle=%p fn=%s\n", rc, access, share, creation, Hfile, filename); @@ -942,7 +942,7 @@ bool BGXFAM::OpenTableFile(PGLOBAL g) } else rc = 0; - if (trace > 1) + if (trace(2)) htrc(" rc=%d oflag=%p tmode=%p handle=%p fn=%s\n", rc, oflag, tmode, Hfile, filename); @@ -1026,11 +1026,11 @@ int BGXFAM::Cardinality(PGLOBAL g) if (Hfile == INVALID_HANDLE_VALUE) { int h = open64(filename, O_RDONLY, 0); - if (trace) + if (trace(1)) htrc(" h=%d\n", h); if (h == INVALID_HANDLE_VALUE) { - if (trace) + if (trace(1)) htrc(" errno=%d ENOENT=%d\n", errno, ENOENT); if (errno != ENOENT) { @@ -1074,7 +1074,7 @@ int BGXFAM::Cardinality(PGLOBAL g) } else card = (int)(fsize / (BIGINT)Lrecl); // 
Fixed length file - if (trace) + if (trace(1)) htrc(" Computed max_K=%d fsize=%lf lrecl=%d\n", card, (double)fsize, Lrecl); @@ -1181,7 +1181,7 @@ int BGXFAM::ReadBuffer(PGLOBAL g) if (BigSeek(g, Hfile, (BIGINT)Fpos * (BIGINT)Lrecl)) return RC_FX; - if (trace > 1) + if (trace(2)) htrc("File position is now %d\n", Fpos); nbr = BigRead(g, Hfile, To_Buf, (Padded) ? Blksize : Lrecl * Nrec); @@ -1205,7 +1205,7 @@ int BGXFAM::ReadBuffer(PGLOBAL g) /***********************************************************************/ int BGXFAM::WriteBuffer(PGLOBAL g) { - if (trace > 1) + if (trace(2)) htrc("BIG WriteDB: Mode=%d buf=%p line=%p Nrec=%d Rbuf=%d CurNum=%d\n", Tdbp->GetMode(), To_Buf, Tdbp->GetLine(), Nrec, Rbuf, CurNum); @@ -1218,7 +1218,7 @@ int BGXFAM::WriteBuffer(PGLOBAL g) return RC_OK; // We write only full blocks } // endif CurNum - if (trace > 1) + if (trace(2)) htrc(" First line is '%.*s'\n", Lrecl - 2, To_Buf); // Now start the writing process. @@ -1229,7 +1229,7 @@ int BGXFAM::WriteBuffer(PGLOBAL g) CurNum = 0; Tdbp->SetLine(To_Buf); - if (trace > 1) + if (trace(2)) htrc("write done\n"); } else { // Mode == MODE_UPDATE @@ -1270,7 +1270,7 @@ int BGXFAM::DeleteRecords(PGLOBAL g, int irc) /* file, and at the end erase all trailing records. */ /* This will be experimented. 
*/ /*********************************************************************/ - if (trace > 1) + if (trace(2)) htrc("BGX DeleteDB: rc=%d UseTemp=%d Fpos=%d Tpos=%d Spos=%d\n", irc, UseTemp, Fpos, Tpos, Spos); @@ -1280,7 +1280,7 @@ int BGXFAM::DeleteRecords(PGLOBAL g, int irc) /*******************************************************************/ Fpos = Tdbp->Cardinality(g); - if (trace > 1) + if (trace(2)) htrc("Fpos placed at file end=%d\n", Fpos); } else // Fpos is the deleted line position @@ -1318,7 +1318,7 @@ int BGXFAM::DeleteRecords(PGLOBAL g, int irc) return RC_FX; if (irc == RC_OK) { - if (trace) + if (trace(1)) assert(Spos == Fpos); Spos++; // New start position is on next line @@ -1330,7 +1330,7 @@ int BGXFAM::DeleteRecords(PGLOBAL g, int irc) OldBlk = -2; // To force fseek to be executed on next block } // endif moved - if (trace > 1) + if (trace(2)) htrc("after: Tpos=%d Spos=%d\n", Tpos, Spos); } else if (irc != RC_OK) { @@ -1459,7 +1459,7 @@ bool BGXFAM::MoveIntermediateLines(PGLOBAL g, bool *b) Tpos += (int)req; Spos += (int)req; - if (trace > 1) + if (trace(2)) htrc("loop: Tpos=%d Spos=%d\n", Tpos, Spos); *b = true; @@ -1510,7 +1510,7 @@ void BGXFAM::CloseTableFile(PGLOBAL g, bool abort) rc = PlugCloseFile(g, To_Fb); fin: - if (trace) + if (trace(1)) htrc("BGX CloseTableFile: closing %s mode=%d wrc=%d rc=%d\n", To_File, mode, wrc, rc); diff --git a/storage/connect/filamgz.cpp b/storage/connect/filamgz.cpp index df366ef15f9..fccda772fea 100644 --- a/storage/connect/filamgz.cpp +++ b/storage/connect/filamgz.cpp @@ -203,7 +203,7 @@ bool GZFAM::AllocateBuffer(PGLOBAL g) Buflen = Lrecl + 2; // Lrecl does not include CRLF //Buflen *= ((Mode == MODE_DELETE) ? 
DOS_BUFF_LEN : 1); NIY - if (trace) + if (trace(1)) htrc("SubAllocating a buffer of %d bytes\n", Buflen); To_Buf = (char*)PlugSubAlloc(g, NULL, Buflen); @@ -347,7 +347,7 @@ int GZFAM::ReadBuffer(PGLOBAL g) } else rc = Zerror(g); - if (trace > 1) + if (trace(2)) htrc(" Read: '%s' rc=%d\n", To_Buf, rc); return rc; @@ -389,7 +389,7 @@ void GZFAM::CloseTableFile(PGLOBAL, bool) { int rc = gzclose(Zfile); - if (trace) + if (trace(1)) htrc("GZ CloseDB: closing %s rc=%d\n", To_File, rc); Zfile = NULL; // So we can know whether table is open @@ -702,7 +702,7 @@ void ZBKFAM::CloseTableFile(PGLOBAL g, bool) } else rc = gzclose(Zfile); - if (trace) + if (trace(1)) htrc("GZ CloseDB: closing %s rc=%d\n", To_File, rc); Zfile = NULL; // So we can know whether table is open @@ -1382,7 +1382,7 @@ void ZLBFAM::CloseTableFile(PGLOBAL g, bool) } else rc = fclose(Stream); - if (trace) + if (trace(1)) htrc("ZLB CloseTableFile: closing %s mode=%d rc=%d\n", To_File, Tdbp->GetMode(), rc); @@ -1408,7 +1408,7 @@ void ZLBFAM::Rewind(void) rewind(Stream); - if (!(st = fread(Zlenp, sizeof(int), 1, Stream)) && trace) + if (!(st = fread(Zlenp, sizeof(int), 1, Stream)) && trace(1)) htrc("fread error %d in Rewind", errno); fseek(Stream, *Zlenp + sizeof(int), SEEK_SET); diff --git a/storage/connect/filamtxt.cpp b/storage/connect/filamtxt.cpp index c456ee9e9b7..e6c9a627df0 100644 --- a/storage/connect/filamtxt.cpp +++ b/storage/connect/filamtxt.cpp @@ -194,12 +194,12 @@ int TXTFAM::GetFileLength(PGLOBAL g) PlugSetPath(filename, To_File, Tdbp->GetPath()); h= global_open(g, MSGID_OPEN_MODE_STRERROR, filename, _O_RDONLY); - if (trace) + if (trace(1)) htrc("GetFileLength: fn=%s h=%d\n", filename, h); if (h == -1) { if (errno != ENOENT) { - if (trace) + if (trace(1)) htrc("%s\n", g->Message); len = -1; @@ -249,7 +249,7 @@ int TXTFAM::Cardinality(PGLOBAL g) } // endif Padded - if (trace) + if (trace(1)) htrc(" Computed max_K=%d Filen=%d lrecl=%d\n", card, len, Lrecl); @@ -390,7 +390,7 @@ int 
TXTFAM::UpdateSortedRows(PGLOBAL g) return RC_OK; err: - if (trace) + if (trace(1)) htrc("%s\n", g->Message); return RC_FX; @@ -439,7 +439,7 @@ int TXTFAM::DeleteSortedRows(PGLOBAL g) return RC_OK; err: - if (trace) + if (trace(1)) htrc("%s\n", g->Message); return RC_FX; @@ -512,7 +512,7 @@ int DOSFAM::GetFileLength(PGLOBAL g) if ((len = _filelength(_fileno(Stream))) < 0) sprintf(g->Message, MSG(FILELEN_ERROR), "_filelength", To_File); - if (trace) + if (trace(1)) htrc("File length=%d\n", len); return len; @@ -598,14 +598,14 @@ bool DOSFAM::OpenTableFile(PGLOBAL g) PlugSetPath(filename, To_File, Tdbp->GetPath()); if (!(Stream = PlugOpenFile(g, filename, opmode))) { - if (trace) + if (trace(1)) htrc("%s\n", g->Message); return (mode == MODE_READ && errno == ENOENT) ? PushWarning(g, Tdbp) : true; } // endif Stream - if (trace) + if (trace(1)) htrc("File %s open Stream=%p mode=%s\n", filename, Stream, opmode); To_Fb = dbuserp->Openlist; // Keep track of File block @@ -628,7 +628,7 @@ bool DOSFAM::AllocateBuffer(PGLOBAL g) // Lrecl does not include line ending Buflen = Lrecl + Ending + ((Bin) ? 
1 : 0) + 1; // Sergei - if (trace) + if (trace(1)) htrc("SubAllocating a buffer of %d bytes\n", Buflen); To_Buf = (char*)PlugSubAlloc(g, NULL, Buflen); @@ -768,7 +768,7 @@ int DOSFAM::ReadBuffer(PGLOBAL g) if (!Stream) return RC_EF; - if (trace > 1) + if (trace(2)) htrc("ReadBuffer: Tdbp=%p To_Line=%p Placed=%d\n", Tdbp, Tdbp->To_Line, Placed); @@ -782,7 +782,7 @@ int DOSFAM::ReadBuffer(PGLOBAL g) CurBlk = (int)Rows++; - if (trace > 1) + if (trace(2)) htrc("ReadBuffer: CurBlk=%d\n", CurBlk); /********************************************************************/ @@ -803,14 +803,14 @@ int DOSFAM::ReadBuffer(PGLOBAL g) } else Placed = false; - if (trace > 1) + if (trace(2)) htrc(" About to read: stream=%p To_Buf=%p Buflen=%d\n", Stream, To_Buf, Buflen); if (fgets(To_Buf, Buflen, Stream)) { p = To_Buf + strlen(To_Buf) - 1; - if (trace > 1) + if (trace(2)) htrc(" Read: To_Buf=%p p=%c\n", To_Buf, To_Buf, p); #if defined(__WIN__) @@ -838,7 +838,7 @@ int DOSFAM::ReadBuffer(PGLOBAL g) } else if (*p == '\n') *p = '\0'; // Eliminate ending new-line character - if (trace > 1) + if (trace(2)) htrc(" To_Buf='%s'\n", To_Buf); strcpy(Tdbp->To_Line, To_Buf); @@ -853,13 +853,13 @@ int DOSFAM::ReadBuffer(PGLOBAL g) sprintf(g->Message, MSG(READ_ERROR), To_File, strerror(0)); #endif - if (trace) + if (trace(1)) htrc("%s\n", g->Message); rc = RC_FX; } // endif's fgets - if (trace > 1) + if (trace(2)) htrc("ReadBuffer: rc=%d\n", rc); IsRead = true; @@ -895,7 +895,7 @@ int DOSFAM::WriteBuffer(PGLOBAL g) /*******************************************************************/ curpos = ftell(Stream); - if (trace) + if (trace(1)) htrc("Last : %d cur: %d\n", Fpos, curpos); if (UseTemp) { @@ -937,7 +937,7 @@ int DOSFAM::WriteBuffer(PGLOBAL g) return RC_FX; } // endif - if (trace) + if (trace(1)) htrc("write done\n"); return RC_OK; @@ -960,7 +960,7 @@ int DOSFAM::DeleteRecords(PGLOBAL g, int irc) /* file, and at the end erase all trailing records. */ /* This will be experimented. 
*/ /*********************************************************************/ - if (trace) + if (trace(1)) htrc( "DOS DeleteDB: rc=%d UseTemp=%d curpos=%d Fpos=%d Tpos=%d Spos=%d\n", irc, UseTemp, curpos, Fpos, Tpos, Spos); @@ -972,7 +972,7 @@ int DOSFAM::DeleteRecords(PGLOBAL g, int irc) fseek(Stream, 0, SEEK_END); Fpos = ftell(Stream); - if (trace) + if (trace(1)) htrc("Fpos placed at file end=%d\n", Fpos); } // endif irc @@ -1015,7 +1015,7 @@ int DOSFAM::DeleteRecords(PGLOBAL g, int irc) Spos = GetNextPos(); // New start position - if (trace) + if (trace(1)) htrc("after: Tpos=%d Spos=%d\n", Tpos, Spos); } else { @@ -1058,7 +1058,7 @@ int DOSFAM::DeleteRecords(PGLOBAL g, int irc) close(h); - if (trace) + if (trace(1)) htrc("done, h=%d irc=%d\n", h, irc); } // endif !UseTemp @@ -1083,7 +1083,7 @@ bool DOSFAM::OpenTempFile(PGLOBAL g) strcat(PlugRemoveType(tempname, tempname), ".t"); if (!(T_Stream = PlugOpenFile(g, tempname, "wb"))) { - if (trace) + if (trace(1)) htrc("%s\n", g->Message); rc = true; @@ -1112,7 +1112,7 @@ bool DOSFAM::MoveIntermediateLines(PGLOBAL g, bool *b) req = (size_t)MY_MIN(n, Dbflen); len = fread(DelBuf, 1, req, Stream); - if (trace) + if (trace(1)) htrc("after read req=%d len=%d\n", req, len); if (len != req) { @@ -1131,13 +1131,13 @@ bool DOSFAM::MoveIntermediateLines(PGLOBAL g, bool *b) return true; } // endif - if (trace) + if (trace(1)) htrc("after write pos=%d\n", ftell(Stream)); Tpos += (int)req; Spos += (int)req; - if (trace) + if (trace(1)) htrc("loop: Tpos=%d Spos=%d\n", Tpos, Spos); *b = true; @@ -1217,7 +1217,7 @@ void DOSFAM::CloseTableFile(PGLOBAL g, bool abort) } else { rc = PlugCloseFile(g, To_Fb); - if (trace) + if (trace(1)) htrc("DOS Close: closing %s rc=%d\n", To_File, rc); } // endif UseTemp @@ -1452,7 +1452,7 @@ int BLKFAM::ReadBuffer(PGLOBAL g) // Calculate the length of block to read BlkLen = BlkPos[CurBlk + 1] - BlkPos[CurBlk]; - if (trace) + if (trace(1)) htrc("File position is now %d\n", ftell(Stream)); // Read the 
entire next block @@ -1486,7 +1486,7 @@ int BLKFAM::ReadBuffer(PGLOBAL g) sprintf(g->Message, MSG(READ_ERROR), To_File, strerror(errno)); #endif - if (trace) + if (trace(1)) htrc("%s\n", g->Message); return RC_FX; @@ -1636,7 +1636,7 @@ void BLKFAM::CloseTableFile(PGLOBAL g, bool abort) rc = PlugCloseFile(g, To_Fb); - if (trace) + if (trace(1)) htrc("BLK CloseTableFile: closing %s mode=%d wrc=%d rc=%d\n", To_File, Tdbp->GetMode(), wrc, rc); diff --git a/storage/connect/filamvct.cpp b/storage/connect/filamvct.cpp index 871613cb4b4..244acfdc5c8 100755 --- a/storage/connect/filamvct.cpp +++ b/storage/connect/filamvct.cpp @@ -336,7 +336,7 @@ int VCTFAM::Cardinality(PGLOBAL g) else sprintf(g->Message, MSG(NOT_FIXED_LEN), To_File, len, clen); - if (trace) + if (trace(1)) htrc(" Computed max_K=%d Filen=%d Clen=%d\n", card, len, clen); } else @@ -469,14 +469,14 @@ bool VCTFAM::OpenTableFile(PGLOBAL g) PlugSetPath(filename, To_File, Tdbp->GetPath()); if (!(Stream = PlugOpenFile(g, filename, opmode))) { - if (trace) + if (trace(1)) htrc("%s\n", g->Message); return (mode == MODE_READ && errno == ENOENT) ? 
PushWarning(g, Tdbp) : true; } // endif Stream - if (trace) + if (trace(1)) htrc("File %s is open in mode %s\n", filename, opmode); To_Fb = dbuserp->Openlist; // Keep track of File block @@ -581,7 +581,7 @@ bool VCTFAM::InitInsert(PGLOBAL g) cp->ReadBlock(g); } catch (int n) { - if (trace) + if (trace(1)) htrc("Exception %d: %s\n", n, g->Message); rc = true; } catch (const char *msg) { @@ -652,7 +652,7 @@ int VCTFAM::ReadBuffer(PGLOBAL g) OldBlk = CurBlk; // Last block actually read } // endif oldblk - if (trace) + if (trace(1)) htrc(" Read: CurNum=%d CurBlk=%d rc=%d\n", CurNum, CurBlk, RC_OK); return rc; @@ -663,7 +663,7 @@ int VCTFAM::ReadBuffer(PGLOBAL g) /***********************************************************************/ int VCTFAM::WriteBuffer(PGLOBAL g) { - if (trace) + if (trace(1)) htrc("VCT WriteBuffer: R%d Mode=%d CurNum=%d CurBlk=%d\n", Tdbp->GetTdb_No(), Tdbp->GetMode(), CurNum, CurBlk); @@ -756,7 +756,7 @@ int VCTFAM::DeleteRecords(PGLOBAL g, int irc) { bool eof = false; - if (trace) + if (trace(1)) htrc("VCT DeleteDB: rc=%d UseTemp=%d Fpos=%d Tpos=%d Spos=%d\n", irc, UseTemp, Fpos, Tpos, Spos); @@ -766,7 +766,7 @@ int VCTFAM::DeleteRecords(PGLOBAL g, int irc) /*******************************************************************/ Fpos = (Block - 1) * Nrec + Last; - if (trace) + if (trace(1)) htrc("Fpos placed at file end=%d\n", Fpos); eof = UseTemp && !MaxBlk; @@ -807,7 +807,7 @@ int VCTFAM::DeleteRecords(PGLOBAL g, int irc) #endif Spos++; // New start position is on next line - if (trace) + if (trace(1)) htrc("after: Tpos=%d Spos=%d\n", Tpos, Spos); } else { @@ -856,7 +856,7 @@ int VCTFAM::DeleteRecords(PGLOBAL g, int irc) close(h); - if (trace) + if (trace(1)) htrc("done, h=%d irc=%d\n", h, irc); } else @@ -899,7 +899,7 @@ bool VCTFAM::OpenTempFile(PGLOBAL g) opmode = "wb"; if (!(T_Stream = PlugOpenFile(g, tempname, opmode))) { - if (trace) + if (trace(1)) htrc("%s\n", g->Message); rc = true; @@ -947,7 +947,7 @@ bool 
VCTFAM::MoveIntermediateLines(PGLOBAL g, bool *b) len = fread(To_Buf, Clens[i], req, Stream); - if (trace) + if (trace(1)) htrc("after read req=%d len=%d\n", req, len); if (len != req) { @@ -976,7 +976,7 @@ bool VCTFAM::MoveIntermediateLines(PGLOBAL g, bool *b) } // endif UseTemp - if (trace) + if (trace(1)) htrc("after write pos=%d\n", ftell(Stream)); } // endfor i @@ -1007,7 +1007,7 @@ bool VCTFAM::MoveIntermediateLines(PGLOBAL g, bool *b) } // endif UseTemp - if (trace) + if (trace(1)) htrc("loop: Tpos=%d Spos=%d\n", Tpos, Spos); } // endfor n @@ -1144,7 +1144,7 @@ void VCTFAM::CloseTableFile(PGLOBAL g, bool abort) if (!(UseTemp && T_Stream)) rc = PlugCloseFile(g, To_Fb); - if (trace) + if (trace(1)) htrc("VCT CloseTableFile: closing %s wrc=%d rc=%d\n", To_File, wrc, rc); @@ -1217,7 +1217,7 @@ bool VCTFAM::ReadBlock(PGLOBAL g, PVCTCOL colp) else // Blocked vector format len = Nrec * (colp->Deplac + Lrecl * CurBlk); - if (trace) + if (trace(1)) htrc("len=%d Nrec=%d Deplac=%d Lrecl=%d CurBlk=%d maxblk=%d\n", len, Nrec, colp->Deplac, Lrecl, CurBlk, MaxBlk); @@ -1236,13 +1236,13 @@ bool VCTFAM::ReadBlock(PGLOBAL g, PVCTCOL colp) sprintf(g->Message, MSG(READ_ERROR), To_File, strerror(errno)); - if (trace) + if (trace(1)) htrc(" Read error: %s\n", g->Message); return true; } // endif - if (trace) + if (trace(1)) num_read++; return false; @@ -1268,7 +1268,7 @@ bool VCTFAM::WriteBlock(PGLOBAL g, PVCTCOL colp) else // Old VCT format len = Nrec * (colp->Deplac + Lrecl * colp->ColBlk); - if (trace) + if (trace(1)) htrc("modif=%d len=%d Nrec=%d Deplac=%d Lrecl=%d colblk=%d\n", Modif, len, Nrec, colp->Deplac, Lrecl, colp->ColBlk); @@ -1287,7 +1287,7 @@ bool VCTFAM::WriteBlock(PGLOBAL g, PVCTCOL colp) sprintf(g->Message, MSG(WRITE_STRERROR), (UseTemp) ? 
To_Fbt->Fname : To_File, strerror(errno)); - if (trace) + if (trace(1)) htrc("Write error: %s\n", strerror(errno)); return true; @@ -1358,7 +1358,7 @@ bool VCMFAM::OpenTableFile(PGLOBAL g) && fp->Count && fp->Mode == mode) break; - if (trace) + if (trace(1)) htrc("Mapping VCM file, fp=%p cnt=%d\n", fp, fp->Count); } else @@ -1416,7 +1416,7 @@ bool VCMFAM::OpenTableFile(PGLOBAL g) sprintf(g->Message, MSG(OPEN_MODE_ERROR), "map", (int) rc, filename); - if (trace) + if (trace(1)) htrc("%s\n", g->Message); return (mode == MODE_READ && rc == ENOENT) @@ -1467,7 +1467,7 @@ bool VCMFAM::OpenTableFile(PGLOBAL g) To_Fb = fp; // Useful when closing - if (trace) + if (trace(1)) htrc("fp=%p count=%d MapView=%p len=%d Top=%p\n", fp, fp->Count, Memory, len); @@ -1551,7 +1551,7 @@ bool VCMFAM::InitInsert(PGLOBAL g) cp->ReadBlock(g); } catch (int n) { - if (trace) + if (trace(1)) htrc("Exception %d: %s\n", n, g->Message); rc = true; } catch (const char *msg) { @@ -1567,7 +1567,7 @@ bool VCMFAM::InitInsert(PGLOBAL g) /***********************************************************************/ int VCMFAM::WriteBuffer(PGLOBAL g) { - if (trace) + if (trace(1)) htrc("VCM WriteBuffer: R%d Mode=%d CurNum=%d CurBlk=%d\n", Tdbp->GetTdb_No(), Tdbp->GetMode(), CurNum, CurBlk); @@ -1608,7 +1608,7 @@ int VCMFAM::WriteBuffer(PGLOBAL g) /***********************************************************************/ int VCMFAM::DeleteRecords(PGLOBAL g, int irc) { - if (trace) + if (trace(1)) htrc("VCM DeleteDB: irc=%d tobuf=%p Tpos=%p Spos=%p\n", irc, To_Buf, Tpos, Spos); @@ -1618,7 +1618,7 @@ int VCMFAM::DeleteRecords(PGLOBAL g, int irc) /*******************************************************************/ Fpos = (Block - 1) * Nrec + Last; - if (trace) + if (trace(1)) htrc("Fpos placed at file top=%p\n", Fpos); } else // Fpos is the Deleted line position @@ -1636,7 +1636,7 @@ int VCMFAM::DeleteRecords(PGLOBAL g, int irc) if (irc == RC_OK) { Spos = Fpos + 1; // New start position - if (trace) + if 
(trace(1)) htrc("after: Tpos=%p Spos=%p\n", Tpos, Spos); } else { @@ -1680,7 +1680,7 @@ int VCMFAM::DeleteRecords(PGLOBAL g, int irc) return RC_FX; } // endif - if (trace) + if (trace(1)) htrc("done, Tpos=%p newsize=%d drc=%d\n", Tpos, n, drc); if (!SetEndOfFile(fp->Handle)) { @@ -1755,7 +1755,7 @@ bool VCMFAM::MoveIntermediateLines(PGLOBAL, bool *) Tpos += n; } // endif MaxBlk - if (trace) + if (trace(1)) htrc("move %d bytes\n", n); } // endif n @@ -1812,14 +1812,14 @@ bool VCMFAM::ReadBlock(PGLOBAL, PVCTCOL colp) /*********************************************************************/ mempos = Memcol[i] + n * CurBlk; - if (trace) + if (trace(1)) htrc("mempos=%p i=%d Nrec=%d Clen=%d CurBlk=%d\n", mempos, i, Nrec, colp->Clen, CurBlk); if (colp->GetStatus(BUF_MAPPED)) colp->Blk->SetValPointer(mempos); - if (trace) + if (trace(1)) num_read++; return false; @@ -1843,7 +1843,7 @@ bool VCMFAM::WriteBlock(PGLOBAL, PVCTCOL colp __attribute__((unused))) /*********************************************************************/ mempos = Memcol[i] + n * CurBlk; - if (trace) + if (trace(1)) htrc("modif=%d mempos=%p i=%d Nrec=%d Clen=%d colblk=%d\n", Modif, mempos, i, Nrec, colp->Clen, colp->ColBlk); @@ -2008,14 +2008,14 @@ bool VECFAM::OpenColumnFile(PGLOBAL g, PCSZ opmode, int i) sprintf(filename, Colfn, i+1); if (!(Streams[i] = PlugOpenFile(g, filename, opmode))) { - if (trace) + if (trace(1)) htrc("%s\n", g->Message); return (Tdbp->GetMode() == MODE_READ && errno == ENOENT) ? 
PushWarning(g, Tdbp) : true; } // endif Streams - if (trace) + if (trace(1)) htrc("File %s is open in mode %s\n", filename, opmode); To_Fbs[i] = dup->Openlist; // Keep track of File blocks @@ -2163,7 +2163,7 @@ void VECFAM::ResetBuffer(PGLOBAL g) /***********************************************************************/ int VECFAM::WriteBuffer(PGLOBAL g) { - if (trace) + if (trace(1)) htrc("VCT WriteBuffer: R%d Mode=%d CurNum=%d CurBlk=%d\n", Tdbp->GetTdb_No(), Tdbp->GetMode(), CurNum, CurBlk); @@ -2205,7 +2205,7 @@ int VECFAM::WriteBuffer(PGLOBAL g) /***********************************************************************/ int VECFAM::DeleteRecords(PGLOBAL g, int irc) { - if (trace) + if (trace(1)) htrc("VEC DeleteDB: rc=%d UseTemp=%d Fpos=%d Tpos=%d Spos=%d\n", irc, UseTemp, Fpos, Tpos, Spos); @@ -2215,7 +2215,7 @@ int VECFAM::DeleteRecords(PGLOBAL g, int irc) /*******************************************************************/ Fpos = Cardinality(g); - if (trace) + if (trace(1)) htrc("Fpos placed at file end=%d\n", Fpos); } else // Fpos is the Deleted line position @@ -2251,7 +2251,7 @@ int VECFAM::DeleteRecords(PGLOBAL g, int irc) #endif Spos++; // New start position is on next line - if (trace) + if (trace(1)) htrc("after: Tpos=%d Spos=%d\n", Tpos, Spos); } else { @@ -2294,7 +2294,7 @@ int VECFAM::DeleteRecords(PGLOBAL g, int irc) close(h); - if (trace) + if (trace(1)) htrc("done, h=%d irc=%d\n", h, irc); } // endfor i @@ -2332,7 +2332,7 @@ bool VECFAM::OpenTempFile(PGLOBAL g) sprintf(tempname, Tempat, i+1); if (!(T_Streams[i] = PlugOpenFile(g, tempname, "wb"))) { - if (trace) + if (trace(1)) htrc("%s\n", g->Message); return true; @@ -2391,7 +2391,7 @@ bool VECFAM::MoveIntermediateLines(PGLOBAL g, bool *) len = fread(To_Buf, Clens[i], req, Streams[i]); - if (trace) + if (trace(1)) htrc("after read req=%d len=%d\n", req, len); if (len != req) { @@ -2410,7 +2410,7 @@ bool VECFAM::MoveIntermediateLines(PGLOBAL g, bool *) return true; } // endif - if (trace) + if 
(trace(1)) htrc("after write pos=%d\n", ftell(Streams[i])); } // endfor i @@ -2418,7 +2418,7 @@ bool VECFAM::MoveIntermediateLines(PGLOBAL g, bool *) Tpos += (int)req; Spos += (int)req; - if (trace) + if (trace(1)) htrc("loop: Tpos=%d Spos=%d\n", Tpos, Spos); b = true; @@ -2541,7 +2541,7 @@ void VECFAM::CloseTableFile(PGLOBAL g, bool abort) To_Fbs[i] = NULL; } // endif Streams - if (trace) + if (trace(1)) htrc("VCT CloseTableFile: closing %s wrc=%d rc=%d\n", To_File, wrc, rc); } // end of CloseTableFile @@ -2560,7 +2560,7 @@ bool VECFAM::ReadBlock(PGLOBAL g, PVCTCOL colp) len = Nrec * colp->Clen * CurBlk; i = colp->Index - 1; - if (trace) + if (trace(1)) htrc("len=%d i=%d Nrec=%d Deplac=%d Lrecl=%d CurBlk=%d\n", len, i, Nrec, colp->Deplac, Lrecl, CurBlk); @@ -2586,13 +2586,13 @@ bool VECFAM::ReadBlock(PGLOBAL g, PVCTCOL colp) sprintf(g->Message, MSG(READ_ERROR), fn, strerror(errno)); - if (trace) + if (trace(1)) htrc(" Read error: %s\n", g->Message); return true; } // endif - if (trace) + if (trace(1)) num_read++; return false; @@ -2615,7 +2615,7 @@ bool VECFAM::WriteBlock(PGLOBAL g, PVCTCOL colp) len = Nrec * colp->Clen * colp->ColBlk; i = colp->Index - 1; - if (trace) + if (trace(1)) htrc("modif=%d len=%d i=%d Nrec=%d Deplac=%d Lrecl=%d colblk=%d\n", Modif, len, i, Nrec, colp->Deplac, Lrecl, colp->ColBlk); @@ -2638,7 +2638,7 @@ bool VECFAM::WriteBlock(PGLOBAL g, PVCTCOL colp) sprintf(fn, (UseTemp) ? 
Tempat : Colfn, colp->Index); sprintf(g->Message, MSG(WRITE_STRERROR), fn, strerror(errno)); - if (trace) + if (trace(1)) htrc("Write error: %s\n", strerror(errno)); return true; @@ -2782,7 +2782,7 @@ bool VMPFAM::MapColumnFile(PGLOBAL g, MODE mode, int i) && fp->Count && fp->Mode == mode) break; - if (trace) + if (trace(1)) htrc("Mapping file, fp=%p\n", fp); } else @@ -2807,7 +2807,7 @@ bool VMPFAM::MapColumnFile(PGLOBAL g, MODE mode, int i) if (!(*g->Message)) sprintf(g->Message, MSG(OPEN_MODE_ERROR), "map", (int) rc, filename); - if (trace) + if (trace(1)) htrc("%s\n", g->Message); return (mode == MODE_READ && rc == ENOENT) @@ -2858,7 +2858,7 @@ bool VMPFAM::MapColumnFile(PGLOBAL g, MODE mode, int i) To_Fbs[i] = fp; // Useful when closing - if (trace) + if (trace(1)) htrc("fp=%p count=%d MapView=%p len=%d\n", fp, fp->Count, Memcol[i], len); @@ -2903,7 +2903,7 @@ int VMPFAM::DeleteRecords(PGLOBAL g, int irc) int i; int m, n; - if (trace) + if (trace(1)) htrc("VMP DeleteDB: irc=%d tobuf=%p Tpos=%p Spos=%p\n", irc, To_Buf, Tpos, Spos); @@ -2913,7 +2913,7 @@ int VMPFAM::DeleteRecords(PGLOBAL g, int irc) /*******************************************************************/ Fpos = (Block - 1) * Nrec + Last; - if (trace) + if (trace(1)) htrc("Fpos placed at file top=%p\n", Fpos); } else // Fpos is the Deleted line position @@ -2936,7 +2936,7 @@ int VMPFAM::DeleteRecords(PGLOBAL g, int irc) Tpos += n; - if (trace) + if (trace(1)) htrc("move %d bytes\n", n); } // endif n @@ -2944,7 +2944,7 @@ int VMPFAM::DeleteRecords(PGLOBAL g, int irc) if (irc == RC_OK) { Spos = Fpos + 1; // New start position - if (trace) + if (trace(1)) htrc("after: Tpos=%p Spos=%p\n", Tpos, Spos); } else { @@ -2981,7 +2981,7 @@ int VMPFAM::DeleteRecords(PGLOBAL g, int irc) return RC_FX; } // endif - if (trace) + if (trace(1)) htrc("done, Tpos=%p newsize=%d drc=%d\n", Tpos, n, drc); if (!SetEndOfFile(fp->Handle)) { @@ -3088,7 +3088,7 @@ bool BGVFAM::BigRead(PGLOBAL g, HANDLE h, void *inbuf, int req) 
DWORD nbr, drc, len = (DWORD)req; bool brc = ReadFile(h, inbuf, len, &nbr, NULL); - if (trace) + if (trace(1)) htrc("after read req=%d brc=%d nbr=%d\n", req, brc, nbr); if (!brc || nbr != len) { @@ -3105,7 +3105,7 @@ bool BGVFAM::BigRead(PGLOBAL g, HANDLE h, void *inbuf, int req) sprintf(g->Message, MSG(READ_ERROR), To_File, buf); - if (trace) + if (trace(1)) htrc("BIGREAD: %s\n", g->Message); rc = true; @@ -3119,7 +3119,7 @@ bool BGVFAM::BigRead(PGLOBAL g, HANDLE h, void *inbuf, int req) sprintf(g->Message, MSG(READ_ERROR), fn, strerror(errno)); - if (trace) + if (trace(1)) htrc("BIGREAD: nbr=%d len=%d errno=%d %s\n", nbr, len, errno, g->Message); @@ -3141,7 +3141,7 @@ bool BGVFAM::BigWrite(PGLOBAL g, HANDLE h, void *inbuf, int req) DWORD nbw, drc, len = (DWORD)req; bool brc = WriteFile(h, inbuf, len, &nbw, NULL); - if (trace) + if (trace(1)) htrc("after write req=%d brc=%d nbw=%d\n", req, brc, nbw); if (!brc || nbw != len) { @@ -3159,7 +3159,7 @@ bool BGVFAM::BigWrite(PGLOBAL g, HANDLE h, void *inbuf, int req) sprintf(g->Message, MSG(WRITE_STRERROR), fn, buf); - if (trace) + if (trace(1)) htrc("BIGWRITE: nbw=%d len=%d errno=%d %s\n", nbw, len, drc, g->Message); @@ -3174,7 +3174,7 @@ bool BGVFAM::BigWrite(PGLOBAL g, HANDLE h, void *inbuf, int req) sprintf(g->Message, MSG(WRITE_STRERROR), fn, strerror(errno)); - if (trace) + if (trace(1)) htrc("BIGWRITE: nbw=%d len=%d errno=%d %s\n", nbw, len, errno, g->Message); @@ -3224,7 +3224,7 @@ int BGVFAM::GetBlockInfo(PGLOBAL g) if (h == INVALID_HANDLE_VALUE || !_filelength(h)) { #endif // !__WIN__ // Consider this is a void table - if (trace) + if (trace(1)) htrc("Void table h=%d\n", h); Last = Nrec; @@ -3248,7 +3248,7 @@ int BGVFAM::GetBlockInfo(PGLOBAL g) Block = (vh.NumRec > 0) ? 
(vh.NumRec + Nrec - 1) / Nrec : 0; Last = (vh.NumRec + Nrec - 1) % Nrec + 1; - if (trace) + if (trace(1)) htrc("Block=%d Last=%d\n", Block, Last); } // endif's @@ -3348,7 +3348,7 @@ bool BGVFAM::MakeEmptyFile(PGLOBAL g, PCSZ fn) of.QuadPart = (BIGINT)n + (BIGINT)MaxBlk * (BIGINT)Blksize - (BIGINT)1; - if (trace) + if (trace(1)) htrc("MEF: of=%lld n=%d maxblk=%d blksize=%d\n", of.QuadPart, n, MaxBlk, Blksize); @@ -3394,7 +3394,7 @@ bool BGVFAM::MakeEmptyFile(PGLOBAL g, PCSZ fn) pos = (BIGINT)n + (BIGINT)MaxBlk * (BIGINT)Blksize - (BIGINT)1; - if (trace) + if (trace(1)) htrc("MEF: pos=%lld n=%d maxblk=%d blksize=%d\n", pos, n, MaxBlk, Blksize); @@ -3439,7 +3439,7 @@ bool BGVFAM::OpenTableFile(PGLOBAL g) PlugSetPath(filename, To_File, Tdbp->GetPath()); - if (trace) + if (trace(1)) htrc("OpenTableFile: filename=%s mode=%d Last=%d\n", filename, mode, Last); @@ -3516,7 +3516,7 @@ bool BGVFAM::OpenTableFile(PGLOBAL g) strcat(g->Message, filename); } // endif Hfile - if (trace) + if (trace(1)) htrc(" rc=%d access=%p share=%p creation=%d handle=%p fn=%s\n", rc, access, share, creation, Hfile, filename); @@ -3605,7 +3605,7 @@ bool BGVFAM::OpenTableFile(PGLOBAL g) strcat(g->Message, strerror(errno)); } // endif Hfile - if (trace) + if (trace(1)) htrc(" rc=%d oflag=%p mode=%p handle=%d fn=%s\n", rc, oflag, mode, Hfile, filename); #endif // UNIX @@ -3626,7 +3626,7 @@ bool BGVFAM::OpenTableFile(PGLOBAL g) To_Fb->Mode = mode; To_Fb->Handle = Hfile; - if (trace) + if (trace(1)) htrc("File %s is open in mode %d\n", filename, mode); if (del) @@ -3729,7 +3729,7 @@ bool BGVFAM::AllocateBuffer(PGLOBAL g) /***********************************************************************/ int BGVFAM::WriteBuffer(PGLOBAL g) { - if (trace) + if (trace(1)) htrc("BGV WriteDB: R%d Mode=%d CurNum=%d CurBlk=%d\n", Tdbp->GetTdb_No(), Tdbp->GetMode(), CurNum, CurBlk); @@ -3829,7 +3829,7 @@ int BGVFAM::DeleteRecords(PGLOBAL g, int irc) /* 2 - directly move the not deleted lines inside the original */ /* 
file, and at the end erase all trailing records. */ /*********************************************************************/ - if (trace) + if (trace(1)) htrc("BGV DeleteDB: irc=%d UseTemp=%d Fpos=%d Tpos=%d Spos=%d\n", irc, UseTemp, Fpos, Tpos, Spos); @@ -3839,7 +3839,7 @@ int BGVFAM::DeleteRecords(PGLOBAL g, int irc) /*******************************************************************/ Fpos = (Block - 1) * Nrec + Last; - if (trace) + if (trace(1)) htrc("Fpos placed at file end=%d\n", Fpos); eof = UseTemp && !MaxBlk; @@ -3878,7 +3878,7 @@ int BGVFAM::DeleteRecords(PGLOBAL g, int irc) #endif Spos++; // New start position is on next line - if (trace) + if (trace(1)) htrc("after: Tpos=%d Spos=%d\n", Tpos, Spos); } else { @@ -4065,7 +4065,7 @@ bool BGVFAM::MoveIntermediateLines(PGLOBAL g, bool *b) } // endif Usetemp... - if (trace) + if (trace(1)) htrc("loop: Tpos=%d Spos=%d\n", Tpos, Spos); } // endfor n @@ -4201,7 +4201,7 @@ void BGVFAM::CloseTableFile(PGLOBAL g, bool abort) if (Hfile != INVALID_HANDLE_VALUE) rc = PlugCloseFile(g, To_Fb); - if (trace) + if (trace(1)) htrc("BGV CloseTableFile: closing %s wrc=%d rc=%d\n", To_File, wrc, rc); @@ -4247,7 +4247,7 @@ bool BGVFAM::ReadBlock(PGLOBAL g, PVCTCOL colp) pos = (BIGINT)Nrec * ((BIGINT)colp->Deplac + (BIGINT)Lrecl * (BIGINT)CurBlk); - if (trace) + if (trace(1)) htrc("RB: offset=%lld Nrec=%d Deplac=%d Lrecl=%d CurBlk=%d MaxBlk=%d\n", pos, Nrec, colp->Deplac, Lrecl, CurBlk, MaxBlk); @@ -4257,7 +4257,7 @@ bool BGVFAM::ReadBlock(PGLOBAL g, PVCTCOL colp) if (BigRead(g, Hfile, colp->Blk->GetValPointer(), colp->Clen * Nrec)) return true; - if (trace) + if (trace(1)) num_read++; return false; @@ -4284,7 +4284,7 @@ bool BGVFAM::WriteBlock(PGLOBAL g, PVCTCOL colp) pos = (BIGINT)Nrec * ((BIGINT)colp->Deplac + (BIGINT)Lrecl * (BIGINT)colp->ColBlk); - if (trace) + if (trace(1)) htrc("WB: offset=%lld Nrec=%d Deplac=%d Lrecl=%d ColBlk=%d\n", pos, Nrec, colp->Deplac, Lrecl, colp->ColBlk); diff --git a/storage/connect/filamzip.cpp 
b/storage/connect/filamzip.cpp index dfd9343af76..53e30d0fa02 100644 --- a/storage/connect/filamzip.cpp +++ b/storage/connect/filamzip.cpp @@ -699,7 +699,7 @@ bool UNZIPUTL::openEntry(PGLOBAL g) entryopen = true; } // endif rc - if (trace) + if (trace(1)) htrc("Openning entry%s %s\n", fn, (entryopen) ? "oked" : "failed"); return !entryopen; @@ -751,7 +751,7 @@ int UNZFAM::GetFileLength(PGLOBAL g) int len = (zutp && zutp->entryopen) ? Top - Memory : TXTFAM::GetFileLength(g) * 3; - if (trace) + if (trace(1)) htrc("Zipped file length=%d\n", len); return len; diff --git a/storage/connect/filter.cpp b/storage/connect/filter.cpp index 53935d8d3ba..47fead660fd 100644 --- a/storage/connect/filter.cpp +++ b/storage/connect/filter.cpp @@ -298,7 +298,7 @@ PFIL FILTER::Link(PGLOBAL g, PFIL fil2) { PFIL fil1; - if (trace) + if (trace(1)) htrc("Linking filter %p with op=%d... to filter %p with op=%d\n", this, Opc, fil2, (fil2) ? fil2->Opc : 0); @@ -352,7 +352,7 @@ int FILTER::CheckColumn(PGLOBAL g, PSQL sqlp, PXOB &p, int &ag) char errmsg[MAX_STR] = ""; int agg, k, n = 0; - if (trace) + if (trace(1)) htrc("FILTER CheckColumn: sqlp=%p ag=%d\n", sqlp, ag); switch (Opc) { @@ -537,7 +537,7 @@ PFIL FILTER::SortJoin(PGLOBAL g) bool FILTER::FindJoinFilter(POPJOIN opj, PFIL fprec, bool teq, bool tek, bool tk2, bool tc2, bool tix, bool thx) { - if (trace) + if (trace(1)) htrc("FindJoinFilter: opj=%p fprec=%p tests=(%d,%d,%d,%d)\n", opj, fprec, teq, tek, tk2, tc2); @@ -864,7 +864,7 @@ bool FILTER::CheckLocal(PTDB tdbp) { bool local = TRUE; - if (trace) { + if (trace(1)) { if (tdbp) htrc("CheckLocal: filp=%p R%d\n", this, tdbp->GetTdb_No()); else @@ -874,7 +874,7 @@ bool FILTER::CheckLocal(PTDB tdbp) for (int i = 0; local && i < 2; i++) local = Arg(i)->CheckLocal(tdbp); - if (trace) + if (trace(1)) htrc("FCL: returning %d\n", local); return (local); @@ -980,7 +980,7 @@ bool FILTER::Convert(PGLOBAL g, bool having) { int i, comtype = TYPE_ERROR; - if (trace) + if (trace(1)) 
htrc("converting(?) %s %p opc=%d\n", (having) ? "having" : "filter", this, Opc); @@ -1011,7 +1011,7 @@ bool FILTER::Convert(PGLOBAL g, bool having) return TRUE; } // endswitch - if (trace) + if (trace(1)) htrc("Filter(%d): Arg type=%d\n", i, GetArgType(i)); // Set default values @@ -1056,7 +1056,7 @@ bool FILTER::Convert(PGLOBAL g, bool having) return TRUE; } // endif - if (trace) + if (trace(1)) htrc(" comtype=%d, B_T(%d)=%d Val(%d)=%p\n", comtype, i, Test[i].B_T, i, Val(i)); @@ -1064,7 +1064,7 @@ bool FILTER::Convert(PGLOBAL g, bool having) // Set or allocate the filter argument values and buffers for (i = 0; i < 2; i++) { - if (trace) + if (trace(1)) htrc(" conv type %d ? i=%d B_T=%d comtype=%d\n", GetArgType(i), i, Test[i].B_T, comtype); @@ -1141,7 +1141,7 @@ bool FILTER::Convert(PGLOBAL g, bool having) TEST: // Test for possible Eval optimization - if (trace) + if (trace(1)) htrc("Filp %p op=%d argtypes=(%d,%d)\n", this, Opc, GetArgType(0), GetArgType(1)); @@ -1230,7 +1230,7 @@ bool FILTER::Eval(PGLOBAL g) else if (Test[i].Conv) Val(i)->SetValue_pval(Arg(i)->GetValue()); - if (trace) + if (trace(1)) htrc(" Filter: op=%d type=%d %d B_T=%d %d val=%p %p\n", Opc, GetArgType(0), GetArgType(1), Test[0].B_T, Test[1].B_T, Val(0), Val(1)); @@ -1270,7 +1270,7 @@ bool FILTER::Eval(PGLOBAL g) goto FilterError; } // endswitch Type - if (trace) { + if (trace(1)) { htrc(" IN filtering: ap=%p\n", ap); if (ap) @@ -1360,7 +1360,7 @@ bool FILTER::Eval(PGLOBAL g) goto FilterError; } // endswitch Opc - if (trace) + if (trace(1)) htrc("Eval: filter %p Opc=%d result=%d\n", this, Opc, Value->GetIntValue()); @@ -1692,7 +1692,7 @@ PFIL PrepareFilter(PGLOBAL g, PFIL fp, bool having) { PFIL filp = NULL; - if (trace) + if (trace(1)) htrc("PrepareFilter: fp=%p having=%d\n", fp, having); while (fp) { @@ -1714,7 +1714,7 @@ PFIL PrepareFilter(PGLOBAL g, PFIL fp, bool having) filp->Next = NULL; } // endwhile - if (trace) + if (trace(1)) htrc(" returning filp=%p\n", filp); return filp; @@ 
-1740,7 +1740,7 @@ DllExport bool ApplyFilter(PGLOBAL g, PFIL filp) if (filp->Eval(g)) throw (int)TYPE_FILTER; - if (trace > 1) + if (trace(2)) htrc("PlugFilter filp=%p result=%d\n", filp, filp->GetResult()); diff --git a/storage/connect/global.h b/storage/connect/global.h index e4b00786efa..63d8782ee72 100644 --- a/storage/connect/global.h +++ b/storage/connect/global.h @@ -52,7 +52,7 @@ /***********************************************************************/ /* Define access to the thread based trace value. */ /***********************************************************************/ -#define trace GetTraceValue() +#define trace(T) (bool)(GetTraceValue() & (uint)T) /***********************************************************************/ /* Miscellaneous Constants */ @@ -224,7 +224,8 @@ DllExport void *PlugSubAlloc(PGLOBAL, void *, size_t); DllExport char *PlugDup(PGLOBAL g, const char *str); DllExport void *MakePtr(void *, OFFSET); DllExport void htrc(char const *fmt, ...); -DllExport int GetTraceValue(void); +//DllExport int GetTraceValue(void); +DllExport uint GetTraceValue(void); #if defined(__cplusplus) } // extern "C" diff --git a/storage/connect/ha_connect.cc b/storage/connect/ha_connect.cc index 25957909856..1896da97c3e 100644 --- a/storage/connect/ha_connect.cc +++ b/storage/connect/ha_connect.cc @@ -174,9 +174,9 @@ #define JSONMAX 10 // JSON Default max grp size extern "C" { - char version[]= "Version 1.06.0005 October 14, 2017"; + char version[]= "Version 1.06.0006 January 30, 2018"; #if defined(__WIN__) - char compver[]= "Version 1.06.0005 " __DATE__ " " __TIME__; + char compver[]= "Version 1.06.0006 " __DATE__ " " __TIME__; char slash= '\\'; #else // !__WIN__ char slash= '/'; @@ -266,12 +266,29 @@ static char *strz(PGLOBAL g, LEX_STRING &ls) /***********************************************************************/ /* CONNECT session variables definitions. 
*/ /***********************************************************************/ -// Tracing: 0 no, 1 yes, >1 more tracing -static MYSQL_THDVAR_INT(xtrace, - PLUGIN_VAR_RQCMDARG, "Console trace value.", - NULL, NULL, 0, 0, INT_MAX, 1); +// Tracing: 0 no, 1 yes, 2 more, 4 index... 511 all +const char *xtrace_names[] = +{ + "YES", "MORE", "INDEX", "MEMORY", "SUBALLOC", + "QUERY", "STMT", "HANDLER", "BLOCK", "MONGO", NullS +}; -// Getting exact info values +TYPELIB xtrace_typelib = +{ + array_elements(xtrace_names) - 1, "xtrace_typelib", + xtrace_names, NULL +}; + +static MYSQL_THDVAR_SET( + xtrace, // name + PLUGIN_VAR_RQCMDARG, // opt + "Trace values.", // comment + NULL, // check + NULL, // update function + 0, // def (NO) + &xtrace_typelib); // typelib + + // Getting exact info values static MYSQL_THDVAR_BOOL(exact_info, PLUGIN_VAR_RQCMDARG, "Getting exact info values", NULL, NULL, 0); @@ -314,17 +331,18 @@ static MYSQL_THDVAR_UINT(work_size, static MYSQL_THDVAR_INT(conv_size, PLUGIN_VAR_RQCMDARG, // opt "Size used when converting TEXT columns.", - NULL, NULL, SZCONV, 0, 65500, 1); + NULL, NULL, SZCONV, 0, 65500, 8192); /** Type conversion: no: Unsupported types -> TYPE_ERROR yes: TEXT -> VARCHAR + force: Do it also for ODBC BINARY and BLOBs skip: skip unsupported type columns in Discovery */ const char *xconv_names[]= { - "NO", "YES", "SKIP", NullS + "NO", "YES", "FORCE", "SKIP", NullS }; TYPELIB xconv_typelib= @@ -339,7 +357,7 @@ static MYSQL_THDVAR_ENUM( "Unsupported types conversion.", // comment NULL, // check NULL, // update function - 0, // def (no) + 1, // def (yes) &xconv_typelib); // typelib // Null representation for JSON values @@ -364,12 +382,17 @@ static MYSQL_THDVAR_STR(java_wrapper, NULL, NULL, "wrappers/JdbcInterface"); #endif // JAVA_SUPPORT -#if 0 // This is apparently not acceptable for a plugin +// This is apparently not acceptable for a plugin so it is undocumented +#if defined(JAVA_SUPPORT) || defined(CMGO_SUPPORT) // Enabling MONGO table type 
+#if defined(MONGO_SUPPORT) || (MYSQL_VERSION_ID > 100200) static MYSQL_THDVAR_BOOL(enable_mongo, PLUGIN_VAR_RQCMDARG, - "Enabling the MongoDB access", - NULL, NULL, MONGO_ENABLED); -#endif // 0 + "Enabling the MongoDB access", NULL, NULL, 1); +#else // !version 2,3 +static MYSQL_THDVAR_BOOL(enable_mongo, PLUGIN_VAR_RQCMDARG, + "Enabling the MongoDB access", NULL, NULL, 0); +#endif // !version 2,3 +#endif // JAVA_SUPPORT || CMGO_SUPPORT #if defined(XMSG) || defined(NEWMSG) const char *language_names[]= @@ -401,8 +424,8 @@ handlerton *connect_hton= NULL; /***********************************************************************/ /* Function to export session variable values to other source files. */ /***********************************************************************/ -extern "C" int GetTraceValue(void) - {return connect_hton ? THDVAR(current_thd, xtrace) : 0;} +uint GetTraceValue(void) + {return connect_hton ? THDVAR(current_thd, xtrace) : 0;} bool ExactInfo(void) {return THDVAR(current_thd, exact_info);} USETEMP UseTemp(void) {return (USETEMP)THDVAR(current_thd, use_tempfile);} int GetConvSize(void) {return THDVAR(current_thd, conv_size);} @@ -419,22 +442,20 @@ void SetWorkSize(uint) push_warning(current_thd, Sql_condition::WARN_LEVEL_WARN, 0, "Work size too big, try setting a smaller value"); } // end of SetWorkSize -#if defined(XMSG) || defined(NEWMSG) -extern "C" const char *msglang(void) -{ - return language_names[THDVAR(current_thd, msg_lang)]; -} // end of msglang -#else // !XMSG && !NEWMSG #if defined(JAVA_SUPPORT) char *GetJavaWrapper(void) {return connect_hton ? 
THDVAR(current_thd, java_wrapper) : (char*)"wrappers/JdbcInterface";} #endif // JAVA_SUPPORT -#if defined(JAVA_SUPPORT) -//bool MongoEnabled(void) { return THDVAR(current_thd, enable_mongo); } -#endif // JAVA_SUPPORT +#if defined(JAVA_SUPPORT) || defined(CMGO_SUPPORT) +bool MongoEnabled(void) {return THDVAR(current_thd, enable_mongo);} +#endif // JAVA_SUPPORT || CMGO_SUPPORT +#if defined(XMSG) || defined(NEWMSG) +extern "C" const char *msglang(void) + {return language_names[THDVAR(current_thd, msg_lang)];} +#else // !XMSG && !NEWMSG extern "C" const char *msglang(void) { #if defined(FRENCH) @@ -726,7 +747,7 @@ static int connect_init_func(void *p) connect_hton->tablefile_extensions= ha_connect_exts; connect_hton->discover_table_structure= connect_assisted_discovery; - if (trace) + if (trace(128)) sql_print_information("connect_init: hton=%p", p); DTVAL::SetTimeShift(); // Initialize time zone shift once for all @@ -818,7 +839,7 @@ static handler* connect_create_handler(handlerton *hton, { handler *h= new (mem_root) ha_connect(hton, table); - if (trace) + if (trace(128)) htrc("New CONNECT %p, table: %.*s\n", h, table ? table->table_name.length : 6, table ? table->table_name.str : ""); @@ -874,7 +895,7 @@ ha_connect::ha_connect(handlerton *hton, TABLE_SHARE *table_arg) /****************************************************************************/ ha_connect::~ha_connect(void) { - if (trace) + if (trace(128)) htrc("Delete CONNECT %p, table: %.*s, xp=%p count=%d\n", this, table ? table->s->table_name.length : 6, table ? 
table->s->table_name.str : "", @@ -1658,7 +1679,7 @@ PIXDEF ha_connect::GetIndexInfo(TABLE_SHARE *s) s= table->s; for (int n= 0; (unsigned)n < s->keynames.count; n++) { - if (trace) + if (trace(1)) htrc("Getting created index %d info\n", n + 1); // Find the index to describe @@ -2004,7 +2025,7 @@ bool ha_connect::CheckColumnList(PGLOBAL g) } // endif } catch (int n) { - if (trace) + if (trace(1)) htrc("Exception %d: %s\n", n, g->Message); brc = true; } catch (const char *msg) { @@ -2061,7 +2082,7 @@ int ha_connect::MakeRecord(char *buf) PCOL colp= NULL; DBUG_ENTER("ha_connect::MakeRecord"); - if (trace > 1) + if (trace(2)) htrc("Maps: read=%08X write=%08X vcol=%08X defr=%08X defw=%08X\n", *table->read_set->bitmap, *table->write_set->bitmap, (table->vcol_set) ? *table->vcol_set->bitmap : 0, @@ -2585,14 +2606,14 @@ PFIL ha_connect::CondFilter(PGLOBAL g, Item *cond) if (!cond) return NULL; - if (trace) + if (trace(1)) htrc("Cond type=%d\n", cond->type()); if (cond->type() == COND::COND_ITEM) { PFIL fp; Item_cond *cond_item= (Item_cond *)cond; - if (trace) + if (trace(1)) htrc("Cond: Ftype=%d name=%s\n", cond_item->functype(), cond_item->func_name()); @@ -2626,7 +2647,7 @@ PFIL ha_connect::CondFilter(PGLOBAL g, Item *cond) Item_func *condf= (Item_func *)cond; Item* *args= condf->arguments(); - if (trace) + if (trace(1)) htrc("Func type=%d argnum=%d\n", condf->functype(), condf->argument_count()); @@ -2655,11 +2676,11 @@ PFIL ha_connect::CondFilter(PGLOBAL g, Item *cond) return NULL; for (i= 0; i < condf->argument_count(); i++) { - if (trace) + if (trace(1)) htrc("Argtype(%d)=%d\n", i, args[i]->type()); if (i >= 2 && !ismul) { - if (trace) + if (trace(1)) htrc("Unexpected arg for vop=%d\n", vop); continue; @@ -2689,7 +2710,7 @@ PFIL ha_connect::CondFilter(PGLOBAL g, Item *cond) break; } // endswitch type - if (trace) { + if (trace(1)) { htrc("Field index=%d\n", pField->field->field_index); htrc("Field name=%s\n", pField->field->field_name); } // endif trace @@ -2736,7 
+2757,7 @@ PFIL ha_connect::CondFilter(PGLOBAL g, Item *cond) return NULL; } // endswitch type - if (trace) + if (trace(1)) htrc("Value type=%hd\n", pp->Type); // Append the value to the argument list @@ -2754,7 +2775,7 @@ PFIL ha_connect::CondFilter(PGLOBAL g, Item *cond) filp= MakeFilter(g, colp, pop, pfirst, neg); } else { - if (trace) + if (trace(1)) htrc("Unsupported condition\n"); return NULL; @@ -2780,7 +2801,7 @@ PCFIL ha_connect::CheckCond(PGLOBAL g, PCFIL filp, const Item *cond) if (!cond) return NULL; - if (trace) + if (trace(1)) htrc("Cond type=%d\n", cond->type()); if (cond->type() == COND::COND_ITEM) { @@ -2793,7 +2814,7 @@ PCFIL ha_connect::CheckCond(PGLOBAL g, PCFIL filp, const Item *cond) else pb0= pb1= pb2= ph0= ph1= ph2= NULL; - if (trace) + if (trace(1)) htrc("Cond: Ftype=%d name=%s\n", cond_item->functype(), cond_item->func_name()); @@ -2879,7 +2900,7 @@ PCFIL ha_connect::CheckCond(PGLOBAL g, PCFIL filp, const Item *cond) filp->Bd = filp->Hv = false; - if (trace) + if (trace(1)) htrc("Func type=%d argnum=%d\n", condf->functype(), condf->argument_count()); @@ -2916,11 +2937,11 @@ PCFIL ha_connect::CheckCond(PGLOBAL g, PCFIL filp, const Item *cond) return NULL; for (i= 0; i < condf->argument_count(); i++) { - if (trace) + if (trace(1)) htrc("Argtype(%d)=%d\n", i, args[i]->type()); if (i >= 2 && !ismul) { - if (trace) + if (trace(1)) htrc("Unexpected arg for vop=%d\n", vop); continue; @@ -2963,7 +2984,7 @@ PCFIL ha_connect::CheckCond(PGLOBAL g, PCFIL filp, const Item *cond) } // endif's - if (trace) { + if (trace(1)) { htrc("Field index=%d\n", pField->field->field_index); htrc("Field name=%s\n", pField->field->field_name); htrc("Field type=%d\n", pField->field->type()); @@ -3001,7 +3022,7 @@ PCFIL ha_connect::CheckCond(PGLOBAL g, PCFIL filp, const Item *cond) if ((res= pval->val_str(&tmp)) == NULL) return NULL; // To be clarified - if (trace) + if (trace(1)) htrc("Value=%.*s\n", res->length(), res->ptr()); // IN and BETWEEN clauses should be col 
VOP list @@ -3142,7 +3163,7 @@ PCFIL ha_connect::CheckCond(PGLOBAL g, PCFIL filp, const Item *cond) filp->Bd = true; } else { - if (trace) + if (trace(1)) htrc("Unsupported condition\n"); return NULL; @@ -3209,7 +3230,7 @@ const COND *ha_connect::cond_push(const COND *cond) if (filp->Having && strlen(filp->Having) > 255) goto fin; // Memory collapse - if (trace) + if (trace(1)) htrc("cond_push: %s\n", filp->Body); tdbp->SetCond(cond); @@ -3235,7 +3256,7 @@ const COND *ha_connect::cond_push(const COND *cond) } // endif tty } catch (int n) { - if (trace) + if (trace(1)) htrc("Exception %d: %s\n", n, g->Message); } catch (const char *msg) { strcpy(g->Message, msg); @@ -3288,7 +3309,7 @@ bool ha_connect::get_error_message(int error, String* buf) &my_charset_latin1, &dummy_errors); - if (trace) + if (trace(1)) htrc("GEM(%d): len=%u %s\n", error, len, g->Message); msg[len]= '\0'; @@ -3340,7 +3361,7 @@ int ha_connect::open(const char *name, int mode, uint test_if_locked) int rc= 0; DBUG_ENTER("ha_connect::open"); - if (trace) + if (trace(1)) htrc("open: name=%s mode=%d test=%u\n", name, mode, test_if_locked); if (!(share= get_share())) @@ -3415,7 +3436,7 @@ int ha_connect::optimize(THD* thd, HA_CHECK_OPT*) rc = HA_ERR_INTERNAL_ERROR; } catch (int n) { - if (trace) + if (trace(1)) htrc("Exception %d: %s\n", n, g->Message); rc = HA_ERR_INTERNAL_ERROR; } catch (const char *msg) { @@ -3563,7 +3584,7 @@ int ha_connect::update_row(const uchar *old_data, uchar *new_data) PGLOBAL& g= xp->g; DBUG_ENTER("ha_connect::update_row"); - if (trace > 1) + if (trace(2)) htrc("update_row: old=%s new=%s\n", old_data, new_data); // Check values for possible change in indexed column @@ -3624,7 +3645,7 @@ int ha_connect::index_init(uint idx, bool sorted) PGLOBAL& g= xp->g; DBUG_ENTER("index_init"); - if (trace) + if (trace(1)) htrc("index_init: this=%p idx=%u sorted=%d\n", this, idx, sorted); if (GetIndexType(GetRealType()) == 2) { @@ -3677,7 +3698,7 @@ int ha_connect::index_init(uint idx, bool 
sorted) rc= 0; } // endif indexing - if (trace) + if (trace(1)) htrc("index_init: rc=%d indexing=%d active_index=%d\n", rc, indexing, active_index); @@ -3724,7 +3745,7 @@ int ha_connect::ReadIndexed(uchar *buf, OPVAL op, const key_range *kr) break; } // endswitch RC - if (trace > 1) + if (trace(2)) htrc("ReadIndexed: op=%d rc=%d\n", op, rc); table->status= (rc == RC_OK) ? 0 : STATUS_NOT_FOUND; @@ -3767,7 +3788,7 @@ int ha_connect::index_read(uchar * buf, const uchar * key, uint key_len, default: DBUG_RETURN(-1); break; } // endswitch find_flag - if (trace > 1) + if (trace(2)) htrc("%p index_read: op=%d\n", this, op); if (indexing > 0) { @@ -3931,7 +3952,7 @@ int ha_connect::rnd_init(bool scan) alter= 1; } // endif xmod - if (trace) + if (trace(1)) htrc("rnd_init: this=%p scan=%d xmod=%d alter=%d\n", this, scan, xmod, alter); @@ -4037,7 +4058,7 @@ int ha_connect::rnd_next(uchar *buf) break; } // endswitch RC - if (trace > 1 && (rc || !(xp->nrd++ % 16384))) { + if (trace(2) && (rc || !(xp->nrd++ % 16384))) { ulonglong tb2= my_interval_timer(); double elapsed= (double) (tb2 - xp->tb1) / 1000000000ULL; DBUG_PRINT("rnd_next", ("rc=%d nrd=%u fnd=%u nfd=%u sec=%.3lf\n", @@ -4081,7 +4102,7 @@ void ha_connect::position(const uchar *) DBUG_ENTER("ha_connect::position"); my_store_ptr(ref, ref_length, (my_off_t)tdbp->GetRecpos()); - if (trace > 1) + if (trace(2)) htrc("position: pos=%d\n", tdbp->GetRecpos()); DBUG_VOID_RETURN; @@ -4111,7 +4132,7 @@ int ha_connect::rnd_pos(uchar *buf, uchar *pos) DBUG_ENTER("ha_connect::rnd_pos"); if (!tdbp->SetRecpos(xp->g, (int)my_get_ptr(pos, ref_length))) { - if (trace) + if (trace(1)) htrc("rnd_pos: %d\n", tdbp->GetRecpos()); tdbp->SetFilter(NULL); @@ -4177,7 +4198,7 @@ int ha_connect::info(uint flag) DBUG_RETURN(HA_ERR_INTERNAL_ERROR); } // endif g - if (trace) + if (trace(1)) htrc("%p In info: flag=%u valid_info=%d\n", this, flag, valid_info); // tdbp must be available to get updated info @@ -4454,7 +4475,7 @@ MODE 
ha_connect::CheckMode(PGLOBAL g, THD *thd, #if defined(DEVELOPMENT) if (true) { #else - if (trace) { + if (trace(65)) { #endif LEX_STRING *query_string= thd_query_string(thd); htrc("%p check_mode: cmdtype=%d\n", this, thd_sql_command(thd)); @@ -4575,7 +4596,7 @@ MODE ha_connect::CheckMode(PGLOBAL g, THD *thd, } // endif's newmode - if (trace) + if (trace(1)) htrc("New mode=%d\n", newmode); return newmode; @@ -4653,7 +4674,7 @@ int ha_connect::external_lock(THD *thd, int lock_type) DBUG_ASSERT(thd == current_thd); - if (trace) + if (trace(1)) htrc("external_lock: this=%p thd=%p xp=%p g=%p lock_type=%d\n", this, thd, xp, g, lock_type); @@ -4846,7 +4867,7 @@ int ha_connect::external_lock(THD *thd, int lock_type) if (cras) g->Createas= 1; // To tell created table to ignore FLAG - if (trace) { + if (trace(1)) { #if 0 htrc("xcheck=%d cras=%d\n", xcheck, cras); @@ -4879,7 +4900,7 @@ int ha_connect::external_lock(THD *thd, int lock_type) // Delay open until used fields are known } // endif tdbp - if (trace) + if (trace(1)) htrc("external_lock: rc=%d\n", rc); DBUG_RETURN(rc); @@ -5015,7 +5036,7 @@ int ha_connect::delete_or_rename_table(const char *name, const char *to) THD *thd= current_thd; int sqlcom= thd_sql_command(thd); - if (trace) { + if (trace(1)) { if (to) htrc("rename_table: this=%p thd=%p sqlcom=%d from=%s to=%s\n", this, thd, sqlcom, name, to); @@ -5126,7 +5147,7 @@ ha_rows ha_connect::records_in_range(uint inx, key_range *min_key, if (index_init(inx, false)) DBUG_RETURN(HA_POS_ERROR); - if (trace) + if (trace(1)) htrc("records_in_range: inx=%d indexing=%d\n", inx, indexing); if (indexing > 0) { @@ -5155,7 +5176,7 @@ ha_rows ha_connect::records_in_range(uint inx, key_range *min_key, else rows= HA_POS_ERROR; - if (trace) + if (trace(1)) htrc("records_in_range: rows=%llu\n", rows); DBUG_RETURN(rows); @@ -5377,7 +5398,7 @@ static int init_table_share(THD* thd, } // endif charset - if (trace) + if (trace(1)) htrc("s_init: %.*s\n", sql->length(), sql->ptr()); return 
table_s->init_from_sql_statement_string(thd, true, @@ -6090,7 +6111,7 @@ static int connect_assisted_discovery(handlerton *, THD* thd, } // endif ok } catch (int n) { - if (trace) + if (trace(1)) htrc("Exception %d: %s\n", n, g->Message); rc = HA_ERR_INTERNAL_ERROR; } catch (const char *msg) { @@ -6184,7 +6205,7 @@ int ha_connect::create(const char *name, TABLE *table_arg, table= table_arg; // Used by called functions - if (trace) + if (trace(1)) htrc("create: this=%p thd=%p xp=%p g=%p sqlcom=%d name=%s\n", this, thd, xp, g, sqlcom, GetTableName()); @@ -6573,7 +6594,7 @@ int ha_connect::create(const char *name, TABLE *table_arg, } // endif sqlcom - if (trace) + if (trace(1)) htrc("xchk=%p createas=%d\n", g->Xchk, g->Createas); if (options->zipped) { @@ -6948,7 +6969,7 @@ ha_connect::check_if_supported_inplace_alter(TABLE *altered_table, xcp->newsep= xcp->SetName(g, GetStringOption("optname")); tshp= NULL; - if (trace && g->Xchk) + if (trace(1) && g->Xchk) htrc( "oldsep=%d newsep=%d oldopn=%s newopn=%s oldpix=%p newpix=%p\n", xcp->oldsep, xcp->newsep, @@ -7212,9 +7233,9 @@ static struct st_mysql_sys_var* connect_system_variables[]= { MYSQL_SYSVAR(class_path), MYSQL_SYSVAR(java_wrapper), #endif // JAVA_SUPPORT -#if defined(JAVA_SUPPORT) -//MYSQL_SYSVAR(enable_mongo), -#endif // JAVA_SUPPORT +#if defined(JAVA_SUPPORT) || defined(CMGO_SUPPORT) + MYSQL_SYSVAR(enable_mongo), +#endif // JAVA_SUPPORT || CMGO_SUPPORT NULL }; @@ -7231,7 +7252,7 @@ maria_declare_plugin(connect) 0x0106, /* version number (1.05) */ NULL, /* status variables */ connect_system_variables, /* system variables */ - "1.06.0005", /* string version */ + "1.06.0006", /* string version */ MariaDB_PLUGIN_MATURITY_STABLE /* maturity */ } maria_declare_plugin_end; diff --git a/storage/connect/javaconn.cpp b/storage/connect/javaconn.cpp index 90f834ef9a7..d1be0ca1848 100644 --- a/storage/connect/javaconn.cpp +++ b/storage/connect/javaconn.cpp @@ -363,7 +363,7 @@ bool JAVAConn::GetJVM(PGLOBAL g) bool 
JAVAConn::Open(PGLOBAL g) { bool brc = true, err = false; - jboolean jt = (trace > 0); + jboolean jt = (trace(1)); // Link or check whether jvm library was linked if (GetJVM(g)) @@ -430,7 +430,7 @@ bool JAVAConn::Open(PGLOBAL g) jpop->Append(cp); } // endif cp - if (trace) { + if (trace(1)) { htrc("ClassPath=%s\n", ClassPath); htrc("CLASSPATH=%s\n", cp); htrc("%s\n", jpop->GetStr()); @@ -486,7 +486,7 @@ bool JAVAConn::Open(PGLOBAL g) break; } // endswitch rc - if (trace) + if (trace(1)) htrc("%s\n", g->Message); if (brc) diff --git a/storage/connect/jdbconn.cpp b/storage/connect/jdbconn.cpp index 4c21c2c9681..4e0cf401ed4 100644 --- a/storage/connect/jdbconn.cpp +++ b/storage/connect/jdbconn.cpp @@ -270,7 +270,7 @@ PQRYRES JDBCColumns(PGLOBAL g, PCSZ db, PCSZ table, PCSZ colpat, length[11] = 255; } // endif jcp - if (trace) + if (trace(1)) htrc("JDBCColumns: max=%d len=%d,%d,%d,%d\n", maxres, length[0], length[1], length[2], length[3]); @@ -287,7 +287,7 @@ PQRYRES JDBCColumns(PGLOBAL g, PCSZ db, PCSZ table, PCSZ colpat, if (info || !qrp) // Info table return qrp; - if (trace) + if (trace(1)) htrc("Getting col results ncol=%d\n", qrp->Nbcol); if (!(cap = AllocCatInfo(g, JCAT_COL, db, table, qrp))) @@ -303,7 +303,7 @@ PQRYRES JDBCColumns(PGLOBAL g, PCSZ db, PCSZ table, PCSZ colpat, qrp->Nblin = n; // ResetNullValues(cap); - if (trace) + if (trace(1)) htrc("Columns: NBCOL=%d NBLIN=%d\n", qrp->Nbcol, qrp->Nblin); } else @@ -394,7 +394,7 @@ PQRYRES JDBCTables(PGLOBAL g, PCSZ db, PCSZ tabpat, PCSZ tabtyp, length[4] = 255; } // endif info - if (trace) + if (trace(1)) htrc("JDBCTables: max=%d len=%d,%d\n", maxres, length[0], length[1]); /************************************************************************/ @@ -417,7 +417,7 @@ PQRYRES JDBCTables(PGLOBAL g, PCSZ db, PCSZ tabpat, PCSZ tabtyp, cap->Pat = tabtyp; - if (trace) + if (trace(1)) htrc("Getting table results ncol=%d\n", cap->Qrp->Nbcol); /************************************************************************/ @@ 
-427,7 +427,7 @@ PQRYRES JDBCTables(PGLOBAL g, PCSZ db, PCSZ tabpat, PCSZ tabtyp, qrp->Nblin = n; // ResetNullValues(cap); - if (trace) + if (trace(1)) htrc("Tables: NBCOL=%d NBLIN=%d\n", qrp->Nbcol, qrp->Nblin); } else @@ -475,7 +475,7 @@ PQRYRES JDBCDrivers(PGLOBAL g, int maxres, bool info) } else maxres = 0; - if (trace) + if (trace(1)) htrc("JDBCDrivers: max=%d len=%d\n", maxres, length[0]); /************************************************************************/ @@ -586,7 +586,7 @@ bool JDBConn::Connect(PJPARM sop) int irc = RC_FX; bool err = false; jint rc; - jboolean jt = (trace > 0); + jboolean jt = (trace(1)); PGLOBAL& g = m_G; /*******************************************************************/ @@ -1443,7 +1443,7 @@ bool JDBConn::SetParam(JDBCCOL *colp) // Not used anymore env->DeleteLocalRef(parms); - if (trace) + if (trace(1)) htrc("Method %s returned %d columns\n", fnc, ncol); // n because we no more ignore the first column @@ -1488,7 +1488,7 @@ bool JDBConn::SetParam(JDBCCOL *colp) sprintf(g->Message, "Fetch: %s", Msg); return -1; } if (rc == 0) { - if (trace) + if (trace(1)) htrc("End of fetches i=%d\n", i); break; diff --git a/storage/connect/jmgfam.cpp b/storage/connect/jmgfam.cpp index c7115cdd720..30f6279146d 100644 --- a/storage/connect/jmgfam.cpp +++ b/storage/connect/jmgfam.cpp @@ -298,7 +298,7 @@ int JMGFAM::ReadBuffer(PGLOBAL g) PSZ str = Jcp->GetDocument(); if (str) { - if (trace == 1) + if (trace(1)) htrc("%s\n", str); strncpy(Tdbp->GetLine(), str, Lrecl); diff --git a/storage/connect/jmgoconn.cpp b/storage/connect/jmgoconn.cpp index 4736641ef3f..1731ccbeb8c 100644 --- a/storage/connect/jmgoconn.cpp +++ b/storage/connect/jmgoconn.cpp @@ -254,7 +254,7 @@ bool JMgoConn::MakeCursor(PGLOBAL g, PTDB tdbp, PCSZ options, all = true; if (pipe && Options) { - if (trace) + if (trace(1)) htrc("Pipeline: %s\n", Options); p = strrchr(Options, ']'); @@ -312,13 +312,13 @@ bool JMgoConn::MakeCursor(PGLOBAL g, PTDB tdbp, PCSZ options, *(char*)p = ']'; 
// Restore Colist for discovery p = s->GetStr(); - if (trace) + if (trace(33)) htrc("New Pipeline: %s\n", p); return AggregateCollection(p); } else { if (filter || filp) { - if (trace) { + if (trace(1)) { if (filter) htrc("Filter: %s\n", filter); @@ -346,7 +346,7 @@ bool JMgoConn::MakeCursor(PGLOBAL g, PTDB tdbp, PCSZ options, tdbp->SetFilter(NULL); // Not needed anymore } // endif To_Filter - if (trace) + if (trace(33)) htrc("selector: %s\n", s->GetStr()); s->Resize(s->GetLength() + 1); @@ -355,7 +355,7 @@ bool JMgoConn::MakeCursor(PGLOBAL g, PTDB tdbp, PCSZ options, if (!all) { if (Options && *Options) { - if (trace) + if (trace(1)) htrc("options=%s\n", Options); op = Options; @@ -751,7 +751,7 @@ int JMgoConn::DocUpdate(PGLOBAL g, PTDB tdbp) jlong ar = env->CallLongMethod(job, updateid, upd); - if (trace) + if (trace(1)) htrc("DocUpdate: ar = %ld\n", ar); if (Check((int)ar)) { @@ -770,7 +770,7 @@ int JMgoConn::DocDelete(PGLOBAL g, bool all) int rc = RC_OK; jlong ar = env->CallLongMethod(job, deleteid, all); - if (trace) + if (trace(1)) htrc("DocDelete: ar = %ld\n", ar); if (Check((int)ar)) { diff --git a/storage/connect/json.cpp b/storage/connect/json.cpp index b86d2da21b7..f6ed48c4d06 100644 --- a/storage/connect/json.cpp +++ b/storage/connect/json.cpp @@ -97,7 +97,7 @@ PJSON ParseJson(PGLOBAL g, char *s, int len, int *ptyp, bool *comma) PJSON jsp = NULL; STRG src; - if (trace) + if (trace(1)) htrc("ParseJson: s=%.10s len=%d\n", s, len); if (!s || !len) { @@ -178,7 +178,7 @@ PJSON ParseJson(PGLOBAL g, char *s, int len, int *ptyp, bool *comma) } // endif ptyp } catch (int n) { - if (trace) + if (trace(1)) htrc("Exception %d: %s\n", n, g->Message); jsp = NULL; } catch (const char *msg) { @@ -652,7 +652,7 @@ PSZ Serialize(PGLOBAL g, PJSON jsp, char *fn, int pretty) } // endif's } catch (int n) { - if (trace) + if (trace(1)) htrc("Exception %d: %s\n", n, g->Message); str = NULL; } catch (const char *msg) { diff --git a/storage/connect/jsonudf.cpp 
b/storage/connect/jsonudf.cpp index 550b442dd40..952cd76ef8d 100644 --- a/storage/connect/jsonudf.cpp +++ b/storage/connect/jsonudf.cpp @@ -182,7 +182,7 @@ my_bool JSNX::SetArrayOptions(PGLOBAL g, char *p, int i, PSZ nm) // Set concat intermediate string p[n - 1] = 0; - if (trace) + if (trace(1)) htrc("Concat string=%s\n", p + 1); jnp->CncVal = AllocateValue(g, p + 1, TYPE_STRING); @@ -246,7 +246,7 @@ my_bool JSNX::ParseJpath(PGLOBAL g) // Jpath = Name; return true; - if (trace) + if (trace(1)) htrc("ParseJpath %s\n", SVP(Jpath)); if (!(pbuf = PlgDBDup(g, Jpath))) @@ -309,7 +309,7 @@ my_bool JSNX::ParseJpath(PGLOBAL g) Nod = i; MulVal = AllocateValue(g, Value); - if (trace) + if (trace(1)) for (i = 0; i < Nod; i++) htrc("Node(%d) Key=%s Op=%d Rank=%d\n", i, SVP(Nodes[i].Key), Nodes[i].Op, Nodes[i].Rank); @@ -506,13 +506,13 @@ PVAL JSNX::CalculateArray(PGLOBAL g, PJAR arp, int n) vp->Reset(); - if (trace) + if (trace(1)) htrc("CalculateArray size=%d op=%d\n", ars, op); for (i = 0; i < ars; i++) { jvrp = arp->GetValue(i); - if (trace) + if (trace(1)) htrc("i=%d nv=%d\n", i, nv); if (!jvrp->IsNull() || (op == OP_CNC && GetJsonNull())) { @@ -525,7 +525,7 @@ PVAL JSNX::CalculateArray(PGLOBAL g, PJAR arp, int n) } else jvp = jvrp; - if (trace) + if (trace(1)) htrc("jvp=%s null=%d\n", jvp->GetString(g), jvp->IsNull() ? 
1 : 0); @@ -561,7 +561,7 @@ PVAL JSNX::CalculateArray(PGLOBAL g, PJAR arp, int n) if (err) vp->Reset(); - if (trace) { + if (trace(1)) { char buf(32); htrc("vp='%s' err=%d\n", @@ -3220,7 +3220,7 @@ char *jsonget_string(UDF_INIT *initid, UDF_ARGS *args, char *result, g->Activityp = (PACTIVITY)str; } catch (int n) { - if (trace) + if (trace(1)) htrc("Exception %d: %s\n", n, g->Message); PUSH_WARNING(g->Message); @@ -3561,7 +3561,7 @@ char *jsonlocate(UDF_INIT *initid, UDF_ARGS *args, char *result, g->Activityp = (PACTIVITY)path; } catch (int n) { - if (trace) + if (trace(1)) htrc("Exception %d: %s\n", n, g->Message); PUSH_WARNING(g->Message); @@ -3686,7 +3686,7 @@ char *json_locate_all(UDF_INIT *initid, UDF_ARGS *args, char *result, g->Activityp = (PACTIVITY)path; } catch (int n) { - if (trace) + if (trace(1)) htrc("Exception %d: %s\n", n, g->Message); PUSH_WARNING(g->Message); @@ -3961,7 +3961,7 @@ char *handle_item(UDF_INIT *initid, UDF_ARGS *args, char *result, g->Activityp = (PACTIVITY)str; } catch (int n) { - if (trace) + if (trace(1)) htrc("Exception %d: %s\n", n, g->Message); PUSH_WARNING(g->Message); diff --git a/storage/connect/mongo.cpp b/storage/connect/mongo.cpp index 088dc2d29d1..53e2bf377c4 100644 --- a/storage/connect/mongo.cpp +++ b/storage/connect/mongo.cpp @@ -172,7 +172,7 @@ PQRYRES MGOColumns(PGLOBAL g, PCSZ db, PCSZ uri, PTOS topt, bool info) goto err; skipit: - if (trace) + if (trace(1)) htrc("MGOColumns: n=%d len=%d\n", n, length[0]); /*********************************************************************/ @@ -276,7 +276,7 @@ int MGODISC::GetColumns(PGLOBAL g, PCSZ db, PCSZ uri, PTOS topt) tdp->Wrapname = (PSZ)GetStringTableOption(g, topt, "Wrapper", (tdp->Version == 2) ? 
"Mongo2Interface" : "Mongo3Interface"); - if (trace) + if (trace(1)) htrc("Uri %s coll=%s db=%s colist=%s filter=%s lvl=%d\n", tdp->Uri, tdp->Tabname, tdp->Tabschema, tdp->Colist, tdp->Filter, lvl); diff --git a/storage/connect/mycat.cc b/storage/connect/mycat.cc index bb77512be62..230c0a4aa6f 100644 --- a/storage/connect/mycat.cc +++ b/storage/connect/mycat.cc @@ -94,9 +94,9 @@ #if defined(XML_SUPPORT) #include "tabxml.h" #endif // XML_SUPPORT -#if defined(JAVA_SUPPORT) +#if defined(JAVA_SUPPORT) || defined(CMGO_SUPPORT) #include "mongo.h" -#endif // JAVA_SUPPORT +#endif // JAVA_SUPPORT || CMGO_SUPPORT #if defined(ZIP_SUPPORT) #include "tabzip.h" #endif // ZIP_SUPPORT @@ -109,9 +109,10 @@ extern "C" HINSTANCE s_hModule; // Saved module handle #endif // !__WIN__ -#if defined(JAVA_SUPPORT) -//bool MongoEnabled(void); -#endif // JAVA_SUPPORT +#if defined(JAVA_SUPPORT) || defined(CMGO_SUPPORT) +bool MongoEnabled(void); +#endif // JAVA_SUPPORT || CMGO_SUPPORT + PQRYRES OEMColumns(PGLOBAL g, PTOS topt, char *tab, char *db, bool info); /***********************************************************************/ @@ -144,7 +145,9 @@ TABTYPE GetTypeID(const char *type) #endif #if defined(JAVA_SUPPORT) : (!stricmp(type, "JDBC")) ? TAB_JDBC - : (!stricmp(type, "MONGO")) ? TAB_MONGO +#endif +#if defined(JAVA_SUPPORT) || defined(CMGO_SUPPORT) + : (!stricmp(type, "MONGO") && MongoEnabled()) ? TAB_MONGO #endif : (!stricmp(type, "MYSQL")) ? TAB_MYSQL : (!stricmp(type, "MYPRX")) ? 
TAB_MYSQL @@ -488,7 +491,7 @@ void MYCAT::Reset(void) PRELDEF MYCAT::GetTableDesc(PGLOBAL g, PTABLE tablep, LPCSTR type, PRELDEF *) { - if (trace) + if (trace(1)) printf("GetTableDesc: name=%s am=%s\n", tablep->GetName(), SVP(type)); // If not specified get the type of this table @@ -509,7 +512,7 @@ PRELDEF MYCAT::MakeTableDesc(PGLOBAL g, PTABLE tablep, LPCSTR am) LPCSTR schema = (PSZ)PlugDup(g, tablep->GetSchema()); PRELDEF tdp= NULL; - if (trace) + if (trace(1)) printf("MakeTableDesc: name=%s schema=%s am=%s\n", name, SVP(schema), SVP(am)); @@ -552,18 +555,17 @@ PRELDEF MYCAT::MakeTableDesc(PGLOBAL g, PTABLE tablep, LPCSTR am) case TAB_PIVOT: tdp= new(g) PIVOTDEF; break; case TAB_VIR: tdp= new(g) VIRDEF; break; case TAB_JSON: tdp= new(g) JSONDEF; break; -#if defined(MONGO_SUPPORT) - case TAB_MONGO: -// if (MongoEnabled()) - tdp = new(g) MGODEF; -// else -// strcpy(g->Message, "MONGO type not enabled"); - - break; -#endif // MONGO_SUPPORT #if defined(ZIP_SUPPORT) - case TAB_ZIP: tdp= new(g) ZIPDEF; break; + case TAB_ZIP: tdp = new(g) ZIPDEF; break; #endif // ZIP_SUPPORT +#if defined(JAVA_SUPPORT) || defined(CMGO_SUPPORT) + case TAB_MONGO: + if (MongoEnabled()) { + tdp = new(g) MGODEF; + break; + } // endif enabled + // fall through +#endif // JAVA_SUPPORT || CMGO_SUPPORT default: sprintf(g->Message, MSG(BAD_TABLE_TYPE), am, name); } // endswitch @@ -584,14 +586,14 @@ PTDB MYCAT::GetTable(PGLOBAL g, PTABLE tablep, MODE mode, LPCSTR type) PTDB tdbp= NULL; // LPCSTR name= tablep->GetName(); - if (trace) + if (trace(1)) printf("GetTableDB: name=%s\n", tablep->GetName()); // Look for the description of the requested table tdp= GetTableDesc(g, tablep, type); if (tdp) { - if (trace) + if (trace(1)) printf("tdb=%p type=%s\n", tdp, tdp->GetType()); if (tablep->GetSchema()) @@ -601,7 +603,7 @@ PTDB MYCAT::GetTable(PGLOBAL g, PTABLE tablep, MODE mode, LPCSTR type) } // endif tdp if (tdbp) { - if (trace) + if (trace(1)) printf("tdbp=%p name=%s amtype=%d\n", tdbp, 
tdbp->GetName(), tdbp->GetAmType()); tablep->SetTo_Tdb(tdbp); diff --git a/storage/connect/myconn.cpp b/storage/connect/myconn.cpp index 08bb24e14df..9da9c268c3d 100644 --- a/storage/connect/myconn.cpp +++ b/storage/connect/myconn.cpp @@ -177,7 +177,7 @@ PQRYRES MyColumns(PGLOBAL g, THD *thd, const char *host, const char *db, return NULL; } // endif b - if (trace) + if (trace(1)) htrc("MyColumns: cmd='%s'\n", cmd.GetStr()); if ((n = myc.GetResultSize(g, cmd.GetStr())) < 0) { @@ -482,7 +482,7 @@ int MYSQLC::Open(PGLOBAL g, const char *host, const char *db, return RC_FX; } // endif m_DB - if (trace) + if (trace(1)) htrc("MYSQLC Open: m_DB=%.4X size=%d\n", m_DB, (int)sizeof(*m_DB)); // Removed to do like FEDERATED do @@ -744,7 +744,7 @@ int MYSQLC::ExecSQL(PGLOBAL g, const char *query, int *w) m_Fields = mysql_num_fields(m_Res); m_Rows = (!m_Use) ? (int)mysql_num_rows(m_Res) : 0; - if (trace) + if (trace(1)) htrc("ExecSQL: m_Res=%.4X size=%d m_Fields=%d m_Rows=%d\n", m_Res, sizeof(*m_Res), m_Fields, m_Rows); @@ -1067,7 +1067,7 @@ void MYSQLC::Close(void) { FreeResult(); - if (trace) + if (trace(1)) htrc("MYSQLC Close: m_DB=%.4X\n", m_DB); mysql_close(m_DB); diff --git a/storage/connect/mysql-test/connect/r/json_java_2.result b/storage/connect/mysql-test/connect/r/json_java_2.result index 966330248e5..6c578b35d6f 100644 --- a/storage/connect/mysql-test/connect/r/json_java_2.result +++ b/storage/connect/mysql-test/connect/r/json_java_2.result @@ -1,4 +1,5 @@ SET GLOBAL connect_class_path='C:/MariaDB-10.2/MariaDB/storage/connect/mysql-test/connect/std_data/Mongo2.jar'; +set connect_enable_mongo=1; # # Test the MONGO table type # @@ -381,3 +382,4 @@ planner 167 41.75 postcard 23 5.75 DROP TABLE t1; true +set connect_enable_mongo=0; diff --git a/storage/connect/mysql-test/connect/r/json_java_3.result b/storage/connect/mysql-test/connect/r/json_java_3.result index f8e9e161475..4c5fc94fca6 100644 --- a/storage/connect/mysql-test/connect/r/json_java_3.result +++ 
b/storage/connect/mysql-test/connect/r/json_java_3.result @@ -1,4 +1,5 @@ SET GLOBAL connect_class_path='C:/MariaDB-10.2/MariaDB/storage/connect/mysql-test/connect/std_data/Mongo3.jar'; +set connect_enable_mongo=1; # # Test the MONGO table type # @@ -381,3 +382,4 @@ planner 167 41.75 postcard 23 5.75 DROP TABLE t1; true +set connect_enable_mongo=0; diff --git a/storage/connect/mysql-test/connect/r/json_mongo_c.result b/storage/connect/mysql-test/connect/r/json_mongo_c.result index 8adc006a51b..550e94f286e 100644 --- a/storage/connect/mysql-test/connect/r/json_mongo_c.result +++ b/storage/connect/mysql-test/connect/r/json_mongo_c.result @@ -1,3 +1,4 @@ +set connect_enable_mongo=1; # # Test the MONGO table type # @@ -380,3 +381,4 @@ planner 167 41.75 postcard 23 5.75 DROP TABLE t1; true +set connect_enable_mongo=0; diff --git a/storage/connect/mysql-test/connect/r/mongo_c.result b/storage/connect/mysql-test/connect/r/mongo_c.result index c7aadcf1165..132bb34ce64 100644 --- a/storage/connect/mysql-test/connect/r/mongo_c.result +++ b/storage/connect/mysql-test/connect/r/mongo_c.result @@ -1,3 +1,4 @@ +set connect_enable_mongo=1; # # Test the MONGO table type # @@ -376,3 +377,4 @@ planner 167 41.750000 postcard 23 5.750000 DROP TABLE t1; true +set connect_enable_mongo=0; diff --git a/storage/connect/mysql-test/connect/r/mongo_java_2.result b/storage/connect/mysql-test/connect/r/mongo_java_2.result index 8c58a9eceea..67c67653e88 100644 --- a/storage/connect/mysql-test/connect/r/mongo_java_2.result +++ b/storage/connect/mysql-test/connect/r/mongo_java_2.result @@ -1,4 +1,5 @@ SET GLOBAL connect_class_path='C:/MariaDB-10.2/MariaDB/storage/connect/mysql-test/connect/std_data/Mongo2.jar'; +set connect_enable_mongo=1; # # Test the MONGO table type # @@ -377,3 +378,4 @@ planner 167 41.75 postcard 23 5.75 DROP TABLE t1; true +set connect_enable_mongo=0; diff --git a/storage/connect/mysql-test/connect/r/mongo_java_3.result 
b/storage/connect/mysql-test/connect/r/mongo_java_3.result index 1dd3048b49b..665178bd3ea 100644 --- a/storage/connect/mysql-test/connect/r/mongo_java_3.result +++ b/storage/connect/mysql-test/connect/r/mongo_java_3.result @@ -1,4 +1,5 @@ SET GLOBAL connect_class_path='C:/MariaDB-10.2/MariaDB/storage/connect/mysql-test/connect/std_data/Mongo3.jar'; +set connect_enable_mongo=1; # # Test the MONGO table type # @@ -377,3 +378,4 @@ planner 167 41.75 postcard 23 5.75 DROP TABLE t1; true +set connect_enable_mongo=0; diff --git a/storage/connect/mysql-test/connect/r/tbl_thread.result b/storage/connect/mysql-test/connect/r/tbl_thread.result index f53ccd25b97..3526fc43ffc 100644 --- a/storage/connect/mysql-test/connect/r/tbl_thread.result +++ b/storage/connect/mysql-test/connect/r/tbl_thread.result @@ -87,7 +87,7 @@ a b CREATE TABLE total (a int, b char(10)) ENGINE=CONNECT TABLE_TYPE=TBL TABLE_LIST='t1,t2,t3,t4,t5' OPTION_LIST='thread=yes,port=PORT'; -set connect_xtrace=1; +set connect_xtrace=96; SELECT * FROM total order by a desc; a b 19 test19 @@ -129,7 +129,7 @@ SELECT * FROM t2; v 22 CREATE TABLE total (v BIGINT(20) UNSIGNED NOT NULL) ENGINE=CONNECT TABLE_TYPE=TBL TABLE_LIST='t1,t2' OPTION_LIST='thread=yes,port=PORT';; -set connect_xtrace=1; +set connect_xtrace=96; SELECT * FROM total order by v desc; v 22 @@ -148,7 +148,7 @@ SELECT * FROM t2; v 22 CREATE TABLE total (v BIGINT(20) UNSIGNED NOT NULL) ENGINE=CONNECT TABLE_TYPE=TBL TABLE_LIST='t1,t2' OPTION_LIST='thread=yes,port=PORT';; -set connect_xtrace=1; +set connect_xtrace=96; SELECT * FROM total order by v desc; v 22 diff --git a/storage/connect/mysql-test/connect/t/mongo.inc b/storage/connect/mysql-test/connect/t/mongo.inc index 2d7cbcfa8bd..fab2ca84139 100644 --- a/storage/connect/mysql-test/connect/t/mongo.inc +++ b/storage/connect/mysql-test/connect/t/mongo.inc @@ -1,3 +1,3 @@ -let $MONGO= C:/PROGRA~1/MongoDB/Server/3.4/bin/mongo; -let $MONGOIMPORT= C:/PROGRA~1/MongoDB/Server/3.4/bin/mongoimport; +let $MONGO= 
C:/Applic/MongoDB/Server/3.6/bin/mongo; +let $MONGOIMPORT= C:/Applic/MongoDB/Server/3.6/bin/mongoimport; diff --git a/storage/connect/mysql-test/connect/t/mongo_test.inc b/storage/connect/mysql-test/connect/t/mongo_test.inc index dfc223e9074..357fa55240b 100644 --- a/storage/connect/mysql-test/connect/t/mongo_test.inc +++ b/storage/connect/mysql-test/connect/t/mongo_test.inc @@ -1,4 +1,4 @@ -#set connect_enable_mongo=1; +set connect_enable_mongo=1; --echo # --echo # Test the MONGO table type @@ -130,7 +130,9 @@ DROP TABLE t1; --echo # --echo # try CRUD operations --echo # +--disable_query_log --exec $MONGO --eval "db.testcoll.drop()" --quiet +--enable_query_log eval CREATE TABLE t1 (_id INT(4) NOT NULL, msg CHAR(64)) ENGINE=CONNECT TABLE_TYPE=$TYPE TABNAME='testcoll' OPTION_LIST='Driver=$DRV,Version=$VERS' $CONN; @@ -147,7 +149,9 @@ DROP TABLE t1; --echo # --echo # List states whose population is equal or more than 10 millions --echo # +--disable_query_log --exec $MONGO --eval "db.cities.drop()" --quiet +--enable_query_log --exec $MONGOIMPORT --quiet $MTR_SUITE_DIR/std_data/cities.json eval CREATE TABLE t1 ( _id char(5) NOT NULL, @@ -204,4 +208,4 @@ SELECT * FROM t1; DROP TABLE t1; --exec $MONGO --eval "db.testcoll.drop()" --quiet -#set connect_enable_mongo=0; +set connect_enable_mongo=0; diff --git a/storage/connect/mysql-test/connect/t/tbl_thread.test b/storage/connect/mysql-test/connect/t/tbl_thread.test index 68a0ebcd44d..05409c695fb 100644 --- a/storage/connect/mysql-test/connect/t/tbl_thread.test +++ b/storage/connect/mysql-test/connect/t/tbl_thread.test @@ -56,7 +56,7 @@ SELECT * FROM t5; eval CREATE TABLE total (a int, b char(10)) ENGINE=CONNECT TABLE_TYPE=TBL TABLE_LIST='t1,t2,t3,t4,t5' OPTION_LIST='thread=yes,port=$PORT'; -set connect_xtrace=1; +set connect_xtrace=96; SELECT * FROM total order by a desc; set connect_xtrace=0; @@ -85,7 +85,7 @@ SELECT * FROM t2; --replace_result $PORT PORT --eval CREATE TABLE total (v BIGINT(20) UNSIGNED NOT NULL) 
ENGINE=CONNECT TABLE_TYPE=TBL TABLE_LIST='t1,t2' OPTION_LIST='thread=yes,port=$PORT'; -set connect_xtrace=1; +set connect_xtrace=96; SELECT * FROM total order by v desc; set connect_xtrace=0; DROP TABLE t1,t2,total; @@ -101,7 +101,7 @@ SELECT * FROM t2; --replace_result $PORT PORT --eval CREATE TABLE total (v BIGINT(20) UNSIGNED NOT NULL) ENGINE=CONNECT TABLE_TYPE=TBL TABLE_LIST='t1,t2' OPTION_LIST='thread=yes,port=$PORT'; -set connect_xtrace=1; +set connect_xtrace=96; SELECT * FROM total order by v desc; set connect_xtrace=0; diff --git a/storage/connect/odbconn.cpp b/storage/connect/odbconn.cpp index 70a0a6a1450..cfd7546524e 100644 --- a/storage/connect/odbconn.cpp +++ b/storage/connect/odbconn.cpp @@ -137,10 +137,10 @@ int TranslateSQLType(int stp, int prec, int& len, char& v, bool& w) case SQL_WLONGVARCHAR: // (-10) w = true; case SQL_LONGVARCHAR: // (-1) - if (GetTypeConv() == TPC_YES) { + if (GetTypeConv() == TPC_YES || GetTypeConv() == TPC_FORCE) { v = 'V'; type = TYPE_STRING; - len = MY_MIN(abs(len), GetConvSize()); + len = (len) ? MY_MIN(abs(len), GetConvSize()) : GetConvSize(); } else type = TYPE_ERROR; @@ -190,12 +190,23 @@ int TranslateSQLType(int stp, int prec, int& len, char& v, bool& w) case SQL_BIGINT: // (-5) type = TYPE_BIGINT; break; - case SQL_UNKNOWN_TYPE: // 0 case SQL_BINARY: // (-2) case SQL_VARBINARY: // (-3) case SQL_LONGVARBINARY: // (-4) - case SQL_GUID: // (-11) - default: + if (GetTypeConv() == TPC_FORCE) { + v = 'V'; + type = TYPE_STRING; + len = (len) ? 
MY_MIN(abs(len), GetConvSize()) : GetConvSize(); + } else + type = TYPE_ERROR; + + break; + case SQL_GUID: // (-11) + type = TYPE_STRING; + len = 36; + break; + case SQL_UNKNOWN_TYPE: // 0 + default: type = TYPE_ERROR; len = 0; } // endswitch type @@ -364,7 +375,7 @@ PQRYRES ODBCColumns(PGLOBAL g, PCSZ dsn, PCSZ db, PCSZ table, length[11] = 255; } // endif ocp - if (trace) + if (trace(1)) htrc("ODBCColumns: max=%d len=%d,%d,%d,%d\n", maxres, length[0], length[1], length[2], length[3]); @@ -381,7 +392,7 @@ PQRYRES ODBCColumns(PGLOBAL g, PCSZ dsn, PCSZ db, PCSZ table, if (info || !qrp) // Info table return qrp; - if (trace) + if (trace(1)) htrc("Getting col results ncol=%d\n", qrp->Nbcol); if (!(cap = AllocCatInfo(g, CAT_COL, db, table, qrp))) @@ -396,7 +407,7 @@ PQRYRES ODBCColumns(PGLOBAL g, PCSZ dsn, PCSZ db, PCSZ table, qrp->Nblin = n; // ResetNullValues(cap); - if (trace) + if (trace(1)) htrc("Columns: NBCOL=%d NBLIN=%d\n", qrp->Nbcol, qrp->Nblin); } else @@ -536,7 +547,7 @@ PQRYRES ODBCDrivers(PGLOBAL g, int maxres, bool info) } else maxres = 0; - if (trace) + if (trace(1)) htrc("ODBCDrivers: max=%d len=%d\n", maxres, length[0]); /************************************************************************/ @@ -593,7 +604,7 @@ PQRYRES ODBCDataSources(PGLOBAL g, int maxres, bool info) maxres = 0; } // endif info - if (trace) + if (trace(1)) htrc("ODBCDataSources: max=%d len=%d\n", maxres, length[0]); /************************************************************************/ @@ -666,7 +677,7 @@ PQRYRES ODBCTables(PGLOBAL g, PCSZ dsn, PCSZ db, PCSZ tabpat, PCSZ tabtyp, length[4] = 255; } // endif info - if (trace) + if (trace(1)) htrc("ODBCTables: max=%d len=%d,%d\n", maxres, length[0], length[1]); /************************************************************************/ @@ -687,7 +698,7 @@ PQRYRES ODBCTables(PGLOBAL g, PCSZ dsn, PCSZ db, PCSZ tabpat, PCSZ tabtyp, cap->Pat = tabtyp; - if (trace) + if (trace(1)) htrc("Getting table results ncol=%d\n", 
cap->Qrp->Nbcol); /************************************************************************/ @@ -697,7 +708,7 @@ PQRYRES ODBCTables(PGLOBAL g, PCSZ dsn, PCSZ db, PCSZ tabpat, PCSZ tabtyp, qrp->Nblin = n; // ResetNullValues(cap); - if (trace) + if (trace(1)) htrc("Tables: NBCOL=%d NBLIN=%d\n", qrp->Nbcol, qrp->Nblin); } else @@ -755,7 +766,7 @@ PQRYRES ODBCPrimaryKeys(PGLOBAL g, ODBConn *op, char *dsn, char *table) n = ocp->GetMaxValue(SQL_MAX_COLUMN_NAME_LEN); length[3] = (n) ? (n + 1) : 128; - if (trace) + if (trace(1)) htrc("ODBCPrimaryKeys: max=%d len=%d,%d,%d\n", maxres, length[0], length[1], length[2]); @@ -765,7 +776,7 @@ PQRYRES ODBCPrimaryKeys(PGLOBAL g, ODBConn *op, char *dsn, char *table) qrp = PlgAllocResult(g, ncol, maxres, IDS_PKEY, buftyp, NULL, length, false, true); - if (trace) + if (trace(1)) htrc("Getting pkey results ncol=%d\n", qrp->Nbcol); cap = AllocCatInfo(g, CAT_KEY, NULL, table, qrp); @@ -777,7 +788,7 @@ PQRYRES ODBCPrimaryKeys(PGLOBAL g, ODBConn *op, char *dsn, char *table) qrp->Nblin = n; // ResetNullValues(cap); - if (trace) + if (trace(1)) htrc("PrimaryKeys: NBCOL=%d NBLIN=%d\n", qrp->Nbcol, qrp->Nblin); } else @@ -838,7 +849,7 @@ PQRYRES ODBCStatistics(PGLOBAL g, ODBConn *op, char *dsn, char *pat, n = ocp->GetMaxValue(SQL_MAX_COLUMN_NAME_LEN); length[7] = (n) ? 
(n + 1) : 128; - if (trace) + if (trace(1)) htrc("SemStatistics: max=%d pat=%s\n", maxres, SVP(pat)); /************************************************************************/ @@ -847,7 +858,7 @@ PQRYRES ODBCStatistics(PGLOBAL g, ODBConn *op, char *dsn, char *pat, qrp = PlgAllocResult(g, ncol, maxres, IDS_STAT, buftyp, NULL, length, false, true); - if (trace) + if (trace(1)) htrc("Getting stat results ncol=%d\n", qrp->Nbcol); cap = AllocCatInfo(g, CAT_STAT, NULL, pat, qrp); @@ -861,7 +872,7 @@ PQRYRES ODBCStatistics(PGLOBAL g, ODBConn *op, char *dsn, char *pat, qrp->Nblin = n; // ResetNullValues(cap); - if (trace) + if (trace(1)) htrc("Statistics: NBCOL=%d NBLIN=%d\n", qrp->Nbcol, qrp->Nblin); } else @@ -918,7 +929,7 @@ bool DBX::BuildErrorMessage(ODBConn* pdb, HSTMT hstmt) && strcmp((char*)state, "00000"); i++) { m_ErrMsg[i] = (PSZ)PlugDup(g, (char*)msg); - if (trace) + if (trace(1)) htrc("%s: %s, Native=%d\n", state, msg, native); rc = SQLError(pdb->m_henv, pdb->m_hdbc, hstmt, state, @@ -932,7 +943,7 @@ bool DBX::BuildErrorMessage(ODBConn* pdb, HSTMT hstmt) MSG(BAD_HANDLE_VAL)); m_ErrMsg[0] = (PSZ)PlugDup(g, (char*)msg); - if (trace) + if (trace(1)) htrc("%s: rc=%hd\n", SVP(m_ErrMsg[0]), m_RC); return true; @@ -941,7 +952,7 @@ bool DBX::BuildErrorMessage(ODBConn* pdb, HSTMT hstmt) } else m_ErrMsg[0] = "No connexion address provided"; - if (trace) + if (trace(1)) htrc("%s: rc=%hd (%s)\n", SVP(m_Msg), m_RC, SVP(m_ErrMsg[0])); return true; @@ -1004,7 +1015,7 @@ bool ODBConn::Check(RETCODE rc) { switch (rc) { case SQL_SUCCESS_WITH_INFO: - if (trace) { + if (trace(1)) { DBX x(rc); if (x.BuildErrorMessage(this, m_hstmt)) @@ -1223,7 +1234,7 @@ void ODBConn::AllocConnect(DWORD Options) if ((signed)m_LoginTimeout >= 0) { rc = SQLSetConnectOption(m_hdbc, SQL_LOGIN_TIMEOUT, m_LoginTimeout); - if (trace && rc != SQL_SUCCESS && rc != SQL_SUCCESS_WITH_INFO) + if (trace(1) && rc != SQL_SUCCESS && rc != SQL_SUCCESS_WITH_INFO) htrc("Warning: Failure setting login timeout\n"); } 
// endif Timeout @@ -1231,7 +1242,7 @@ void ODBConn::AllocConnect(DWORD Options) if (!m_Updatable) { rc = SQLSetConnectOption(m_hdbc, SQL_ACCESS_MODE, SQL_MODE_READ_ONLY); - if (trace && rc != SQL_SUCCESS && rc != SQL_SUCCESS_WITH_INFO) + if (trace(1) && rc != SQL_SUCCESS && rc != SQL_SUCCESS_WITH_INFO) htrc("Warning: Failure setting read only access mode\n"); } // endif @@ -1385,7 +1396,7 @@ void ODBConn::GetConnectInfo() else m_Updatable = false; - if (trace) + if (trace(1)) htrc("Warning: data source is readonly\n"); } else // Make data source is !Updatable @@ -1397,7 +1408,7 @@ void ODBConn::GetConnectInfo() rc = SQLGetInfo(m_hdbc, SQL_IDENTIFIER_QUOTE_CHAR, m_IDQuoteChar, sizeof(m_IDQuoteChar), &nResult); - if (trace) + if (trace(1)) htrc("DBMS: %s, Version: %s, rc=%d\n", GetStringInfo(SQL_DBMS_NAME), GetStringInfo(SQL_DBMS_VER), rc); @@ -1447,7 +1458,7 @@ int ODBConn::ExecDirectSQL(char *sql, ODBCCOL *tocols) OnSetOptions(hstmt); b = true; - if (trace) + if (trace(1)) htrc("ExecDirect hstmt=%p %.256s\n", hstmt, sql); if (m_Tdb->Srcdef) { @@ -1510,7 +1521,7 @@ int ODBConn::ExecDirectSQL(char *sql, ODBCCOL *tocols) ThrowDBX(m_G->Message); } // endif tp - if (trace) + if (trace(1)) htrc("Binding col=%u type=%d buf=%p len=%d slen=%p\n", n, tp, buffer, len, colp->GetStrLen()); @@ -1523,7 +1534,7 @@ int ODBConn::ExecDirectSQL(char *sql, ODBCCOL *tocols) } // endif pcol } catch(DBX *x) { - if (trace) + if (trace(1)) for (int i = 0; i < MAX_NUM_OF_MSG && x->m_ErrMsg[i]; i++) htrc(x->m_ErrMsg[i]); @@ -1569,7 +1580,7 @@ int ODBConn::GetResultSize(char *sql, ODBCCOL *colp) } catch(DBX *x) { strcpy(m_G->Message, x->GetErrorMessage(0)); - if (trace) + if (trace(1)) for (int i = 0; i < MAX_NUM_OF_MSG && x->m_ErrMsg[i]; i++) htrc(x->m_ErrMsg[i]); @@ -1610,7 +1621,7 @@ int ODBConn::Fetch(int pos) } // endif m_RowsetSize // } while (rc == SQL_STILL_EXECUTING); - if (trace > 1) + if (trace(2)) htrc("Fetch: hstmt=%p RowseSize=%d rc=%d\n", m_hstmt, m_RowsetSize, rc); @@ -1626,7 
+1637,7 @@ int ODBConn::Fetch(int pos) m_Fetch++; m_Rows += irc; } catch(DBX *x) { - if (trace) + if (trace(1)) for (int i = 0; i < MAX_NUM_OF_MSG && x->m_ErrMsg[i]; i++) htrc(x->m_ErrMsg[i]); @@ -1662,7 +1673,7 @@ int ODBConn::PrepareSQL(char *sql) m_Transact = true; } catch(DBX *x) { - if (trace) + if (trace(1)) for (int i = 0; i < MAX_NUM_OF_MSG && x->m_ErrMsg[i]; i++) htrc(x->m_ErrMsg[i]); @@ -1693,7 +1704,7 @@ int ODBConn::PrepareSQL(char *sql) OnSetOptions(hstmt); b = true; - if (trace) + if (trace(1)) htrc("Prepare hstmt=%p %.64s\n", hstmt, sql); do { @@ -1708,7 +1719,7 @@ int ODBConn::PrepareSQL(char *sql) } while (rc == SQL_STILL_EXECUTING); } catch(DBX *x) { - if (trace) + if (trace(1)) for (int i = 0; i < MAX_NUM_OF_MSG && x->m_ErrMsg[i]; i++) htrc(x->m_ErrMsg[i]); @@ -1881,7 +1892,7 @@ bool ODBConn::ExecSQLcommand(char *sql) OnSetOptions(hstmt); b = true; - if (trace) + if (trace(1)) htrc("ExecSQLcommand hstmt=%p %.64s\n", hstmt, sql); // Proceed with command execution @@ -1908,7 +1919,7 @@ bool ODBConn::ExecSQLcommand(char *sql) } // endif ncol } catch(DBX *x) { - if (trace) + if (trace(1)) for (int i = 0; i < MAX_NUM_OF_MSG && x->m_ErrMsg[i]; i++) htrc(x->m_ErrMsg[i]); @@ -2394,7 +2405,7 @@ int ODBConn::GetCatInfo(CATPARM *cap) if ((rc = SQLFetch(hstmt)) == SQL_NO_DATA_FOUND) break; else if (rc != SQL_SUCCESS) { - if (trace > 1 || (trace && rc != SQL_SUCCESS_WITH_INFO)) { + if (trace(2) || (trace(1) && rc != SQL_SUCCESS_WITH_INFO)) { UCHAR msg[SQL_MAX_MESSAGE_LENGTH + 1]; UCHAR state[SQL_SQLSTATE_SIZE + 1]; RETCODE erc; @@ -2466,7 +2477,7 @@ int ODBConn::GetCatInfo(CATPARM *cap) irc = (int)crow; } catch(DBX *x) { - if (trace) + if (trace(1)) for (int i = 0; i < MAX_NUM_OF_MSG && x->m_ErrMsg[i]; i++) htrc(x->m_ErrMsg[i]); @@ -2605,12 +2616,12 @@ void ODBConn::Close() rc = SQLDisconnect(m_hdbc); - if (trace && rc != SQL_SUCCESS) + if (trace(1) && rc != SQL_SUCCESS) htrc("Error: SQLDisconnect rc=%d\n", rc); rc = SQLFreeConnect(m_hdbc); - if (trace && rc 
!= SQL_SUCCESS) + if (trace(1) && rc != SQL_SUCCESS) htrc("Error: SQLFreeConnect rc=%d\n", rc); m_hdbc = SQL_NULL_HDBC; @@ -2619,7 +2630,7 @@ void ODBConn::Close() if (m_henv != SQL_NULL_HENV) { rc = SQLFreeEnv(m_henv); - if (trace && rc != SQL_SUCCESS) // Nothing we can do + if (trace(1) && rc != SQL_SUCCESS) // Nothing we can do htrc("Error: SQLFreeEnv failure ignored in Close\n"); m_henv = SQL_NULL_HENV; diff --git a/storage/connect/plgdbutl.cpp b/storage/connect/plgdbutl.cpp index f669d644637..f248e72be12 100644 --- a/storage/connect/plgdbutl.cpp +++ b/storage/connect/plgdbutl.cpp @@ -1,11 +1,11 @@ /********** PlgDBUtl Fpe C++ Program Source Code File (.CPP) ***********/ /* PROGRAM NAME: PLGDBUTL */ /* ------------- */ -/* Version 4.0 */ +/* Version 4.1 */ /* */ /* COPYRIGHT: */ /* ---------- */ -/* (C) Copyright to the author Olivier BERTRAND 1998-2017 */ +/* (C) Copyright to the author Olivier BERTRAND 1998-2018 */ /* */ /* WHAT THIS PROGRAM DOES: */ /* ----------------------- */ @@ -215,35 +215,13 @@ int global_open(GLOBAL *g, int msgid, const char *path, int flags, int mode) } DllExport void SetTrc(void) - { +{ // If tracing is on, debug must be initialized. debug = pfile; - } // end of SetTrc - -#if 0 -/**************************************************************************/ -/* Tracing output function. */ -/**************************************************************************/ -void ptrc(char const *fmt, ...) - { - va_list ap; - va_start (ap, fmt); - -// if (trace == 0 || (trace == 1 && !pfile) || !fmt) -// printf("In %s wrong trace=%d pfile=%p fmt=%p\n", -// __FILE__, trace, pfile, fmt); - - if (trace == 1) - vfprintf(pfile, fmt, ap); - else - vprintf(fmt, ap); - - va_end (ap); - } // end of ptrc -#endif // 0 +} // end of SetTrc /**************************************************************************/ -/* Allocate the result structure that will contain result data. */ +/* SubAllocate the result structure that will contain result data. 
*/ /**************************************************************************/ PQRYRES PlgAllocResult(PGLOBAL g, int ncol, int maxres, int ids, int *buftyp, XFLD *fldtyp, @@ -307,7 +285,7 @@ PQRYRES PlgAllocResult(PGLOBAL g, int ncol, int maxres, int ids, else crp->Kdata = NULL; - if (trace) + if (trace(1)) htrc("Column(%d) %s type=%d len=%d value=%p\n", crp->Ncol, crp->Name, crp->Type, crp->Length, crp->Kdata); @@ -475,7 +453,7 @@ bool PlugEvalLike(PGLOBAL g, LPCSTR strg, LPCSTR pat, bool ci) char *tp, *sp; bool b; - if (trace) + if (trace(2)) htrc("LIKE: strg='%s' pattern='%s'\n", strg, pat); if (ci) { /* Case insensitive test */ @@ -544,7 +522,7 @@ bool EvalLikePattern(LPCSTR sp, LPCSTR tp) int n; bool b, t = false; - if (trace) + if (trace(2)) htrc("Eval Like: sp=%s tp=%s\n", (sp) ? sp : "Null", (tp) ? tp : "Null"); @@ -582,7 +560,7 @@ bool EvalLikePattern(LPCSTR sp, LPCSTR tp) else n = strlen(tp); /* Get length of pattern head */ - if (trace) + if (trace(2)) htrc(" testing: t=%d sp=%s tp=%s p=%p\n", t, sp, tp, p); if (n > (signed)strlen(sp)) /* If head is longer than strg */ @@ -628,7 +606,7 @@ bool EvalLikePattern(LPCSTR sp, LPCSTR tp) b = !strcmp(sp, tp); } /* endif p */ - if (trace) + if (trace(2)) htrc(" done: b=%d n=%d sp=%s tp=%s\n", b, n, (sp) ? 
sp : "Null", tp); @@ -668,7 +646,7 @@ char *MakeEscape(PGLOBAL g, char* str, char q) /***********************************************************************/ void PlugConvertConstant(PGLOBAL g, void* & value, short& type) { - if (trace) + if (trace(1)) htrc("PlugConvertConstant: value=%p type=%hd\n", value, type); if (type != TYPE_XOBJECT) { @@ -688,7 +666,7 @@ PDTP MakeDateFormat(PGLOBAL g, PCSZ dfmt, bool in, bool out, int flag) int rc; PDTP pdp = (PDTP)PlugSubAlloc(g, NULL, sizeof(DATPAR)); - if (trace) + if (trace(1)) htrc("MakeDateFormat: dfmt=%s\n", dfmt); memset(pdp, 0, sizeof(DATPAR)); @@ -711,7 +689,7 @@ PDTP MakeDateFormat(PGLOBAL g, PCSZ dfmt, bool in, bool out, int flag) rc = fmdflex(pdp); pthread_mutex_unlock(&parmut); - if (trace) + if (trace(1)) htrc("Done: in=%s out=%s rc=%d\n", SVP(pdp->InFmt), SVP(pdp->OutFmt), rc); return pdp; @@ -733,7 +711,7 @@ int ExtractDate(char *dts, PDTP pdp, int defy, int val[6]) else // assume standard MySQL date format fmt = "%4d-%2d-%2d %2d:%2d:%2d"; - if (trace > 1) + if (trace(2)) htrc("ExtractDate: dts=%s fmt=%s defy=%d\n", dts, fmt, defy); // Set default values for time only use @@ -816,7 +794,7 @@ int ExtractDate(char *dts, PDTP pdp, int defy, int val[6]) } // endfor i - if (trace > 1) + if (trace(2)) htrc("numval=%d val=(%d,%d,%d,%d,%d,%d)\n", numval, val[0], val[1], val[2], val[3], val[4], val[5]); @@ -833,18 +811,18 @@ FILE *PlugOpenFile(PGLOBAL g, LPCSTR fname, LPCSTR ftype) PFBLOCK fp; PDBUSER dbuserp = (PDBUSER)g->Activityp->Aptr; - if (trace) { + if (trace(1)) { htrc("PlugOpenFile: fname=%s ftype=%s\n", fname, ftype); htrc("dbuserp=%p\n", dbuserp); } // endif trace if ((fop= global_fopen(g, MSGID_OPEN_MODE_STRERROR, fname, ftype)) != NULL) { - if (trace) + if (trace(1)) htrc(" fop=%p\n", fop); fp = (PFBLOCK)PlugSubAlloc(g, NULL, sizeof(FBLOCK)); - if (trace) + if (trace(1)) htrc(" fp=%p\n", fp); // fname may be in volatile memory such as stack @@ -857,7 +835,7 @@ FILE *PlugOpenFile(PGLOBAL g, LPCSTR fname, 
LPCSTR ftype) dbuserp->Openlist = fp; } /* endif fop */ - if (trace) + if (trace(1)) htrc(" returning fop=%p\n", fop); return (fop); @@ -888,7 +866,7 @@ int PlugCloseFile(PGLOBAL g, PFBLOCK fp, bool all) { int rc = 0; - if (trace) + if (trace(1)) htrc("PlugCloseFile: fp=%p count=%hd type=%hd\n", fp, ((fp) ? fp->Count : 0), ((fp) ? fp->Type : 0)); @@ -1050,7 +1028,7 @@ int GetIniSize(char *section, char *key, char *def, char *ini) n *= 1024; } // endswitch c - if (trace) + if (trace(1)) htrc("GetIniSize: key=%s buff=%s i=%d n=%d\n", key, buff, i, n); return n; @@ -1086,7 +1064,7 @@ DllExport PSZ GetIniString(PGLOBAL g, void *mp, LPCSTR sec, LPCSTR key, p = (PSZ)PlugSubAlloc(g, mp, n + 1); - if (trace) + if (trace(1)) htrc("GetIniString: sec=%s key=%s buf=%s\n", sec, key, buf); strcpy(p, buf); @@ -1237,7 +1215,7 @@ char *GetExceptionDesc(PGLOBAL g, unsigned int e) /* so it can be freed at the normal or error query completion. */ /***********************************************************************/ void *PlgDBalloc(PGLOBAL g, void *area, MBLOCK& mp) - { +{ //bool b; size_t maxsub, minsub; void *arp = (area) ? area : g->Sarea; @@ -1253,7 +1231,7 @@ void *PlgDBalloc(PGLOBAL g, void *area, MBLOCK& mp) // done to check whether the block is already there. // b = mp.Sub; mp.Sub = false; // Restrict suballocation to one quarter - } // endif Memp + } // endif Memp // Suballoc when possible if mp.Sub is initially true, but leaving // a minimum amount of storage for future operations such as the @@ -1263,35 +1241,40 @@ void *PlgDBalloc(PGLOBAL g, void *area, MBLOCK& mp) maxsub = (pph->FreeBlk < minsub) ? 0 : pph->FreeBlk - minsub; mp.Sub = mp.Size <= ((mp.Sub) ? 
maxsub : (maxsub >> 2)); - if (trace > 1) - htrc("PlgDBalloc: in %p size=%d used=%d free=%d sub=%d\n", - arp, mp.Size, pph->To_Free, pph->FreeBlk, mp.Sub); + if (trace(2)) + htrc("PlgDBalloc: in %p size=%d used=%d free=%d sub=%d\n", + arp, mp.Size, pph->To_Free, pph->FreeBlk, mp.Sub); - if (!mp.Sub) { + if (!mp.Sub) { // For allocations greater than one fourth of remaining storage // in the area, do allocate from virtual storage. + const char*v = "malloc"; #if defined(__WIN__) - if (mp.Size >= BIGMEM) - mp.Memp = VirtualAlloc(NULL, mp.Size, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE); - else + if (mp.Size >= BIGMEM) { + v = "VirtualAlloc"; + mp.Memp = VirtualAlloc(NULL, mp.Size, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE); + } else #endif mp.Memp = malloc(mp.Size); - if (!mp.Inlist && mp.Memp) { + if (trace(8)) + htrc("PlgDBalloc: %s(%d) at %p\n", v, mp.Size, mp.Memp); + + if (!mp.Inlist && mp.Memp) { // New allocated block, put it in the memory block chain. PDBUSER dbuserp = (PDBUSER)g->Activityp->Aptr; mp.Next = dbuserp->Memlist; dbuserp->Memlist = ∓ mp.Inlist = true; - } // endif mp + } // endif mp } else // Suballocating is Ok. mp.Memp = PlugSubAlloc(g, area, mp.Size); return mp.Memp; - } // end of PlgDBalloc +} // end of PlgDBalloc /***********************************************************************/ /* PlgDBrealloc: reallocates memory conditionally. 
*/ @@ -1306,7 +1289,7 @@ void *PlgDBrealloc(PGLOBAL g, void *area, MBLOCK& mp, size_t newsize) // assert (mp.Memp != NULL); #endif - if (trace > 1) + if (trace(2)) htrc("PlgDBrealloc: %p size=%d sub=%d\n", mp.Memp, mp.Size, mp.Sub); if (newsize == mp.Size) @@ -1326,10 +1309,14 @@ void *PlgDBrealloc(PGLOBAL g, void *area, MBLOCK& mp, size_t newsize) mp.Memp = PlugSubAlloc(g, area, newsize); memcpy(mp.Memp, m.Memp, MY_MIN(m.Size, newsize)); PlgDBfree(m); // Free the old block - } else if (!(mp.Memp = realloc(mp.Memp, newsize))) { - mp = m; // Possible only if newsize > Size - return NULL; // Failed - } // endif's + } else { + if (!(mp.Memp = realloc(mp.Memp, newsize))) { + mp = m; // Possible only if newsize > Size + return NULL; // Failed + } else if (trace(8)) + htrc("PlgDBrealloc: realloc(%ld) at %p\n", newsize, mp.Memp); + + } // endif's mp.Size = newsize; } else if (!mp.Sub || newsize > mp.Size) { @@ -1352,7 +1339,7 @@ void *PlgDBrealloc(PGLOBAL g, void *area, MBLOCK& mp, size_t newsize) } // endif's - if (trace) + if (trace(8)) htrc(" newsize=%d newp=%p sub=%d\n", mp.Size, mp.Memp, mp.Sub); return mp.Memp; @@ -1363,16 +1350,20 @@ void *PlgDBrealloc(PGLOBAL g, void *area, MBLOCK& mp, size_t newsize) /***********************************************************************/ void PlgDBfree(MBLOCK& mp) { - if (trace > 1) - htrc("PlgDBfree: %p sub=%d size=%d\n", mp.Memp, mp.Sub, mp.Size); - - if (!mp.Sub && mp.Memp) + if (!mp.Sub && mp.Memp) { + const char*v = "free"; #if defined(__WIN__) - if (mp.Size >= BIGMEM) - VirtualFree(mp.Memp, 0, MEM_RELEASE); - else + if (mp.Size >= BIGMEM) { + v = "VirtualFree"; + VirtualFree(mp.Memp, 0, MEM_RELEASE); + } else #endif - free(mp.Memp); + free(mp.Memp); + + if (trace(8)) + htrc("PlgDBfree: %s(%p) size=%d\n", v, mp.Memp, mp.Size); + + } // endif mp // Do not reset Next to avoid cutting the Mblock chain mp.Memp = NULL; @@ -1384,7 +1375,7 @@ void PlgDBfree(MBLOCK& mp) /* Program for sub-allocating one item in a storage area. 
*/ /* Note: This function is equivalent to PlugSubAlloc except that in */ /* case of insufficient memory, it returns NULL instead of doing a */ -/* long jump. The caller must test the return value for error. */ +/* throw. The caller must test the return value for error. */ /***********************************************************************/ void *PlgDBSubAlloc(PGLOBAL g, void *memp, size_t size) { @@ -1400,7 +1391,7 @@ void *PlgDBSubAlloc(PGLOBAL g, void *memp, size_t size) size = ((size + 7) / 8) * 8; /* Round up size to multiple of 8 */ pph = (PPOOLHEADER)memp; - if (trace > 1) + if (trace(16)) htrc("PlgDBSubAlloc: memp=%p size=%d used=%d free=%d\n", memp, size, pph->To_Free, pph->FreeBlk); @@ -1409,7 +1400,7 @@ void *PlgDBSubAlloc(PGLOBAL g, void *memp, size_t size) "Not enough memory in Work area for request of %d (used=%d free=%d)", (int) size, pph->To_Free, pph->FreeBlk); - if (trace) + if (trace(1)) htrc("%s\n", g->Message); return NULL; @@ -1422,7 +1413,7 @@ void *PlgDBSubAlloc(PGLOBAL g, void *memp, size_t size) pph->To_Free += size; // New offset of pool free block pph->FreeBlk -= size; // New size of pool free block - if (trace > 1) + if (trace(16)) htrc("Done memp=%p used=%d free=%d\n", memp, pph->To_Free, pph->FreeBlk); @@ -1453,7 +1444,7 @@ void PlugPutOut(PGLOBAL g, FILE *f, short t, void *v, uint n) { char m[64]; - if (trace) + if (trace(1)) htrc("PUTOUT: f=%p t=%d v=%p n=%d\n", f, t, v, n); if (!v) diff --git a/storage/connect/plugutil.cpp b/storage/connect/plugutil.cpp index e9ba1682e69..0e6caa953b1 100644 --- a/storage/connect/plugutil.cpp +++ b/storage/connect/plugutil.cpp @@ -136,7 +136,7 @@ PGLOBAL PlugInit(LPCSTR Language, uint worksize) { PGLOBAL g; - if (trace > 1) + if (trace(2)) htrc("PlugInit: Language='%s'\n", ((!Language) ? 
"Null" : (char*)Language)); @@ -205,7 +205,7 @@ LPSTR PlugRemoveType(LPSTR pBuff, LPCSTR FileName) _splitpath(FileName, drive, direc, fname, ftype); - if (trace > 1) { + if (trace(2)) { htrc("after _splitpath: FileName=%s\n", FileName); htrc("drive=%s dir=%s fname=%s ext=%s\n", SVP(drive), direc, fname, ftype); @@ -213,7 +213,7 @@ LPSTR PlugRemoveType(LPSTR pBuff, LPCSTR FileName) _makepath(pBuff, drive, direc, fname, ""); - if (trace > 1) + if (trace(2)) htrc("buff='%s'\n", pBuff); return pBuff; @@ -246,7 +246,7 @@ LPCSTR PlugSetPath(LPSTR pBuff, LPCSTR prefix, LPCSTR FileName, LPCSTR defpath) char *drive = NULL, *defdrv = NULL; #endif - if (trace > 1) + if (trace(2)) htrc("prefix=%s fn=%s path=%s\n", prefix, FileName, defpath); if (!strncmp(FileName, "//", 2) || !strncmp(FileName, "\\\\", 2)) { @@ -263,7 +263,7 @@ LPCSTR PlugSetPath(LPSTR pBuff, LPCSTR prefix, LPCSTR FileName, LPCSTR defpath) #if !defined(__WIN__) if (*FileName == '~') { if (_fullpath(pBuff, FileName, _MAX_PATH)) { - if (trace > 1) + if (trace(2)) htrc("pbuff='%s'\n", pBuff); return pBuff; @@ -298,7 +298,7 @@ LPCSTR PlugSetPath(LPSTR pBuff, LPCSTR prefix, LPCSTR FileName, LPCSTR defpath) _splitpath(tmpdir, defdrv, defdir, NULL, NULL); - if (trace > 1) { + if (trace(2)) { htrc("after _splitpath: FileName=%s\n", FileName); #if defined(__WIN__) htrc("drive=%s dir=%s fname=%s ext=%s\n", drive, direc, fname, ftype); @@ -325,11 +325,11 @@ LPCSTR PlugSetPath(LPSTR pBuff, LPCSTR prefix, LPCSTR FileName, LPCSTR defpath) _makepath(newname, drive, direc, fname, ftype); - if (trace > 1) + if (trace(2)) htrc("newname='%s'\n", newname); if (_fullpath(pBuff, newname, _MAX_PATH)) { - if (trace > 1) + if (trace(2)) htrc("pbuff='%s'\n", pBuff); return pBuff; @@ -470,7 +470,7 @@ bool AllocSarea(PGLOBAL g, uint size) #if defined(DEVELOPMENT) if (true) { #else - if (trace) { + if (trace(8)) { #endif if (g->Sarea) htrc("Work area of %u allocated at %p\n", size, g->Sarea); @@ -498,7 +498,7 @@ void FreeSarea(PGLOBAL g) 
#if defined(DEVELOPMENT) if (true) #else - if (trace) + if (trace(8)) #endif htrc("Freeing Sarea at %p size = %d\n", g->Sarea, g->Sarea_Size); @@ -545,7 +545,7 @@ void *PlugSubAlloc(PGLOBAL g, void *memp, size_t size) size = ((size + 7) / 8) * 8; /* Round up size to multiple of 8 */ pph = (PPOOLHEADER)memp; - if (trace > 3) + if (trace(16)) htrc("SubAlloc in %p size=%d used=%d free=%d\n", memp, size, pph->To_Free, pph->FreeBlk); @@ -556,7 +556,7 @@ void *PlugSubAlloc(PGLOBAL g, void *memp, size_t size) "Not enough memory in %s area for request of %u (used=%d free=%d)", pname, (uint)size, pph->To_Free, pph->FreeBlk); - if (trace) + if (trace(1)) htrc("PlugSubAlloc: %s\n", g->Message); throw 1234; @@ -569,7 +569,7 @@ void *PlugSubAlloc(PGLOBAL g, void *memp, size_t size) pph->To_Free += (OFFSET)size; /* New offset of pool free block */ pph->FreeBlk -= (uint)size; /* New size of pool free block */ - if (trace > 3) + if (trace(16)) htrc("Done memp=%p used=%d free=%d\n", memp, pph->To_Free, pph->FreeBlk); diff --git a/storage/connect/reldef.cpp b/storage/connect/reldef.cpp index 031fdebe650..072bd25c5a7 100644 --- a/storage/connect/reldef.cpp +++ b/storage/connect/reldef.cpp @@ -450,7 +450,7 @@ int TABDEF::GetColCatInfo(PGLOBAL g) } // endswitch tc // lrecl must be at least recln to avoid buffer overflow - if (trace) + if (trace(1)) htrc("Lrecl: Calculated=%d defined=%d\n", recln, Hc->GetIntegerOption("Lrecl")); diff --git a/storage/connect/tabcol.cpp b/storage/connect/tabcol.cpp index 5065d86ce6a..93de0598fe8 100644 --- a/storage/connect/tabcol.cpp +++ b/storage/connect/tabcol.cpp @@ -33,7 +33,7 @@ XTAB::XTAB(LPCSTR name, LPCSTR srcdef) : Name(name) Schema = NULL; Qualifier = NULL; - if (trace) + if (trace(1)) htrc("XTAB: making new TABLE %s %s\n", Name, Srcdef); } // end of XTAB constructor @@ -49,7 +49,7 @@ XTAB::XTAB(PTABLE tp) : Name(tp->Name) Schema = tp->Schema; Qualifier = tp->Qualifier; - if (trace) + if (trace(1)) htrc(" making copy TABLE %s %s\n", Name, 
SVP(Srcdef)); } // end of XTAB constructor @@ -61,7 +61,7 @@ PTABLE XTAB::Link(PTABLE tab2) { PTABLE tabp; - if (trace) + if (trace(1)) htrc("Linking tables %s... to %s\n", Name, tab2->Name); for (tabp = this; tabp->Next; tabp = tabp->Next) ; @@ -117,7 +117,7 @@ COLUMN::COLUMN(LPCSTR name) : Name(name) To_Col = NULL; Qualifier = NULL; - if (trace) + if (trace(1)) htrc(" making new COLUMN %s\n", Name); } // end of COLUMN constructor diff --git a/storage/connect/tabdos.cpp b/storage/connect/tabdos.cpp index 5b9667a6c84..6ead8e40cd9 100644 --- a/storage/connect/tabdos.cpp +++ b/storage/connect/tabdos.cpp @@ -704,7 +704,7 @@ int TDBDOS::MakeBlockValues(PGLOBAL g) // savmin = cdp->GetBmap(); // cdp->SetBmap(PlugSubAlloc(g, NULL, block * sizeof(int))); - if (trace) + if (trace(1)) htrc("Dval(%p) Bmap(%p) col(%d) %s Block=%d lg=%d\n", cdp->GetDval(), cdp->GetBmap(), i, cdp->GetName(), block, lg); @@ -729,7 +729,7 @@ int TDBDOS::MakeBlockValues(PGLOBAL g) memset(cdp->GetMax(), 0, block * lg); } // endif Type - if (trace) + if (trace(1)) htrc("min(%p) max(%p) col(%d) %s Block=%d lg=%d\n", cdp->GetMin(), cdp->GetMax(), i, cdp->GetName(), block, lg); @@ -901,7 +901,7 @@ bool TDBDOS::SaveBlockValues(PGLOBAL g) "wb", (int)errno, filename); strcat(strcat(g->Message, ": "), strerror(errno)); - if (trace) + if (trace(1)) htrc("%s\n", g->Message); return true; @@ -1634,7 +1634,7 @@ int TDBDOS::TestBlock(PGLOBAL g) To_Filter = NULL; // So remove filter } // endswitch Beval - if (trace) + if (trace(1)) htrc("BF Eval Beval=%d\n", Beval); } // endif To_BlkFil @@ -1779,7 +1779,7 @@ int TDBDOS::MakeIndex(PGLOBAL g, PIXDEF pxdf, bool add) return RC_INFO; // Error or Physical table does not exist } catch (int n) { - if (trace) + if (trace(1)) htrc("Exception %d: %s\n", n, g->Message); rc = RC_FX; } catch (const char *msg) { @@ -1902,7 +1902,7 @@ bool TDBDOS::InitialyzeIndex(PGLOBAL g, volatile PIXDEF xdp, bool sorted) } // endif brc } catch (int n) { - if (trace) + if (trace(1)) 
htrc("Exception %d: %s\n", n, g->Message); brc = true; } catch (const char *msg) { @@ -2001,7 +2001,7 @@ int TDBDOS::Cardinality(PGLOBAL g) if (len >= 0) { int rec; - if (trace) + if (trace(1)) htrc("Estimating lines len=%d ending=%d/n", len, ((PDOSDEF)To_Def)->Ending); @@ -2018,7 +2018,7 @@ int TDBDOS::Cardinality(PGLOBAL g) Cardinal = (len + rec - 1) / rec; - if (trace) + if (trace(1)) htrc("avglen=%d MaxSize%d\n", rec, Cardinal); } // endif len @@ -2048,7 +2048,7 @@ int TDBDOS::GetMaxSize(PGLOBAL g) if (len >= 0) { int rec; - if (trace) + if (trace(1)) htrc("Estimating lines len=%d ending=%d/n", len, ((PDOSDEF)To_Def)->Ending); @@ -2059,7 +2059,7 @@ int TDBDOS::GetMaxSize(PGLOBAL g) rec = EstimatedLength() + ((PDOSDEF)To_Def)->Ending; MaxSize = (len + rec - 1) / rec; - if (trace) + if (trace(1)) htrc("avglen=%d MaxSize%d\n", rec, MaxSize); } // endif len @@ -2108,7 +2108,7 @@ bool TDBDOS::IsUsingTemp(PGLOBAL) /***********************************************************************/ bool TDBDOS::OpenDB(PGLOBAL g) { - if (trace) + if (trace(1)) htrc("DOS OpenDB: tdbp=%p tdb=R%d use=%d mode=%d\n", this, Tdb_No, Use, Mode); @@ -2184,7 +2184,7 @@ bool TDBDOS::OpenDB(PGLOBAL g) } else memset(To_Line, 0, linelen); - if (trace) + if (trace(1)) htrc("OpenDos: R%hd mode=%d To_Line=%p\n", Tdb_No, Mode, To_Line); if (SkipHeader(g)) // When called from CSV/FMT files @@ -2202,7 +2202,7 @@ bool TDBDOS::OpenDB(PGLOBAL g) /***********************************************************************/ int TDBDOS::ReadDB(PGLOBAL g) { - if (trace > 1) + if (trace(2)) htrc("DOS ReadDB: R%d Mode=%d key=%p link=%p Kindex=%p To_Line=%p\n", GetTdb_No(), Mode, To_Key_Col, To_Link, To_Kindex, To_Line); @@ -2227,7 +2227,7 @@ int TDBDOS::ReadDB(PGLOBAL g) if (SetRecpos(g, recpos)) return RC_FX; - if (trace > 1) + if (trace(2)) htrc("File position is now %d\n", GetRecpos()); if (Mode == MODE_READ) @@ -2243,7 +2243,7 @@ int TDBDOS::ReadDB(PGLOBAL g) } // endif To_Kindex - if (trace > 1) + if 
(trace(2)) htrc(" ReadDB: this=%p To_Line=%p\n", this, To_Line); /*********************************************************************/ @@ -2279,14 +2279,14 @@ bool TDBDOS::PrepareWriting(PGLOBAL) /***********************************************************************/ int TDBDOS::WriteDB(PGLOBAL g) { - if (trace > 1) + if (trace(2)) htrc("DOS WriteDB: R%d Mode=%d \n", Tdb_No, Mode); // Make the line to write if (PrepareWriting(g)) return RC_FX; - if (trace > 1) + if (trace(2)) htrc("Write: line is='%s'\n", To_Line); // Now start the writing process @@ -2403,7 +2403,7 @@ DOSCOL::DOSCOL(PGLOBAL g, PCOLDEF cdp, PTDB tp, PCOL cp, int i, PCSZ am) Dcm = (*p) ? atoi(p) : GetScale(); } // endif fmt - if (trace) + if (trace(1)) htrc(" making new %sCOL C%d %s at %p\n", am, Index, Name, this); } // end of DOSCOL constructor @@ -2518,7 +2518,7 @@ void DOSCOL::ReadColumn(PGLOBAL g) double dval; PTDBDOS tdbp = (PTDBDOS)To_Tdb; - if (trace > 1) + if (trace(2)) htrc( "DOS ReadColumn: col %s R%d coluse=%.4X status=%.4X buf_type=%d\n", Name, tdbp->GetTdb_No(), ColUse, Status, Buf_Type); @@ -2607,13 +2607,13 @@ void DOSCOL::WriteColumn(PGLOBAL g) int i, k, len, field; PTDBDOS tdbp = (PTDBDOS)To_Tdb; - if (trace > 1) + if (trace(2)) htrc("DOS WriteColumn: col %s R%d coluse=%.4X status=%.4X\n", Name, tdbp->GetTdb_No(), ColUse, Status); p = tdbp->To_Line + Deplac; - if (trace > 1) + if (trace(2)) htrc("Lrecl=%d deplac=%d int=%d\n", tdbp->Lrecl, Deplac, Long); field = Long; @@ -2630,7 +2630,7 @@ void DOSCOL::WriteColumn(PGLOBAL g) } // endif Ftype - if (trace > 1) + if (trace(2)) htrc("Long=%d field=%d coltype=%d colval=%p\n", Long, field, Buf_Type, Value); @@ -2703,7 +2703,7 @@ void DOSCOL::WriteColumn(PGLOBAL g) } else // Standard CONNECT format p2 = Value->ShowValue(Buf, field); - if (trace) + if (trace(1)) htrc("new length(%p)=%d\n", p2, strlen(p2)); if ((len = strlen(p2)) > field) { @@ -2714,7 +2714,7 @@ void DOSCOL::WriteColumn(PGLOBAL g) if (p2[i] == '.') p2[i] = Dsp; - if 
(trace > 1) + if (trace(2)) htrc("buffer=%s\n", p2); /*******************************************************************/ @@ -2724,7 +2724,7 @@ void DOSCOL::WriteColumn(PGLOBAL g) memset(p, ' ', field); memcpy(p, p2, len); - if (trace > 1) + if (trace(2)) htrc(" col write: '%.*s'\n", len, p); } // endif Use diff --git a/storage/connect/tabext.cpp b/storage/connect/tabext.cpp index a75b373b564..64d401bef15 100644 --- a/storage/connect/tabext.cpp +++ b/storage/connect/tabext.cpp @@ -433,7 +433,7 @@ bool TDBEXT::MakeSQL(PGLOBAL g, bool cnt) } else Query->Resize(len); - if (trace) + if (trace(33)) htrc("Query=%s\n", Query->GetStr()); return false; @@ -527,7 +527,7 @@ bool TDBEXT::MakeCommand(PGLOBAL g) return true; } // endif p - if (trace) + if (trace(33)) htrc("Command=%s\n", stmt); Query = new(g)STRING(g, 0, stmt); @@ -585,7 +585,7 @@ EXTCOL::EXTCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PCSZ am) tdbp->SetColumns(this); } // endif cprec - if (trace) + if (trace(1)) htrc(" making new %sCOL C%d %s at %p\n", am, Index, Name, this); // Set additional remote access method information for column. 
diff --git a/storage/connect/tabfix.cpp b/storage/connect/tabfix.cpp index a78d5861e53..1969fd4465f 100644 --- a/storage/connect/tabfix.cpp +++ b/storage/connect/tabfix.cpp @@ -291,7 +291,7 @@ bool TDBFIX::IsUsingTemp(PGLOBAL) /***********************************************************************/ bool TDBFIX::OpenDB(PGLOBAL g) { - if (trace) + if (trace(1)) htrc("FIX OpenDB: tdbp=%p tdb=R%d use=%d key=%p mode=%d Ftype=%d\n", this, Tdb_No, Use, To_Key_Col, Mode, Ftype); @@ -345,7 +345,7 @@ bool TDBFIX::OpenDB(PGLOBAL g) /*********************************************************************/ To_BlkFil = InitBlockFilter(g, To_Filter); - if (trace) + if (trace(1)) htrc("OpenFix: R%hd mode=%d BlkFil=%p\n", Tdb_No, Mode, To_BlkFil); /*********************************************************************/ @@ -474,7 +474,7 @@ void BINCOL::ReadColumn(PGLOBAL g) int rc; PTDBFIX tdbp = (PTDBFIX)To_Tdb; - if (trace > 1) + if (trace(2)) htrc("BIN ReadColumn: col %s R%d coluse=%.4X status=%.4X buf_type=%d\n", Name, tdbp->GetTdb_No(), ColUse, Status, Buf_Type); @@ -565,7 +565,7 @@ void BINCOL::WriteColumn(PGLOBAL g) longlong n; PTDBFIX tdbp = (PTDBFIX)To_Tdb; - if (trace) { + if (trace(1)) { htrc("BIN WriteColumn: col %s R%d coluse=%.4X status=%.4X", Name, tdbp->GetTdb_No(), ColUse, Status); htrc(" Lrecl=%d\n", tdbp->Lrecl); diff --git a/storage/connect/tabfmt.cpp b/storage/connect/tabfmt.cpp index 516601a5eb4..66339a49de2 100644 --- a/storage/connect/tabfmt.cpp +++ b/storage/connect/tabfmt.cpp @@ -185,7 +185,7 @@ PQRYRES CSVColumns(PGLOBAL g, PCSZ dp, PTOS topt, bool info) mxr = MY_MAX(0, tdp->Maxerr); - if (trace) + if (trace(1)) htrc("File %s Sep=%c Qot=%c Header=%d maxerr=%d\n", SVP(tdp->Fn), tdp->Sep, tdp->Qot, tdp->Header, tdp->Maxerr); @@ -379,7 +379,7 @@ PQRYRES CSVColumns(PGLOBAL g, PCSZ dp, PTOS topt, bool info) skip: ; // Skip erroneous line } // endfor num_read - if (trace) { + if (trace(1)) { htrc("imax=%d Lengths:", imax); for (i = 0; i < imax; i++) @@ -391,7 
+391,7 @@ PQRYRES CSVColumns(PGLOBAL g, PCSZ dp, PTOS topt, bool info) tdbp->CloseDB(g); skipit: - if (trace) + if (trace(1)) htrc("CSVColumns: imax=%d hmax=%d len=%d\n", imax, hmax, length[0]); @@ -701,7 +701,7 @@ int TDBCSV::EstimatedLength(void) int n = 0; PCOLDEF cdp; - if (trace) + if (trace(1)) htrc("EstimatedLength: Fields=%d Columns=%p\n", Fields, Columns); for (cdp = To_Def->GetCols(); cdp; cdp = cdp->GetNext()) @@ -906,7 +906,7 @@ int TDBCSV::ReadBuffer(PGLOBAL g) int i, n, len, rc = Txfp->ReadBuffer(g); bool bad = false; - if (trace > 1) + if (trace(2)) htrc("CSV: Row is '%s' rc=%d\n", To_Line, rc); if (rc != RC_OK || !Fields) @@ -1024,7 +1024,7 @@ bool TDBCSV::PrepareWriting(PGLOBAL g) char sep[2], qot[2]; int i, nlen, oldlen = strlen(To_Line); - if (trace > 1) + if (trace(2)) htrc("CSV WriteDB: R%d Mode=%d key=%p link=%p\n", Tdb_No, Mode, To_Key_Col, To_Link); @@ -1090,7 +1090,7 @@ bool TDBCSV::PrepareWriting(PGLOBAL g) To_Line[nlen] = '\0'; } // endif - if (trace > 1) + if (trace(2)) htrc("Write: line is=%s", To_Line); return false; @@ -1118,7 +1118,7 @@ int TDBCSV::CheckWrite(PGLOBAL g) { int maxlen, n, nlen = (Fields - 1); - if (trace > 1) + if (trace(2)) htrc("CheckWrite: R%d Mode=%d\n", Tdb_No, Mode); // Before writing the line we must check its length @@ -1290,7 +1290,7 @@ int TDBFMT::ReadBuffer(PGLOBAL g) else ++Linenum; - if (trace > 1) + if (trace(2)) htrc("FMT: Row %d is '%s' rc=%d\n", Linenum, To_Line, rc); // Find the offsets and lengths of the columns for this row @@ -1445,7 +1445,7 @@ void CSVCOL::ReadColumn(PGLOBAL g) Deplac = tdbp->Offset[Fldnum]; // Field offset Long = tdbp->Fldlen[Fldnum]; // Field length - if (trace > 1) + if (trace(2)) htrc("CSV ReadColumn %s Fldnum=%d offset=%d fldlen=%d\n", Name, Fldnum, Deplac, Long); @@ -1489,13 +1489,13 @@ void CSVCOL::WriteColumn(PGLOBAL g) int flen; PTDBCSV tdbp = (PTDBCSV)To_Tdb; - if (trace > 1) + if (trace(2)) htrc("CSV WriteColumn: col %s R%d coluse=%.4X status=%.4X\n", Name, 
tdbp->GetTdb_No(), ColUse, Status); flen = GetLength(); - if (trace > 1) + if (trace(2)) htrc("Lrecl=%d Long=%d field=%d coltype=%d colval=%p\n", tdbp->Lrecl, Long, flen, Buf_Type, Value); @@ -1510,7 +1510,7 @@ void CSVCOL::WriteColumn(PGLOBAL g) /*********************************************************************/ p = Value->ShowValue(buf); - if (trace > 1) + if (trace(2)) htrc("new length(%p)=%d\n", p, strlen(p)); if ((signed)strlen(p) > flen) { @@ -1522,7 +1522,7 @@ void CSVCOL::WriteColumn(PGLOBAL g) if (p[i] == '.') p[i] = Dsp; - if (trace > 1) + if (trace(2)) htrc("buffer=%s\n", p); /*********************************************************************/ @@ -1536,7 +1536,7 @@ void CSVCOL::WriteColumn(PGLOBAL g) } else strncpy(tdbp->Field[Fldnum], p, flen); - if (trace > 1) + if (trace(2)) htrc(" col written: '%s'\n", p); } // end of WriteColumn diff --git a/storage/connect/tabjdbc.cpp b/storage/connect/tabjdbc.cpp index b6a1487955b..c0fda584381 100644 --- a/storage/connect/tabjdbc.cpp +++ b/storage/connect/tabjdbc.cpp @@ -153,7 +153,7 @@ int JDBCDEF::ParseURL(PGLOBAL g, char *url, bool b) // Tabname = GetStringCatInfo(g, "Tabname", Tabname); } // endif - if (trace) + if (trace(1)) htrc("server: %s Tabname: %s", url, Tabname); // Now make the required URL @@ -470,7 +470,7 @@ bool TDBJDBC::MakeInsert(PGLOBAL g) else Prepared = true; - if (trace) + if (trace(33)) htrc("Insert=%s\n", Query->GetStr()); return false; @@ -553,7 +553,7 @@ bool TDBJDBC::OpenDB(PGLOBAL g) { bool rc = true; - if (trace) + if (trace(1)) htrc("JDBC OpenDB: tdbp=%p tdb=R%d use=%d mode=%d\n", this, Tdb_No, Use, Mode); @@ -767,7 +767,7 @@ bool TDBJDBC::ReadKey(PGLOBAL g, OPVAL op, const key_range *kr) Mode = MODE_READ; } // endif's op - if (trace) + if (trace(33)) htrc("JDBC ReadKey: Query=%s\n", Query->GetStr()); rc = Jcp->ExecuteQuery((char*)Query->GetStr()); @@ -783,7 +783,7 @@ int TDBJDBC::ReadDB(PGLOBAL g) { int rc; - if (trace > 1) + if (trace(2)) htrc("JDBC ReadDB: R%d Mode=%d\n", 
GetTdb_No(), Mode); if (Mode == MODE_UPDATE || Mode == MODE_DELETE) { @@ -836,7 +836,7 @@ int TDBJDBC::ReadDB(PGLOBAL g) } // endif placed - if (trace > 1) + if (trace(2)) htrc(" Read: Rbuf=%d rc=%d\n", Rbuf, rc); return rc; @@ -897,7 +897,7 @@ int TDBJDBC::WriteDB(PGLOBAL g) Query->RepLast(')'); - if (trace > 1) + if (trace(2)) htrc("Inserting: %s\n", Query->GetStr()); rc = Jcp->ExecuteUpdate(Query->GetStr()); @@ -925,7 +925,7 @@ int TDBJDBC::DeleteDB(PGLOBAL g, int irc) AftRows = Jcp->m_Aff; sprintf(g->Message, "%s: %d affected rows", TableName, AftRows); - if (trace) + if (trace(1)) htrc("%s\n", g->Message); PushWarning(g, this, 0); // 0 means a Note @@ -946,14 +946,14 @@ void TDBJDBC::CloseDB(PGLOBAL g) if (Jcp) Jcp->Close(); - if (trace) + if (trace(1)) htrc("JDBC CloseDB: closing %s\n", Name); if (!Werr && (Mode == MODE_INSERT || Mode == MODE_UPDATE || Mode == MODE_DELETE)) { sprintf(g->Message, "%s: %d affected rows", TableName, AftRows); - if (trace) + if (trace(1)) htrc("%s\n", g->Message); PushWarning(g, this, 0); // 0 means a Note @@ -1117,7 +1117,7 @@ bool TDBXJDC::OpenDB(PGLOBAL g) { bool rc = false; - if (trace) + if (trace(1)) htrc("JDBC OpenDB: tdbp=%p tdb=R%d use=%d mode=%d\n", this, Tdb_No, Use, Mode); diff --git a/storage/connect/tabjson.cpp b/storage/connect/tabjson.cpp index 8778b7d4b47..45c5ef3afc9 100644 --- a/storage/connect/tabjson.cpp +++ b/storage/connect/tabjson.cpp @@ -142,7 +142,7 @@ PQRYRES JSONColumns(PGLOBAL g, PCSZ db, PCSZ dsn, PTOS topt, bool info) return NULL; } // endif Fn - if (trace) + if (trace(1)) htrc("File %s objname=%s pretty=%d lvl=%d\n", tdp->Fn, tdp->Objname, tdp->Pretty, lvl); @@ -422,7 +422,7 @@ PQRYRES JSONColumns(PGLOBAL g, PCSZ db, PCSZ dsn, PTOS topt, bool info) tjnp->CloseDB(g); skipit: - if (trace) + if (trace(1)) htrc("JSONColumns: n=%d len=%d\n", n, length[0]); /*********************************************************************/ @@ -1591,14 +1591,14 @@ PVAL JSONCOL::CalculateArray(PGLOBAL g, PJAR arp, int 
n) vp->Reset(); ars = MY_MIN(Tjp->Limit, arp->size()); - if (trace) + if (trace(1)) htrc("CalculateArray: size=%d op=%d nextsame=%d\n", ars, op, nextsame); for (i = 0; i < ars; i++) { jvrp = arp->GetValue(i); - if (trace) + if (trace(1)) htrc("i=%d nv=%d\n", i, nv); if (!jvrp->IsNull() || (op == OP_CNC && GetJsonNull())) do { @@ -1612,7 +1612,7 @@ PVAL JSONCOL::CalculateArray(PGLOBAL g, PJAR arp, int n) } else jvp = jvrp; - if (trace) + if (trace(1)) htrc("jvp=%s null=%d\n", jvp->GetString(g), jvp->IsNull() ? 1 : 0); @@ -1648,7 +1648,7 @@ PVAL JSONCOL::CalculateArray(PGLOBAL g, PJAR arp, int n) if (err) vp->Reset(); - if (trace) { + if (trace(1)) { char buf(32); htrc("vp='%s' err=%d\n", diff --git a/storage/connect/table.cpp b/storage/connect/table.cpp index 61aefc93082..d4d3a34d67e 100644 --- a/storage/connect/table.cpp +++ b/storage/connect/table.cpp @@ -128,7 +128,7 @@ PCOL TDB::ColDB(PGLOBAL g, PSZ name, int num) PCOLDEF cdp; PCOL cp, colp = NULL, cprec = NULL; - if (trace) + if (trace(1)) htrc("ColDB: am=%d colname=%s tabname=%s num=%d\n", GetAmType(), SVP(name), Name, num); @@ -146,7 +146,7 @@ PCOL TDB::ColDB(PGLOBAL g, PSZ name, int num) else if (cp->GetIndex() < i) cprec = cp; - if (trace) + if (trace(1)) htrc("cdp(%d).Name=%s cp=%p\n", i, cdp->GetName(), cp); /*****************************************************************/ @@ -159,7 +159,7 @@ PCOL TDB::ColDB(PGLOBAL g, PSZ name, int num) else if (Mode != MODE_INSERT) colp = InsertSpcBlk(g, cdp); - if (trace) + if (trace(1)) htrc("colp=%p\n", colp); if (name || num) @@ -256,7 +256,7 @@ PCOL TDB::InsertSpcBlk(PGLOBAL g, PCOLDEF cdp) /***********************************************************************/ void TDB::MarkDB(PGLOBAL, PTDB tdb2) { - if (trace) + if (trace(1)) htrc("DOS MarkDB: tdbp=%p tdb2=%p\n", this, tdb2); } // end of MarkDB @@ -416,7 +416,7 @@ PCOL TDBASE::ColDB(PGLOBAL g, PSZ name, int num) PCOLDEF cdp; PCOL cp, colp = NULL, cprec = NULL; - if (trace) + if (trace(1)) htrc("ColDB: am=%d 
colname=%s tabname=%s num=%d\n", GetAmType(), SVP(name), Name, num); @@ -434,7 +434,7 @@ PCOL TDBASE::ColDB(PGLOBAL g, PSZ name, int num) else if (cp->GetIndex() < i) cprec = cp; - if (trace) + if (trace(1)) htrc("cdp(%d).Name=%s cp=%p\n", i, cdp->GetName(), cp); /*****************************************************************/ @@ -447,7 +447,7 @@ PCOL TDBASE::ColDB(PGLOBAL g, PSZ name, int num) else if (Mode != MODE_INSERT) colp = InsertSpcBlk(g, cdp); - if (trace) + if (trace(1)) htrc("colp=%p\n", colp); if (name || num) @@ -592,7 +592,7 @@ void TDBASE::PrintAM(FILE *f, char *m) /***********************************************************************/ void TDBASE::MarkDB(PGLOBAL, PTDB tdb2) { - if (trace) + if (trace(1)) htrc("DOS MarkDB: tdbp=%p tdb2=%p\n", this, tdb2); } // end of MarkDB diff --git a/storage/connect/tabmul.cpp b/storage/connect/tabmul.cpp index 5c41f9094ac..1afd21db452 100644 --- a/storage/connect/tabmul.cpp +++ b/storage/connect/tabmul.cpp @@ -134,7 +134,7 @@ bool TDBMUL::InitFileNames(PGLOBAL g) PSZ filename; int rc, n = 0; - if (trace) + if (trace(1)) htrc("in InitFileName: fn[]=%d\n", FNSZ); filename = (char*)PlugSubAlloc(g, NULL, FNSZ); @@ -144,7 +144,7 @@ bool TDBMUL::InitFileNames(PGLOBAL g) PlugSetPath(filename, Tdbp->GetFile(g), Tdbp->GetPath()); - if (trace) + if (trace(1)) htrc("InitFileName: fn='%s'\n", filename); if (Mul != 2) { @@ -159,7 +159,7 @@ bool TDBMUL::InitFileNames(PGLOBAL g) if (dirp->OpenDB(g)) return true; - if (trace && Mul == 3) { + if (trace(1) && Mul == 3) { int nf = ((PTDBSDR)dirp)->FindInDir(g); htrc("Number of files = %d\n", nf); } // endif trace @@ -319,7 +319,7 @@ int TDBMUL::GetMaxSize(PGLOBAL g) int i; int mxsz; - if (trace) + if (trace(1)) htrc("TDBMUL::GetMaxSize: Filenames=%p\n", Filenames); if (!Filenames && InitFileNames(g)) @@ -375,7 +375,7 @@ int TDBMUL::RowNumber(PGLOBAL g, bool b) /***********************************************************************/ bool TDBMUL::OpenDB(PGLOBAL g) { - if 
(trace) + if (trace(1)) htrc("MUL OpenDB: tdbp=%p tdb=R%d use=%d key=%p mode=%d\n", this, Tdb_No, Use, To_Key_Col, Mode); @@ -546,7 +546,7 @@ bool TDBMSD::InitFileNames(PGLOBAL g) PSZ filename; int rc, n = 0; - if (trace) + if (trace(1)) htrc("in InitFileName: fn[]=%d\n", FNSZ); filename = (char*)PlugSubAlloc(g, NULL, FNSZ); @@ -556,7 +556,7 @@ bool TDBMSD::InitFileNames(PGLOBAL g) PlugSetPath(filename, Tdbp->GetFile(g), Tdbp->GetPath()); - if (trace) + if (trace(1)) htrc("InitFileName: fn='%s'\n", filename); dirp = new(g) TDBSDR(filename); @@ -787,7 +787,7 @@ int TDBDIR::GetMaxSize(PGLOBAL g) /***********************************************************************/ bool TDBDIR::OpenDB(PGLOBAL g) { - if (trace) + if (trace(1)) htrc("DIR OpenDB: tdbp=%p tdb=R%d use=%d mode=%d\n", this, Tdb_No, Use, Mode); @@ -985,7 +985,7 @@ void DIRCOL::SetTimeValue(PGLOBAL g, FILETIME& ftime) /***********************************************************************/ void DIRCOL::ReadColumn(PGLOBAL g) { - if (trace) + if (trace(1)) htrc("DIR ReadColumn: col %s R%d use=%.4X status=%.4X type=%d N=%d\n", Name, Tdbp->GetTdb_No(), ColUse, Status, Buf_Type, N); @@ -1452,7 +1452,7 @@ int TDBDHR::GetMaxSize(PGLOBAL g) /***********************************************************************/ bool TDBDHR::OpenDB(PGLOBAL g) { - if (trace) + if (trace(1)) htrc("DHR OpenDB: tdbp=%p tdb=R%d use=%d mode=%d\n", this, Tdb_No, Use, Mode); @@ -1589,7 +1589,7 @@ void DHRCOL::ReadColumn(PGLOBAL g) int rc; PTDBDHR tdbp = (PTDBDHR)To_Tdb; - if (trace) + if (trace(1)) htrc("DHR ReadColumn: col %s R%d use=%.4X status=%.4X type=%d N=%d\n", Name, tdbp->GetTdb_No(), ColUse, Status, Buf_Type, N); diff --git a/storage/connect/tabmysql.cpp b/storage/connect/tabmysql.cpp index d1e2ae69608..a80abcdd19f 100644 --- a/storage/connect/tabmysql.cpp +++ b/storage/connect/tabmysql.cpp @@ -203,7 +203,7 @@ bool MYSQLDEF::ParseURL(PGLOBAL g, char *url, bool b) // Otherwise, straight server name, Tabname = (b) ? 
GetStringCatInfo(g, "Tabname", Name) : NULL; - if (trace) + if (trace(1)) htrc("server: %s TableName: %s", url, Tabname); Server = url; @@ -567,7 +567,7 @@ bool TDBMYSQL::MakeSelect(PGLOBAL g, bool mx) return true; } // endif Query - if (trace) + if (trace(33)) htrc("Query=%s\n", Query->GetStr()); return false; @@ -1042,7 +1042,7 @@ int TDBMYSQL::SendCommand(PGLOBAL g) sprintf(g->Message, "%s: %d affected rows", TableName, AftRows); PushWarning(g, this, 0); // 0 means a Note - if (trace) + if (trace(1)) htrc("%s\n", g->Message); if (w && Myc.ExecSQL(g, "SHOW WARNINGS") == RC_OK) { @@ -1109,7 +1109,7 @@ bool TDBMYSQL::ReadKey(PGLOBAL g, OPVAL op, const key_range *kr) Mode = MODE_READ; } // endif's op - if (trace) + if (trace(33)) htrc("MYSQL ReadKey: Query=%s\n", Query->GetStr()); m_Rc = Myc.ExecSQL(g, Query->GetStr()); @@ -1124,7 +1124,7 @@ int TDBMYSQL::ReadDB(PGLOBAL g) { int rc; - if (trace > 1) + if (trace(2)) htrc("MySQL ReadDB: R%d Mode=%d\n", GetTdb_No(), Mode); if (Mode == MODE_UPDATE || Mode == MODE_DELETE) @@ -1137,7 +1137,7 @@ int TDBMYSQL::ReadDB(PGLOBAL g) N++; Fetched = ((rc = Myc.Fetch(g, -1)) == RC_OK); - if (trace > 1) + if (trace(2)) htrc(" Read: rc=%d\n", rc); return rc; @@ -1220,7 +1220,7 @@ void TDBMYSQL::CloseDB(PGLOBAL g) Myc.Close(); } // endif Myc - if (trace) + if (trace(1)) htrc("MySQL CloseDB: closing %s rc=%d\n", Name, m_Rc); } // end of CloseDB @@ -1248,7 +1248,7 @@ MYSQLCOL::MYSQLCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PCSZ am) Slen = 0; Rank = -1; // Not known yet - if (trace) + if (trace(1)) htrc(" making new %sCOL C%d %s at %p\n", am, Index, Name, this); } // end of MYSQLCOL constructor @@ -1279,7 +1279,7 @@ MYSQLCOL::MYSQLCOL(MYSQL_FIELD *fld, PTDB tdbp, int i, PCSZ am) Slen = 0; Rank = i; - if (trace) + if (trace(1)) htrc(" making new %sCOL C%d %s at %p\n", am, Index, Name, this); } // end of MYSQLCOL constructor @@ -1409,7 +1409,7 @@ void MYSQLCOL::ReadColumn(PGLOBAL g) tdbp->Fetched = true; if ((buf = 
((PTDBMY)To_Tdb)->Myc.GetCharField(Rank))) { - if (trace > 1) + if (trace(2)) htrc("MySQL ReadColumn: name=%s buf=%s\n", Name, buf); // TODO: have a true way to differenciate temporal values @@ -1679,7 +1679,7 @@ MYXCOL::MYXCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PCSZ am) MYXCOL::MYXCOL(MYSQL_FIELD *fld, PTDB tdbp, int i, PCSZ am) : MYSQLCOL(fld, tdbp, i, am) { - if (trace) + if (trace(1)) htrc(" making new %sCOL C%d %s at %p\n", am, Index, Name, this); } // end of MYSQLCOL constructor diff --git a/storage/connect/tabodbc.cpp b/storage/connect/tabodbc.cpp index 56e5e72efd6..1f89fd7af9c 100644 --- a/storage/connect/tabodbc.cpp +++ b/storage/connect/tabodbc.cpp @@ -538,7 +538,7 @@ bool TDBODBC::OpenDB(PGLOBAL g) { bool rc = true; - if (trace) + if (trace(1)) htrc("ODBC OpenDB: tdbp=%p tdb=R%d use=%dmode=%d\n", this, Tdb_No, Use, Mode); @@ -750,7 +750,7 @@ bool TDBODBC::ReadKey(PGLOBAL g, OPVAL op, const key_range *kr) Mode = MODE_READ; } // endif's op - if (trace) + if (trace(33)) htrc("ODBC ReadKey: Query=%s\n", Query->GetStr()); Rows = Ocp->ExecDirectSQL((char*)Query->GetStr(), (PODBCCOL)Columns); @@ -765,7 +765,7 @@ int TDBODBC::ReadDB(PGLOBAL g) { int rc; - if (trace > 1) + if (trace(2)) htrc("ODBC ReadDB: R%d Mode=%d\n", GetTdb_No(), Mode); if (Mode == MODE_UPDATE || Mode == MODE_DELETE) { @@ -776,7 +776,7 @@ int TDBODBC::ReadDB(PGLOBAL g) if (!Ocp->ExecSQLcommand(Query->GetStr())) { sprintf(g->Message, "%s: %d affected rows", TableName, AftRows); - if (trace) + if (trace(1)) htrc("%s\n", g->Message); PushWarning(g, this, 0); // 0 means a Note @@ -817,7 +817,7 @@ int TDBODBC::ReadDB(PGLOBAL g) } // endif Placed - if (trace > 1) + if (trace(2)) htrc(" Read: Rbuf=%d rc=%d\n", Rbuf, rc); return rc; @@ -852,7 +852,7 @@ int TDBODBC::DeleteDB(PGLOBAL g, int irc) if (!Ocp->ExecSQLcommand(Query->GetStr())) { sprintf(g->Message, "%s: %d affected rows", TableName, AftRows); - if (trace) + if (trace(1)) htrc("%s\n", g->Message); PushWarning(g, this, 0); // 0 means a 
Note @@ -874,7 +874,7 @@ void TDBODBC::CloseDB(PGLOBAL g) Ocp->Close(); - if (trace) + if (trace(1)) htrc("ODBC CloseDB: closing %s\n", Name); } // end of CloseDB @@ -975,7 +975,7 @@ void ODBCCOL::ReadColumn(PGLOBAL g) } // endif Buf_Type - if (trace > 1) { + if (trace(2)) { char buf[64]; htrc("ODBC Column %s: rows=%d buf=%p type=%d value=%s\n", @@ -1214,7 +1214,7 @@ bool TDBXDBC::OpenDB(PGLOBAL g) { bool rc = false; - if (trace) + if (trace(1)) htrc("ODBC OpenDB: tdbp=%p tdb=R%d use=%dmode=%d\n", this, Tdb_No, Use, Mode); diff --git a/storage/connect/tabpivot.cpp b/storage/connect/tabpivot.cpp index 76a46e6899b..da5d134f347 100644 --- a/storage/connect/tabpivot.cpp +++ b/storage/connect/tabpivot.cpp @@ -299,7 +299,7 @@ PQRYRES PIVAID::MakePivotColumns(PGLOBAL g) Qryp->Nbcol += (ndif - 2); return Qryp; } catch (int n) { - if (trace) + if (trace(1)) htrc("Exception %d: %s\n", n, g->Message); } catch (const char *msg) { strcpy(g->Message, msg); diff --git a/storage/connect/tabsys.cpp b/storage/connect/tabsys.cpp index 7f0d9881298..f73a2b6578d 100644 --- a/storage/connect/tabsys.cpp +++ b/storage/connect/tabsys.cpp @@ -180,7 +180,7 @@ PTDB TDBINI::Clone(PTABS t) /***********************************************************************/ char *TDBINI::GetSeclist(PGLOBAL g) { - if (trace) + if (trace(1)) htrc("GetSeclist: Seclist=%p\n", Seclist); if (!Seclist) { @@ -267,7 +267,7 @@ bool TDBINI::OpenDB(PGLOBAL g) if (!colp->IsSpecial()) // Not a pseudo column colp->AllocBuf(g); - if (trace) + if (trace(1)) htrc("INI OpenDB: seclist=%s seclen=%d ifile=%s\n", Seclist, Seclen, Ifile); @@ -287,7 +287,7 @@ int TDBINI::ReadDB(PGLOBAL) else Section += (strlen(Section) + 1); - if (trace > 1) + if (trace(2)) htrc("INI ReadDB: section=%s N=%d\n", Section, N); N++; @@ -453,7 +453,7 @@ void INICOL::ReadColumn(PGLOBAL) { PTDBINI tdbp = (PTDBINI)To_Tdb; - if (trace > 1) + if (trace(2)) htrc("INI ReadColumn: col %s R%d flag=%d\n", Name, tdbp->GetTdb_No(), Flag); @@ -493,7 +493,7 @@ void 
INICOL::WriteColumn(PGLOBAL g) bool rc; PTDBINI tdbp = (PTDBINI)To_Tdb; - if (trace > 1) + if (trace(2)) htrc("INI WriteColumn: col %s R%d coluse=%.4X status=%.4X\n", Name, tdbp->GetTdb_No(), ColUse, Status); @@ -823,7 +823,7 @@ void XINCOL::WriteColumn(PGLOBAL g) bool rc; PTDBXIN tdbp = (PTDBXIN)To_Tdb; - if (trace > 1) + if (trace(2)) htrc("XIN WriteColumn: col %s R%d coluse=%.4X status=%.4X\n", Name, tdbp->GetTdb_No(), ColUse, Status); diff --git a/storage/connect/tabtbl.cpp b/storage/connect/tabtbl.cpp index 05bf2e33878..53af28354e7 100644 --- a/storage/connect/tabtbl.cpp +++ b/storage/connect/tabtbl.cpp @@ -132,7 +132,7 @@ bool TBLDEF::DefineAM(PGLOBAL g, LPCSTR, int) tbl = new(g) XTAB(pn, def); tbl->SetSchema(pdb); - if (trace) + if (trace(1)) htrc("TBL: Name=%s db=%s\n", tbl->GetName(), tbl->GetSchema()); // Link the blocks @@ -436,7 +436,7 @@ int TDBTBL::RowNumber(PGLOBAL g, bool b) /***********************************************************************/ bool TDBTBL::OpenDB(PGLOBAL g) { - if (trace) + if (trace(1)) htrc("TBL OpenDB: tdbp=%p tdb=R%d use=%d key=%p mode=%d\n", this, Tdb_No, Use, To_Key_Col, Mode); @@ -475,7 +475,7 @@ bool TDBTBL::OpenDB(PGLOBAL g) else if (((PPRXCOL)cp)->Init(g, NULL) && !Accept) return TRUE; - if (trace) + if (trace(1)) htrc("Opening subtable %s\n", Tdbp->GetName()); // Now we can safely open the table @@ -530,7 +530,7 @@ int TDBTBL::ReadDB(PGLOBAL g) else if (((PPRXCOL)cp)->Init(g, NULL) && !Accept) return RC_FX; - if (trace) + if (trace(1)) htrc("Opening subtable %s\n", Tdbp->GetName()); // Now we can safely open the table @@ -555,7 +555,7 @@ int TDBTBL::ReadDB(PGLOBAL g) /***********************************************************************/ void TBTBLK::ReadColumn(PGLOBAL) { - if (trace) + if (trace(1)) htrc("TBT ReadColumn: name=%s\n", Name); Value->SetValue_psz((char*)((PTDBTBL)To_Tdb)->Tdbp->GetName()); @@ -575,27 +575,30 @@ pthread_handler_t ThreadOpen(void *p) if (!my_thread_init()) { set_current_thd(cmp->Thd); - 
if (trace) + if (trace(1)) htrc("ThreadOpen: Thd=%d\n", cmp->Thd); // Try to open the connection - if (!cmp->Tap->GetTo_Tdb()->OpenDB(cmp->G)) { - pthread_mutex_lock(&tblmut); - if (trace) + pthread_mutex_lock(&tblmut); + + if (!cmp->Tap->GetTo_Tdb()->OpenDB(cmp->G)) { +// pthread_mutex_lock(&tblmut); + if (trace(1)) htrc("Table %s ready\n", cmp->Tap->GetName()); cmp->Ready = true; - pthread_mutex_unlock(&tblmut); +// pthread_mutex_unlock(&tblmut); } else { - pthread_mutex_lock(&tblmut); - if (trace) +// pthread_mutex_lock(&tblmut); + if (trace(1)) htrc("Opening %s failed\n", cmp->Tap->GetName()); cmp->Rc = RC_FX; - pthread_mutex_unlock(&tblmut); +// pthread_mutex_unlock(&tblmut); } // endif OpenDB - my_thread_end(); + pthread_mutex_unlock(&tblmut); + my_thread_end(); } else cmp->Rc = RC_FX; @@ -672,7 +675,7 @@ bool TDBTBM::OpenTables(PGLOBAL g) // Remove remote table from the local list *ptabp = tabp->Next; - if (trace) + if (trace(1)) htrc("=====> New remote table %s\n", tabp->GetName()); // Make the remote table block @@ -698,7 +701,7 @@ bool TDBTBM::OpenTables(PGLOBAL g) ptp = &tp->Next; Nrc++; // Number of remote connections } else { - if (trace) + if (trace(1)) htrc("=====> Local table %s\n", tabp->GetName()); ptabp = &tabp->Next; @@ -714,7 +717,7 @@ bool TDBTBM::OpenTables(PGLOBAL g) /***********************************************************************/ bool TDBTBM::OpenDB(PGLOBAL g) { - if (trace) + if (trace(1)) htrc("TBM OpenDB: tdbp=%p tdb=R%d use=%d key=%p mode=%d\n", this, Tdb_No, Use, To_Key_Col, Mode); @@ -762,7 +765,7 @@ bool TDBTBM::OpenDB(PGLOBAL g) else if (((PPRXCOL)cp)->Init(g, NULL) && !Accept) return TRUE; - if (trace) + if (trace(1)) htrc("Opening subtable %s\n", Tdbp->GetName()); // Now we can safely open the table @@ -863,7 +866,7 @@ int TDBTBM::ReadNextRemote(PGLOBAL g) else if (((PPRXCOL)cp)->Init(g, NULL) && !Accept) return RC_FX; - if (trace) + if (trace(1)) htrc("Reading subtable %s\n", Tdbp->GetName()); return RC_OK; diff --git 
a/storage/connect/tabutil.cpp b/storage/connect/tabutil.cpp index 5d8d7c1b9f8..68b66aec31f 100644 --- a/storage/connect/tabutil.cpp +++ b/storage/connect/tabutil.cpp @@ -457,7 +457,7 @@ PTDB TDBPRX::GetSubTable(PGLOBAL g, PTABLE tabp, bool b) hc->get_table()->s->option_struct->srcdef = sp; } // endif s - if (trace && tdbp) + if (trace(1) && tdbp) htrc("Subtable %s in %s\n", name, SVP(tdbp->GetDef()->GetDB())); @@ -647,7 +647,7 @@ PRXCOL::PRXCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PCSZ am) Pseudo = false; Colnum = cdp->GetOffset(); // If columns are retrieved by number - if (trace) + if (trace(1)) htrc(" making new %sCOL C%d %s at %p\n", am, Index, Name, this); } // end of PRXCOL constructor @@ -732,7 +732,7 @@ void PRXCOL::Reset(void) /***********************************************************************/ void PRXCOL::ReadColumn(PGLOBAL g) { - if (trace > 1) + if (trace(2)) htrc("PRX ReadColumn: name=%s\n", Name); if (Colp) { @@ -759,7 +759,7 @@ void PRXCOL::ReadColumn(PGLOBAL g) /***********************************************************************/ void PRXCOL::WriteColumn(PGLOBAL g) { - if (trace > 1) + if (trace(2)) htrc("PRX WriteColumn: name=%s\n", Name); if (Colp) { diff --git a/storage/connect/tabvct.cpp b/storage/connect/tabvct.cpp index 533986e44da..11b344ef652 100644 --- a/storage/connect/tabvct.cpp +++ b/storage/connect/tabvct.cpp @@ -304,7 +304,7 @@ bool TDBVCT::IsUsingTemp(PGLOBAL) /***********************************************************************/ bool TDBVCT::OpenDB(PGLOBAL g) { - if (trace) + if (trace(1)) htrc("VCT OpenDB: tdbp=%p tdb=R%d use=%d key=%p mode=%d\n", this, Tdb_No, Use, To_Key_Col, Mode); @@ -364,7 +364,7 @@ bool TDBVCT::OpenDB(PGLOBAL g) /***********************************************************************/ int TDBVCT::ReadDB(PGLOBAL g) { - if (trace) + if (trace(1)) htrc("VCT ReadDB: R%d Mode=%d CurBlk=%d CurNum=%d key=%p link=%p Kindex=%p\n", GetTdb_No(), Mode, Txfp->CurBlk, Txfp->CurNum, To_Key_Col, To_Link, 
To_Kindex); @@ -546,7 +546,7 @@ void VCTCOL::ReadColumn(PGLOBAL g) assert (!To_Kcol); #endif - if (trace > 1) + if (trace(2)) htrc("VCT ReadColumn: col %s R%d coluse=%.4X status=%.4X buf_type=%d\n", Name, To_Tdb->GetTdb_No(), ColUse, Status, Buf_Type); @@ -574,7 +574,7 @@ void VCTCOL::WriteColumn(PGLOBAL) { PTXF txfp = ((PTDBVCT)To_Tdb)->Txfp;; - if (trace > 1) + if (trace(2)) htrc("VCT WriteColumn: col %s R%d coluse=%.4X status=%.4X buf_type=%d\n", Name, To_Tdb->GetTdb_No(), ColUse, Status, Buf_Type); diff --git a/storage/connect/tabwmi.cpp b/storage/connect/tabwmi.cpp index 335ffce5d7f..8a8e1bcbcb6 100644 --- a/storage/connect/tabwmi.cpp +++ b/storage/connect/tabwmi.cpp @@ -34,7 +34,7 @@ PWMIUT InitWMI(PGLOBAL g, PCSZ nsp, PCSZ classname) HRESULT res; PWMIUT wp = (PWMIUT)PlugSubAlloc(g, NULL, sizeof(WMIUTIL)); - if (trace) + if (trace(1)) htrc("WMIColumns class %s space %s\n", SVP(classname), SVP(nsp)); /*********************************************************************/ @@ -103,7 +103,7 @@ PWMIUT InitWMI(PGLOBAL g, PCSZ nsp, PCSZ classname) loc->Release(); - if (trace) + if (trace(1)) htrc("Successfully connected to namespace.\n"); /*********************************************************************/ diff --git a/storage/connect/tabxml.cpp b/storage/connect/tabxml.cpp index 6402f48e090..759bb370b43 100644 --- a/storage/connect/tabxml.cpp +++ b/storage/connect/tabxml.cpp @@ -153,7 +153,7 @@ PQRYRES XMLColumns(PGLOBAL g, char *db, char *tab, PTOS topt, bool info) lvl = (lvl < 0) ? 0 : (lvl > 16) ? 
16 : lvl; } // endif fn - if (trace) + if (trace(1)) htrc("File %s lvl=%d\n", topt->filename, lvl); tdp = new(g) XMLDEF; @@ -362,7 +362,7 @@ PQRYRES XMLColumns(PGLOBAL g, char *db, char *tab, PTOS topt, bool info) txmp->CloseDB(g); skipit: - if (trace) + if (trace(1)) htrc("XMLColumns: n=%d len=%d\n", n, length[0]); /*********************************************************************/ @@ -686,7 +686,7 @@ PTDB TDBXML::Clone(PTABS t) /***********************************************************************/ PCOL TDBXML::MakeCol(PGLOBAL g, PCOLDEF cdp, PCOL cprec, int n) { - if (trace) + if (trace(1)) htrc("TDBXML: MakeCol %s n=%d\n", (cdp) ? cdp->GetName() : "", n); return new(g) XMLCOL(cdp, this, cprec, n); @@ -720,7 +720,7 @@ int TDBXML::LoadTableFile(PGLOBAL g, char *filename) if (Docp) return rc; // Already done - if (trace) + if (trace(1)) htrc("TDBXML: loading %s\n", filename); /*********************************************************************/ @@ -753,7 +753,7 @@ int TDBXML::LoadTableFile(PGLOBAL g, char *filename) return RC_FX; } // endif init - if (trace) + if (trace(1)) htrc("TDBXML: parsing %s rc=%d\n", filename, rc); // Parse the XML file @@ -1182,7 +1182,7 @@ int TDBXML::ReadDB(PGLOBAL g) } // endswitch recpos } else { - if (trace) + if (trace(1)) htrc("TDBXML ReadDB: Irow=%d Nrow=%d\n", Irow, Nrow); // This is to force the table to be expanded when constructing @@ -1209,7 +1209,7 @@ int TDBXML::ReadDB(PGLOBAL g) } // endif To_Kindex if (!same) { - if (trace > 1) + if (trace(2)) htrc("TDBXML ReadDB: Irow=%d RowNode=%p\n", Irow, RowNode); // Get the new row node @@ -1472,7 +1472,7 @@ bool XMLCOL::ParseXpath(PGLOBAL g, bool mode) } else strcat(pbuf, Xname); - if (trace) + if (trace(1)) htrc("XMLCOL: pbuf=%s\n", pbuf); // For Update or Insert the Xpath must be analyzed @@ -1555,7 +1555,7 @@ bool XMLCOL::ParseXpath(PGLOBAL g, bool mode) if (Type || Nod) Tdbp->Hasnod = true; - if (trace) + if (trace(1)) htrc("XMLCOL: Xname=%s\n", pbuf); // Save the 
calculated Xpath @@ -1679,7 +1679,7 @@ void XMLCOL::WriteColumn(PGLOBAL g) int i, n, k = 0; PXNODE TopNode = NULL; - if (trace > 1) + if (trace(2)) htrc("XML WriteColumn: col %s R%d coluse=%.4X status=%.4X\n", Name, Tdbp->GetTdb_No(), ColUse, Status); @@ -1913,7 +1913,7 @@ void XMULCOL::WriteColumn(PGLOBAL g) int i, n, len, k = 0; PXNODE TopNode = NULL; - if (trace) + if (trace(1)) htrc("XML WriteColumn: col %s R%d coluse=%.4X status=%.4X\n", Name, Tdbp->GetTdb_No(), ColUse, Status); @@ -2129,7 +2129,7 @@ void XPOSCOL::WriteColumn(PGLOBAL g) char *p, buf[16]; int i, k, n; - if (trace) + if (trace(1)) htrc("XML WriteColumn: col %s R%d coluse=%.4X status=%.4X\n", Name, Tdbp->GetTdb_No(), ColUse, Status); diff --git a/storage/connect/user_connect.cc b/storage/connect/user_connect.cc index cb62667c0fe..9532d7c2a8d 100644 --- a/storage/connect/user_connect.cc +++ b/storage/connect/user_connect.cc @@ -178,7 +178,7 @@ bool user_connect::CheckCleanup(bool force) g->Mrr = 0; last_query_id= thdp->query_id; - if (trace && !force) + if (trace(65) && !force) printf("=====> Begin new query %llu\n", last_query_id); return true; diff --git a/storage/connect/valblk.cpp b/storage/connect/valblk.cpp index 018c7ee3fe1..73ca135691c 100644 --- a/storage/connect/valblk.cpp +++ b/storage/connect/valblk.cpp @@ -53,7 +53,7 @@ PVBLK AllocValBlock(PGLOBAL g, void *mp, int type, int nval, int len, { PVBLK blkp; - if (trace) + if (trace(1)) htrc("AVB: mp=%p type=%d nval=%d len=%d check=%u blank=%u\n", mp, type, nval, len, check, blank); diff --git a/storage/connect/value.cpp b/storage/connect/value.cpp index 7af70ea1349..90c01f72b35 100644 --- a/storage/connect/value.cpp +++ b/storage/connect/value.cpp @@ -337,7 +337,7 @@ PVAL AllocateValue(PGLOBAL g, void *value, short type, short prec) { PVAL valp; - if (trace) + if (trace(1)) htrc("AllocateConstant: value=%p type=%hd\n", value, type); switch (type) { @@ -727,7 +727,7 @@ bool TYPVAL::SetValue_char(const char *p, int n) else Tval = (TYPE)val; 
- if (trace > 1) { + if (trace(2)) { char buf[64]; htrc(strcat(strcat(strcpy(buf, " setting %s to: "), Fmt), "\n"), GetTypeName(Type), Tval); @@ -750,7 +750,7 @@ bool TYPVAL::SetValue_char(const char *p, int n) buf[n] = '\0'; Tval = atof(buf); - if (trace > 1) + if (trace(2)) htrc(" setting double: '%s' -> %lf\n", buf, Tval); Null = false; @@ -996,7 +996,7 @@ int TYPVAL::CompareValue(PVAL vp) // Process filtering on numeric values. TYPE n = GetTypedValue(vp); -//if (trace) +//if (trace(1)) // htrc(" Comparing: val=%d,%d\n", Tval, n); return (Tval > n) ? 1 : (Tval < n) ? (-1) : 0; @@ -1384,7 +1384,7 @@ bool TYPVAL::SetValue_char(const char *cp, int n) strncpy(Strp, cp, n); Strp[n] = '\0'; - if (trace > 1) + if (trace(2)) htrc(" Setting string to: '%s'\n", Strp); } else @@ -1631,7 +1631,7 @@ int TYPVAL::CompareValue(PVAL vp) int n; //assert(vp->GetType() == Type); - if (trace) + if (trace(1)) htrc(" Comparing: val='%s','%s'\n", Strp, vp->GetCharValue()); // Process filtering on character strings. @@ -1656,14 +1656,14 @@ bool TYPVAL::Compute(PGLOBAL g, PVAL *vp, int np, OPVAL op) char *p[2], val[2][32]; int i; - if (trace) + if (trace(1)) htrc("Compute: np=%d op=%d\n", np, op); for (i = 0; i < np; i++) if (!vp[i]->IsNull()) { p[i] = vp[i]->GetCharString(val[i]); - if (trace) + if (trace(1)) htrc("p[%d]=%s\n", i, p[i]); } else @@ -1679,7 +1679,7 @@ bool TYPVAL::Compute(PGLOBAL g, PVAL *vp, int np, OPVAL op) if ((i = Len - (signed)strlen(Strp)) > 0) strncat(Strp, p[np - 1], i); - if (trace) + if (trace(1)) htrc("Strp=%s\n", Strp); break; @@ -1854,7 +1854,7 @@ int DECVAL::CompareValue(PVAL vp) // Process filtering on numeric values. double f = atof(Strp), n = vp->GetFloatValue(); -//if (trace) +//if (trace(1)) // htrc(" Comparing: val=%d,%d\n", f, n); return (f > n) ? 1 : (f < n) ? 
(-1) : 0; @@ -2410,7 +2410,7 @@ void DTVAL::SetTimeShift(void) Shift = (int)mktime(&dtm) - 86400; - if (trace) + if (trace(1)) htrc("DTVAL Shift=%d\n", Shift); } // end of SetTimeShift @@ -2485,7 +2485,7 @@ bool DTVAL::MakeTime(struct tm *ptm) int n, y = ptm->tm_year; time_t t = mktime_mysql(ptm); - if (trace > 1) + if (trace(2)) htrc("MakeTime from (%d,%d,%d,%d,%d,%d)\n", ptm->tm_year, ptm->tm_mon, ptm->tm_mday, ptm->tm_hour, ptm->tm_min, ptm->tm_sec); @@ -2508,7 +2508,7 @@ bool DTVAL::MakeTime(struct tm *ptm) } Tval= (int) t; - if (trace > 1) + if (trace(2)) htrc("MakeTime Ival=%d\n", Tval); return false; @@ -2528,14 +2528,14 @@ bool DTVAL::MakeDate(PGLOBAL g, int *val, int nval) datm.tm_mon=0; datm.tm_year=70; - if (trace > 1) + if (trace(2)) htrc("MakeDate from(%d,%d,%d,%d,%d,%d) nval=%d\n", val[0], val[1], val[2], val[3], val[4], val[5], nval); for (i = 0; i < nval; i++) { n = val[i]; -// if (trace > 1) +// if (trace(2)) // htrc("i=%d n=%d\n", i, n); switch (i) { @@ -2545,7 +2545,7 @@ bool DTVAL::MakeDate(PGLOBAL g, int *val, int nval) datm.tm_year = n; -// if (trace > 1) +// if (trace(2)) // htrc("n=%d tm_year=%d\n", n, datm.tm_year); break; @@ -2564,7 +2564,7 @@ bool DTVAL::MakeDate(PGLOBAL g, int *val, int nval) datm.tm_mon = m; datm.tm_year += n; -// if (trace > 1) +// if (trace(2)) // htrc("n=%d m=%d tm_year=%d tm_mon=%d\n", n, m, datm.tm_year, datm.tm_mon); break; @@ -2581,7 +2581,7 @@ bool DTVAL::MakeDate(PGLOBAL g, int *val, int nval) datm.tm_mday = m; datm.tm_year += n; -// if (trace > 1) +// if (trace(2)) // htrc("n=%d m=%d tm_year=%d tm_mon=%d\n", n, m, datm.tm_year, datm.tm_mon); break; @@ -2592,7 +2592,7 @@ bool DTVAL::MakeDate(PGLOBAL g, int *val, int nval) } // endfor i - if (trace > 1) + if (trace(2)) htrc("MakeDate datm=(%d,%d,%d,%d,%d,%d)\n", datm.tm_year, datm.tm_mon, datm.tm_mday, datm.tm_hour, datm.tm_min, datm.tm_sec); @@ -2667,7 +2667,7 @@ bool DTVAL::SetValue_char(const char *p, int n) ndv = ExtractDate(Sdate, Pdtp, DefYear, dval); 
MakeDate(NULL, dval, ndv); - if (trace > 1) + if (trace(2)) htrc(" setting date: '%s' -> %d\n", Sdate, Tval); Null = (Nullable && ndv == 0); @@ -2694,7 +2694,7 @@ void DTVAL::SetValue_psz(PCSZ p) ndv = ExtractDate(Sdate, Pdtp, DefYear, dval); MakeDate(NULL, dval, ndv); - if (trace > 1) + if (trace(2)) htrc(" setting date: '%s' -> %d\n", Sdate, Tval); Null = (Nullable && ndv == 0); @@ -2849,13 +2849,13 @@ bool DTVAL::FormatValue(PVAL vp, PCSZ fmt) char *buf = (char*)vp->GetTo_Val(); // Should be big enough struct tm tm, *ptm = GetGmTime(&tm); - if (trace > 1) + if (trace(2)) htrc("FormatValue: ptm=%p len=%d\n", ptm, vp->GetValLen()); if (ptm) { size_t n = strftime(buf, vp->GetValLen(), fmt, ptm); - if (trace > 1) + if (trace(2)) htrc("strftime: n=%d buf=%s\n", n, (n) ? buf : "???"); return (n == 0); diff --git a/storage/connect/xindex.cpp b/storage/connect/xindex.cpp index 30dce3b7fef..efefc17b5f5 100755 --- a/storage/connect/xindex.cpp +++ b/storage/connect/xindex.cpp @@ -344,7 +344,7 @@ bool XINDEX::Make(PGLOBAL g, PIXDEF sxp) } // endif n - if (trace) + if (trace(1)) htrc("XINDEX Make: n=%d\n", n); // File position must be stored @@ -417,7 +417,7 @@ bool XINDEX::Make(PGLOBAL g, PIXDEF sxp) if (kcp->Init(g, colp, n, true, 0)) return true; - if (trace) + if (trace(1)) htrc("Adding colp=%p Buf_Type=%d size=%d\n", colp, colp->GetResultType(), n); @@ -484,7 +484,7 @@ bool XINDEX::Make(PGLOBAL g, PIXDEF sxp) } else To_Rec[nkey] = Tdbp->GetRecpos(); - if (trace > 1) + if (trace(2)) htrc("Make: To_Rec[%d]=%d\n", nkey, To_Rec[nkey]); /*******************************************************************/ @@ -553,7 +553,7 @@ bool XINDEX::Make(PGLOBAL g, PIXDEF sxp) if ((Ndif = Qsort(g, Num_K)) < 0) goto err; // Error during sort - if (trace) + if (trace(1)) htrc("Make: Nk=%d n=%d Num_K=%d Ndif=%d addcolp=%p BlkFil=%p X=%p\n", Nk, n, Num_K, Ndif, addcolp, Tdbp->To_BlkFil, X); @@ -883,7 +883,7 @@ bool XINDEX::SaveIndex(PGLOBAL g, PIXDEF sxp) n[5] = Nblk; n[6] = Sblk; n[7] = 
Srtd ? 1 : 0; // Values are sorted in the file - if (trace) { + if (trace(1)) { htrc("Saving index %s\n", Xdp->GetName()); htrc("ID=%d Nk=%d nof=%d Num_K=%d Incr=%d Nblk=%d Sblk=%d Srtd=%d\n", ID, Nk, nof, Num_K, Incr, Nblk, Sblk, Srtd); @@ -926,7 +926,7 @@ bool XINDEX::SaveIndex(PGLOBAL g, PIXDEF sxp) // dup->ProgCur += 5; } // endfor kcp - if (trace) + if (trace(1)) htrc("Index %s saved, Size=%d\n", Xdp->GetName(), size); end: @@ -1016,7 +1016,7 @@ bool XINDEX::Init(PGLOBAL g) PlugSetPath(fn, fn, Tdbp->GetPath()); - if (trace) + if (trace(1)) htrc("Index %s file: %s\n", Xdp->GetName(), fn); /*********************************************************************/ @@ -1039,7 +1039,7 @@ bool XINDEX::Init(PGLOBAL g) } else Srtd = false; - if (trace) + if (trace(1)) htrc("nv=%d %d %d %d %d %d %d (%d)\n", nv[0], nv[1], nv[2], nv[3], nv[4], nv[5], nv[6], Srtd); @@ -1048,7 +1048,7 @@ bool XINDEX::Init(PGLOBAL g) if (/*nv[0] != ID ||*/ nv[1] != Nk) { sprintf(g->Message, MSG(BAD_INDEX_FILE), fn); - if (trace) + if (trace(1)) htrc("nv[0]=%d ID=%d nv[1]=%d Nk=%d\n", nv[0], ID, nv[1], Nk); goto err; @@ -1269,7 +1269,7 @@ bool XINDEX::MapInit(PGLOBAL g) PlugSetPath(fn, fn, Tdbp->GetPath()); - if (trace) + if (trace(1)) htrc("Index %s file: %s\n", Xdp->GetName(), fn); /*********************************************************************/ @@ -1300,7 +1300,7 @@ bool XINDEX::MapInit(PGLOBAL g) nv0 = nv[0]; } // endif nv - if (trace) + if (trace(1)) htrc("nv=%d %d %d %d %d %d %d %d\n", nv0, nv[1], nv[2], nv[3], nv[4], nv[5], nv[6], Srtd); @@ -1310,7 +1310,7 @@ bool XINDEX::MapInit(PGLOBAL g) // Not this index sprintf(g->Message, MSG(BAD_INDEX_FILE), fn); - if (trace) + if (trace(1)) htrc("nv0=%d ID=%d nv[1]=%d Nk=%d\n", nv0, ID, nv[1], Nk); goto err; @@ -1483,7 +1483,7 @@ bool XINDEX::GetAllSizes(PGLOBAL g,/* int &ndif,*/ int &numk) PlugSetPath(fn, fn, Tdbp->GetPath()); - if (trace) + if (trace(1)) htrc("Index %s file: %s\n", Xdp->GetName(), fn); 
/*********************************************************************/ @@ -1500,7 +1500,7 @@ bool XINDEX::GetAllSizes(PGLOBAL g,/* int &ndif,*/ int &numk) if (X->Read(g, nv, NZ, sizeof(int))) goto err; - if (trace) + if (trace(1)) htrc("nv=%d %d %d %d\n", nv[0], nv[1], nv[2], nv[3]); // The test on ID was suppressed because MariaDB can change an index ID @@ -1508,7 +1508,7 @@ bool XINDEX::GetAllSizes(PGLOBAL g,/* int &ndif,*/ int &numk) if (/*nv[0] != ID ||*/ nv[1] != Nk) { sprintf(g->Message, MSG(BAD_INDEX_FILE), fn); - if (trace) + if (trace(1)) htrc("nv[0]=%d ID=%d nv[1]=%d Nk=%d\n", nv[0], ID, nv[1], Nk); goto err; @@ -1770,7 +1770,7 @@ int XINDEX::Fetch(PGLOBAL g) if (Num_K == 0) return -1; // means end of file - if (trace > 1) + if (trace(2)) htrc("XINDEX Fetch: Op=%d\n", Op); /*********************************************************************/ @@ -1834,7 +1834,7 @@ int XINDEX::Fetch(PGLOBAL g) Nth++; - if (trace > 1) + if (trace(2)) htrc("Fetch: Looking for new value Nth=%d\n", Nth); Cur_K = FastFind(); @@ -1907,7 +1907,7 @@ int XINDEX::FastFind(void) sup = To_KeyCol->Ndf; } // endif Nblk - if (trace > 2) + if (trace(4)) htrc("XINDEX FastFind: Nblk=%d Op=%d inf=%d sup=%d\n", Nblk, Op, inf, sup); @@ -1985,7 +1985,7 @@ int XINDEX::FastFind(void) curk = (kcp->Kof) ? 
kcp->Kof[kcp->Val_K] : kcp->Val_K; } // endfor kcp - if (trace > 2) + if (trace(4)) htrc("XINDEX FastFind: curk=%d\n", curk); return curk; @@ -2123,7 +2123,7 @@ int XINDXS::Fetch(PGLOBAL g) if (Num_K == 0) return -1; // means end of file - if (trace > 1) + if (trace(2)) htrc("XINDXS Fetch: Op=%d\n", Op); /*********************************************************************/ @@ -2176,7 +2176,7 @@ int XINDXS::Fetch(PGLOBAL g) else Nth++; - if (trace > 1) + if (trace(2)) htrc("Fetch: Looking for new value Nth=%d\n", Nth); Cur_K = FastFind(); @@ -2243,7 +2243,7 @@ int XINDXS::FastFind(void) sup = Ndif; } // endif Nblk - if (trace > 2) + if (trace(4)) htrc("XINDXS FastFind: Nblk=%d Op=%d inf=%d sup=%d\n", Nblk, Op, inf, sup); @@ -2269,7 +2269,7 @@ int XINDXS::FastFind(void) n = 0; } // endif sup - if (trace > 2) + if (trace(4)) htrc("XINDXS FastFind: n=%d i=%d\n", n, i); // Loop on kcp because of dynamic indexing @@ -2337,7 +2337,7 @@ bool XFILE::Open(PGLOBAL g, char *filename, int id, MODE mode) } // endswitch mode if (!(Xfile= global_fopen(g, MSGID_OPEN_ERROR_AND_STRERROR, filename, pmod))) { - if (trace) + if (trace(1)) htrc("Open: %s\n", g->Message); return true; @@ -2354,7 +2354,7 @@ bool XFILE::Open(PGLOBAL g, char *filename, int id, MODE mode) NewOff.v.Low = (int)ftell(Xfile); - if (trace) + if (trace(1)) htrc("XFILE Open: NewOff.v.Low=%d\n", NewOff.v.Low); } else if (mode == MODE_WRITE) { @@ -2365,7 +2365,7 @@ bool XFILE::Open(PGLOBAL g, char *filename, int id, MODE mode) fseek(Xfile, 0, SEEK_END); NewOff.v.Low = (int)ftell(Xfile); - if (trace) + if (trace(1)) htrc("XFILE Open: NewOff.v.Low=%d\n", NewOff.v.Low); } // endif id @@ -2377,7 +2377,7 @@ bool XFILE::Open(PGLOBAL g, char *filename, int id, MODE mode) return true; } // endif MAX_INDX - if (trace) + if (trace(1)) htrc("XFILE Open: noff[%d].v.Low=%d\n", id, noff[id].v.Low); // Position the cursor at the offset of this index @@ -2510,7 +2510,7 @@ bool XHUGE::Open(PGLOBAL g, char *filename, int id, MODE 
mode) return true; } // endif - if (trace) + if (trace(1)) htrc(" Xopen: filename=%s id=%d mode=%d\n", filename, id, mode); #if defined(__WIN__) @@ -2554,7 +2554,7 @@ bool XHUGE::Open(PGLOBAL g, char *filename, int id, MODE mode) return true; } // endif Hfile - if (trace) + if (trace(1)) htrc(" access=%p share=%p creation=%d handle=%p fn=%s\n", access, share, creation, Hfile, filename); @@ -2628,13 +2628,13 @@ bool XHUGE::Open(PGLOBAL g, char *filename, int id, MODE mode) if (Hfile == INVALID_HANDLE_VALUE) { /*rc = errno;*/ - if (trace) + if (trace(1)) htrc("Open: %s\n", g->Message); return true; } // endif Hfile - if (trace) + if (trace(1)) htrc(" oflag=%p mode=%d handle=%d fn=%s\n", oflag, mode, Hfile, filename); @@ -2647,7 +2647,7 @@ bool XHUGE::Open(PGLOBAL g, char *filename, int id, MODE mode) return true; } // endif - if (trace) + if (trace(1)) htrc("INSERT: NewOff=%lld\n", NewOff.Val); } else if (mode == MODE_WRITE) { @@ -2657,7 +2657,7 @@ bool XHUGE::Open(PGLOBAL g, char *filename, int id, MODE mode) NewOff.v.Low = write(Hfile, &noff, sizeof(noff)); } // endif id - if (trace) + if (trace(1)) htrc("WRITE: NewOff=%lld\n", NewOff.Val); } else if (mode == MODE_READ && id >= 0) { @@ -2667,7 +2667,7 @@ bool XHUGE::Open(PGLOBAL g, char *filename, int id, MODE mode) return true; } // endif read - if (trace) + if (trace(1)) htrc("noff[%d]=%lld\n", id, noff[id].Val); // Position the cursor at the offset of this index @@ -2705,13 +2705,13 @@ bool XHUGE::Seek(PGLOBAL g, int low, int high, int origin) if (lseek64(Hfile, pos, origin) < 0) { sprintf(g->Message, MSG(ERROR_IN_LSK), errno); - if (trace) + if (trace(1)) htrc("lseek64 error %d\n", errno); return true; } // endif lseek64 - if (trace) + if (trace(1)) htrc("Seek: low=%d high=%d\n", low, high); #endif // UNIX @@ -2750,13 +2750,13 @@ bool XHUGE::Read(PGLOBAL g, void *buf, int n, int size) #else // UNIX ssize_t count = (ssize_t)(n * size); - if (trace) + if (trace(1)) htrc("Hfile=%d n=%d size=%d count=%d\n", Hfile, 
n, size, count); if (read(Hfile, buf, count) != count) { sprintf(g->Message, MSG(READ_ERROR), "Index file", strerror(errno)); - if (trace) + if (trace(1)) htrc("read error %d\n", errno); rc = true; @@ -2810,7 +2810,7 @@ int XHUGE::Write(PGLOBAL g, void *buf, int n, int size, bool& rc) /***********************************************************************/ void XHUGE::Close(char *fn, int id) { - if (trace) + if (trace(1)) htrc("XHUGE::Close: fn=%s id=%d NewOff=%lld\n", fn, id, NewOff.Val); #if defined(__WIN__) @@ -3022,7 +3022,7 @@ bool KXYCOL::Init(PGLOBAL g, PCOL colp, int n, bool sm, int kln) Prefix = true; } // endif kln - if (trace) + if (trace(1)) htrc("KCOL(%p) Init: col=%s n=%d type=%d sm=%d\n", this, colp->GetName(), n, colp->GetResultType(), sm); @@ -3076,7 +3076,7 @@ BYTE* KXYCOL::MapInit(PGLOBAL g, PCOL colp, int *n, BYTE *m) Type = colp->GetResultType(); - if (trace) + if (trace(1)) htrc("MapInit(%p): colp=%p type=%d n=%d len=%d m=%p\n", this, colp, Type, n[0], len, m); @@ -3196,7 +3196,7 @@ bool KXYCOL::InitFind(PGLOBAL g, PXOB xp) Valp->SetValue_pval(xp->GetValue(), false); } // endif Type - if (trace > 1) { + if (trace(2)) { char buf[32]; htrc("KCOL InitFind: value=%s\n", Valp->GetCharString(buf)); @@ -3237,7 +3237,7 @@ int KXYCOL::Compare(int i1, int i2) // Do the actual comparison between values. register int k = Kblp->CompVal(i1, i2); - if (trace > 2) + if (trace(4)) htrc("Compare done result=%d\n", k); return (Asc) ? k : -k; @@ -3249,7 +3249,7 @@ int KXYCOL::Compare(int i1, int i2) int KXYCOL::CompVal(int i) { // Do the actual comparison between numerical values. 
- if (trace > 2) { + if (trace(4)) { register int k = (int)Kblp->CompVal(Valp, (int)i); htrc("Compare done result=%d\n", k); From dd07e30cb015236f3abfaa3aaab05587dee62a6d Mon Sep 17 00:00:00 2001 From: Olivier Bertrand Date: Tue, 30 Jan 2018 23:30:06 +0100 Subject: [PATCH 002/139] - Change the connect_xtrace variable to from int to set modified: storage/connect/inihandl.cpp --- storage/connect/inihandl.cpp | 54 ++++++++++++++++++------------------ 1 file changed, 27 insertions(+), 27 deletions(-) diff --git a/storage/connect/inihandl.cpp b/storage/connect/inihandl.cpp index 96ae0a67a6b..c039a980bcb 100644 --- a/storage/connect/inihandl.cpp +++ b/storage/connect/inihandl.cpp @@ -293,7 +293,7 @@ static PROFILESECTION *PROFILE_Load( FILE *file ) next_key = §ion->key; prev_key = NULL; - if (trace > 1) + if (trace(2)) htrc("New section: '%s'\n",section->name); continue; @@ -336,7 +336,7 @@ static PROFILESECTION *PROFILE_Load( FILE *file ) next_key = &key->next; prev_key = key; - if (trace > 1) + if (trace(2)) htrc("New key: name='%s', value='%s'\n", key->name,key->value?key->value:"(none)"); @@ -359,7 +359,7 @@ static BOOL PROFILE_FlushFile(void) FILE *file = NULL; struct stat buf; - if (trace > 1) + if (trace(2)) htrc("PROFILE_FlushFile: CurProfile=%p\n", CurProfile); if (!CurProfile) { @@ -398,7 +398,7 @@ static BOOL PROFILE_FlushFile(void) return FALSE; } // endif !file - if (trace > 1) + if (trace(2)) htrc("Saving '%s'\n", CurProfile->filename); PROFILE_Save(file, CurProfile->section); @@ -447,7 +447,7 @@ static BOOL PROFILE_Open(LPCSTR filename) struct stat buf; PROFILE *tempProfile; - if (trace > 1) + if (trace(2)) htrc("PROFILE_Open: CurProfile=%p N=%d\n", CurProfile, N_CACHED_PROFILES); /* First time around */ @@ -468,7 +468,7 @@ static BOOL PROFILE_Open(LPCSTR filename) /* Check for a match */ for (i = 0; i < N_CACHED_PROFILES; i++) { - if (trace > 1) + if (trace(2)) htrc("MRU=%s i=%d\n", SVP(MRUProfile[i]->filename), i); if (MRUProfile[i]->filename && 
!strcmp(filename, MRUProfile[i]->filename)) { @@ -483,11 +483,11 @@ static BOOL PROFILE_Open(LPCSTR filename) } // endif i if (!stat(CurProfile->filename, &buf) && CurProfile->mtime == buf.st_mtime) { - if (trace > 1) + if (trace(2)) htrc("(%s): already opened (mru=%d)\n", filename, i); } else { - if (trace > 1) + if (trace(2)) htrc("(%s): already opened, needs refreshing (mru=%d)\n", filename, i); } // endif stat @@ -535,11 +535,11 @@ static BOOL PROFILE_Open(LPCSTR filename) // strcpy(p, filename); // _strlwr(p); - if (trace > 1) + if (trace(2)) htrc("Opening %s\n", filename); if ((file = fopen(filename, "r"))) { - if (trace > 1) + if (trace(2)) htrc("(%s): found it\n", filename); // CurProfile->unix_name = malloc(strlen(buffer)+1); @@ -574,12 +574,12 @@ void PROFILE_Close(LPCSTR filename) struct stat buf; PROFILE *tempProfile; - if (trace > 1) + if (trace(2)) htrc("PROFILE_Close: CurProfile=%p N=%d\n", CurProfile, N_CACHED_PROFILES); /* Check for a match */ for (i = 0; i < N_CACHED_PROFILES; i++) { - if (trace > 1) + if (trace(2)) htrc("MRU=%s i=%d\n", SVP(MRUProfile[i]->filename), i); if (MRUProfile[i]->filename && !strcmp(filename, MRUProfile[i]->filename)) { @@ -591,7 +591,7 @@ void PROFILE_Close(LPCSTR filename) CurProfile=tempProfile; } // endif i - if (trace > 1) { + if (trace(2)) { if (!stat(CurProfile->filename, &buf) && CurProfile->mtime == buf.st_mtime) htrc("(%s): already opened (mru=%d)\n", filename, i); else @@ -620,7 +620,7 @@ void PROFILE_End(void) { int i; - if (trace) + if (trace(3)) htrc("PROFILE_End: CurProfile=%p N=%d\n", CurProfile, N_CACHED_PROFILES); if (!CurProfile) // Sergey Vojtovich @@ -628,7 +628,7 @@ void PROFILE_End(void) /* Close all opened files and free the cache structure */ for (i = 0; i < N_CACHED_PROFILES; i++) { - if (trace) + if (trace(3)) htrc("MRU=%s i=%d\n", SVP(MRUProfile[i]->filename), i); // CurProfile = MRUProfile[i]; Sergey Vojtovich @@ -894,7 +894,7 @@ static int PROFILE_GetSectionNames(LPSTR buffer, uint len) uint 
f,l; PROFILESECTION *section; - if (trace > 1) + if (trace(2)) htrc("GetSectionNames: buffer=%p len=%u\n", buffer, len); if (!buffer || !len) @@ -909,17 +909,17 @@ static int PROFILE_GetSectionNames(LPSTR buffer, uint len) buf = buffer; section = CurProfile->section; - if (trace > 1) + if (trace(2)) htrc("GetSectionNames: section=%p\n", section); while (section != NULL) { - if (trace > 1) + if (trace(2)) htrc("section=%s\n", section->name); if (section->name[0]) { l = strlen(section->name) + 1; - if (trace > 1) + if (trace(2)) htrc("l=%u f=%u\n", l, f); if (l > f) { @@ -982,7 +982,7 @@ static int PROFILE_GetString(LPCSTR section, LPCSTR key_name, key = PROFILE_Find(&CurProfile->section, section, key_name, FALSE, FALSE); PROFILE_CopyEntry(buffer, (key && key->value) ? key->value : def_val, len, FALSE); - if (trace > 1) + if (trace(2)) htrc("('%s','%s','%s'): returning '%s'\n", section, key_name, def_val, buffer ); @@ -1010,7 +1010,7 @@ static BOOL PROFILE_SetString(LPCSTR section_name, LPCSTR key_name, LPCSTR value, BOOL create_always) { if (!key_name) { /* Delete a whole section */ - if (trace > 1) + if (trace(2)) htrc("Deleting('%s')\n", section_name); CurProfile->changed |= PROFILE_DeleteSection(&CurProfile->section, @@ -1018,7 +1018,7 @@ static BOOL PROFILE_SetString(LPCSTR section_name, LPCSTR key_name, return TRUE; /* Even if PROFILE_DeleteSection() has failed, this is not an error on application's level.*/ } else if (!value) { /* Delete a key */ - if (trace > 1) + if (trace(2)) htrc("Deleting('%s','%s')\n", section_name, key_name); CurProfile->changed |= PROFILE_DeleteKey(&CurProfile->section, @@ -1027,7 +1027,7 @@ static BOOL PROFILE_SetString(LPCSTR section_name, LPCSTR key_name, } else { /* Set the key value */ PROFILEKEY *key = PROFILE_Find(&CurProfile->section, section_name, key_name, TRUE, create_always); - if (trace > 1) + if (trace(2)) htrc("Setting('%s','%s','%s')\n", section_name, key_name, value); if (!key) @@ -1040,17 +1040,17 @@ static BOOL 
PROFILE_SetString(LPCSTR section_name, LPCSTR key_name, value++; if (!strcmp(key->value, value)) { - if (trace > 1) + if (trace(2)) htrc(" no change needed\n" ); return TRUE; /* No change needed */ } // endif value - if (trace > 1) + if (trace(2)) htrc(" replacing '%s'\n", key->value); free(key->value); - } else if (trace > 1) + } else if (trace(2)) htrc(" creating key\n" ); key->value = (char*)malloc(strlen(value) + 1); @@ -1345,7 +1345,7 @@ GetPrivateProfileSectionNames(LPSTR buffer, DWORD size, LPCSTR filename) { DWORD ret = 0; - if (trace > 1) + if (trace(2)) htrc("GPPSN: filename=%s\n", filename); EnterCriticalSection(&PROFILE_CritSect); From 79c1df4c23da4a40e57920bab0471730c25276f0 Mon Sep 17 00:00:00 2001 From: Olivier Bertrand Date: Wed, 31 Jan 2018 01:08:52 +0100 Subject: [PATCH 003/139] - Change the connect_xtrace variable to from int to set modified: storage/connect/ha_connect.cc modified: storage/connect/libdoc.cpp --- storage/connect/ha_connect.cc | 2 +- storage/connect/libdoc.cpp | 98 +++++++++++++++++------------------ 2 files changed, 50 insertions(+), 50 deletions(-) diff --git a/storage/connect/ha_connect.cc b/storage/connect/ha_connect.cc index 1896da97c3e..fe25d511193 100644 --- a/storage/connect/ha_connect.cc +++ b/storage/connect/ha_connect.cc @@ -174,7 +174,7 @@ #define JSONMAX 10 // JSON Default max grp size extern "C" { - char version[]= "Version 1.06.0006 January 30, 2018"; + char version[]= "Version 1.06.0006 January 31, 2018"; #if defined(__WIN__) char compver[]= "Version 1.06.0006 " __DATE__ " " __TIME__; char slash= '\\'; diff --git a/storage/connect/libdoc.cpp b/storage/connect/libdoc.cpp index 700d247da38..9b30b315441 100644 --- a/storage/connect/libdoc.cpp +++ b/storage/connect/libdoc.cpp @@ -194,7 +194,7 @@ void xtrc(char const *fmt, ...) 
{ va_list ap; va_start (ap, fmt); - + ; //vfprintf(stderr, fmt, ap); vsprintf(s, fmt, ap); if (s[strlen(s)-1] == '\n') @@ -210,7 +210,7 @@ static xmlStrdupFunc Strdup; void xmlMyFree(void *mem) { - if (trace) { + if (trace(1)) { htrc("%.4d Freeing at %p %s\n", ++m, mem, s); *s = 0; } // endif trace @@ -220,7 +220,7 @@ void xmlMyFree(void *mem) void *xmlMyMalloc(size_t size) { void *p = Malloc(size); - if (trace) { + if (trace(1)) { htrc("%.4d Allocating %.5d at %p %s\n", ++m, size, p, s); *s = 0; } // endif trace @@ -230,7 +230,7 @@ void *xmlMyMalloc(size_t size) void *xmlMyMallocAtomic(size_t size) { void *p = MallocA(size); - if (trace) { + if (trace(1)) { htrc("%.4d Atom alloc %.5d at %p %s\n", ++m, size, p, s); *s = 0; } // endif trace @@ -240,7 +240,7 @@ void *xmlMyMallocAtomic(size_t size) void *xmlMyRealloc(void *mem, size_t size) { void *p = Realloc(mem, size); - if (trace) { + if (trace(1)) { htrc("%.4d ReAlloc %.5d to %p from %p %s\n", ++m, size, p, mem, s); *s = 0; } // endif trace @@ -250,7 +250,7 @@ void *xmlMyRealloc(void *mem, size_t size) char *xmlMyStrdup(const char *str) { char *p = Strdup(str); - if (trace) { + if (trace(1)) { htrc("%.4d Duplicating to %p from %p %s %s\n", ++m, p, str, str, s); *s = 0; } // endif trace @@ -339,7 +339,7 @@ void CloseXML2File(PGLOBAL g, PFBLOCK fp, bool all) { PX2BLOCK xp = (PX2BLOCK)fp; - if (trace) + if (trace(1)) htrc("CloseXML2File: xp=%p count=%d\n", xp, (xp) ? 
xp->Count : 0); if (xp && xp->Count > 1 && !all) { @@ -387,7 +387,7 @@ bool LIBXMLDOC::Initialize(PGLOBAL g, PCSZ entry, bool zipped) /******************************************************************/ bool LIBXMLDOC::ParseFile(PGLOBAL g, char *fn) { - if (trace) + if (trace(1)) htrc("ParseFile\n"); if (zip) { @@ -436,7 +436,7 @@ PFBLOCK LIBXMLDOC::LinkXblock(PGLOBAL g, MODE m, int rc, char *fn) /******************************************************************/ bool LIBXMLDOC::NewDoc(PGLOBAL g, PCSZ ver) { - if (trace) + if (trace(1)) htrc("NewDoc\n"); return ((Docp = xmlNewDoc(BAD_CAST ver)) == NULL); @@ -447,7 +447,7 @@ bool LIBXMLDOC::NewDoc(PGLOBAL g, PCSZ ver) /******************************************************************/ void LIBXMLDOC::AddComment(PGLOBAL g, char *txtp) { - if (trace) + if (trace(1)) htrc("AddComment: %s\n", txtp); xmlNodePtr cp = xmlNewDocComment(Docp, BAD_CAST txtp); @@ -459,7 +459,7 @@ void LIBXMLDOC::AddComment(PGLOBAL g, char *txtp) /******************************************************************/ PXNODE LIBXMLDOC::GetRoot(PGLOBAL g) { - if (trace) + if (trace(1)) htrc("GetRoot\n"); xmlNodePtr root = xmlDocGetRootElement(Docp); @@ -475,7 +475,7 @@ PXNODE LIBXMLDOC::GetRoot(PGLOBAL g) /******************************************************************/ PXNODE LIBXMLDOC::NewRoot(PGLOBAL g, char *name) { - if (trace) + if (trace(1)) htrc("NewRoot: %s\n", name); xmlNodePtr root = xmlNewDocNode(Docp, NULL, BAD_CAST name, NULL); @@ -493,7 +493,7 @@ PXNODE LIBXMLDOC::NewRoot(PGLOBAL g, char *name) /******************************************************************/ PXNODE LIBXMLDOC::NewPnode(PGLOBAL g, char *name) { - if (trace) + if (trace(1)) htrc("NewNode: %s\n", name); xmlNodePtr nop; @@ -534,7 +534,7 @@ int LIBXMLDOC::DumpDoc(PGLOBAL g, char *ofn) int rc = 0; FILE *of; - if (trace) + if (trace(1)) htrc("DumpDoc: %s\n", ofn); if (!(of= global_fopen(g, MSGID_CANNOT_OPEN, ofn, "w"))) @@ -576,7 +576,7 @@ int 
LIBXMLDOC::DumpDoc(PGLOBAL g, char *ofn) /******************************************************************/ void LIBXMLDOC::CloseDoc(PGLOBAL g, PFBLOCK xp) { - if (trace) + if (trace(1)) htrc("CloseDoc: xp=%p count=%d\n", xp, (xp) ? xp->Count : 0); //if (xp && xp->Count == 1) { @@ -630,24 +630,24 @@ xmlNodeSetPtr LIBXMLDOC::GetNodeList(PGLOBAL g, xmlNodePtr np, char *xp) { xmlNodeSetPtr nl; - if (trace) + if (trace(1)) htrc("GetNodeList: %s np=%p\n", xp, np); if (!Ctxp) { // Init Xpath - if (trace) + if (trace(1)) htrc("Calling xmlPathInit\n"); xmlXPathInit(); - if (trace) + if (trace(1)) htrc("Calling xmlXPathNewContext Docp=%p\n", Docp); // Create xpath evaluation context if (!(Ctxp = xmlXPathNewContext(Docp))) { strcpy(g->Message, MSG(XPATH_CNTX_ERR)); - if (trace) + if (trace(1)) htrc("Context error: %s\n", g->Message); return NULL; @@ -655,7 +655,7 @@ xmlNodeSetPtr LIBXMLDOC::GetNodeList(PGLOBAL g, xmlNodePtr np, char *xp) // Register namespaces from list (if any) for (PNS nsp = Namespaces; nsp; nsp = nsp->Next) { - if (trace) + if (trace(1)) htrc("Calling xmlXPathRegisterNs Prefix=%s Uri=%s\n", nsp->Prefix, nsp->Uri); @@ -663,7 +663,7 @@ xmlNodeSetPtr LIBXMLDOC::GetNodeList(PGLOBAL g, xmlNodePtr np, char *xp) BAD_CAST nsp->Uri)) { sprintf(g->Message, MSG(REGISTER_ERR), nsp->Prefix, nsp->Uri); - if (trace) + if (trace(1)) htrc("Ns error: %s\n", g->Message); return NULL; @@ -674,7 +674,7 @@ xmlNodeSetPtr LIBXMLDOC::GetNodeList(PGLOBAL g, xmlNodePtr np, char *xp) } // endif Ctxp if (Xop) { - if (trace) + if (trace(1)) htrc("Calling xmlXPathFreeNodeSetList Xop=%p NOFREE=%d\n", Xop, Nofreelist); @@ -698,21 +698,21 @@ xmlNodeSetPtr LIBXMLDOC::GetNodeList(PGLOBAL g, xmlNodePtr np, char *xp) // Set the context to the calling node Ctxp->node = np; - if (trace) + if (trace(1)) htrc("Calling xmlXPathEval %s Ctxp=%p\n", xp, Ctxp); // Evaluate table xpath if (!(Xop = xmlXPathEval(BAD_CAST xp, Ctxp))) { sprintf(g->Message, MSG(XPATH_EVAL_ERR), xp); - if (trace) + if 
(trace(1)) htrc("Path error: %s\n", g->Message); return NULL; } else nl = Xop->nodesetval; - if (trace) + if (trace(1)) htrc("GetNodeList nl=%p n=%p\n", nl, (nl) ? nl->nodeNr : 0); return nl; @@ -811,7 +811,7 @@ XML2NODE::XML2NODE(PXDOC dp, xmlNodePtr np) : XMLNODE(dp) int XML2NODE::GetType(void) { - if (trace) + if (trace(1)) htrc("GetType type=%d\n", Nodep->type); return Nodep->type; @@ -822,7 +822,7 @@ int XML2NODE::GetType(void) /******************************************************************/ PXNODE XML2NODE::GetNext(PGLOBAL g) { - if (trace) + if (trace(1)) htrc("GetNext\n"); if (!Nodep->next) @@ -838,7 +838,7 @@ PXNODE XML2NODE::GetNext(PGLOBAL g) /******************************************************************/ PXNODE XML2NODE::GetChild(PGLOBAL g) { - if (trace) + if (trace(1)) htrc("GetChild\n"); if (!Nodep->children) @@ -856,7 +856,7 @@ RCODE XML2NODE::GetContent(PGLOBAL g, char *buf, int len) { RCODE rc = RC_OK; - if (trace) + if (trace(1)) htrc("GetContent\n"); if (Content) @@ -888,7 +888,7 @@ RCODE XML2NODE::GetContent(PGLOBAL g, char *buf, int len) *p2 = 0; - if (trace) + if (trace(1)) htrc("GetText buf='%s' len=%d\n", buf, len); xmlFree(Content); @@ -896,7 +896,7 @@ RCODE XML2NODE::GetContent(PGLOBAL g, char *buf, int len) } else *buf = '\0'; - if (trace) + if (trace(1)) htrc("GetContent: %s\n", buf); return rc; @@ -907,12 +907,12 @@ RCODE XML2NODE::GetContent(PGLOBAL g, char *buf, int len) /******************************************************************/ bool XML2NODE::SetContent(PGLOBAL g, char *txtp, int len) { - if (trace) + if (trace(1)) htrc("SetContent: %s\n", txtp); xmlChar *buf = xmlEncodeEntitiesReentrant(Docp, BAD_CAST txtp); - if (trace) + if (trace(1)) htrc("SetContent: %s -> %s\n", txtp, buf); xmlNodeSetContent(Nodep, buf); @@ -925,7 +925,7 @@ bool XML2NODE::SetContent(PGLOBAL g, char *txtp, int len) /******************************************************************/ PXNODE XML2NODE::Clone(PGLOBAL g, PXNODE np) { - if (trace) 
+ if (trace(1)) htrc("Clone: np=%p\n", np); if (np) { @@ -941,7 +941,7 @@ PXNODE XML2NODE::Clone(PGLOBAL g, PXNODE np) /******************************************************************/ PXLIST XML2NODE::GetChildElements(PGLOBAL g, char *xp, PXLIST lp) { - if (trace) + if (trace(1)) htrc("GetChildElements: %s\n", xp); return SelectNodes(g, (xp) ? xp : (char*)"*", lp); @@ -952,7 +952,7 @@ PXLIST XML2NODE::GetChildElements(PGLOBAL g, char *xp, PXLIST lp) /******************************************************************/ PXLIST XML2NODE::SelectNodes(PGLOBAL g, char *xp, PXLIST lp) { - if (trace) + if (trace(1)) htrc("SelectNodes: %s\n", xp); xmlNodeSetPtr nl = ((PXDOC2)Doc)->GetNodeList(g, Nodep, xp); @@ -970,7 +970,7 @@ PXLIST XML2NODE::SelectNodes(PGLOBAL g, char *xp, PXLIST lp) /******************************************************************/ PXNODE XML2NODE::SelectSingleNode(PGLOBAL g, char *xp, PXNODE np) { - if (trace) + if (trace(1)) htrc("SelectSingleNode: %s\n", xp); xmlNodeSetPtr nl = ((PXDOC2)Doc)->GetNodeList(g, Nodep, xp); @@ -994,7 +994,7 @@ PXATTR XML2NODE::GetAttribute(PGLOBAL g, char *name, PXATTR ap) { xmlAttrPtr atp; - if (trace) + if (trace(1)) htrc("GetAttribute: %s\n", SVP(name)); if (name) @@ -1023,7 +1023,7 @@ PXNODE XML2NODE::AddChildNode(PGLOBAL g, PCSZ name, PXNODE np) { char *p, *pn, *pf = NULL, *nmp = PlugDup(g, name); - if (trace) + if (trace(1)) htrc("AddChildNode: %s\n", name); // Is a prefix specified @@ -1074,7 +1074,7 @@ PXNODE XML2NODE::AddChildNode(PGLOBAL g, PCSZ name, PXNODE np) /******************************************************************/ PXATTR XML2NODE::AddProperty(PGLOBAL g, char *name, PXATTR ap) { - if (trace) + if (trace(1)) htrc("AddProperty: %s\n", name); xmlAttrPtr atp = xmlNewProp(Nodep, BAD_CAST name, NULL); @@ -1097,7 +1097,7 @@ PXATTR XML2NODE::AddProperty(PGLOBAL g, char *name, PXATTR ap) /******************************************************************/ void XML2NODE::AddText(PGLOBAL g, PCSZ txtp) { 
- if (trace) + if (trace(1)) htrc("AddText: %s\n", txtp); // This is to avoid a blank line when inserting a new line @@ -1119,7 +1119,7 @@ void XML2NODE::DeleteChild(PGLOBAL g, PXNODE dnp) { xmlErrorPtr xerr; - if (trace) + if (trace(1)) htrc("DeleteChild: node=%p\n", dnp); xmlNodePtr np = ((PNODE2)dnp)->Nodep; @@ -1157,7 +1157,7 @@ void XML2NODE::DeleteChild(PGLOBAL g, PXNODE dnp) return; err: - if (trace) + if (trace(1)) htrc("DeleteChild: errmsg=%s\n", xerr->message); xmlResetError(xerr); @@ -1187,7 +1187,7 @@ int XML2NODELIST::GetLength(void) /******************************************************************/ PXNODE XML2NODELIST::GetItem(PGLOBAL g, int n, PXNODE np) { - if (trace) + if (trace(1)) htrc("GetItem: %d\n", n); if (!Listp || Listp->nodeNr <= n) @@ -1206,7 +1206,7 @@ PXNODE XML2NODELIST::GetItem(PGLOBAL g, int n, PXNODE np) /******************************************************************/ bool XML2NODELIST::DropItem(PGLOBAL g, int n) { - if (trace) + if (trace(1)) htrc("DropItem: n=%d\n", n); // We should do something here @@ -1234,7 +1234,7 @@ XML2ATTR::XML2ATTR(PXDOC dp, xmlAttrPtr ap, xmlNodePtr np) /******************************************************************/ PXATTR XML2ATTR::GetNext(PGLOBAL g) { - if (trace) + if (trace(1)) htrc("Attr GetNext\n"); if (!Atrp->next) @@ -1252,7 +1252,7 @@ RCODE XML2ATTR::GetText(PGLOBAL g, char *buf, int len) RCODE rc = RC_OK; xmlChar *txt; - if (trace) + if (trace(1)) htrc("GetText\n"); if ((txt = xmlGetProp(Atrp->parent, Atrp->name))) { @@ -1269,7 +1269,7 @@ RCODE XML2ATTR::GetText(PGLOBAL g, char *buf, int len) } else *buf = '\0'; - if (trace) + if (trace(1)) htrc("GetText: %s\n", buf); return rc; @@ -1280,7 +1280,7 @@ RCODE XML2ATTR::GetText(PGLOBAL g, char *buf, int len) /******************************************************************/ bool XML2ATTR::SetText(PGLOBAL g, char *txtp, int len) { - if (trace) + if (trace(1)) htrc("SetText: %s %d\n", txtp, len); xmlSetProp(Parent, Atrp->name, BAD_CAST 
txtp); From efe80675f573b6255a399308fae1ede8e8fc4501 Mon Sep 17 00:00:00 2001 From: Olivier Bertrand Date: Fri, 2 Feb 2018 15:27:45 +0100 Subject: [PATCH 004/139] - Remove warning on not used tabtyp variable in connect_assisted_discovery modified: storage/connect/ha_connect.cc --- storage/connect/ha_connect.cc | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/storage/connect/ha_connect.cc b/storage/connect/ha_connect.cc index fe25d511193..4c30938691f 100644 --- a/storage/connect/ha_connect.cc +++ b/storage/connect/ha_connect.cc @@ -174,7 +174,7 @@ #define JSONMAX 10 // JSON Default max grp size extern "C" { - char version[]= "Version 1.06.0006 January 31, 2018"; + char version[]= "Version 1.06.0006 February 02, 2018"; #if defined(__WIN__) char compver[]= "Version 1.06.0006 " __DATE__ " " __TIME__; char slash= '\\'; @@ -5431,7 +5431,7 @@ static int connect_assisted_discovery(handlerton *, THD* thd, #endif // __WIN__ //int hdr, mxe; int port = 0, mxr = 0, rc = 0, mul = 0, lrecl = 0; - PCSZ tabtyp = NULL; +//PCSZ tabtyp = NULL; #if defined(ODBC_SUPPORT) POPARM sop= NULL; PCSZ ucnc= NULL; @@ -5495,7 +5495,7 @@ static int connect_assisted_discovery(handlerton *, THD* thd, #endif // __WIN__ port= atoi(GetListOption(g, "port", topt->oplist, "0")); #if defined(ODBC_SUPPORT) - tabtyp = GetListOption(g, "Tabtype", topt->oplist, NULL); +// tabtyp = GetListOption(g, "Tabtype", topt->oplist, NULL); mxr= atoi(GetListOption(g,"maxres", topt->oplist, "0")); cto= atoi(GetListOption(g,"ConnectTimeout", topt->oplist, "-1")); qto= atoi(GetListOption(g,"QueryTimeout", topt->oplist, "-1")); @@ -5808,7 +5808,8 @@ static int connect_assisted_discovery(handlerton *, THD* thd, break; case FNC_TABLE: - qrp = JDBCTables(g, shm, tab, tabtyp, mxr, true, sjp); +// qrp = JDBCTables(g, shm, tab, tabtyp, mxr, true, sjp); + qrp = JDBCTables(g, shm, tab, NULL, mxr, true, sjp); break; #if 0 case FNC_DSN: From 57ae4992ed8f5e1f381c462774447c3a41d23739 Mon Sep 17 00:00:00 2001 
From: Daniel Bartholomew Date: Tue, 6 Feb 2018 11:29:36 -0500 Subject: [PATCH 005/139] bump the VERSION --- VERSION | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/VERSION b/VERSION index 24d355ca91e..75daccdd9d4 100644 --- a/VERSION +++ b/VERSION @@ -1,3 +1,3 @@ MYSQL_VERSION_MAJOR=10 MYSQL_VERSION_MINOR=0 -MYSQL_VERSION_PATCH=34 +MYSQL_VERSION_PATCH=35 From 273233119c575d34d1c7b6cf649150f36d200242 Mon Sep 17 00:00:00 2001 From: Olivier Bertrand Date: Mon, 12 Feb 2018 16:26:12 +0100 Subject: [PATCH 006/139] - Fix a bug causing CONNECT to loop when expanding a JSON column when the expanded column value is null or void array. - Adding the FullArray option to JSON tables. - Skipping expanded JSON lines when the expanded column value is null. modified: storage/connect/connect.cc modified: storage/connect/tabdos.h modified: storage/connect/tabjson.cpp modified: storage/connect/tabjson.h --- storage/connect/connect.cc | 1 + storage/connect/tabdos.h | 1 + storage/connect/tabjson.cpp | 743 +++++++++++++++++++----------------- storage/connect/tabjson.h | 44 ++- 4 files changed, 444 insertions(+), 345 deletions(-) diff --git a/storage/connect/connect.cc b/storage/connect/connect.cc index 4ce382ca024..d731f7d9838 100644 --- a/storage/connect/connect.cc +++ b/storage/connect/connect.cc @@ -404,6 +404,7 @@ RCODE EvalColumns(PGLOBAL g, PTDB tdbp, bool reset, bool mrr) rc = RC_FX; } catch (const char *msg) { strcpy(g->Message, msg); + rc = RC_NF; } // end catch return rc; diff --git a/storage/connect/tabdos.h b/storage/connect/tabdos.h index 948b357dc1f..bdde37adaad 100644 --- a/storage/connect/tabdos.h +++ b/storage/connect/tabdos.h @@ -29,6 +29,7 @@ class DllExport DOSDEF : public TABDEF { /* Logical table description */ friend class TXTFAM; friend class DBFBASE; friend class UNZIPUTL; + friend class JSONCOL; public: // Constructor DOSDEF(void); diff --git a/storage/connect/tabjson.cpp b/storage/connect/tabjson.cpp index 45c5ef3afc9..3acc2389975 100644 --- 
a/storage/connect/tabjson.cpp +++ b/storage/connect/tabjson.cpp @@ -54,16 +54,16 @@ USETEMP UseTemp(void); char *GetJsonNull(void); -typedef struct _jncol { - struct _jncol *Next; - char *Name; - char *Fmt; - int Type; - int Len; - int Scale; - bool Cbn; - bool Found; -} JCOL, *PJCL; +//typedef struct _jncol { +// struct _jncol *Next; +// char *Name; +// char *Fmt; +// int Type; +// int Len; +// int Scale; +// bool Cbn; +// bool Found; +//} JCOL, *PJCL; /***********************************************************************/ /* JSONColumns: construct the result blocks containing the description */ @@ -76,26 +76,13 @@ PQRYRES JSONColumns(PGLOBAL g, PCSZ db, PCSZ dsn, PTOS topt, bool info) static XFLD fldtyp[] = {FLD_NAME, FLD_TYPE, FLD_TYPENAME, FLD_PREC, FLD_LENGTH, FLD_SCALE, FLD_NULL, FLD_FORMAT}; static unsigned int length[] = {0, 6, 8, 10, 10, 6, 6, 0}; - char *p, colname[65], fmt[129]; - int i, j, lvl, n = 0; + int i, n = 0; int ncol = sizeof(buftyp) / sizeof(int); - bool mgo = (GetTypeID(topt->type) == TAB_MONGO); - PCSZ sep, level; - PVAL valp; - JCOL jcol; - PJCL jcp, fjcp = NULL, pjcp = NULL; - PJPR *jrp, jpp; - PJSON jsp; - PJVAL jvp; - PJOB row; - PJDEF tdp; - TDBJSN *tjnp = NULL; - PJTDB tjsp = NULL; + PJCL jcp; + JSONDISC *pjdc = NULL; PQRYRES qrp; PCOLRES crp; - jcol.Name = jcol.Fmt = NULL; - if (info) { length[0] = 128; length[7] = 256; @@ -107,320 +94,11 @@ PQRYRES JSONColumns(PGLOBAL g, PCSZ db, PCSZ dsn, PTOS topt, bool info) return NULL; } // endif Multiple - /*********************************************************************/ - /* Open the input file. */ - /*********************************************************************/ - level = GetStringTableOption(g, topt, "Level", NULL); + pjdc = new(g) JSONDISC(g, (int*)length); - if (level) { - lvl = atoi(level); - lvl = (lvl > 16) ? 
16 : lvl; - } else - lvl = 0; - - sep = GetStringTableOption(g, topt, "Separator", "."); - - tdp = new(g) JSONDEF; -#if defined(ZIP_SUPPORT) - tdp->Entry = GetStringTableOption(g, topt, "Entry", NULL); - tdp->Zipped = GetBooleanTableOption(g, topt, "Zipped", false); -#endif // ZIP_SUPPORT - tdp->Fn = GetStringTableOption(g, topt, "Filename", NULL); - - if (!(tdp->Database = SetPath(g, db))) + if (!(n = pjdc->GetColumns(g, db, dsn, topt))) return NULL; - tdp->Objname = GetStringTableOption(g, topt, "Object", NULL); - tdp->Base = GetIntegerTableOption(g, topt, "Base", 0) ? 1 : 0; - tdp->Pretty = GetIntegerTableOption(g, topt, "Pretty", 2); - tdp->Xcol = GetStringTableOption(g, topt, "Expand", NULL); - tdp->Accept = GetBooleanTableOption(g, topt, "Accept", false); - tdp->Uri = (dsn && *dsn ? dsn : NULL); - - if (!tdp->Fn && !tdp->Uri) { - strcpy(g->Message, MSG(MISSING_FNAME)); - return NULL; - } // endif Fn - - if (trace(1)) - htrc("File %s objname=%s pretty=%d lvl=%d\n", - tdp->Fn, tdp->Objname, tdp->Pretty, lvl); - - if (tdp->Uri) { -#if defined(JAVA_SUPPORT) || defined(CMGO_SUPPORT) - tdp->Collname = GetStringTableOption(g, topt, "Name", NULL); - tdp->Collname = GetStringTableOption(g, topt, "Tabname", tdp->Collname); - tdp->Schema = GetStringTableOption(g, topt, "Dbname", "test"); - tdp->Options = (PSZ)GetStringTableOption(g, topt, "Colist", "all"); - tdp->Pipe = GetBooleanTableOption(g, topt, "Pipeline", false); - tdp->Driver = (PSZ)GetStringTableOption(g, topt, "Driver", NULL); - tdp->Version = GetIntegerTableOption(g, topt, "Version", 3); - tdp->Wrapname = (PSZ)GetStringTableOption(g, topt, "Wrapper", - (tdp->Version == 2) ? 
"Mongo2Interface" : "Mongo3Interface"); - tdp->Pretty = 0; -#else // !MONGO_SUPPORT - sprintf(g->Message, MSG(NO_FEAT_SUPPORT), "MONGO"); - return NULL; -#endif // !MONGO_SUPPORT - } // endif Uri - - if (tdp->Pretty == 2) { - if (tdp->Zipped) { -#if defined(ZIP_SUPPORT) - tjsp = new(g) TDBJSON(tdp, new(g) UNZFAM(tdp)); -#else // !ZIP_SUPPORT - sprintf(g->Message, MSG(NO_FEAT_SUPPORT), "ZIP"); - return NULL; -#endif // !ZIP_SUPPORT - } else - tjsp = new(g) TDBJSON(tdp, new(g) MAPFAM(tdp)); - - if (tjsp->MakeDocument(g)) - return NULL; - - jsp = (tjsp->GetDoc()) ? tjsp->GetDoc()->GetValue(0) : NULL; - } else { - if (!(tdp->Lrecl = GetIntegerTableOption(g, topt, "Lrecl", 0))) - if (!mgo) { - sprintf(g->Message, "LRECL must be specified for pretty=%d", tdp->Pretty); - return NULL; - } else - tdp->Lrecl = 8192; // Should be enough - - tdp->Ending = GetIntegerTableOption(g, topt, "Ending", CRLF); - - if (tdp->Zipped) { -#if defined(ZIP_SUPPORT) - tjnp = new(g)TDBJSN(tdp, new(g) UNZFAM(tdp)); -#else // !ZIP_SUPPORT - sprintf(g->Message, MSG(NO_FEAT_SUPPORT), "ZIP"); - return NULL; -#endif // !ZIP_SUPPORT - } else if (tdp->Uri) { - if (tdp->Driver && toupper(*tdp->Driver) == 'C') { -#if defined(CMGO_SUPPORT) - tjnp = new(g) TDBJSN(tdp, new(g) CMGFAM(tdp)); -#else - sprintf(g->Message, "Mongo %s Driver not available", "C"); - return NULL; -#endif - } else if (tdp->Driver && toupper(*tdp->Driver) == 'J') { -#if defined(JAVA_SUPPORT) - tjnp = new(g) TDBJSN(tdp, new(g) JMGFAM(tdp)); -#else - sprintf(g->Message, "Mongo %s Driver not available", "Java"); - return NULL; -#endif - } else { // Driver not specified -#if defined(CMGO_SUPPORT) - tjnp = new(g) TDBJSN(tdp, new(g) CMGFAM(tdp)); -#elif defined(JAVA_SUPPORT) - tjnp = new(g) TDBJSN(tdp, new(g) JMGFAM(tdp)); -#else - sprintf(g->Message, MSG(NO_FEAT_SUPPORT), "MONGO"); - return NULL; -#endif - } // endif Driver - - } else - tjnp = new(g) TDBJSN(tdp, new(g) DOSFAM(tdp)); - - tjnp->SetMode(MODE_READ); - - // Allocate the parse 
work memory - PGLOBAL G = (PGLOBAL)PlugSubAlloc(g, NULL, sizeof(GLOBAL)); - memset(G, 0, sizeof(GLOBAL)); - G->Sarea_Size = tdp->Lrecl * 10; - G->Sarea = PlugSubAlloc(g, NULL, G->Sarea_Size); - PlugSubSet(G, G->Sarea, G->Sarea_Size); - G->jump_level = 0; - tjnp->SetG(G); - - if (tjnp->OpenDB(g)) - return NULL; - - switch (tjnp->ReadDB(g)) { - case RC_EF: - strcpy(g->Message, "Void json table"); - case RC_FX: - goto err; - default: - jsp = tjnp->GetRow(); - } // endswitch ReadDB - - } // endif pretty - - if (!(row = (jsp) ? jsp->GetObject() : NULL)) { - strcpy(g->Message, "Can only retrieve columns from object rows"); - goto err; - } // endif row - - jcol.Next = NULL; - jcol.Found = true; - colname[64] = 0; - fmt[128] = 0; - - if (!tdp->Uri) { - *fmt = '$'; - fmt[1] = '.'; - p = fmt + 2; - } else - p = fmt; - - jrp = (PJPR*)PlugSubAlloc(g, NULL, sizeof(PJPR) * MY_MAX(lvl, 0)); - - /*********************************************************************/ - /* Analyse the JSON tree and define columns. */ - /*********************************************************************/ - for (i = 1; ; i++) { - for (jpp = row->GetFirst(); jpp; jpp = jpp->GetNext()) { - for (j = 0; j < lvl; j++) - jrp[j] = NULL; - - more: - strncpy(colname, jpp->GetKey(), 64); - *p = 0; - j = 0; - jvp = jpp->GetVal(); - - retry: - if ((valp = jvp ? 
jvp->GetValue() : NULL)) { - jcol.Type = valp->GetType(); - jcol.Len = valp->GetValLen(); - jcol.Scale = valp->GetValPrec(); - jcol.Cbn = valp->IsNull(); - } else if (!jvp || jvp->IsNull()) { - jcol.Type = TYPE_UNKNOWN; - jcol.Len = jcol.Scale = 0; - jcol.Cbn = true; - } else if (j < lvl) { - if (!*p) - strcat(fmt, colname); - - jsp = jvp->GetJson(); - - switch (jsp->GetType()) { - case TYPE_JOB: - if (!jrp[j]) - jrp[j] = jsp->GetFirst(); - - if (*jrp[j]->GetKey() != '$') { - strncat(strncat(fmt, sep, 128), jrp[j]->GetKey(), 128); - strncat(strncat(colname, "_", 64), jrp[j]->GetKey(), 64); - } // endif Key - - jvp = jrp[j]->GetVal(); - j++; - break; - case TYPE_JAR: - if (!tdp->Xcol || stricmp(tdp->Xcol, colname)) { - if (tdp->Uri) - strncat(strncat(fmt, sep, 128), "0", 128); - else - strncat(fmt, "[0]", 128); - - } else - strncat(fmt, (tdp->Uri ? sep : "[]"), 128); - - jvp = jsp->GetValue(0); - break; - default: - sprintf(g->Message, "Logical error after %s", fmt); - goto err; - } // endswitch jsp - - goto retry; - } else if (lvl >= 0) { - jcol.Type = TYPE_STRING; - jcol.Len = 256; - jcol.Scale = 0; - jcol.Cbn = true; - } else - continue; - - // Check whether this column was already found - for (jcp = fjcp; jcp; jcp = jcp->Next) - if (!strcmp(colname, jcp->Name)) - break; - - if (jcp) { - if (jcp->Type != jcol.Type) { - if (jcp->Type == TYPE_UNKNOWN) - jcp->Type = jcol.Type; - else if (jcol.Type != TYPE_UNKNOWN) - jcp->Type = TYPE_STRING; - - } // endif Type - - if (*p && (!jcp->Fmt || strlen(jcp->Fmt) < strlen(fmt))) { - jcp->Fmt = PlugDup(g, fmt); - length[7] = MY_MAX(length[7], strlen(fmt)); - } // endif fmt - - jcp->Len = MY_MAX(jcp->Len, jcol.Len); - jcp->Scale = MY_MAX(jcp->Scale, jcol.Scale); - jcp->Cbn |= jcol.Cbn; - jcp->Found = true; - } else if (jcol.Type != TYPE_UNKNOWN || tdp->Accept) { - // New column - jcp = (PJCL)PlugSubAlloc(g, NULL, sizeof(JCOL)); - *jcp = jcol; - jcp->Cbn |= (i > 1); - jcp->Name = PlugDup(g, colname); - length[0] = 
MY_MAX(length[0], strlen(colname)); - - if (*p) { - jcp->Fmt = PlugDup(g, fmt); - length[7] = MY_MAX(length[7], strlen(fmt)); - } else - jcp->Fmt = NULL; - - if (pjcp) { - jcp->Next = pjcp->Next; - pjcp->Next = jcp; - } else - fjcp = jcp; - - n++; - } // endif jcp - - pjcp = jcp; - - for (j = lvl - 1; j >= 0; j--) - if (jrp[j] && (jrp[j] = jrp[j]->GetNext())) - goto more; - - } // endfor jpp - - // Missing column can be null - for (jcp = fjcp; jcp; jcp = jcp->Next) { - jcp->Cbn |= !jcp->Found; - jcp->Found = false; - } // endfor jcp - - if (tdp->Pretty != 2) { - // Read next record - switch (tjnp->ReadDB(g)) { - case RC_EF: - jsp = NULL; - break; - case RC_FX: - goto err; - default: - jsp = tjnp->GetRow(); - } // endswitch ReadDB - - } else - jsp = tjsp->GetDoc()->GetValue(i); - - if (!(row = (jsp) ? jsp->GetObject() : NULL)) - break; - - } // endor i - - if (tdp->Pretty != 2) - tjnp->CloseDB(g); - skipit: if (trace(1)) htrc("JSONColumns: n=%d len=%d\n", n, length[0]); @@ -443,7 +121,7 @@ PQRYRES JSONColumns(PGLOBAL g, PCSZ db, PCSZ dsn, PTOS topt, bool info) /*********************************************************************/ /* Now get the results into blocks. */ /*********************************************************************/ - for (i = 0, jcp = fjcp; jcp; i++, jcp = jcp->Next) { + for (i = 0, jcp = pjdc->fjcp; jcp; i++, jcp = jcp->Next) { if (jcp->Type == TYPE_UNKNOWN) jcp->Type = TYPE_STRING; // Void column @@ -472,13 +150,381 @@ PQRYRES JSONColumns(PGLOBAL g, PCSZ db, PCSZ dsn, PTOS topt, bool info) /* Return the result pointer. */ /*********************************************************************/ return qrp; + } // end of JSONColumns + +/* -------------------------- Class JSONDISC ------------------------- */ + +/***********************************************************************/ +/* Class used to get the columns of a JSON table. 
*/ +/***********************************************************************/ +JSONDISC::JSONDISC(PGLOBAL g, int *lg) +{ + length = lg; + jcp = fjcp = pjcp = NULL; + tjnp = NULL; + jpp = NULL; + tjsp = NULL; + jsp = NULL; + row = NULL; + sep = NULL; + i = n = bf = ncol = lvl = 0; + all = false; +} // end of JSONDISC constructor + +int JSONDISC::GetColumns(PGLOBAL g, PCSZ db, PCSZ dsn, PTOS topt) +{ + bool mgo = (GetTypeID(topt->type) == TAB_MONGO); + PCSZ level = GetStringTableOption(g, topt, "Level", NULL); + + if (level) { + lvl = atoi(level); + lvl = (lvl > 16) ? 16 : lvl; + } else + lvl = 0; + + sep = GetStringTableOption(g, topt, "Separator", "."); + + /*********************************************************************/ + /* Open the input file. */ + /*********************************************************************/ + tdp = new(g) JSONDEF; +#if defined(ZIP_SUPPORT) + tdp->Entry = GetStringTableOption(g, topt, "Entry", NULL); + tdp->Zipped = GetBooleanTableOption(g, topt, "Zipped", false); +#endif // ZIP_SUPPORT + tdp->Fn = GetStringTableOption(g, topt, "Filename", NULL); + + if (!(tdp->Database = SetPath(g, db))) + return NULL; + + tdp->Objname = GetStringTableOption(g, topt, "Object", NULL); + tdp->Base = GetIntegerTableOption(g, topt, "Base", 0) ? 1 : 0; + tdp->Pretty = GetIntegerTableOption(g, topt, "Pretty", 2); + tdp->Xcol = GetStringTableOption(g, topt, "Expand", NULL); + tdp->Accept = GetBooleanTableOption(g, topt, "Accept", false); + tdp->Uri = (dsn && *dsn ? 
dsn : NULL); + + if (!tdp->Fn && !tdp->Uri) { + strcpy(g->Message, MSG(MISSING_FNAME)); + return 0; + } // endif Fn + + if (trace(1)) + htrc("File %s objname=%s pretty=%d lvl=%d\n", + tdp->Fn, tdp->Objname, tdp->Pretty, lvl); + + if (tdp->Uri) { +#if defined(JAVA_SUPPORT) || defined(CMGO_SUPPORT) + tdp->Collname = GetStringTableOption(g, topt, "Name", NULL); + tdp->Collname = GetStringTableOption(g, topt, "Tabname", tdp->Collname); + tdp->Schema = GetStringTableOption(g, topt, "Dbname", "test"); + tdp->Options = (PSZ)GetStringTableOption(g, topt, "Colist", "all"); + tdp->Pipe = GetBooleanTableOption(g, topt, "Pipeline", false); + tdp->Driver = (PSZ)GetStringTableOption(g, topt, "Driver", NULL); + tdp->Version = GetIntegerTableOption(g, topt, "Version", 3); + tdp->Wrapname = (PSZ)GetStringTableOption(g, topt, "Wrapper", + (tdp->Version == 2) ? "Mongo2Interface" : "Mongo3Interface"); + tdp->Pretty = 0; +#else // !MONGO_SUPPORT + sprintf(g->Message, MSG(NO_FEAT_SUPPORT), "MONGO"); + return 0; +#endif // !MONGO_SUPPORT + } // endif Uri + + if (tdp->Pretty == 2) { + if (tdp->Zipped) { +#if defined(ZIP_SUPPORT) + tjsp = new(g) TDBJSON(tdp, new(g) UNZFAM(tdp)); +#else // !ZIP_SUPPORT + sprintf(g->Message, MSG(NO_FEAT_SUPPORT), "ZIP"); + return 0; +#endif // !ZIP_SUPPORT + } else + tjsp = new(g) TDBJSON(tdp, new(g) MAPFAM(tdp)); + + if (tjsp->MakeDocument(g)) + return NULL; + + jsp = (tjsp->GetDoc()) ? 
tjsp->GetDoc()->GetValue(0) : NULL; + } else { + if (!(tdp->Lrecl = GetIntegerTableOption(g, topt, "Lrecl", 0))) + if (!mgo) { + sprintf(g->Message, "LRECL must be specified for pretty=%d", tdp->Pretty); + return NULL; + } else + tdp->Lrecl = 8192; // Should be enough + + tdp->Ending = GetIntegerTableOption(g, topt, "Ending", CRLF); + + if (tdp->Zipped) { +#if defined(ZIP_SUPPORT) + tjnp = new(g)TDBJSN(tdp, new(g) UNZFAM(tdp)); +#else // !ZIP_SUPPORT + sprintf(g->Message, MSG(NO_FEAT_SUPPORT), "ZIP"); + return NULL; +#endif // !ZIP_SUPPORT + } else if (tdp->Uri) { + if (tdp->Driver && toupper(*tdp->Driver) == 'C') { +#if defined(CMGO_SUPPORT) + tjnp = new(g) TDBJSN(tdp, new(g) CMGFAM(tdp)); +#else + sprintf(g->Message, "Mongo %s Driver not available", "C"); + return NULL; +#endif + } else if (tdp->Driver && toupper(*tdp->Driver) == 'J') { +#if defined(JAVA_SUPPORT) + tjnp = new(g) TDBJSN(tdp, new(g) JMGFAM(tdp)); +#else + sprintf(g->Message, "Mongo %s Driver not available", "Java"); + return NULL; +#endif + } else { // Driver not specified +#if defined(CMGO_SUPPORT) + tjnp = new(g) TDBJSN(tdp, new(g) CMGFAM(tdp)); +#elif defined(JAVA_SUPPORT) + tjnp = new(g) TDBJSN(tdp, new(g) JMGFAM(tdp)); +#else + sprintf(g->Message, MSG(NO_FEAT_SUPPORT), "MONGO"); + return NULL; +#endif + } // endif Driver + + } else + tjnp = new(g) TDBJSN(tdp, new(g) DOSFAM(tdp)); + + tjnp->SetMode(MODE_READ); + + // Allocate the parse work memory + PGLOBAL G = (PGLOBAL)PlugSubAlloc(g, NULL, sizeof(GLOBAL)); + memset(G, 0, sizeof(GLOBAL)); + G->Sarea_Size = tdp->Lrecl * 10; + G->Sarea = PlugSubAlloc(g, NULL, G->Sarea_Size); + PlugSubSet(G, G->Sarea, G->Sarea_Size); + G->jump_level = 0; + tjnp->SetG(G); + + if (tjnp->OpenDB(g)) + return NULL; + + switch (tjnp->ReadDB(g)) { + case RC_EF: + strcpy(g->Message, "Void json table"); + case RC_FX: + goto err; + default: + jsp = tjnp->GetRow(); + } // endswitch ReadDB + + } // endif pretty + + if (!(row = (jsp) ? 
jsp->GetObject() : NULL)) { + strcpy(g->Message, "Can only retrieve columns from object rows"); + goto err; + } // endif row + + all = GetBooleanTableOption(g, topt, "Fullarray", false); + jcol.Name = jcol.Fmt = NULL; + jcol.Next = NULL; + jcol.Found = true; + colname[0] = 0; + + if (!tdp->Uri) { + fmt[0] = '$'; + fmt[1] = '.'; + bf = 2; + } // endif Uri + + /*********************************************************************/ + /* Analyse the JSON tree and define columns. */ + /*********************************************************************/ + for (i = 1; ; i++) { + for (jpp = row->GetFirst(); jpp; jpp = jpp->GetNext()) { + strncpy(colname, jpp->GetKey(), 64); + fmt[bf] = 0; + + if (Find(g, jpp->GetVal(), MY_MIN(lvl, 0))) + goto err; + + } // endfor jpp + + // Missing column can be null + for (jcp = fjcp; jcp; jcp = jcp->Next) { + jcp->Cbn |= !jcp->Found; + jcp->Found = false; + } // endfor jcp + + if (tdp->Pretty != 2) { + // Read next record + switch (tjnp->ReadDB(g)) { + case RC_EF: + jsp = NULL; + break; + case RC_FX: + goto err; + default: + jsp = tjnp->GetRow(); + } // endswitch ReadDB + + } else + jsp = tjsp->GetDoc()->GetValue(i); + + if (!(row = (jsp) ? jsp->GetObject() : NULL)) + break; + + } // endfor i + + if (tdp->Pretty != 2) + tjnp->CloseDB(g); + + return n; err: - if (tdp->Pretty != 2) - tjnp->CloseDB(g); + if (tdp->Pretty != 2) + tjnp->CloseDB(g); + + return 0; +} // end of GetColumns + +bool JSONDISC::Find(PGLOBAL g, PJVAL jvp, int j) +{ + char *p, *pc = colname + strlen(colname); + int ars; + PJOB job; + PJAR jar; + + if ((valp = jvp ? 
jvp->GetValue() : NULL)) { + jcol.Type = valp->GetType(); + jcol.Len = valp->GetValLen(); + jcol.Scale = valp->GetValPrec(); + jcol.Cbn = valp->IsNull(); + } else if (!jvp || jvp->IsNull()) { + jcol.Type = TYPE_UNKNOWN; + jcol.Len = jcol.Scale = 0; + jcol.Cbn = true; + } else if (j < lvl) { + if (!fmt[bf]) + strcat(fmt, colname); + + p = fmt + strlen(fmt); + jsp = jvp->GetJson(); + + switch (jsp->GetType()) { + case TYPE_JOB: + job = (PJOB)jsp; + + for (PJPR jrp = job->GetFirst(); jrp; jrp = jrp->GetNext()) { + if (*jrp->GetKey() != '$') { + strncat(strncat(fmt, sep, 128), jrp->GetKey(), 128); + strncat(strncat(colname, "_", 64), jrp->GetKey(), 64); + } // endif Key + + if (Find(g, jrp->GetVal(), j + 1)) + return true; + + *p = *pc = 0; + } // endfor jrp + + return false; + case TYPE_JAR: + jar = (PJAR)jsp; + + if (all || (tdp->Xcol && !stricmp(tdp->Xcol, colname))) + ars = jar->GetSize(false); + else + ars = MY_MIN(jar->GetSize(false), 1); + + for (int k = 0; k < ars; k++) { + if (!tdp->Xcol || stricmp(tdp->Xcol, colname)) { + sprintf(buf, "%d", k); + + if (tdp->Uri) + strncat(strncat(fmt, sep, 128), buf, 128); + else + strncat(strncat(strncat(fmt, "[", 128), buf, 128), "]", 128); + + if (all) + strncat(strncat(colname, "_", 64), buf, 64); + + } else + strncat(fmt, (tdp->Uri ? 
sep : "[*]"), 128); + + if (Find(g, jar->GetValue(k), j)) + return true; + + *p = *pc = 0; + } // endfor k + + return false; + default: + sprintf(g->Message, "Logical error after %s", fmt); + return true; + } // endswitch Type + + } else if (lvl >= 0) { + jcol.Type = TYPE_STRING; + jcol.Len = 256; + jcol.Scale = 0; + jcol.Cbn = true; + } else + return false; + + AddColumn(g); + return false; +} // end of Find + +void JSONDISC::AddColumn(PGLOBAL g) +{ + bool b = fmt[bf] != 0; // True if formatted + + // Check whether this column was already found + for (jcp = fjcp; jcp; jcp = jcp->Next) + if (!strcmp(colname, jcp->Name)) + break; + + if (jcp) { + if (jcp->Type != jcol.Type) { + if (jcp->Type == TYPE_UNKNOWN) + jcp->Type = jcol.Type; + else if (jcol.Type != TYPE_UNKNOWN) + jcp->Type = TYPE_STRING; + + } // endif Type + + if (b && (!jcp->Fmt || strlen(jcp->Fmt) < strlen(fmt))) { + jcp->Fmt = PlugDup(g, fmt); + length[7] = MY_MAX(length[7], strlen(fmt)); + } // endif fmt + + jcp->Len = MY_MAX(jcp->Len, jcol.Len); + jcp->Scale = MY_MAX(jcp->Scale, jcol.Scale); + jcp->Cbn |= jcol.Cbn; + jcp->Found = true; + } else if (jcol.Type != TYPE_UNKNOWN || tdp->Accept) { + // New column + jcp = (PJCL)PlugSubAlloc(g, NULL, sizeof(JCOL)); + *jcp = jcol; + jcp->Cbn |= (i > 1); + jcp->Name = PlugDup(g, colname); + length[0] = MY_MAX(length[0], strlen(colname)); + + if (b) { + jcp->Fmt = PlugDup(g, fmt); + length[7] = MY_MAX(length[7], strlen(fmt)); + } else + jcp->Fmt = NULL; + + if (pjcp) { + jcp->Next = pjcp->Next; + pjcp->Next = jcp; + } else + fjcp = jcp; + + n++; + } // endif jcp + + pjcp = jcp; +} // end of AddColumn - return NULL; - } // end of JSONColumns /* -------------------------- Class JSONDEF -------------------------- */ @@ -513,6 +559,7 @@ bool JSONDEF::DefineAM(PGLOBAL g, LPCSTR, int poff) Limit = GetIntCatInfo("Limit", 10); Base = GetIntCatInfo("Base", 0) ? 
1 : 0; Sep = *GetStringCatInfo(g, "Separator", "."); + Accept = GetBoolCatInfo("Accept", false); if (Uri = GetStringCatInfo(g, "Connect", NULL)) { #if defined(JAVA_SUPPORT) || defined(CMGO_SUPPORT) @@ -1471,6 +1518,9 @@ void JSONCOL::ReadColumn(PGLOBAL g) if (!Tjp->SameRow || Xnod >= Tjp->SameRow) Value->SetValue_pval(GetColumnValue(g, Tjp->Row, 0)); + if (Xpd && Value->IsNull() && !((PJDEF)Tjp->To_Def)->Accept) + throw("Null expandable JSON value"); + // Set null when applicable if (!Nullable) Value->SetNull(false); @@ -1546,11 +1596,16 @@ PVAL JSONCOL::GetColumnValue(PGLOBAL g, PJSON row, int i) /***********************************************************************/ PVAL JSONCOL::ExpandArray(PGLOBAL g, PJAR arp, int n) { - int ars; + int ars = MY_MIN(Tjp->Limit, arp->size()); PJVAL jvp; JVALUE jval; - ars = MY_MIN(Tjp->Limit, arp->size()); + if (!ars) { + Value->Reset(); + Value->SetNull(true); + Tjp->NextSame = 0; + return Value; + } // endif ars if (!(jvp = arp->GetValue((Nodes[n].Rx = Nodes[n].Nx)))) { strcpy(g->Message, "Logical error expanding array"); diff --git a/storage/connect/tabjson.h b/storage/connect/tabjson.h index 17583cba333..fb0ee786f74 100644 --- a/storage/connect/tabjson.h +++ b/storage/connect/tabjson.h @@ -15,6 +15,7 @@ enum JMODE {MODE_OBJECT, MODE_ARRAY, MODE_VALUE}; typedef class JSONDEF *PJDEF; typedef class TDBJSON *PJTDB; typedef class JSONCOL *PJCOL; +class TDBJSN; /***********************************************************************/ /* The JSON tree node. Can be an Object or an Array. */ @@ -29,6 +30,47 @@ typedef struct _jnode { int Nx; // Next to read row number } JNODE, *PJNODE; +typedef struct _jncol { + struct _jncol *Next; + char *Name; + char *Fmt; + int Type; + int Len; + int Scale; + bool Cbn; + bool Found; +} JCOL, *PJCL; + +/***********************************************************************/ +/* Class used to get the columns of a mongo collection. 
*/ +/***********************************************************************/ +class JSONDISC : public BLOCK { +public: + // Constructor + JSONDISC(PGLOBAL g, int *lg); + + // Functions + int GetColumns(PGLOBAL g, PCSZ db, PCSZ dsn, PTOS topt); + bool Find(PGLOBAL g, PJVAL jvp, int j); + void AddColumn(PGLOBAL g); + + // Members + JCOL jcol; + PJCL jcp, fjcp, pjcp; + PVAL valp; + PJDEF tdp; + TDBJSN *tjnp; + PJTDB tjsp; + PJPR jpp; + PJSON jsp; + PJOB row; + PCSZ sep; + char colname[65], fmt[129], buf[16]; + int *length; + int i, n, bf, ncol, lvl; + bool all; +}; // end of JSONDISC + /***********************************************************************/ /* JSON table. */ /***********************************************************************/ @@ -36,13 +78,13 @@ class DllExport JSONDEF : public DOSDEF { /* Table description */ friend class TDBJSON; friend class TDBJSN; friend class TDBJCL; + friend class JSONDISC; #if defined(CMGO_SUPPORT) friend class CMGFAM; #endif // CMGO_SUPPORT #if defined(JAVA_SUPPORT) friend class JMGFAM; #endif // JAVA_SUPPORT - friend PQRYRES JSONColumns(PGLOBAL, PCSZ, PCSZ, PTOS, bool); public: // Constructor JSONDEF(void); From ac3fd5acac6b3717ce206e3e9ebf78204af06861 Mon Sep 17 00:00:00 2001 From: Daniel Black Date: Sat, 3 Feb 2018 22:01:30 +1100 Subject: [PATCH 007/139] debian: VCS is on github --- debian/dist/Debian/control | 4 ++-- debian/dist/Ubuntu/control | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/debian/dist/Debian/control b/debian/dist/Debian/control index e83ac1ffa5d..8c1173b8540 100644 --- a/debian/dist/Debian/control +++ b/debian/dist/Debian/control @@ -12,8 +12,8 @@ Build-Depends: procps | hurd, debhelper, libncurses5-dev (>= 5.0-6), ${CMAKE_DEP}libaio-dev, libjemalloc-dev (>= 3.0.0) Standards-Version: 3.8.3 Homepage: http://mariadb.org/ -Vcs-Browser: http://bazaar.launchpad.net/~maria-captains/maria/5.5/files -Vcs-Bzr: bzr://lp:maria +Vcs-Git: https://github.com/MariaDB/server.git 
+Vcs-Browser: https://github.com/MariaDB/server/ Package: libmariadbclient18 Section: libs diff --git a/debian/dist/Ubuntu/control b/debian/dist/Ubuntu/control index 94424f38db8..037c7bc1454 100644 --- a/debian/dist/Ubuntu/control +++ b/debian/dist/Ubuntu/control @@ -12,8 +12,8 @@ Build-Depends: procps | hurd, debhelper, libncurses5-dev (>= 5.0-6), ${CMAKE_DEP}libaio-dev, libjemalloc-dev (>= 3.0.0) Standards-Version: 3.8.2 Homepage: http://mariadb.org/ -Vcs-Browser: http://bazaar.launchpad.net/~maria-captains/maria/5.5/files -Vcs-Bzr: bzr://lp:maria +Vcs-Git: https://github.com/MariaDB/server.git +Vcs-Browser: https://github.com/MariaDB/server/ Package: libmariadbclient18 Section: libs From 175ce0e7f58794b5dfc4e64557072c75bffe58a4 Mon Sep 17 00:00:00 2001 From: Olivier Bertrand Date: Sun, 25 Feb 2018 14:31:28 +0100 Subject: [PATCH 008/139] - Remove warning on not used tabtyp variable in connect_assisted_discovery modified: storage/connect/ha_connect.cc - Fix a bug causing CONNECT to loop when expanding a JSON column when the expanded column value is null or void array. - Adding the FullArray option to JSON tables. - Skipping expanded JSON lines when the expanded column value is null. modified: storage/connect/connect.cc modified: storage/connect/tabdos.h modified: storage/connect/tabjson.cpp modified: storage/connect/tabjson.h - Fix MDEV-13353 No file privilege for ODBC, JDBC, MONGO, MAC, WMI file types. modified: storage/connect/ha_connect.cc - Make some Json UDFs to accept a non JSON item as 1st parameter. modified: storage/connect/json.cpp modified: storage/connect/json.h modified: storage/connect/jsonudf.cpp modified: storage/connect/jsonudf.h - Update Json UDF tests to cope with above changes. 
modified: storage/connect/mysql-test/connect/r/json_udf.result modified: storage/connect/mysql-test/connect/r/json_udf_bin.result modified: storage/connect/mysql-test/connect/r/vcol.result modified: storage/connect/mysql-test/connect/t/json_udf.test modified: storage/connect/mysql-test/connect/t/vcol.test - Fix some compiler warning treated as error PlugSubAlloc no more exported because it does throw. modified: storage/connect/global.h modified: storage/connect/ha_connect.cc modified: storage/connect/jsonudf.cpp modified: storage/connect/tabjson.cpp modified: storage/connect/tabjson.h - Other files modified (?) when going back to wrong merge modified: storage/connect/CMakeLists.txt modified: storage/connect/array.cpp modified: storage/connect/colblk.cpp modified: storage/connect/connect.cc modified: storage/connect/csort.cpp modified: storage/connect/domdoc.cpp modified: storage/connect/filamap.cpp modified: storage/connect/filamgz.cpp modified: storage/connect/filamtxt.cpp modified: storage/connect/filamzip.cpp modified: storage/connect/filter.cpp modified: storage/connect/fmdlex.c modified: storage/connect/jdbconn.cpp modified: storage/connect/macutil.cpp modified: storage/connect/myconn.cpp modified: storage/connect/odbconn.cpp modified: storage/connect/plgdbutl.cpp modified: storage/connect/plugutil.cpp modified: storage/connect/preparse.h modified: storage/connect/rcmsg.c modified: storage/connect/rcmsg.h modified: storage/connect/reldef.cpp modified: storage/connect/tabdos.cpp modified: storage/connect/tabfmt.cpp modified: storage/connect/tabmac.cpp modified: storage/connect/tabmul.cpp modified: storage/connect/tabmysql.cpp modified: storage/connect/tabmysql.h modified: storage/connect/tabodbc.cpp modified: storage/connect/tabtbl.cpp modified: storage/connect/tabxml.cpp modified: storage/connect/value.cpp modified: storage/connect/xobject.cpp --- storage/connect/CMakeLists.txt | 59 +- storage/connect/array.cpp | 2 +- storage/connect/colblk.cpp | 1 - 
storage/connect/connect.cc | 3 +- storage/connect/csort.cpp | 16 +- storage/connect/domdoc.cpp | 3 +- storage/connect/filamap.cpp | 12 +- storage/connect/filamgz.cpp | 8 +- storage/connect/filamtxt.cpp | 11 +- storage/connect/filamzip.cpp | 4 +- storage/connect/filter.cpp | 4 +- storage/connect/fmdlex.c | 26 +- storage/connect/global.h | 2 +- storage/connect/ha_connect.cc | 126 +-- storage/connect/jdbconn.cpp | 62 +- storage/connect/json.cpp | 35 +- storage/connect/json.h | 8 +- storage/connect/jsonudf.cpp | 834 ++++++++++++------ storage/connect/jsonudf.h | 4 + storage/connect/macutil.cpp | 4 +- storage/connect/myconn.cpp | 7 +- .../mysql-test/connect/r/json_udf.result | 10 +- .../mysql-test/connect/r/json_udf_bin.result | 3 +- .../connect/mysql-test/connect/r/vcol.result | 29 + .../mysql-test/connect/t/json_udf.test | 6 +- .../connect/mysql-test/connect/t/vcol.test | 31 + storage/connect/odbconn.cpp | 6 +- storage/connect/plgdbutl.cpp | 2 +- storage/connect/plugutil.cpp | 4 +- storage/connect/preparse.h | 2 +- storage/connect/rcmsg.c | 7 +- storage/connect/rcmsg.h | 2 +- storage/connect/reldef.cpp | 14 +- storage/connect/tabdos.cpp | 9 +- storage/connect/tabfmt.cpp | 6 +- storage/connect/tabjson.cpp | 4 +- storage/connect/tabjson.h | 4 +- storage/connect/tabmac.cpp | 4 +- storage/connect/tabmul.cpp | 4 +- storage/connect/tabmysql.cpp | 6 +- storage/connect/tabmysql.h | 2 +- storage/connect/tabodbc.cpp | 2 +- storage/connect/tabtbl.cpp | 2 +- storage/connect/tabxml.cpp | 2 +- storage/connect/value.cpp | 6 +- storage/connect/xobject.cpp | 5 +- 46 files changed, 899 insertions(+), 504 deletions(-) create mode 100644 storage/connect/mysql-test/connect/r/vcol.result create mode 100644 storage/connect/mysql-test/connect/t/vcol.test diff --git a/storage/connect/CMakeLists.txt b/storage/connect/CMakeLists.txt index 9cc9d16ba4d..58d9ab1f1c3 100644 --- a/storage/connect/CMakeLists.txt +++ b/storage/connect/CMakeLists.txt @@ -45,34 +45,17 @@ add_definitions( -DHUGE_SUPPORT 
-DGZ_SUPPORT ) # OS specific C flags, definitions and source files. # IF(UNIX) - if(CMAKE_CXX_COMPILER_ID MATCHES "GNU|Clang") - # Bar: -Wfatal-errors removed (does not present in gcc on solaris10) - if(WITH_WARNINGS) - add_definitions(-Wall -Wextra -Wmissing-declarations) - #message(STATUS "CONNECT: GCC: All warnings enabled") - else() - add_definitions(-Wall -Wmissing-declarations) - add_definitions(-Wno-write-strings) - add_definitions(-Wno-unused-variable) - # Bar: -Wno-unused-but-set-variables commented (does not present on sol10) - # add_definitions(-Wno-unused-but-set-variable) - add_definitions(-Wno-unused-value) - add_definitions(-Wno-unused-function) - add_definitions(-Wno-parentheses) - #add_definitions(-Wno-missing-declarations) - # Bar: -Wno-int-to-pointer-cast commended (does not present in gcc on sol10) - # add_definitions(-Wno-int-to-pointer-cast) - # Bar: -Wno-narrowing commented (does not present in gcc on solaris10) - # add_definitions(-Wno-narrowing) - -# This switch is for pure C only: -# add_definitions(-Wno-implicit-function-declaration) -# These switches are for C++ only -# add_definitions(-Wno-reorder) - - #message(STATUS "CONNECT: GCC: Some warnings disabled") - endif(WITH_WARNINGS) - endif() + MY_CHECK_AND_SET_COMPILER_FLAG("-Wall -Wmissing-declarations") + if(NOT WITH_WARNINGS) + MY_CHECK_AND_SET_COMPILER_FLAG("-Wno-unused-function") + MY_CHECK_AND_SET_COMPILER_FLAG("-Wno-unused-variable") + MY_CHECK_AND_SET_COMPILER_FLAG("-Wno-unused-value") + MY_CHECK_AND_SET_COMPILER_FLAG("-Wno-parentheses") + MY_CHECK_AND_SET_COMPILER_FLAG("-Wno-strict-aliasing") + MY_CHECK_AND_SET_COMPILER_FLAG("-Wno-misleading-indentation") + MY_CHECK_AND_SET_COMPILER_FLAG("-Wno-format-truncation") + MY_CHECK_AND_SET_COMPILER_FLAG("-Wno-implicit-fallthrough") + endif(NOT WITH_WARNINGS) add_definitions( -DUNIX -DLINUX -DUBUNTU ) @@ -302,7 +285,7 @@ IF(CONNECT_WITH_MONGO) C:/mongo-c-driver/lib D:/mongo-c-driver/lib) ENDIF(WIN32) - FIND_PACKAGE(libmongoc-1.0 1.7) + 
FIND_PACKAGE(libmongoc-1.0 1.7 QUIET) IF (libmongoc-1.0_FOUND) INCLUDE_DIRECTORIES(${MONGOC_INCLUDE_DIRS}) SET(MONGOC_LIBRARY ${MONGOC_LIBRARIES}) @@ -363,6 +346,23 @@ IF(WIN32) DESTINATION ${INSTALL_PLUGINDIR} COMPONENT connect-engine) ENDIF(WIN32) +IF(NOT TARGET connect) + RETURN() +ENDIF() + +# Install some extra files that belong to connect engine +IF(WIN32) + # install ha_connect.lib + GET_TARGET_PROPERTY(CONNECT_LOCATION connect LOCATION) + STRING(REPLACE "dll" "lib" CONNECT_LIB ${CONNECT_LOCATION}) + IF(CMAKE_CONFIGURATION_TYPES) + STRING(REPLACE "${CMAKE_CFG_INTDIR}" "\${CMAKE_INSTALL_CONFIG_NAME}" + CONNECT_LIB ${CONNECT_LIB}) + ENDIF() + INSTALL(FILES ${CONNECT_LIB} + DESTINATION ${INSTALL_PLUGINDIR} COMPONENT connect-engine) +ENDIF(WIN32) + IF(CONNECT_WITH_JDBC AND JAVA_FOUND AND JNI_FOUND) # TODO: Find how to compile and install the java wrapper classes # Find required libraries and include directories @@ -373,4 +373,3 @@ IF(CONNECT_WITH_JDBC AND JAVA_FOUND AND JNI_FOUND) ${CMAKE_CURRENT_BINARY_DIR}/JdbcInterface.jar DESTINATION ${INSTALL_PLUGINDIR} COMPONENT connect-engine) ENDIF() - diff --git a/storage/connect/array.cpp b/storage/connect/array.cpp index c779fcef816..cd1785b48ac 100644 --- a/storage/connect/array.cpp +++ b/storage/connect/array.cpp @@ -520,7 +520,7 @@ bool ARRAY::FilTest(PGLOBAL g, PVAL valp, OPVAL opc, int opm) } else if (opc != OP_EXIST) { sprintf(g->Message, MSG(MISSING_ARG), opc); - throw (int)TYPE_ARRAY; + throw (int)TYPE_ARRAY; } else // OP_EXIST return Nval > 0; diff --git a/storage/connect/colblk.cpp b/storage/connect/colblk.cpp index fa5c29aff74..a9cf43f3d96 100644 --- a/storage/connect/colblk.cpp +++ b/storage/connect/colblk.cpp @@ -412,4 +412,3 @@ void SIDBLK::ReadColumn(PGLOBAL) // } // endif Sname } // end of ReadColumn - diff --git a/storage/connect/connect.cc b/storage/connect/connect.cc index d731f7d9838..39123b18c59 100644 --- a/storage/connect/connect.cc +++ b/storage/connect/connect.cc @@ -1,4 +1,5 @@ -/* Copyright 
(C) MariaDB Corporation Ab +/* Copyright (C) Olivier Bertrand 2004 - 2017 + Copyright (C) MariaDB Corporation Ab This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by diff --git a/storage/connect/csort.cpp b/storage/connect/csort.cpp index 13f325d8f3f..670131b8fd2 100644 --- a/storage/connect/csort.cpp +++ b/storage/connect/csort.cpp @@ -351,7 +351,7 @@ void CSORT::Qstx(int *base, int *max) zlo = zhi = cnm = 0; // Avoid warning message - lo = max - base; // Number of elements as longs + lo = (int)(max - base); // Number of elements as longs if (Dup) cnm = Cmpnum(lo); @@ -472,7 +472,7 @@ void CSORT::Qstx(int *base, int *max) i = him + 1; if (Pof) - Pof[him - Pex] = Pof[mid - Pex] = i - j; + Pof[him - Pex] = Pof[mid - Pex] = (int)(i - j); /*******************************************************************/ /* Look at sizes of the two partitions, do the smaller one first */ @@ -481,8 +481,8 @@ void CSORT::Qstx(int *base, int *max) /* But only repeat (recursively or by branching) if the partition */ /* is of at least size THRESH. */ /*******************************************************************/ - lo = j - base; - hi = max - i; + lo = (int)(j - base); + hi = (int)(max - i); if (Dup) { // Update progress information zlo = Cmpnum(lo); @@ -726,7 +726,7 @@ void CSORT::Qstc(int *base, int *max) zlo = zhi = cnm = 0; // Avoid warning message - lo = max - base; // Number of elements as longs + lo = (int)(max - base); // Number of elements as longs if (Dup) cnm = Cmpnum(lo); @@ -853,7 +853,7 @@ void CSORT::Qstc(int *base, int *max) /* the offset array values indicating break point and block size. 
*/ /*******************************************************************/ if (Pof) - Pof[lt - Pex] = Pof[(jj - 1) - Pex] = jj - lt; + Pof[lt - Pex] = Pof[(jj - 1) - Pex] = (int)(jj - lt); /*******************************************************************/ /* Look at sizes of the two partitions, do the smaller one first */ @@ -862,8 +862,8 @@ void CSORT::Qstc(int *base, int *max) /* But only repeat (recursively or by branching) if the partition */ /* is of at least size THRESH. */ /*******************************************************************/ - lo = lt - base; - hi = gt - Swix; + lo = (int)(lt - base); + hi = (int)(gt - Swix); if (Dup) { // Update progress information zlo = Cmpnum(lo); diff --git a/storage/connect/domdoc.cpp b/storage/connect/domdoc.cpp index e24e10835c1..ba8eb829abd 100644 --- a/storage/connect/domdoc.cpp +++ b/storage/connect/domdoc.cpp @@ -13,6 +13,7 @@ #elif defined(MSX4) #import "msxml4.dll" //Causes error C2872: DOMNodeType: ambiguous symbol ?? #elif defined(MSX6) +#pragma warning(suppress : 4192) #import "msxml6.dll" //Causes error C2872: DOMNodeType: ambiguous symbol ?? #else // MSX4 #error MSX? 
is not defined @@ -540,7 +541,7 @@ PXNODE DOMNODE::AddChildNode(PGLOBAL g, PCSZ name, PXNODE np) // If name has the format m[n] only m is taken as node name if ((p = strchr(name, '['))) - pn = BufAlloc(g, name, p - name); + pn = BufAlloc(g, name, (int)(p - name)); else pn = name; diff --git a/storage/connect/filamap.cpp b/storage/connect/filamap.cpp index 956a70578f5..6e71e1bf2cd 100644 --- a/storage/connect/filamap.cpp +++ b/storage/connect/filamap.cpp @@ -247,7 +247,7 @@ int MAPFAM::GetRowID(void) /***********************************************************************/ int MAPFAM::GetPos(void) { - return Fpos - Memory; + return (int)(Fpos - Memory); } // end of GetPos /***********************************************************************/ @@ -255,7 +255,7 @@ int MAPFAM::GetPos(void) /***********************************************************************/ int MAPFAM::GetNextPos(void) { - return Mempos - Memory; + return (int)(Mempos - Memory); } // end of GetNextPos /***********************************************************************/ @@ -368,7 +368,7 @@ int MAPFAM::ReadBuffer(PGLOBAL g) } // endif Mempos // Set caller line buffer - len = (Mempos - Fpos) - n; + len = (int)(Mempos - Fpos) - n; // Don't rely on ENDING setting if (len > 0 && *(Mempos - 2) == '\r') @@ -428,7 +428,7 @@ int MAPFAM::DeleteRecords(PGLOBAL g, int irc) /* not required here, just setting of future Spos and Tpos. */ /*******************************************************************/ Tpos = Spos = Fpos; - } else if ((n = Fpos - Spos) > 0) { + } else if ((n = (int)(Fpos - Spos)) > 0) { /*******************************************************************/ /* Non consecutive line to delete. Move intermediate lines. */ /*******************************************************************/ @@ -461,7 +461,7 @@ int MAPFAM::DeleteRecords(PGLOBAL g, int irc) /*****************************************************************/ /* Remove extra records. 
*/ /*****************************************************************/ - n = Tpos - Memory; + n = (int)(Tpos - Memory); #if defined(__WIN__) DWORD drc = SetFilePointer(fp->Handle, n, NULL, FILE_BEGIN); @@ -627,7 +627,7 @@ int MBKFAM::ReadBuffer(PGLOBAL g) break; // Set caller line buffer - len = (Mempos - Fpos) - Ending; + len = (int)(Mempos - Fpos) - Ending; memcpy(Tdbp->GetLine(), Fpos, len); Tdbp->GetLine()[len] = '\0'; return RC_OK; diff --git a/storage/connect/filamgz.cpp b/storage/connect/filamgz.cpp index fccda772fea..880db54c91d 100644 --- a/storage/connect/filamgz.cpp +++ b/storage/connect/filamgz.cpp @@ -537,7 +537,7 @@ int ZBKFAM::ReadBuffer(PGLOBAL g) while (*NxtLine++ != '\n') ; // Set caller line buffer - n = NxtLine - CurLine - Ending; + n = (int)(NxtLine - CurLine - Ending); memcpy(Tdbp->GetLine(), CurLine, n); Tdbp->GetLine()[n] = '\0'; return RC_OK; @@ -588,7 +588,7 @@ int ZBKFAM::ReadBuffer(PGLOBAL g) for (NxtLine = CurLine; *NxtLine++ != '\n';) ; // Set caller line buffer - n = NxtLine - CurLine - Ending; + n = (int)(NxtLine - CurLine - Ending); memcpy(Tdbp->GetLine(), CurLine, n); Tdbp->GetLine()[n] = '\0'; Rbuf = (CurBlk == Block - 1) ? 
Last : Nrec; @@ -1087,7 +1087,7 @@ bool ZLBFAM::SetPos(PGLOBAL g, int pos __attribute__((unused))) /***********************************************************************/ int ZLBFAM::ReadBuffer(PGLOBAL g) { - int n; + size_t n; void *rdbuf; /*********************************************************************/ @@ -1299,7 +1299,7 @@ int ZLBFAM::WriteBuffer(PGLOBAL g) else NxtLine = CurLine + Lrecl; - BlkLen = NxtLine - To_Buf; + BlkLen = (int)(NxtLine - To_Buf); if (WriteCompressedBuffer(g)) { Closing = TRUE; // To tell CloseDB about a Write error diff --git a/storage/connect/filamtxt.cpp b/storage/connect/filamtxt.cpp index e6c9a627df0..7c222eb3c80 100644 --- a/storage/connect/filamtxt.cpp +++ b/storage/connect/filamtxt.cpp @@ -1351,7 +1351,7 @@ int BLKFAM::GetPos(void) /***********************************************************************/ int BLKFAM::GetNextPos(void) { - return Fpos + NxtLine - CurLine; + return (int)(Fpos + NxtLine - CurLine); } // end of GetNextPos /***********************************************************************/ @@ -1396,7 +1396,8 @@ int BLKFAM::SkipRecord(PGLOBAL, bool header) /***********************************************************************/ int BLKFAM::ReadBuffer(PGLOBAL g) { - int i, n, rc = RC_OK; + int i, rc = RC_OK; + size_t n; /*********************************************************************/ /* Sequential reading when Placed is not true. */ @@ -1458,7 +1459,7 @@ int BLKFAM::ReadBuffer(PGLOBAL g) // Read the entire next block n = fread(To_Buf, 1, (size_t)BlkLen, Stream); - if (n == BlkLen) { + if ((size_t) n == (size_t) BlkLen) { // ReadBlks++; num_read++; Rbuf = (CurBlk == Block - 1) ? 
Last : Nrec; @@ -1497,7 +1498,7 @@ int BLKFAM::ReadBuffer(PGLOBAL g) fin: // Store the current record file position for Delete and Update - Fpos = BlkPos[CurBlk] + CurLine - To_Buf; + Fpos = (int)(BlkPos[CurBlk] + CurLine - To_Buf); return rc; } // end of ReadBuffer @@ -1524,7 +1525,7 @@ int BLKFAM::WriteBuffer(PGLOBAL g) // Now start the writing process. NxtLine = CurLine + strlen(CurLine); - BlkLen = NxtLine - To_Buf; + BlkLen = (int)(NxtLine - To_Buf); if (fwrite(To_Buf, 1, BlkLen, Stream) != (size_t)BlkLen) { sprintf(g->Message, MSG(FWRITE_ERROR), strerror(errno)); diff --git a/storage/connect/filamzip.cpp b/storage/connect/filamzip.cpp index 53e30d0fa02..e76dc496246 100644 --- a/storage/connect/filamzip.cpp +++ b/storage/connect/filamzip.cpp @@ -748,7 +748,7 @@ UNZFAM::UNZFAM(PUNZFAM txfp) : MAPFAM(txfp) /***********************************************************************/ int UNZFAM::GetFileLength(PGLOBAL g) { - int len = (zutp && zutp->entryopen) ? Top - Memory + int len = (zutp && zutp->entryopen) ? (int)(Top - Memory) : TXTFAM::GetFileLength(g) * 3; if (trace(1)) @@ -1088,7 +1088,7 @@ int ZIPFAM::WriteBuffer(PGLOBAL g) // Prepare to write the new line strcat(strcpy(To_Buf, Tdbp->GetLine()), (Bin) ? 
CrLf : "\n"); - len = strchr(To_Buf, '\n') - To_Buf + 1; + len = (int)(strchr(To_Buf, '\n') - To_Buf + 1); return zutp->writeEntry(g, To_Buf, len); } // end of WriteBuffer diff --git a/storage/connect/filter.cpp b/storage/connect/filter.cpp index 47fead660fd..7082b082c67 100644 --- a/storage/connect/filter.cpp +++ b/storage/connect/filter.cpp @@ -84,7 +84,7 @@ BYTE OpBmp(PGLOBAL g, OPVAL opc) case OP_EXIST: bt = 0x00; break; default: sprintf(g->Message, MSG(BAD_FILTER_OP), opc); - throw (int)TYPE_ARRAY; + throw (int)TYPE_FILTER; } // endswitch opc return bt; @@ -1707,7 +1707,7 @@ PFIL PrepareFilter(PGLOBAL g, PFIL fp, bool having) break; // Remove eventual ending separator(s) // if (fp->Convert(g, having)) -// (int)throw TYPE_ARRAY; +// throw (int)TYPE_FILTER; filp = fp; fp = fp->Next; diff --git a/storage/connect/fmdlex.c b/storage/connect/fmdlex.c index 548a7ae5b7e..729b1b883c1 100644 --- a/storage/connect/fmdlex.c +++ b/storage/connect/fmdlex.c @@ -283,7 +283,7 @@ static void yy_fatal_error YY_PROTO(( const char msg[] )); */ #define YY_DO_BEFORE_ACTION \ yytext_ptr = yy_bp; \ - yyleng = yy_cp - yy_bp; \ + yyleng = (int)(yy_cp - yy_bp); \ yy_hold_char = *yy_cp; \ *yy_cp = '\0'; \ yy_c_buf_p = yy_cp; @@ -417,10 +417,10 @@ static PDTP pp; static void MakeParm(int n); static void MakeMMDD(int n); static void MakeAMPM(int n); -static void MakeIn(char *); -static void MakeOut(char *); -static void Quotin(char *); -static void Quotout(char *); +static void MakeIn(const char *); +static void MakeOut(const char *); +static void Quotin(const char *); +static void Quotout(const char *); /* Macros after this point can all be overridden by user definitions in * section 1. 
@@ -529,7 +529,7 @@ YY_DECL pp->Num = 0; if (pp->InFmt) {*pp->InFmt = '\0'; pp->InFmt[pp->Outsize -1] = '\0'; } if (pp->OutFmt) {*pp->OutFmt = '\0'; pp->OutFmt[pp->Outsize -1] = '\0'; } - pp->Curp = pp->Format; + pp->Curp = (char*) pp->Format; yy_init = 1; /* This is a new input */ @@ -695,7 +695,7 @@ case YY_STATE_EOF(dqt): case YY_END_OF_BUFFER: { /* Amount of text matched not including the EOB char. */ - int yy_amount_of_matched_text = yy_cp - yytext_ptr - 1; + int yy_amount_of_matched_text = (int)(yy_cp - yytext_ptr - 1); /* Undo the effects of YY_DO_BEFORE_ACTION. */ *yy_cp = yy_hold_char; @@ -862,7 +862,7 @@ static int yy_get_next_buffer() /* Try to read more data. */ /* First move last chars to start of buffer. */ - number_to_move = yy_c_buf_p - yytext_ptr; + number_to_move = (int)(yy_c_buf_p - yytext_ptr); for ( i = 0; i < number_to_move; ++i ) *(dest++) = *(source++); @@ -888,7 +888,7 @@ static int yy_get_next_buffer() /* just a shorter name for the current buffer */ YY_BUFFER_STATE b = yy_current_buffer; - int yy_c_buf_p_offset = yy_c_buf_p - b->yy_ch_buf; + int yy_c_buf_p_offset = (int)(yy_c_buf_p - b->yy_ch_buf); b->yy_buf_size *= 2; b->yy_ch_buf = (char *) @@ -1492,7 +1492,7 @@ void MakeAMPM(int n) } /* end of MakeAMPM */ -void MakeIn(char *text) +void MakeIn(const char *text) { if (!pp->InFmt) return; @@ -1500,14 +1500,14 @@ void MakeIn(char *text) strncat(pp->InFmt, text, (pp->Outsize - 1) - strlen(pp->InFmt)); } /* end of MakeIn */ -void MakeOut(char *text) +void MakeOut(const char *text) { if (!pp->OutFmt) return; strncat(pp->OutFmt, text, (pp->Outsize - 1) - strlen(pp->OutFmt)); } /* end of MakeOut */ -void Quotin(char *text) +void Quotin(const char *text) { if (!pp->InFmt) return; @@ -1516,7 +1516,7 @@ void Quotin(char *text) pp->InFmt[strlen(pp->InFmt)-1] = '\0'; } /* end of Quotin */ -void Quotout(char *text) +void Quotout(const char *text) { if (!pp->OutFmt) return; diff --git a/storage/connect/global.h b/storage/connect/global.h index 
63d8782ee72..d8d03f606ba 100644 --- a/storage/connect/global.h +++ b/storage/connect/global.h @@ -220,7 +220,7 @@ DllExport BOOL PlugIsAbsolutePath(LPCSTR path); DllExport bool AllocSarea(PGLOBAL, uint); DllExport void FreeSarea(PGLOBAL); DllExport BOOL PlugSubSet(PGLOBAL, void *, uint); -DllExport void *PlugSubAlloc(PGLOBAL, void *, size_t); + void *PlugSubAlloc(PGLOBAL, void *, size_t); // Does throw DllExport char *PlugDup(PGLOBAL g, const char *str); DllExport void *MakePtr(void *, OFFSET); DllExport void htrc(char const *fmt, ...); diff --git a/storage/connect/ha_connect.cc b/storage/connect/ha_connect.cc index 4c30938691f..2efed93ee2a 100644 --- a/storage/connect/ha_connect.cc +++ b/storage/connect/ha_connect.cc @@ -98,8 +98,8 @@ rnd_next signals that it has reached the end of its data. Calls to ha_connect::extra() are hints as to what will be occuring to the request. - Author Olivier Bertrand -*/ + Author Olivier Bertrand + */ #ifdef USE_PRAGMA_IMPLEMENTATION #pragma implementation // gcc: Class implementation @@ -425,7 +425,7 @@ handlerton *connect_hton= NULL; /* Function to export session variable values to other source files. */ /***********************************************************************/ uint GetTraceValue(void) - {return connect_hton ? THDVAR(current_thd, xtrace) : 0;} + {return (uint)(connect_hton ? 
THDVAR(current_thd, xtrace) : 0);} bool ExactInfo(void) {return THDVAR(current_thd, exact_info);} USETEMP UseTemp(void) {return (USETEMP)THDVAR(current_thd, use_tempfile);} int GetConvSize(void) {return THDVAR(current_thd, conv_size);} @@ -1107,55 +1107,55 @@ PCSZ GetListOption(PGLOBAL g, PCSZ opname, PCSZ oplist, PCSZ def) if (!oplist) return (char*)def; - char key[16], val[256]; - char *pv, *pn, *pk= (char*)oplist; - PCSZ opval= def; - int n; + char key[16], val[256]; + char *pv, *pn, *pk = (char*)oplist; + PCSZ opval = def; + int n; while (*pk == ' ') pk++; - for (; pk; pk= pn) { - pn= strchr(pk, ','); - pv= strchr(pk, '='); + for (; pk; pk = pn) { + pn = strchr(pk, ','); + pv = strchr(pk, '='); - if (pv && (!pn || pv < pn)) { + if (pv && (!pn || pv < pn)) { n = MY_MIN(static_cast(pv - pk), sizeof(key) - 1); memcpy(key, pk, n); while (n && key[n - 1] == ' ') n--; - key[n]= 0; + key[n] = 0; - while(*(++pv) == ' ') ; + while (*(++pv) == ' '); - n= MY_MIN((pn ? pn - pv : strlen(pv)), sizeof(val) - 1); - memcpy(val, pv, n); + n = MY_MIN((pn ? pn - pv : strlen(pv)), sizeof(val) - 1); + memcpy(val, pv, n); while (n && val[n - 1] == ' ') n--; - val[n]= 0; - } else { - n= MY_MIN((pn ? pn - pk : strlen(pk)), sizeof(key) - 1); - memcpy(key, pk, n); + val[n] = 0; + } else { + n = MY_MIN((pn ? 
pn - pk : strlen(pk)), sizeof(key) - 1); + memcpy(key, pk, n); while (n && key[n - 1] == ' ') n--; - key[n]= 0; - val[0]= 0; - } // endif pv + key[n] = 0; + val[0] = 0; + } // endif pv - if (!stricmp(opname, key)) { - opval= PlugDup(g, val); - break; - } else if (!pn) - break; + if (!stricmp(opname, key)) { + opval = PlugDup(g, val); + break; + } else if (!pn) + break; - while (*(++pn) == ' ') ; - } // endfor pk + while (*(++pn) == ' '); + } // endfor pk return opval; } // end of GetListOption @@ -4391,53 +4391,59 @@ bool ha_connect::check_privileges(THD *thd, PTOS options, char *dbn, bool quick) my_error(ER_OPTION_PREVENTS_STATEMENT, MYF(0), "--secure-file-priv"); return true; } // endif path - } + + } // endif !quick + } else return false; - /* Fall through to check FILE_ACL */ - case TAB_ODBC: - case TAB_JDBC: + // Fall through case TAB_MYSQL: - case TAB_MONGO: case TAB_DIR: - case TAB_MAC: - case TAB_WMI: case TAB_ZIP: case TAB_OEM: #ifdef NO_EMBEDDED_ACCESS_CHECKS - return false; -#endif - /* - If table or table->mdl_ticket is NULL - it's a DLL, e.g. CREATE TABLE. - if the table has an MDL_EXCLUSIVE lock - it's a DDL too, e.g. the - insert step of CREATE ... SELECT. + return false; + #endif - Otherwise it's a DML, the table was normally opened, locked, - privilege were already checked, and table->grant.privilege is set. - With SQL SECURITY DEFINER, table->grant.privilege has definer's privileges. + /* + Check FILE_ACL + If table or table->mdl_ticket is NULL - it's a DLL, e.g. CREATE TABLE. + if the table has an MDL_EXCLUSIVE lock - it's a DDL too, e.g. the + insert step of CREATE ... SELECT. + + Otherwise it's a DML, the table was normally opened, locked, + privilege were already checked, and table->grant.privilege is set. + With SQL SECURITY DEFINER, table->grant.privilege has definer's privileges. + + Unless we're in prelocking mode, in this case table->grant.privilege + is only checked in start_stmt(), not in external_lock(). 
+ */ + if (!table || !table->mdl_ticket || table->mdl_ticket->get_type() == MDL_EXCLUSIVE) + return check_access(thd, FILE_ACL, db, NULL, NULL, 0, 0); - Unless we're in prelocking mode, in this case table->grant.privilege - is only checked in start_stmt(), not in external_lock(). - */ - if (!table || !table->mdl_ticket || table->mdl_ticket->get_type() == MDL_EXCLUSIVE) - return check_access(thd, FILE_ACL, db, NULL, NULL, 0, 0); - if ((!quick && thd->lex->requires_prelocking()) || table->grant.privilege & FILE_ACL) - return false; - status_var_increment(thd->status_var.access_denied_errors); - my_error(access_denied_error_code(thd->password), MYF(0), - thd->security_ctx->priv_user, thd->security_ctx->priv_host, - (thd->password ? ER(ER_YES) : ER(ER_NO))); - return true; + if ((!quick && thd->lex->requires_prelocking()) || table->grant.privilege & FILE_ACL) + return false; - // This is temporary until a solution is found + status_var_increment(thd->status_var.access_denied_errors); + my_error(access_denied_error_code(thd->password), MYF(0), + thd->security_ctx->priv_user, thd->security_ctx->priv_host, + (thd->password ? 
ER(ER_YES) : ER(ER_NO))); + return true; + case TAB_ODBC: + case TAB_JDBC: + case TAB_MONGO: + case TAB_MAC: + case TAB_WMI: + return false; case TAB_TBL: case TAB_XCL: case TAB_PRX: case TAB_OCCUR: case TAB_PIVOT: case TAB_VIR: - return false; + // This is temporary until a solution is found + return false; } // endswitch type my_printf_error(ER_UNKNOWN_ERROR, "check_privileges failed", MYF(0)); @@ -5629,7 +5635,7 @@ static int connect_assisted_discovery(handlerton *, THD* thd, #endif // JAVA_SUPPORT case TAB_DBF: dbf = true; - // Passthru + // fall through case TAB_CSV: if (!fn && fnc != FNC_NO) sprintf(g->Message, "Missing %s file name", topt->type); diff --git a/storage/connect/jdbconn.cpp b/storage/connect/jdbconn.cpp index 4e0cf401ed4..ff84c75b67f 100644 --- a/storage/connect/jdbconn.cpp +++ b/storage/connect/jdbconn.cpp @@ -154,38 +154,38 @@ int TranslateJDBCType(int stp, char *tn, int prec, int& len, char& v) case 91: // DATE, YEAR type = TYPE_DATE; - if (!tn || toupper(tn[0]) != 'Y') { - len = 10; - v = 'D'; - } else { - len = 4; - v = 'Y'; - } // endif len + if (!tn || toupper(tn[0]) != 'Y') { + len = 10; + v = 'D'; + } else { + len = 4; + v = 'Y'; + } // endif len - break; - case 92: // TIME - type = TYPE_DATE; - len = 8 + ((prec) ? (prec+1) : 0); - v = 'T'; - break; - case -5: // BIGINT - type = TYPE_BIGINT; - break; - case 0: // NULL - case -2: // BINARY - case -4: // LONGVARBINARY - case 70: // DATALINK - case 2000: // JAVA_OBJECT - case 2001: // DISTINCT - case 2002: // STRUCT - case 2003: // ARRAY - case 2004: // BLOB - case 2005: // CLOB - case 2006: // REF - case 2009: // SQLXML - case 2011: // NCLOB - default: - type = TYPE_ERROR; + break; + case 92: // TIME + type = TYPE_DATE; + len = 8 + ((prec) ? 
(prec + 1) : 0); + v = 'T'; + break; + case -5: // BIGINT + type = TYPE_BIGINT; + break; + case 0: // NULL + case -2: // BINARY + case -4: // LONGVARBINARY + case 70: // DATALINK + case 2000: // JAVA_OBJECT + case 2001: // DISTINCT + case 2002: // STRUCT + case 2003: // ARRAY + case 2004: // BLOB + case 2005: // CLOB + case 2006: // REF + case 2009: // SQLXML + case 2011: // NCLOB + default: + type = TYPE_ERROR; len = 0; } // endswitch type diff --git a/storage/connect/json.cpp b/storage/connect/json.cpp index f6ed48c4d06..98a4659cea8 100644 --- a/storage/connect/json.cpp +++ b/storage/connect/json.cpp @@ -165,7 +165,7 @@ PJSON ParseJson(PGLOBAL g, char *s, int len, int *ptyp, bool *comma) }; // endswitch s[i] if (!jsp) - sprintf(g->Message, "Invalid Json string '%.*s'", 50, s); + sprintf(g->Message, "Invalid Json string '%.*s'", MY_MIN(len, 50), s); else if (ptyp && pretty == 3) { *ptyp = 3; // Not recognized pretty @@ -1015,6 +1015,20 @@ PJAR JOBJECT::GetKeyList(PGLOBAL g) return jarp; } // end of GetKeyList +/***********************************************************************/ +/* Return all values as an array. */ +/***********************************************************************/ +PJAR JOBJECT::GetValList(PGLOBAL g) +{ + PJAR jarp = new(g) JARRAY(); + + for (PJPR jpp = First; jpp; jpp = jpp->Next) + jarp->AddValue(g, jpp->GetVal()); + + jarp->InitArray(g); + return jarp; +} // end of GetValList + /***********************************************************************/ /* Get the value corresponding to the given key. 
*/ /***********************************************************************/ @@ -1224,6 +1238,7 @@ PJVAL JARRAY::AddValue(PGLOBAL g, PJVAL jvp, int *x) Last->Next = jvp; Last = jvp; + Last->Next = NULL; } // endif x return jvp; @@ -1318,6 +1333,24 @@ bool JARRAY::IsNull(void) /* -------------------------- Class JVALUE- -------------------------- */ +/***********************************************************************/ +/* Constructor for a JSON. */ +/***********************************************************************/ +JVALUE::JVALUE(PJSON jsp) : JSON() +{ + if (jsp->GetType() == TYPE_JVAL) { + Jsp = jsp->GetJsp(); + Value = jsp->GetValue(); + } else { + Jsp = jsp; + Value = NULL; + } // endif Type + + Next = NULL; + Del = false; + Size = 1; +} // end of JVALUE constructor + /***********************************************************************/ /* Constructor for a Value with a given string or numeric value. */ /***********************************************************************/ diff --git a/storage/connect/json.h b/storage/connect/json.h index 375532212c4..dcc97287420 100644 --- a/storage/connect/json.h +++ b/storage/connect/json.h @@ -20,7 +20,8 @@ enum JTYP {TYPE_NULL = TYPE_VOID, TYPE_BINT = TYPE_BIGINT, TYPE_DTM = TYPE_DATE, TYPE_INTG = TYPE_INT, - TYPE_JSON = 12, + TYPE_VAL = 12, + TYPE_JSON, TYPE_JAR, TYPE_JOB, TYPE_JVAL}; @@ -157,6 +158,7 @@ class JSON : public BLOCK { //virtual PJVAL AddValue(PGLOBAL g, PJVAL jvp = NULL, int *x = NULL) {X return NULL;} virtual PJPR AddPair(PGLOBAL g, PCSZ key) {X return NULL;} virtual PJAR GetKeyList(PGLOBAL g) {X return NULL;} + virtual PJAR GetValList(PGLOBAL g) {X return NULL;} virtual PJVAL GetValue(const char *key) {X return NULL;} virtual PJOB GetObject(void) {return NULL;} virtual PJAR GetArray(void) {return NULL;} @@ -205,6 +207,7 @@ class JOBJECT : public JSON { virtual PJOB GetObject(void) {return this;} virtual PJVAL GetValue(const char* key); virtual PJAR GetKeyList(PGLOBAL g); + virtual PJAR 
GetValList(PGLOBAL g); virtual PSZ GetText(PGLOBAL g, PSZ text); virtual bool Merge(PGLOBAL g, PJSON jsp); virtual void SetValue(PGLOBAL g, PJVAL jvp, PCSZ key); @@ -258,8 +261,7 @@ class JVALUE : public JSON { friend bool SerializeValue(JOUT *, PJVAL); public: JVALUE(void) : JSON() {Clear();} - JVALUE(PJSON jsp) : JSON() - {Jsp = jsp; Value = NULL; Next = NULL; Del = false; Size = 1;} + JVALUE(PJSON jsp); JVALUE(PGLOBAL g, PVAL valp); JVALUE(PGLOBAL g, PCSZ strp); diff --git a/storage/connect/jsonudf.cpp b/storage/connect/jsonudf.cpp index 952cd76ef8d..e45846ea23b 100644 --- a/storage/connect/jsonudf.cpp +++ b/storage/connect/jsonudf.cpp @@ -1,6 +1,6 @@ /****************** jsonudf C++ Program Source Code File (.CPP) ******************/ -/* PROGRAM NAME: jsonudf Version 1.6 */ -/* (C) Copyright to the author Olivier BERTRAND 2015-2017 */ +/* PROGRAM NAME: jsonudf Version 1.7 */ +/* (C) Copyright to the author Olivier BERTRAND 2015-2018 */ /* This program are the JSON User Defined Functions . 
*/ /*********************************************************************************/ @@ -31,16 +31,39 @@ bool IsNum(PSZ s); char *NextChr(PSZ s, char sep); char *GetJsonNull(void); uint GetJsonGrpSize(void); -static int IsJson(UDF_ARGS *args, uint i); +static int IsJson(UDF_ARGS *args, uint i, bool b = false); static PSZ MakePSZ(PGLOBAL g, UDF_ARGS *args, int i); static char *handle_item(UDF_INIT *initid, UDF_ARGS *args, char *result, unsigned long *res_length, char *is_null, char *error); static char *bin_handle_item(UDF_INIT *initid, UDF_ARGS *args, char *result, unsigned long *res_length, char *is_null, char *error); +static PJSON JsonNew(PGLOBAL g, JTYP type); +static PJVAL JvalNew(PGLOBAL g, JTYP type, void *vp = NULL); +static PJSNX JsnxNew(PGLOBAL g, PJSON jsp, int type, int len = 64); static uint JsonGrpSize = 10; -/* ----------------------------------- JSNX ------------------------------------ */ +/*********************************************************************************/ +/* SubAlloc a new JSNX class with protection against memory exhaustion. */ +/*********************************************************************************/ +static PJSNX JsnxNew(PGLOBAL g, PJSON jsp, int type, int len) +{ + PJSNX jsx; + + try { + jsx = new(g) JSNX(g, jsp, type, len); + } catch (...) { + if (trace(1023)) + htrc("%s\n", g->Message); + + PUSH_WARNING(g->Message); + jsx = NULL; + } // end try/catch + + return jsx; +} /* end of JsnxNew */ + + /* ----------------------------------- JSNX ------------------------------------ */ /*********************************************************************************/ /* JSNX public constructor. */ @@ -81,21 +104,7 @@ my_bool JSNX::SetJpath(PGLOBAL g, char *path, my_bool jb) return true; Value->SetNullable(true); - -#if 0 - if (jb) { - // Path must return a Json item - size_t n = strlen(path); - - if (!n || path[n - 1] != '*') { - Jpath = (char*)PlugSubAlloc(g, NULL, n + 3); - strcat(strcpy(Jpath, path), (n) ? 
":*" : "*"); - } else - Jpath = path; - - } else -#endif // 0 - Jpath = path; + Jpath = path; // Parse the json path Parsed = false; @@ -753,6 +762,7 @@ my_bool JSNX::WriteValue(PGLOBAL g, PJVAL jvalp) /*********************************************************************************/ PSZ JSNX::Locate(PGLOBAL g, PJSON jsp, PJVAL jvp, int k) { + PSZ str = NULL; my_bool b = false, err = true; g->Message[0] = 0; @@ -762,37 +772,47 @@ PSZ JSNX::Locate(PGLOBAL g, PJSON jsp, PJVAL jvp, int k) return NULL; } // endif jsp - // Write to the path string - Jp = new(g) JOUTSTR(g); - Jp->WriteChr('$'); - Jvalp = jvp; - K = k; + try { + // Write to the path string + Jp = new(g) JOUTSTR(g); + Jp->WriteChr('$'); + Jvalp = jvp; + K = k; - switch (jsp->GetType()) { - case TYPE_JAR: - err = LocateArray((PJAR)jsp); - break; - case TYPE_JOB: - err = LocateObject((PJOB)jsp); - break; - case TYPE_JVAL: - err = LocateValue((PJVAL)jsp); - break; - default: - err = true; + switch (jsp->GetType()) { + case TYPE_JAR: + err = LocateArray((PJAR)jsp); + break; + case TYPE_JOB: + err = LocateObject((PJOB)jsp); + break; + case TYPE_JVAL: + err = LocateValue((PJVAL)jsp); + break; + default: + err = true; } // endswitch Type - if (err) { - if (!g->Message[0]) - strcpy(g->Message, "Invalid json tree"); + if (err) { + if (!g->Message[0]) + strcpy(g->Message, "Invalid json tree"); - } else if (Found) { - Jp->WriteChr('\0'); - PlugSubAlloc(g, NULL, Jp->N); - return Jp->Strp; - } // endif's + } else if (Found) { + Jp->WriteChr('\0'); + PlugSubAlloc(g, NULL, Jp->N); + str = Jp->Strp; + } // endif's - return NULL; + } catch (int n) { + if (trace(1)) + htrc("Exception %d: %s\n", n, g->Message); + + PUSH_WARNING(g->Message); + } catch (const char *msg) { + strcpy(g->Message, msg); + } // end catch + + return str; } // end of Locate /*********************************************************************************/ @@ -864,53 +884,62 @@ my_bool JSNX::LocateValue(PJVAL jvp) 
/*********************************************************************************/ PSZ JSNX::LocateAll(PGLOBAL g, PJSON jsp, PJVAL jvp, int mx) { + PSZ str = NULL; my_bool b = false, err = true; - PJPN jnp = (PJPN)PlugSubAlloc(g, NULL, sizeof(JPN) * mx); - - memset(jnp, 0, sizeof(JPN) * mx); - g->Message[0] = 0; - + PJPN jnp; + if (!jsp) { strcpy(g->Message, "Null json tree"); return NULL; } // endif jsp - // Write to the path string - Jp = new(g)JOUTSTR(g); - Jvalp = jvp; - Imax = mx - 1; - Jpnp = jnp; - Jp->WriteChr('['); + try { + jnp = (PJPN)PlugSubAlloc(g, NULL, sizeof(JPN) * mx); + memset(jnp, 0, sizeof(JPN) * mx); + g->Message[0] = 0; - switch (jsp->GetType()) { - case TYPE_JAR: - err = LocateArrayAll((PJAR)jsp); - break; - case TYPE_JOB: - err = LocateObjectAll((PJOB)jsp); - break; - case TYPE_JVAL: - err = LocateValueAll((PJVAL)jsp); - break; - default: - err = true; - } // endswitch Type + // Write to the path string + Jp = new(g)JOUTSTR(g); + Jvalp = jvp; + Imax = mx - 1; + Jpnp = jnp; + Jp->WriteChr('['); - if (err) { - if (!g->Message[0]) + switch (jsp->GetType()) { + case TYPE_JAR: + err = LocateArrayAll((PJAR)jsp); + break; + case TYPE_JOB: + err = LocateObjectAll((PJOB)jsp); + break; + case TYPE_JVAL: + err = LocateValueAll((PJVAL)jsp); + break; + default: + err = true; + } // endswitch Type + + if (!err) { + if (Jp->N > 1) + Jp->N--; + + Jp->WriteChr(']'); + Jp->WriteChr('\0'); + PlugSubAlloc(g, NULL, Jp->N); + str = Jp->Strp; + } else if (!g->Message[0]) strcpy(g->Message, "Invalid json tree"); - return NULL; - } else { - if (Jp->N > 1) - Jp->N--; + } catch (int n) { + if (trace(1)) + htrc("Exception %d: %s\n", n, g->Message); - Jp->WriteChr(']'); - Jp->WriteChr('\0'); - PlugSubAlloc(g, NULL, Jp->N); - return Jp->Strp; - } // endif's + PUSH_WARNING(g->Message); + } catch (const char *msg) { + strcpy(g->Message, msg); + } // end catch + return str; } // end of LocateAll 
/*********************************************************************************/ @@ -1137,6 +1166,72 @@ inline void JsonFreeMem(PGLOBAL g) PlugExit(g); } /* end of JsonFreeMem */ +/*********************************************************************************/ +/* SubAlloc a new JSON item with protection against memory exhaustion. */ +/*********************************************************************************/ +static PJSON JsonNew(PGLOBAL g, JTYP type) +{ + PJSON jsp = NULL; + + try { + switch (type) { + case TYPE_JAR: + jsp = new(g) JARRAY; + break; + case TYPE_JOB: + jsp = new(g) JOBJECT; + break; + default: + break; + } // endswitch type + + } catch (...) { + if (trace(1023)) + htrc("%s\n", g->Message); + + PUSH_WARNING(g->Message); + } // end try/catch + + return jsp; +} /* end of JsonNew */ + +/*********************************************************************************/ +/* SubAlloc a new JValue with protection against memory exhaustion. */ +/*********************************************************************************/ +static PJVAL JvalNew(PGLOBAL g, JTYP type, void *vp) +{ + PJVAL jvp = NULL; + + try { + if (!vp) + jvp = new (g) JVALUE; + else switch (type) { + case TYPE_JSON: + case TYPE_JVAL: + case TYPE_JAR: + case TYPE_JOB: + jvp = new(g) JVALUE((PJSON)vp); + break; + case TYPE_VAL: + jvp = new(g) JVALUE(g, (PVAL)vp); + break; + case TYPE_STRG: + jvp = new(g) JVALUE(g, (PCSZ)vp); + break; + default: + break; + } // endswitch type + + } catch (...) { + if (trace(1023)) + htrc("%s\n", g->Message); + + PUSH_WARNING(g->Message); + } // end try/catch + + return jvp; +} /* end of JsonNew */ + /*********************************************************************************/ /* Allocate and initialise the memory area. 
*/ /*********************************************************************************/ @@ -1289,8 +1384,11 @@ static int *GetIntArgPtr(PGLOBAL g, UDF_ARGS *args, uint& n) for (uint i = n; i < args->arg_count; i++) if (args->arg_type[i] == INT_RESULT) { if (args->args[i]) { - x = (int*)PlugSubAlloc(g, NULL, sizeof(int)); - *x = (int)*(longlong*)args->args[i]; + if ((x = (int*)PlgDBSubAlloc(g, NULL, sizeof(int)))) + *x = (int)*(longlong*)args->args[i]; + else + PUSH_WARNING(g->Message); + } // endif args n = i + 1; @@ -1303,7 +1401,7 @@ static int *GetIntArgPtr(PGLOBAL g, UDF_ARGS *args, uint& n) /*********************************************************************************/ /* Returns not 0 if the argument is a JSON item or file name. */ /*********************************************************************************/ -static int IsJson(UDF_ARGS *args, uint i) +static int IsJson(UDF_ARGS *args, uint i, bool b) { int n = 0; @@ -1320,8 +1418,20 @@ static int IsJson(UDF_ARGS *args, uint i) else n = 2; // A file name may have been returned - } else if (!strnicmp(args->attributes[i], "Jfile_", 6)) + } else if (!strnicmp(args->attributes[i], "Jfile_", 6)) { n = 2; // arg is a json file name + } else if (b) { + char *sap; + PGLOBAL g = PlugInit(NULL, args->lengths[i] * M + 1024); + + JsonSubSet(g); + sap = MakePSZ(g, args, i); + + if (ParseJson(g, sap, strlen(sap))) + n = 4; + + JsonFreeMem(g); + } // endif's return n; } // end of IsJson @@ -1534,10 +1644,14 @@ static PSZ MakePSZ(PGLOBAL g, UDF_ARGS *args, int i) { if (args->arg_count > (unsigned)i && args->args[i]) { int n = args->lengths[i]; - PSZ s = (PSZ)PlugSubAlloc(g, NULL, n + 1); + PSZ s = (PSZ)PlgDBSubAlloc(g, NULL, n + 1); + + if (s) { + memcpy(s, args->args[i], n); + s[n] = 0; + } else + PUSH_WARNING(g->Message); - memcpy(s, args->args[i], n); - s[n] = 0; return s; } else return NULL; @@ -1574,9 +1688,12 @@ static PCSZ MakeKey(PGLOBAL g, UDF_ARGS *args, int i) return "Key"; if (!b) { - p = 
(PSZ)PlugSubAlloc(g, NULL, n + 1); - memcpy(p, s, n); - p[n] = 0; + if ((p = (PSZ)PlgDBSubAlloc(g, NULL, n + 1))) { + memcpy(p, s, n); + p[n] = 0; + } else + PUSH_WARNING(g->Message); + s = p; } // endif b @@ -1665,15 +1782,16 @@ static char *GetJsonFile(PGLOBAL g, char *fn) return NULL; } // endif len - str = (char*)PlugSubAlloc(g, NULL, len + 1); - - if ((n = read(h, str, len)) < 0) { - sprintf(g->Message, "Error %d reading %d bytes from %s", errno, len, fn); - return NULL; - } // endif n + if ((str = (char*)PlgDBSubAlloc(g, NULL, len + 1))) { + if ((n = read(h, str, len)) < 0) { + sprintf(g->Message, "Error %d reading %d bytes from %s", errno, len, fn); + return NULL; + } // endif n - str[n] = 0; - close(h); + str[n] = 0; + close(h); + } // endif str + return str; } // end of GetJsonFile @@ -1759,6 +1877,41 @@ static PJVAL MakeValue(PGLOBAL g, UDF_ARGS *args, uint i, PJSON *top = NULL) return jvp; } // end of MakeValue +/*********************************************************************************/ +/* Try making a JSON value of the passed type from the passed argument. */ +/*********************************************************************************/ +static PJVAL MakeTypedValue(PGLOBAL g, UDF_ARGS *args, uint i, + JTYP type, PJSON *top = NULL) +{ + char *sap; + PJSON jsp; + PJVAL jvp = MakeValue(g, args, i, top); + + //if (type == TYPE_JSON) { + // if (jvp->GetValType() >= TYPE_JSON) + // return jvp; + + //} else if (jvp->GetValType() == type) + // return jvp; + + if (jvp->GetValType() == TYPE_STRG) { + sap = jvp->GetString(g); + + if ((jsp = ParseJson(g, sap, strlen(sap)))) { + if ((type == TYPE_JSON && jsp->GetType() != TYPE_JVAL) || jsp->GetType() == type) { + if (top) + *top = jsp; + + jvp->SetValue(jsp); + } // endif Type + + } // endif jsp + + } // endif Type + + return jvp; +} // end of MakeTypedValue + /*********************************************************************************/ /* Make a Json value containing the parameter. 
*/ /*********************************************************************************/ @@ -1861,9 +2014,9 @@ my_bool json_array_add_values_init(UDF_INIT *initid, UDF_ARGS *args, char *messa if (args->arg_count < 2) { strcpy(message, "This function must have at least 2 arguments"); return true; - } else if (!IsJson(args, 0) && args->arg_type[0] != STRING_RESULT) { - strcpy(message, "First argument must be a json string or item"); - return true; + //} else if (!IsJson(args, 0, true)) { + // strcpy(message, "First argument must be a valid json string or item"); + // return true; } else CalcLen(args, false, reslen, memlen); @@ -1891,23 +2044,14 @@ char *json_array_add_values(UDF_INIT *initid, UDF_ARGS *args, char *result, if (!g->Xchk) { if (!CheckMemory(g, initid, args, args->arg_count, true)) { - char *p; PJSON top; PJAR arp; - PJVAL jvp = MakeValue(g, args, 0, &top); + PJVAL jvp = MakeTypedValue(g, args, 0, TYPE_JAR, &top); - if ((p = jvp->GetString(g))) { - if (!(top = ParseJson(g, p, strlen(p)))) { - PUSH_WARNING(g->Message); - return NULL; - } // endif jsp - - jvp->SetValue(top); - } // endif p - if (jvp->GetValType() != TYPE_JAR) { arp = new(g)JARRAY; arp->AddValue(g, jvp); + top = arp; } else arp = jvp->GetArray(); @@ -1915,7 +2059,6 @@ char *json_array_add_values(UDF_INIT *initid, UDF_ARGS *args, char *result, arp->AddValue(g, MakeValue(g, args, i)); arp->InitArray(g); -// str = Serialize(g, arp, NULL, 0); str = MakeResult(g, args, top, args->arg_count); } // endif CheckMemory @@ -1952,10 +2095,10 @@ my_bool json_array_add_init(UDF_INIT *initid, UDF_ARGS *args, char *message) if (args->arg_count < 2) { strcpy(message, "This function must have at least 2 arguments"); - return true; - } else if (!IsJson(args, 0)) { - strcpy(message, "First argument must be a json item"); - return true; + return true; + //} else if (!IsJson(args, 0, true)) { + // strcpy(message, "First argument is not a valid Json item"); + // return true; } else CalcLen(args, false, reslen, 
memlen, true); @@ -1994,22 +2137,38 @@ char *json_array_add(UDF_INIT *initid, UDF_ARGS *args, char *result, PJVAL jvp; PJAR arp; - jvp = MakeValue(g, args, 0, &top); + jvp = MakeTypedValue(g, args, 0, TYPE_JSON, &top); jsp = jvp->GetJson(); x = GetIntArgPtr(g, args, n); if (CheckPath(g, args, jsp, jvp, 2)) PUSH_WARNING(g->Message); - else if (jvp && jvp->GetValType() == TYPE_JAR) { + else if (jvp) { PGLOBAL gb = GetMemPtr(g, args, 0); - arp = jvp->GetArray(); - arp->AddValue(gb, MakeValue(gb, args, 1), x); - arp->InitArray(gb); - str = MakeResult(g, args, top, n); + if (jvp->GetValType() != TYPE_JAR) { + if ((arp = (PJAR)JsonNew(gb, TYPE_JAR))) { + arp->AddValue(gb, JvalNew(gb, TYPE_JVAL, jvp)); + jvp->SetValue(arp); + + if (!top) + top = arp; + + } // endif arp + + } else + arp = jvp->GetArray(); + + if (arp) { + arp->AddValue(gb, MakeValue(gb, args, 1), x); + arp->InitArray(gb); + str = MakeResult(g, args, top, n); + } else + PUSH_WARNING(gb->Message); + } else { - PUSH_WARNING("First argument target is not an array"); -// if (g->Mrr) *error = 1; (only if no path) + PUSH_WARNING("Target is not an array"); + // if (g->Mrr) *error = 1; (only if no path) } // endif jvp } // endif CheckMemory @@ -2048,9 +2207,6 @@ my_bool json_array_delete_init(UDF_INIT *initid, UDF_ARGS *args, char *message) if (args->arg_count < 2) { strcpy(message, "This function must have at least 2 arguments"); return true; - } else if (!IsJson(args, 0)) { - strcpy(message, "First argument must be a json item"); - return true; } else CalcLen(args, false, reslen, memlen, true); @@ -2087,7 +2243,7 @@ char *json_array_delete(UDF_INIT *initid, UDF_ARGS *args, char *result, uint n = 1; PJSON top; PJAR arp; - PJVAL jvp = MakeValue(g, args, 0, &top); + PJVAL jvp = MakeTypedValue(g, args, 0, TYPE_JSON, &top); if (!(x = GetIntArgPtr(g, args, n))) PUSH_WARNING("Missing or null array index"); @@ -2186,9 +2342,14 @@ long long jsonsum_int(UDF_INIT *initid, UDF_ARGS *args, char *is_null, char *err if (g->N) { 
// Keep result of constant function - long long *np = (long long*)PlugSubAlloc(g, NULL, sizeof(long long)); - *np = n; - g->Activityp = (PACTIVITY)np; + long long *np; + + if ((np = (long long*)PlgDBSubAlloc(g, NULL, sizeof(long long)))) { + *np = n; + g->Activityp = (PACTIVITY)np; + } else + PUSH_WARNING(g->Message); + } // endif const_item return n; @@ -2252,13 +2413,21 @@ double jsonsum_real(UDF_INIT *initid, UDF_ARGS *args, char *is_null, char *error } else { *error = 1; n = -1.0; - } // end of CheckMemory + } // endif CheckMemory if (g->N) { // Keep result of constant function - double *np = (double*)PlugSubAlloc(g, NULL, sizeof(double)); - *np = n; - g->Activityp = (PACTIVITY)np; + double *np; + + if ((np = (double*)PlgDBSubAlloc(g, NULL, sizeof(double)))) { + *np = n; + g->Activityp = (PACTIVITY)np; + } else { + PUSH_WARNING(g->Message); + *error = 1; + n = -1.0; + } // endif np + } // endif const_item return n; @@ -2312,13 +2481,20 @@ double jsonavg_real(UDF_INIT *initid, UDF_ARGS *args, char *is_null, char *error } else { *error = 1; n = -1.0; - } // end of CheckMemory + } // endif CheckMemory if (g->N) { // Keep result of constant function - double *np = (double*)PlugSubAlloc(g, NULL, sizeof(double)); - *np = n; - g->Activityp = (PACTIVITY)np; + double *np; + + if ((np = (double*)PlgDBSubAlloc(g, NULL, sizeof(double)))) { + *np = n; + g->Activityp = (PACTIVITY)np; + } else { + *error = 1; + n = -1.0; + } // endif np + } // endif const_item return n; @@ -2348,12 +2524,15 @@ char *json_make_object(UDF_INIT *initid, UDF_ARGS *args, char *result, if (!g->Xchk) { if (!CheckMemory(g, initid, args, args->arg_count, false, false, true)) { - PJOB objp = new(g)JOBJECT; + PJOB objp; + + if ((objp = (PJOB)JsonNew(g, TYPE_JOB))) { + for (uint i = 0; i < args->arg_count; i++) + objp->SetValue(g, MakeValue(g, args, i), MakeKey(g, args, i)); - for (uint i = 0; i < args->arg_count; i++) - objp->SetValue(g, MakeValue(g, args, i), MakeKey(g, args, i)); + str = Serialize(g, 
objp, NULL, 0); + } // endif objp - str = Serialize(g, objp, NULL, 0); } // endif CheckMemory if (!str) @@ -2394,13 +2573,16 @@ char *json_object_nonull(UDF_INIT *initid, UDF_ARGS *args, char *result, if (!g->Xchk) { if (!CheckMemory(g, initid, args, args->arg_count, false, true)) { PJVAL jvp; - PJOB objp = new(g)JOBJECT; + PJOB objp; + + if ((objp = (PJOB)JsonNew(g, TYPE_JOB))) { + for (uint i = 0; i < args->arg_count; i++) + if (!(jvp = MakeValue(g, args, i))->IsNull()) + objp->SetValue(g, jvp, MakeKey(g, args, i)); - for (uint i = 0; i < args->arg_count; i++) - if (!(jvp = MakeValue(g, args, i))->IsNull()) - objp->SetValue(g, jvp, MakeKey(g, args, i)); + str = Serialize(g, objp, NULL, 0); + } // endif objp - str = Serialize(g, objp, NULL, 0); } // endif CheckMemory if (!str) @@ -2444,12 +2626,15 @@ char *json_object_key(UDF_INIT *initid, UDF_ARGS *args, char *result, if (!g->Xchk) { if (!CheckMemory(g, initid, args, args->arg_count, false, true)) { - PJOB objp = new(g)JOBJECT; + PJOB objp; - for (uint i = 0; i < args->arg_count; i += 2) - objp->SetValue(g, MakeValue(g, args, i+1), MakePSZ(g, args, i)); + if ((objp = (PJOB)JsonNew(g, TYPE_JOB))) { + for (uint i = 0; i < args->arg_count; i += 2) + objp->SetValue(g, MakeValue(g, args, i + 1), MakePSZ(g, args, i)); + + str = Serialize(g, objp, NULL, 0); + } // endif objp - str = Serialize(g, objp, NULL, 0); } // endif CheckMemory if (!str) @@ -2731,6 +2916,82 @@ void json_object_list_deinit(UDF_INIT* initid) JsonFreeMem((PGLOBAL)initid->ptr); } // end of json_object_list_deinit +/*********************************************************************************/ +/* Returns an array of the Json object values. 
*/ +/*********************************************************************************/ +my_bool json_object_values_init(UDF_INIT *initid, UDF_ARGS *args, char *message) +{ + unsigned long reslen, memlen; + + if (args->arg_count != 1) { + strcpy(message, "This function must have 1 argument"); + return true; + } else if (!IsJson(args, 0) && args->arg_type[0] != STRING_RESULT) { + strcpy(message, "Argument must be a json object"); + return true; + } else + CalcLen(args, false, reslen, memlen); + + return JsonInit(initid, args, message, true, reslen, memlen); +} // end of json_object_list_init + +char *json_object_values(UDF_INIT *initid, UDF_ARGS *args, char *result, + unsigned long *res_length, char *is_null, char *error) +{ + char *str = NULL; + PGLOBAL g = (PGLOBAL)initid->ptr; + + if (!g->N) { + if (!CheckMemory(g, initid, args, 1, true, true)) { + char *p; + PJSON jsp; + PJVAL jvp = MakeValue(g, args, 0); + + if ((p = jvp->GetString(g))) { + if (!(jsp = ParseJson(g, p, strlen(p)))) { + PUSH_WARNING(g->Message); + return NULL; + } // endif jsp + + } else + jsp = jvp->GetJson(); + + if (jsp->GetType() == TYPE_JOB) { + PJAR jarp = ((PJOB)jsp)->GetValList(g); + + if (!(str = Serialize(g, jarp, NULL, 0))) + PUSH_WARNING(g->Message); + + } else { + PUSH_WARNING("First argument is not an object"); + if (g->Mrr) *error = 1; + } // endif jvp + + } // endif CheckMemory + + if (initid->const_item) { + // Keep result of constant function + g->Xchk = str; + g->N = 1; // str can be NULL + } // endif const_item + + } else + str = (char*)g->Xchk; + + if (!str) { + *is_null = 1; + *res_length = 0; + } else + *res_length = strlen(str); + + return str; +} // end of json_object_values + +void json_object_values_deinit(UDF_INIT* initid) +{ + JsonFreeMem((PGLOBAL)initid->ptr); +} // end of json_object_values_deinit + /*********************************************************************************/ /* Set the value of JsonGrpSize. 
*/ /*********************************************************************************/ @@ -2795,7 +3056,7 @@ my_bool json_array_grp_init(UDF_INIT *initid, UDF_ARGS *args, char *message) PGLOBAL g = (PGLOBAL)initid->ptr; PlugSubSet(g, g->Sarea, g->Sarea_Size); - g->Activityp = (PACTIVITY)new(g) JARRAY; + g->Activityp = (PACTIVITY)JsonNew(g, TYPE_JAR); g->N = (int)n; return false; } // end of json_array_grp_init @@ -2805,7 +3066,7 @@ void json_array_grp_add(UDF_INIT *initid, UDF_ARGS *args, char*, char*) PGLOBAL g = (PGLOBAL)initid->ptr; PJAR arp = (PJAR)g->Activityp; - if (g->N-- > 0) + if (arp && g->N-- > 0) arp->AddValue(g, MakeValue(g, args, 0)); } // end of json_array_grp_add @@ -2820,12 +3081,16 @@ char *json_array_grp(UDF_INIT *initid, UDF_ARGS *, char *result, if (g->N < 0) PUSH_WARNING("Result truncated to json_grp_size values"); - arp->InitArray(g); + if (arp) { + arp->InitArray(g); + str = Serialize(g, arp, NULL, 0); + } else + str = NULL; - if (!(str = Serialize(g, arp, NULL, 0))) - str = strcpy(result, g->Message); + if (!str) + str = strcpy(result, g->Message); - *res_length = strlen(str); + *res_length = strlen(str); return str; } // end of json_array_grp @@ -2834,8 +3099,8 @@ void json_array_grp_clear(UDF_INIT *initid, char*, char*) PGLOBAL g = (PGLOBAL)initid->ptr; PlugSubSet(g, g->Sarea, g->Sarea_Size); - g->Activityp = (PACTIVITY)new(g) JARRAY; - g->N = GetJsonGroupSize(); + g->Activityp = (PACTIVITY)JsonNew(g, TYPE_JAR); + g->N = GetJsonGroupSize(); } // end of json_array_grp_clear void json_array_grp_deinit(UDF_INIT* initid) @@ -2868,7 +3133,7 @@ my_bool json_object_grp_init(UDF_INIT *initid, UDF_ARGS *args, char *message) PGLOBAL g = (PGLOBAL)initid->ptr; PlugSubSet(g, g->Sarea, g->Sarea_Size); - g->Activityp = (PACTIVITY)new(g) JOBJECT; + g->Activityp = (PACTIVITY)JsonNew(g, TYPE_JOB); g->N = (int)n; return false; } // end of json_object_grp_init @@ -2893,7 +3158,7 @@ char *json_object_grp(UDF_INIT *initid, UDF_ARGS *, char *result, if (g->N < 
0) PUSH_WARNING("Result truncated to json_grp_size values"); - if (!(str = Serialize(g, objp, NULL, 0))) + if (!objp || !(str = Serialize(g, objp, NULL, 0))) str = strcpy(result, g->Message); *res_length = strlen(str); @@ -2905,8 +3170,8 @@ void json_object_grp_clear(UDF_INIT *initid, char*, char*) PGLOBAL g = (PGLOBAL)initid->ptr; PlugSubSet(g, g->Sarea, g->Sarea_Size); - g->Activityp = (PACTIVITY)new(g) JOBJECT; - g->N = GetJsonGroupSize(); + g->Activityp = (PACTIVITY)JsonNew(g, TYPE_JOB); + g->N = GetJsonGroupSize(); } // end of json_object_grp_clear void json_object_grp_deinit(UDF_INIT* initid) @@ -3051,7 +3316,7 @@ my_bool json_get_item_init(UDF_INIT *initid, UDF_ARGS *args, char *message) char *json_get_item(UDF_INIT *initid, UDF_ARGS *args, char *result, unsigned long *res_length, char *is_null, char *) { - char *p, *path, *str = NULL; + char *path, *str = NULL; PJSON jsp; PJVAL jvp; PJSNX jsx; @@ -3067,17 +3332,10 @@ char *json_get_item(UDF_INIT *initid, UDF_ARGS *args, char *result, if (CheckMemory(g, initid, args, 1, true, true)) { PUSH_WARNING("CheckMemory error"); goto fin; - } else - jvp = MakeValue(g, args, 0); + } // endif CheckMemory - if ((p = jvp->GetString(g))) { - if (!(jsp = ParseJson(g, p, strlen(p)))) { - PUSH_WARNING(g->Message); - return NULL; - } // endif jsp - - } else - jsp = jvp->GetJson(); + jvp = MakeTypedValue(g, args, 0, TYPE_JSON); + jsp = jvp->GetJson(); if (g->Mrr) { // First argument is a constant g->Xchk = jsp; @@ -3088,9 +3346,9 @@ char *json_get_item(UDF_INIT *initid, UDF_ARGS *args, char *result, jsp = (PJSON)g->Xchk; path = MakePSZ(g, args, 1); - jsx = new(g) JSNX(g, jsp, TYPE_STRING, initid->max_length); + jsx = JsnxNew(g, jsp, TYPE_STRING, initid->max_length); - if (jsx->SetJpath(g, path, true)) { + if (!jsx || jsx->SetJpath(g, path, true)) { PUSH_WARNING(g->Message); *is_null = 1; return NULL; @@ -3203,9 +3461,9 @@ char *jsonget_string(UDF_INIT *initid, UDF_ARGS *args, char *result, jsp = (PJSON)g->Xchk; path = 
MakePSZ(g, args, 1); - jsx = new(g) JSNX(g, jsp, TYPE_STRING, initid->max_length); + jsx = JsnxNew(g, jsp, TYPE_STRING, initid->max_length); - if (jsx->SetJpath(g, path)) { + if (!jsx || jsx->SetJpath(g, path)) { PUSH_WARNING(g->Message); goto err; } // endif SetJpath @@ -3320,9 +3578,9 @@ long long jsonget_int(UDF_INIT *initid, UDF_ARGS *args, jsp = (PJSON)g->Xchk; path = MakePSZ(g, args, 1); - jsx = new(g) JSNX(g, jsp, TYPE_BIGINT); + jsx = JsnxNew(g, jsp, TYPE_BIGINT); - if (jsx->SetJpath(g, path)) { + if (!jsx || jsx->SetJpath(g, path)) { PUSH_WARNING(g->Message); *is_null = 1; return 0; @@ -3339,9 +3597,14 @@ long long jsonget_int(UDF_INIT *initid, UDF_ARGS *args, if (initid->const_item) { // Keep result of constant function - long long *np = (long long*)PlugSubAlloc(g, NULL, sizeof(long long)); - *np = n; - g->Activityp = (PACTIVITY)np; + long long *np = (long long*)PlgDBSubAlloc(g, NULL, sizeof(long long)); + + if (np) { + *np = n; + g->Activityp = (PACTIVITY)np; + } else + PUSH_WARNING(g->Message); + } // endif const_item return n; @@ -3434,9 +3697,9 @@ double jsonget_real(UDF_INIT *initid, UDF_ARGS *args, jsp = (PJSON)g->Xchk; path = MakePSZ(g, args, 1); - jsx = new(g) JSNX(g, jsp, TYPE_DOUBLE); + jsx = JsnxNew(g, jsp, TYPE_DOUBLE); - if (jsx->SetJpath(g, path)) { + if (!jsx || jsx->SetJpath(g, path)) { PUSH_WARNING(g->Message); *is_null = 1; return 0.0; @@ -3453,9 +3716,17 @@ double jsonget_real(UDF_INIT *initid, UDF_ARGS *args, if (initid->const_item) { // Keep result of constant function - double *dp = (double*)PlugSubAlloc(g, NULL, sizeof(double)); - *dp = d; - g->Activityp = (PACTIVITY)dp; + double *dp; + + if ((dp = (double*)PlgDBSubAlloc(g, NULL, sizeof(double)))) { + *dp = d; + g->Activityp = (PACTIVITY)dp; + } else { + PUSH_WARNING(g->Message); + *is_null = 1; + return 0.0; + } // endif dp + } // endif const_item return d; @@ -3501,7 +3772,7 @@ my_bool jsonlocate_init(UDF_INIT *initid, UDF_ARGS *args, char *message) char *jsonlocate(UDF_INIT 
*initid, UDF_ARGS *args, char *result, unsigned long *res_length, char *is_null, char *error) { - char *p, *path = NULL; + char *path = NULL; int k; PJVAL jvp, jvp2; PJSON jsp; @@ -3529,16 +3800,20 @@ char *jsonlocate(UDF_INIT *initid, UDF_ARGS *args, char *result, *error = 1; goto err; } else - jvp = MakeValue(g, args, 0); + jvp = MakeTypedValue(g, args, 0, TYPE_JSON); - if ((p = jvp->GetString(g))) { - if (!(jsp = ParseJson(g, p, strlen(p)))) { - PUSH_WARNING(g->Message); - goto err; - } // endif jsp + //if ((p = jvp->GetString(g))) { + // if (!(jsp = ParseJson(g, p, strlen(p)))) { + // PUSH_WARNING(g->Message); + // goto err; + // } // endif jsp + //} else + // jsp = jvp->GetJson(); - } else - jsp = jvp->GetJson(); + if (!(jsp = jvp->GetJson())) { + PUSH_WARNING("First argument is not a valid JSON item"); + goto err; + } // endif jsp if (g->Mrr) { // First argument is a constant g->Xchk = jsp; @@ -3845,9 +4120,9 @@ long long jsoncontains_path(UDF_INIT *initid, UDF_ARGS *args, char *result, jsp = (PJSON)g->Xchk; path = MakePSZ(g, args, 1); - jsx = new(g)JSNX(g, jsp, TYPE_BIGINT); + jsx = JsnxNew(g, jsp, TYPE_BIGINT); - if (jsx->SetJpath(g, path)) { + if (!jsx || jsx->SetJpath(g, path)) { PUSH_WARNING(g->Message); goto err; } // endif SetJpath @@ -3856,9 +4131,14 @@ long long jsoncontains_path(UDF_INIT *initid, UDF_ARGS *args, char *result, if (initid->const_item) { // Keep result of constant function - long long *np = (long long*)PlugSubAlloc(g, NULL, sizeof(long long)); - *np = n; - g->Activityp = (PACTIVITY)np; + long long *np = (long long*)PlgDBSubAlloc(g, NULL, sizeof(long long)); + + if (np) { + *np = n; + g->Activityp = (PACTIVITY)np; + } else + PUSH_WARNING(g->Message); + } // endif const_item return n; @@ -4335,18 +4615,23 @@ char *jbin_array(UDF_INIT *initid, UDF_ARGS *args, char *result, if (!bsp || bsp->Changed) { if (!CheckMemory(g, initid, args, args->arg_count, false)) { - PJAR arp = new(g) JARRAY; + PJAR arp; - bsp = JbinAlloc(g, args, 
initid->max_length, arp); - strcat(bsp->Msg, " array"); + if ((arp = (PJAR)JsonNew(g, TYPE_JAR)) && + (bsp = JbinAlloc(g, args, initid->max_length, arp))) { + strcat(bsp->Msg, " array"); - for (uint i = 0; i < args->arg_count; i++) - arp->AddValue(g, MakeValue(g, args, i)); + for (uint i = 0; i < args->arg_count; i++) + arp->AddValue(g, MakeValue(g, args, i)); + + arp->InitArray(g); + } // endif arp && bsp - arp->InitArray(g); } else - if ((bsp = JbinAlloc(g, args, initid->max_length, NULL))) - strncpy(bsp->Msg, g->Message, 139); + bsp = NULL; + + if (!bsp && (bsp = JbinAlloc(g, args, initid->max_length, NULL))) + strncpy(bsp->Msg, g->Message, 139); // Keep result of constant function g->Xchk = (initid->const_item) ? bsp : NULL; @@ -4377,9 +4662,6 @@ my_bool jbin_array_add_values_init(UDF_INIT *initid, UDF_ARGS *args, char *messa if (args->arg_count < 2) { strcpy(message, "This function must have at least 2 arguments"); return true; - } else if (!IsJson(args, 0) && args->arg_type[0] != STRING_RESULT) { - strcpy(message, "First argument must be a json string or item"); - return true; } else CalcLen(args, false, reslen, memlen); @@ -4394,24 +4676,17 @@ char *jbin_array_add_values(UDF_INIT *initid, UDF_ARGS *args, char *result, if (!bsp || bsp->Changed) { if (!CheckMemory(g, initid, args, args->arg_count, true)) { - char *p; PJSON top; PJAR arp; - PJVAL jvp = MakeValue(g, args, 0, &top); + PJVAL jvp = MakeTypedValue(g, args, 0, TYPE_JAR, &top); PGLOBAL gb = GetMemPtr(g, args, 0); - if ((p = jvp->GetString(g))) { - if (!(top = ParseJson(g, p, strlen(p)))) { - PUSH_WARNING(g->Message); - return NULL; - } // endif jsp - - jvp->SetValue(top); - } // endif p - if (jvp->GetValType() != TYPE_JAR) { - arp = new(gb)JARRAY; - arp->AddValue(gb, jvp); + if ((arp = (PJAR)JsonNew(gb, TYPE_JAR))) { + arp->AddValue(gb, jvp); + top = arp; + } // endif arp + } else arp = jvp->GetArray(); @@ -4458,9 +4733,9 @@ my_bool jbin_array_add_init(UDF_INIT *initid, UDF_ARGS *args, char *message) 
if (args->arg_count < 2) { strcpy(message, "This function must have at least 2 arguments"); return true; - } else if (!IsJson(args, 0)) { - strcpy(message, "First argument must be a json item"); - return true; + //} else if (!IsJson(args, 0)) { + // strcpy(message, "First argument must be a json item"); + // return true; } else CalcLen(args, false, reslen, memlen, true); @@ -4488,20 +4763,32 @@ char *jbin_array_add(UDF_INIT *initid, UDF_ARGS *args, char *result, PJVAL jvp; PJAR arp; - jvp = MakeValue(g, args, 0, &top); -// jsp = jvp->GetJson(); + jvp = MakeTypedValue(g, args, 0, TYPE_JSON, &top); + // jsp = jvp->GetJson(); x = GetIntArgPtr(g, args, n); if (CheckPath(g, args, top, jvp, n)) PUSH_WARNING(g->Message); - else if (jvp && jvp->GetValType() == TYPE_JAR) { + else if (jvp) { PGLOBAL gb = GetMemPtr(g, args, 0); - arp = jvp->GetArray(); + if (jvp->GetValType() != TYPE_JAR) { + if ((arp = (PJAR)JsonNew(gb, TYPE_JAR))) { + arp->AddValue(gb, (PJVAL)JvalNew(gb, TYPE_JVAL, jvp)); + jvp->SetValue(arp); + + if (!top) + top = arp; + + } // endif arp + + } else + arp = jvp->GetArray(); + arp->AddValue(gb, MakeValue(gb, args, 1), x); arp->InitArray(gb); } else { - PUSH_WARNING("First argument is not an array"); + PUSH_WARNING("First argument target is not an array"); // if (g->Mrr) *error = 1; (only if no path) } // endif jvp @@ -4539,9 +4826,6 @@ my_bool jbin_array_delete_init(UDF_INIT *initid, UDF_ARGS *args, char *message) if (args->arg_count < 2) { strcpy(message, "This function must have at least 2 arguments"); return true; - } else if (!IsJson(args, 0)) { - strcpy(message, "First argument must be a json item"); - return true; } else CalcLen(args, false, reslen, memlen, true); @@ -4565,7 +4849,7 @@ char *jbin_array_delete(UDF_INIT *initid, UDF_ARGS *args, char *result, int *x; uint n = 1; PJAR arp; - PJVAL jvp = MakeValue(g, args, 0, &top); + PJVAL jvp = MakeTypedValue(g, args, 0, TYPE_JSON, &top); if (CheckPath(g, args, top, jvp, 1)) PUSH_WARNING(g->Message); @@ 
-4578,8 +4862,8 @@ char *jbin_array_delete(UDF_INIT *initid, UDF_ARGS *args, char *result, PUSH_WARNING("Missing or null array index"); } else { - PUSH_WARNING("First argument is not an array"); - if (g->Mrr) *error = 1; + PUSH_WARNING("First argument target is not an array"); +// if (g->Mrr) *error = 1; } // endif jvp } // endif CheckMemory @@ -4625,13 +4909,18 @@ char *jbin_object(UDF_INIT *initid, UDF_ARGS *args, char *result, if (!bsp || bsp->Changed) { if (!CheckMemory(g, initid, args, args->arg_count, true)) { - PJOB objp = new(g)JOBJECT; + PJOB objp; - for (uint i = 0; i < args->arg_count; i++) - objp->SetValue(g, MakeValue(g, args, i), MakeKey(g, args, i)); + if ((objp = (PJOB)JsonNew(g, TYPE_JOB))) { + for (uint i = 0; i < args->arg_count; i++) + objp->SetValue(g, MakeValue(g, args, i), MakeKey(g, args, i)); - if ((bsp = JbinAlloc(g, args, initid->max_length, objp))) - strcat(bsp->Msg, " object"); + + if ((bsp = JbinAlloc(g, args, initid->max_length, objp))) + strcat(bsp->Msg, " object"); + + } else + bsp = NULL; } else if ((bsp = JbinAlloc(g, args, initid->max_length, NULL))) @@ -4676,14 +4965,18 @@ char *jbin_object_nonull(UDF_INIT *initid, UDF_ARGS *args, char *result, if (!bsp || bsp->Changed) { if (!CheckMemory(g, initid, args, args->arg_count, false, true)) { PJVAL jvp; - PJOB objp = new(g)JOBJECT; + PJOB objp; - for (uint i = 0; i < args->arg_count; i++) - if (!(jvp = MakeValue(g, args, i))->IsNull()) - objp->SetValue(g, jvp, MakeKey(g, args, i)); + if ((objp = (PJOB)JsonNew(g, TYPE_JOB))) { + for (uint i = 0; i < args->arg_count; i++) + if (!(jvp = MakeValue(g, args, i))->IsNull()) + objp->SetValue(g, jvp, MakeKey(g, args, i)); - if ((bsp = JbinAlloc(g, args, initid->max_length, objp))) - strcat(bsp->Msg, " object"); + if ((bsp = JbinAlloc(g, args, initid->max_length, objp))) + strcat(bsp->Msg, " object"); + + } else + bsp = NULL; } else if ((bsp = JbinAlloc(g, args, initid->max_length, NULL))) @@ -4732,13 +5025,17 @@ char *jbin_object_key(UDF_INIT 
*initid, UDF_ARGS *args, char *result, if (!bsp || bsp->Changed) { if (!CheckMemory(g, initid, args, args->arg_count, false, true)) { - PJOB objp = new(g)JOBJECT; + PJOB objp; - for (uint i = 0; i < args->arg_count; i += 2) - objp->SetValue(g, MakeValue(g, args, i+1), MakePSZ(g, args, i)); + if ((objp = (PJOB)JsonNew(g, TYPE_JOB))) { + for (uint i = 0; i < args->arg_count; i += 2) + objp->SetValue(g, MakeValue(g, args, i + 1), MakePSZ(g, args, i)); - if ((bsp = JbinAlloc(g, args, initid->max_length, objp))) - strcat(bsp->Msg, " object"); + if ((bsp = JbinAlloc(g, args, initid->max_length, objp))) + strcat(bsp->Msg, " object"); + + } else + bsp = NULL; } else if ((bsp = JbinAlloc(g, args, initid->max_length, NULL))) @@ -4989,7 +5286,7 @@ my_bool jbin_get_item_init(UDF_INIT *initid, UDF_ARGS *args, char *message) char *jbin_get_item(UDF_INIT *initid, UDF_ARGS *args, char *result, unsigned long *res_length, char *is_null, char *error) { - char *p, *path; + char *path; PJSON jsp; PJSNX jsx; PJVAL jvp; @@ -5006,17 +5303,10 @@ char *jbin_get_item(UDF_INIT *initid, UDF_ARGS *args, char *result, if (CheckMemory(g, initid, args, 1, true, true)) { PUSH_WARNING("CheckMemory error"); goto fin; - } else - jvp = MakeValue(g, args, 0); + } // endif CheckMemory - if ((p = jvp->GetString(g))) { - if (!(jsp = ParseJson(g, p, strlen(p)))) { - PUSH_WARNING(g->Message); - goto fin; - } // endif jsp - - } else - jsp = jvp->GetJson(); + jvp = MakeTypedValue(g, args, 0, TYPE_JSON); + jsp = jvp->GetJson(); if (g->Mrr) { // First argument is a constant g->Xchk = jsp; @@ -5027,16 +5317,16 @@ char *jbin_get_item(UDF_INIT *initid, UDF_ARGS *args, char *result, jsp = (PJSON)g->Xchk; path = MakePSZ(g, args, 1); - jsx = new(g) JSNX(g, jsp, TYPE_STRING, initid->max_length); + jsx = JsnxNew(g, jsp, TYPE_STRING, initid->max_length); - if (jsx->SetJpath(g, path, false)) { + if (!jsx || jsx->SetJpath(g, path, false)) { PUSH_WARNING(g->Message); goto fin; } // endif SetJpath // Get the json tree if 
((jvp = jsx->GetRowValue(g, jsp, 0, false))) { - jsp = (jvp->GetJsp()) ? jvp->GetJsp() : new(g) JVALUE(g, jvp->GetValue()); + jsp = (jvp->GetJsp()) ? jvp->GetJsp() : JvalNew(g, TYPE_VAL, jvp->GetValue()); if ((bsp = JbinAlloc(g, args, initid->max_length, jsp))) strcat(bsp->Msg, " item"); diff --git a/storage/connect/jsonudf.h b/storage/connect/jsonudf.h index cd3b9768f7a..23e8c0e1aed 100644 --- a/storage/connect/jsonudf.h +++ b/storage/connect/jsonudf.h @@ -89,6 +89,10 @@ extern "C" { DllExport char *json_object_list(UDF_EXEC_ARGS); DllExport void json_object_list_deinit(UDF_INIT*); + DllExport my_bool json_object_values_init(UDF_INIT*, UDF_ARGS*, char*); + DllExport char *json_object_values(UDF_EXEC_ARGS); + DllExport void json_object_values_deinit(UDF_INIT*); + DllExport my_bool jsonset_grp_size_init(UDF_INIT*, UDF_ARGS*, char*); DllExport long long jsonset_grp_size(UDF_INIT*, UDF_ARGS*, char*, char*); diff --git a/storage/connect/macutil.cpp b/storage/connect/macutil.cpp index b9600bdac2e..f95f3adcc6e 100644 --- a/storage/connect/macutil.cpp +++ b/storage/connect/macutil.cpp @@ -230,13 +230,13 @@ bool MACINFO::GetOneInfo(PGLOBAL g, int flag, void *v, int lv) case 11: // Description if ((p = strstr(Curp->Description, " - Packet Scheduler Miniport"))) { strncpy(buf, Curp->Description, p - Curp->Description); - i = p - Curp->Description; + i = (int)(p - Curp->Description); strncpy(buf, Curp->Description, i); buf[i] = 0; p = buf; } else if ((p = strstr(Curp->Description, " - Miniport d'ordonnancement de paquets"))) { - i = p - Curp->Description; + i = (int)(p - Curp->Description); strncpy(buf, Curp->Description, i); buf[i] = 0; p = buf; diff --git a/storage/connect/myconn.cpp b/storage/connect/myconn.cpp index 9da9c268c3d..253c42bb002 100644 --- a/storage/connect/myconn.cpp +++ b/storage/connect/myconn.cpp @@ -248,7 +248,7 @@ PQRYRES MyColumns(PGLOBAL g, THD *thd, const char *host, const char *db, while (true) { p2 = strchr(p1, '\''); - len = MY_MAX(len, p2 - p1); + 
len = MY_MAX(len, (int)(p2 - p1)); if (*++p2 != ',') break; p1 = p2 + 2; } // endwhile @@ -933,8 +933,9 @@ PQRYRES MYSQLC::GetResult(PGLOBAL g, bool pdb) crp->Prec = (crp->Type == TYPE_DOUBLE || crp->Type == TYPE_DECIM) ? fld->decimals : 0; - crp->Length = MY_MAX(fld->length, fld->max_length); - crp->Clen = GetTypeSize(crp->Type, crp->Length); + CHARSET_INFO *cs= get_charset(fld->charsetnr, MYF(0)); + crp->Clen = GetTypeSize(crp->Type, fld->length); + crp->Length = fld->length / (cs ? cs->mbmaxlen : 1); uns = (fld->flags & (UNSIGNED_FLAG | ZEROFILL_FLAG)) ? true : false; if (!(crp->Kdata = AllocValBlock(g, NULL, crp->Type, m_Rows, diff --git a/storage/connect/mysql-test/connect/r/json_udf.result b/storage/connect/mysql-test/connect/r/json_udf.result index 7d81ca5e73d..09544bb1ecb 100644 --- a/storage/connect/mysql-test/connect/r/json_udf.result +++ b/storage/connect/mysql-test/connect/r/json_udf.result @@ -50,17 +50,19 @@ SELECT Json_Array_Add(Json_Make_Array(56, 3.1416, 'foo', NULL), 'One more') Arra Array [56,3.141600,"foo",null,"One more"] SELECT Json_Array_Add(JsonValue('one value'), 'One more'); -ERROR HY000: Can't initialize function 'json_array_add'; First argument must be a json item +Json_Array_Add(JsonValue('one value'), 'One more') +["\"one value\"","One more"] SELECT Json_Array_Add('one value', 'One more'); -ERROR HY000: Can't initialize function 'json_array_add'; First argument must be a json item +Json_Array_Add('one value', 'One more') +["one value","One more"] SELECT Json_Array_Add('one value' json_, 'One more'); Json_Array_Add('one value' json_, 'One more') one value Warnings: Warning 1105 Error 2 opening one value -Warning 1105 First argument target is not an array SELECT Json_Array_Add(5 json_, 'One more'); -ERROR HY000: Can't initialize function 'json_array_add'; First argument must be a json item +Json_Array_Add(5 json_, 'One more') +[5,"One more"] SELECT Json_Array_Add('[5,3,8,7,9]' json_, 4, 0); Json_Array_Add('[5,3,8,7,9]' json_, 4, 0) 
[4,5,3,8,7,9] diff --git a/storage/connect/mysql-test/connect/r/json_udf_bin.result b/storage/connect/mysql-test/connect/r/json_udf_bin.result index 0c009d612fe..d0819619c33 100644 --- a/storage/connect/mysql-test/connect/r/json_udf_bin.result +++ b/storage/connect/mysql-test/connect/r/json_udf_bin.result @@ -272,10 +272,9 @@ Json_Serialize(Jbin_Array('a','b','c')) ["a","b","c"] SELECT Json_Serialize(Jbin_Array_Add(Jbin_File('not_exist.json'), 'd')); Json_Serialize(Jbin_Array_Add(Jbin_File('not_exist.json'), 'd')) -Null json tree +[null,"d"] Warnings: Warning 1105 Open(map) error 2 on not_exist.json -Warning 1105 First argument is not an array # This does not modify the file SELECT Json_Serialize(Jbin_Array_Add(Jbin_File('bt1.json'), 'd')); Json_Serialize(Jbin_Array_Add(Jbin_File('bt1.json'), 'd')) diff --git a/storage/connect/mysql-test/connect/r/vcol.result b/storage/connect/mysql-test/connect/r/vcol.result new file mode 100644 index 00000000000..e0fd37203e4 --- /dev/null +++ b/storage/connect/mysql-test/connect/r/vcol.result @@ -0,0 +1,29 @@ +create table t1 ( +#linenum int(6) not null default 0 special=rowid, +name char(12) not null, +city char(11) not null, +birth date not null date_format='DD/MM/YYYY', +hired date not null date_format='DD/MM/YYYY' flag=36, +agehired int(3) as (floor(datediff(hired,birth)/365.25)) +) +engine=CONNECT table_type=FIX file_name='boys.txt' mapped=YES lrecl=47 ending=1; +select * from t1; +name city birth hired agehired +John Boston 1986-01-25 2010-06-02 24 +Henry Boston 1987-06-07 2008-04-01 20 +George San Jose 1981-08-10 2010-06-02 28 +Sam Chicago 1979-11-22 2007-10-10 27 +James Dallas 1992-05-13 2009-12-14 17 +Bill Boston 1986-09-11 2008-02-10 21 +drop table t1; +create table t1 ( +#linenum int(6) not null default 0 special=rowid, +name char(12) not null, +city char(11) not null, +birth date not null date_format='DD/MM/YYYY', +hired date not null date_format='DD/MM/YYYY' flag=36, +agehired int(3) as 
(floor(datediff(hired,birth)/365.25)), +index (agehired) +) +engine=CONNECT table_type=FIX file_name='boys.txt' mapped=YES lrecl=47 ending=1; +ERROR 42000: Table handler doesn't support NULL in given index. Please change column 'agehired' to be NOT NULL or use another handler diff --git a/storage/connect/mysql-test/connect/t/json_udf.test b/storage/connect/mysql-test/connect/t/json_udf.test index 35dbbfed706..d45131f32ba 100644 --- a/storage/connect/mysql-test/connect/t/json_udf.test +++ b/storage/connect/mysql-test/connect/t/json_udf.test @@ -29,12 +29,12 @@ SELECT Json_Make_Array(Json_Make_Array(56, 3.1416, 'foo'), TRUE); --error ER_CANT_INITIALIZE_UDF SELECT Json_Array_Add(Json_Make_Array(56, 3.1416, 'foo', NULL)) Array; SELECT Json_Array_Add(Json_Make_Array(56, 3.1416, 'foo', NULL), 'One more') Array; ---error ER_CANT_INITIALIZE_UDF +#--error ER_CANT_INITIALIZE_UDF SELECT Json_Array_Add(JsonValue('one value'), 'One more'); ---error ER_CANT_INITIALIZE_UDF +#--error ER_CANT_INITIALIZE_UDF SELECT Json_Array_Add('one value', 'One more'); SELECT Json_Array_Add('one value' json_, 'One more'); ---error ER_CANT_INITIALIZE_UDF +#--error ER_CANT_INITIALIZE_UDF SELECT Json_Array_Add(5 json_, 'One more'); SELECT Json_Array_Add('[5,3,8,7,9]' json_, 4, 0); SELECT Json_Array_Add('[5,3,8,7,9]' json_, 4, 2) Array; diff --git a/storage/connect/mysql-test/connect/t/vcol.test b/storage/connect/mysql-test/connect/t/vcol.test new file mode 100644 index 00000000000..cdf37175f41 --- /dev/null +++ b/storage/connect/mysql-test/connect/t/vcol.test @@ -0,0 +1,31 @@ +let datadir= `select @@datadir`; +--copy_file $MTR_SUITE_DIR/std_data/boys.txt $datadir/test/boys.txt + +create table t1 ( + #linenum int(6) not null default 0 special=rowid, + name char(12) not null, + city char(11) not null, + birth date not null date_format='DD/MM/YYYY', + hired date not null date_format='DD/MM/YYYY' flag=36, + agehired int(3) as (floor(datediff(hired,birth)/365.25)) + ) +engine=CONNECT table_type=FIX 
file_name='boys.txt' mapped=YES lrecl=47 ending=1; +select * from t1; +drop table t1; + +--error ER_NULL_COLUMN_IN_INDEX +create table t1 ( + #linenum int(6) not null default 0 special=rowid, + name char(12) not null, + city char(11) not null, + birth date not null date_format='DD/MM/YYYY', + hired date not null date_format='DD/MM/YYYY' flag=36, + agehired int(3) as (floor(datediff(hired,birth)/365.25)), + index (agehired) + ) +engine=CONNECT table_type=FIX file_name='boys.txt' mapped=YES lrecl=47 ending=1; + +# +# Clean up +# +--remove_file $datadir/test/boys.txt diff --git a/storage/connect/odbconn.cpp b/storage/connect/odbconn.cpp index cfd7546524e..f7b1a43a95d 100644 --- a/storage/connect/odbconn.cpp +++ b/storage/connect/odbconn.cpp @@ -2261,10 +2261,10 @@ public: return (SQLCHAR *) (m_part[i].length ? m_part[i].str : NULL); } // end of ptr - size_t length(uint i) + SQLSMALLINT length(uint i) { DBUG_ASSERT(i < max_parts); - return m_part[i].length; + return (SQLSMALLINT)m_part[i].length; } // end of length }; // end of class SQLQualifiedName @@ -2438,7 +2438,7 @@ int ODBConn::GetCatInfo(CATPARM *cap) else if (vlen[n] == SQL_NULL_DATA) pval[n]->SetNull(true); else if (crp->Type == TYPE_STRING/* && vlen[n] != SQL_NULL_DATA*/) - pval[n]->SetValue_char(pbuf[n], vlen[n]); + pval[n]->SetValue_char(pbuf[n], (int)vlen[n]); else pval[n]->SetNull(false); diff --git a/storage/connect/plgdbutl.cpp b/storage/connect/plgdbutl.cpp index f248e72be12..e296553d8e2 100644 --- a/storage/connect/plgdbutl.cpp +++ b/storage/connect/plgdbutl.cpp @@ -519,7 +519,7 @@ bool EvalLikePattern(LPCSTR sp, LPCSTR tp) { LPSTR p; char c; - int n; + ssize_t n; bool b, t = false; if (trace(2)) diff --git a/storage/connect/plugutil.cpp b/storage/connect/plugutil.cpp index 0e6caa953b1..887527e38ab 100644 --- a/storage/connect/plugutil.cpp +++ b/storage/connect/plugutil.cpp @@ -162,7 +162,7 @@ PGLOBAL PlugInit(LPCSTR Language, uint worksize) 
/*******************************************************************/ if (worksize && AllocSarea(g, worksize)) { char errmsg[MAX_STR]; - sprintf(errmsg, MSG(WORK_AREA), g->Message); + snprintf(errmsg, sizeof(errmsg) - 1, MSG(WORK_AREA), g->Message); strcpy(g->Message, errmsg); } // endif Sarea @@ -559,7 +559,7 @@ void *PlugSubAlloc(PGLOBAL g, void *memp, size_t size) if (trace(1)) htrc("PlugSubAlloc: %s\n", g->Message); - throw 1234; + throw 1234; } /* endif size OS32 code */ /*********************************************************************/ diff --git a/storage/connect/preparse.h b/storage/connect/preparse.h index f16624548fb..3db7a2af1cd 100644 --- a/storage/connect/preparse.h +++ b/storage/connect/preparse.h @@ -8,7 +8,7 @@ /***********************************************************************/ typedef struct _datpar { const char *Format; // Points to format to decode - char *Curp; // Points to current parsing position + const char *Curp; // Points to current parsing position char *InFmt; // Start of input format char *OutFmt; // Start of output format int Index[8]; // Indexes of date values diff --git a/storage/connect/rcmsg.c b/storage/connect/rcmsg.c index 75759e03314..895f8f5862b 100644 --- a/storage/connect/rcmsg.c +++ b/storage/connect/rcmsg.c @@ -27,9 +27,9 @@ char *msglang(void); -char *GetMsgid(int id) +const char *GetMsgid(int id) { - char *p = NULL; + const char *p = NULL; // This conditional until a real fix is found for MDEV-7304 #if defined(FRENCH) @@ -55,7 +55,8 @@ char *GetMsgid(int id) int GetRcString(int id, char *buf, int bufsize) { - char *p = NULL, msg[32]; + const char *p = NULL; + char msg[32]; if (!(p = GetMsgid(id))) { sprintf(msg, "ID=%d unknown", id); diff --git a/storage/connect/rcmsg.h b/storage/connect/rcmsg.h index b22e77f5175..499ca3b2dd4 100644 --- a/storage/connect/rcmsg.h +++ b/storage/connect/rcmsg.h @@ -5,7 +5,7 @@ extern "C" { #endif -char *GetMsgid(int id); +const char *GetMsgid(int id); int GetRcString(int id, char 
*buf, int bufsize); #ifdef __cplusplus diff --git a/storage/connect/reldef.cpp b/storage/connect/reldef.cpp index 072bd25c5a7..e4f169575f8 100644 --- a/storage/connect/reldef.cpp +++ b/storage/connect/reldef.cpp @@ -547,14 +547,12 @@ PTABDEF OEMDEF::GetXdef(PGLOBAL g) } // endif dladdr #endif // 0 - // Is the library already loaded? - if (!Hdll && !(Hdll = dlopen(soname, RTLD_NOLOAD))) - // Load the desired shared library - if (!(Hdll = dlopen(soname, RTLD_LAZY))) { - error = dlerror(); - sprintf(g->Message, MSG(SHARED_LIB_ERR), soname, SVP(error)); - return NULL; - } // endif Hdll + // Load the desired shared library + if (!Hdll && !(Hdll = dlopen(soname, RTLD_LAZY))) { + error = dlerror(); + sprintf(g->Message, MSG(SHARED_LIB_ERR), soname, SVP(error)); + return NULL; + } // endif Hdll // The exported name is always in uppercase for (int i = 0; ; i++) { diff --git a/storage/connect/tabdos.cpp b/storage/connect/tabdos.cpp index 6ead8e40cd9..29cbbb35765 100644 --- a/storage/connect/tabdos.cpp +++ b/storage/connect/tabdos.cpp @@ -1647,8 +1647,8 @@ int TDBDOS::TestBlock(PGLOBAL g) /***********************************************************************/ int TDBDOS::MakeIndex(PGLOBAL g, PIXDEF pxdf, bool add) { - int k, n, rc = RC_OK; - bool fixed, doit, sep, b = (pxdf != NULL); + int k, n, rc = RC_OK; + bool fixed, doit, sep, b = (pxdf != NULL); PCOL *keycols, colp; PIXDEF xdp, sxp = NULL; PKPDEF kdp; @@ -1694,8 +1694,8 @@ int TDBDOS::MakeIndex(PGLOBAL g, PIXDEF pxdf, bool add) try { // Allocate all columns that will be used by indexes. 
- // This must be done before opening the table so specific - // column initialization can be done (in particular by TDBVCT) + // This must be done before opening the table so specific + // column initialization can be done (in particular by TDBVCT) for (n = 0, xdp = pxdf; xdp; xdp = xdp->GetNext()) for (kdp = xdp->GetToKeyParts(); kdp; kdp = kdp->GetNext()) { if (!(colp = ColDB(g, kdp->GetName(), 0))) { @@ -2881,4 +2881,3 @@ bool DOSCOL::AddDistinctValue(PGLOBAL g) } // end of AddDistinctValue /* ------------------------------------------------------------------- */ - diff --git a/storage/connect/tabfmt.cpp b/storage/connect/tabfmt.cpp index 66339a49de2..63fa2a63668 100644 --- a/storage/connect/tabfmt.cpp +++ b/storage/connect/tabfmt.cpp @@ -934,7 +934,7 @@ int TDBCSV::ReadBuffer(PGLOBAL g) if (p) { //len = p++ - p2; - len = p - p2 - 1;; + len = (int)(p - p2 - 1); // if (Sep != ' ') // for (; *p == ' '; p++) ; // Skip blanks @@ -978,7 +978,7 @@ int TDBCSV::ReadBuffer(PGLOBAL g) return RC_NF; } else if ((p = strchr(p2, Sep))) - len = p - p2; + len = (int)(p - p2); else if (i == Fields - 1) len = strlen(p2); else if (Accept && Maxerr == 0) { @@ -996,7 +996,7 @@ int TDBCSV::ReadBuffer(PGLOBAL g) } else len = 0; - Offset[i] = p2 - To_Line; + Offset[i] = (int)(p2 - To_Line); if (Mode != MODE_UPDATE) Fldlen[i] = len; diff --git a/storage/connect/tabjson.cpp b/storage/connect/tabjson.cpp index 3acc2389975..b66682e0190 100644 --- a/storage/connect/tabjson.cpp +++ b/storage/connect/tabjson.cpp @@ -94,7 +94,7 @@ PQRYRES JSONColumns(PGLOBAL g, PCSZ db, PCSZ dsn, PTOS topt, bool info) return NULL; } // endif Multiple - pjdc = new(g) JSONDISC(g, (int*)length); + pjdc = new(g) JSONDISC(g, length); if (!(n = pjdc->GetColumns(g, db, dsn, topt))) return NULL; @@ -157,7 +157,7 @@ PQRYRES JSONColumns(PGLOBAL g, PCSZ db, PCSZ dsn, PTOS topt, bool info) /***********************************************************************/ /* Class used to get the columns of a JSON table. 
*/ /***********************************************************************/ -JSONDISC::JSONDISC(PGLOBAL g, int *lg) +JSONDISC::JSONDISC(PGLOBAL g, uint *lg) { length = lg; jcp = fjcp = pjcp = NULL; diff --git a/storage/connect/tabjson.h b/storage/connect/tabjson.h index fb0ee786f74..0341c0f8aa0 100644 --- a/storage/connect/tabjson.h +++ b/storage/connect/tabjson.h @@ -47,7 +47,7 @@ typedef struct _jncol { class JSONDISC : public BLOCK { public: // Constructor - JSONDISC(PGLOBAL g, int *lg); + JSONDISC(PGLOBAL g, uint *lg); // Functions int GetColumns(PGLOBAL g, PCSZ db, PCSZ dsn, PTOS topt); @@ -66,7 +66,7 @@ public: PJOB row; PCSZ sep; char colname[65], fmt[129], buf[16]; - int *length; + uint *length; int i, n, bf, ncol, lvl; bool all; }; // end of JSONDISC diff --git a/storage/connect/tabmac.cpp b/storage/connect/tabmac.cpp index a28b5d7108c..8260ab65391 100644 --- a/storage/connect/tabmac.cpp +++ b/storage/connect/tabmac.cpp @@ -367,13 +367,13 @@ void MACCOL::ReadColumn(PGLOBAL g) case 11: // Description if ((p = strstr(adp->Description, " - Packet Scheduler Miniport"))) { strncpy(buf, adp->Description, p - adp->Description); - i = p - adp->Description; + i = (int)(p - adp->Description); strncpy(buf, adp->Description, i); buf[i] = 0; p = buf; } else if ((p = strstr(adp->Description, " - Miniport d'ordonnancement de paquets"))) { - i = p - adp->Description; + i = (int)(p - adp->Description); strncpy(buf, adp->Description, i); buf[i] = 0; p = buf; diff --git a/storage/connect/tabmul.cpp b/storage/connect/tabmul.cpp index 1afd21db452..649fc6706c6 100644 --- a/storage/connect/tabmul.cpp +++ b/storage/connect/tabmul.cpp @@ -203,12 +203,12 @@ bool TDBMUL::InitFileNames(PGLOBAL g) // Data files can be imported from Windows (having CRLF) if (*p == '\n' || *p == '\r') { // is this enough for Unix ??? - *p--; // Eliminate ending CR or LF character + p--; // Eliminate ending CR or LF character if (p >= filename) // is this enough for Unix ??? 
if (*p == '\n' || *p == '\r') - *p--; // Eliminate ending CR or LF character + p--; // Eliminate ending CR or LF character } // endif p diff --git a/storage/connect/tabmysql.cpp b/storage/connect/tabmysql.cpp index a80abcdd19f..605b3822430 100644 --- a/storage/connect/tabmysql.cpp +++ b/storage/connect/tabmysql.cpp @@ -124,8 +124,8 @@ bool MYSQLDEF::GetServerInfo(PGLOBAL g, const char *server_name) DBUG_RETURN(true); } // endif server - DBUG_PRINT("info", ("get_server_by_name returned server at %lx", - (size_t) server)); + DBUG_PRINT("info", ("get_server_by_name returned server at %p", + server)); // TODO: We need to examine which of these can really be NULL Hostname = PlugDup(g, server->host); @@ -681,7 +681,7 @@ bool TDBMYSQL::MakeCommand(PGLOBAL g) strlwr(strcpy(name, Name)); // Not a keyword if ((p = strstr(qrystr, name))) { - Query->Set(Qrystr, p - qrystr); + Query->Set(Qrystr, (uint)(p - qrystr)); if (qtd && *(p-1) == ' ') { Query->Append('`'); diff --git a/storage/connect/tabmysql.h b/storage/connect/tabmysql.h index 39fba87bcc9..4b61c7eb762 100644 --- a/storage/connect/tabmysql.h +++ b/storage/connect/tabmysql.h @@ -135,7 +135,7 @@ class TDBMYSQL : public TDBEXT { int m_Rc; // Return code from command //int AftRows; // The number of affected rows int N; // The current table index - int Port; // MySQL port number (0 = default) + unsigned Port; // MySQL port number (0 = default) //int Nparm; // The number of statement parameters //int Quoted; // The identifier quoting level }; // end of class TDBMYSQL diff --git a/storage/connect/tabodbc.cpp b/storage/connect/tabodbc.cpp index 1f89fd7af9c..f7bc3934fbd 100644 --- a/storage/connect/tabodbc.cpp +++ b/storage/connect/tabodbc.cpp @@ -289,7 +289,7 @@ void TDBODBC::SetFile(PGLOBAL g, PCSZ fn) sprintf(Connect, MulConn, fn); } // endif MultConn - DBQ = (PSZ)fn; + DBQ = PlugDup(g, fn); } // end of SetFile /***********************************************************************/ diff --git a/storage/connect/tabtbl.cpp 
b/storage/connect/tabtbl.cpp index 53af28354e7..e194568ccf8 100644 --- a/storage/connect/tabtbl.cpp +++ b/storage/connect/tabtbl.cpp @@ -656,7 +656,7 @@ bool TDBTBM::IsLocal(PTABLE tbp) return ((!stricmp(tdbp->Host, "localhost") || !strcmp(tdbp->Host, "127.0.0.1")) && - tdbp->Port == (int)GetDefaultPort()); + (int) tdbp->Port == (int)GetDefaultPort()); } // end of IsLocal /***********************************************************************/ diff --git a/storage/connect/tabxml.cpp b/storage/connect/tabxml.cpp index 759bb370b43..c96e0844497 100644 --- a/storage/connect/tabxml.cpp +++ b/storage/connect/tabxml.cpp @@ -1319,7 +1319,7 @@ void TDBXML::CloseDB(PGLOBAL g) Docp->CloseDoc(g, To_Xb); // This causes a crash in Diagnostics_area::set_error_status -// throw (int)TYPE_AM_XML; +// throw (int)TYPE_AM_XML; } // endif DumpDoc } // endif Changed diff --git a/storage/connect/value.cpp b/storage/connect/value.cpp index 90c01f72b35..e159efaa989 100644 --- a/storage/connect/value.cpp +++ b/storage/connect/value.cpp @@ -1374,7 +1374,7 @@ bool TYPVAL::SetValue_char(const char *cp, int n) } else if (cp != Strp) { const char *p = cp + n - 1; - for (p; p >= cp; p--, n--) + for (; p >= cp; p--, n--) if (*p && *p != ' ') break; @@ -1747,7 +1747,7 @@ DECVAL::DECVAL(PSZ s) : TYPVAL(s) if (s) { char *p = strchr(Strp, '.'); - Prec = (p) ? Len - (p - Strp) : 0; + Prec = (p) ? 
(int)(Len - (p - Strp)) : 0; } // endif s Type = TYPE_DECIM; @@ -2656,7 +2656,7 @@ bool DTVAL::SetValue_char(const char *p, int n) // Trim trailing blanks for (p2 = p + n -1; p < p2 && *p2 == ' '; p2--); - if ((rc = (n = p2 - p + 1) > Len)) + if ((rc = (n = (int)(p2 - p + 1)) > Len)) n = Len; memcpy(Sdate, p, n); diff --git a/storage/connect/xobject.cpp b/storage/connect/xobject.cpp index 02d3e974dcc..c595ce5d6c4 100644 --- a/storage/connect/xobject.cpp +++ b/storage/connect/xobject.cpp @@ -204,7 +204,7 @@ STRING::STRING(PGLOBAL g, uint n, PCSZ str) *Strp = 0; Next = GetNext(); - Size = Next - Strp; + Size = (int)(Next - Strp); Trc = false; } else { // This should normally never happen @@ -239,7 +239,7 @@ char *STRING::Realloc(uint len) p = Strp; Next = GetNext(); - Size = Next - p; + Size = (int)(Next - p); return p; } // end of Realloc @@ -439,4 +439,3 @@ bool STRING::Resize(uint newsize) return newsize > Size; } // end of Resize - From 7bd95307f2aa901c141d4d6c4c568a2ed85ba01b Mon Sep 17 00:00:00 2001 From: Teemu Ollakka Date: Tue, 30 Jan 2018 05:37:22 -0800 Subject: [PATCH 009/139] Bump wsrep patch version to 25.23 --- cmake/wsrep.cmake | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmake/wsrep.cmake b/cmake/wsrep.cmake index cc6627d1eea..2ba32b469e0 100644 --- a/cmake/wsrep.cmake +++ b/cmake/wsrep.cmake @@ -18,7 +18,7 @@ # so WSREP_VERSION is produced regardless # Set the patch version -SET(WSREP_PATCH_VERSION "21") +SET(WSREP_PATCH_VERSION "23") # MariaDB addition: Revision number of the last revision merged from # codership branch visible in @@visible_comment. 
From 26e4a48bda8a09ac4ddef64e12841fbee29f4d7d Mon Sep 17 00:00:00 2001 From: Daniel Black Date: Fri, 2 Mar 2018 10:50:38 +1100 Subject: [PATCH 010/139] MDEV-8743: ib_logfile0 Use O_CLOEXEC so galera SST scripts don't get fd --- storage/innobase/os/os0file.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/storage/innobase/os/os0file.cc b/storage/innobase/os/os0file.cc index e946bcf5f30..df096dcc6fd 100644 --- a/storage/innobase/os/os0file.cc +++ b/storage/innobase/os/os0file.cc @@ -3578,7 +3578,7 @@ os_aio_native_aio_supported(void) strcpy(name + dirnamelen, "ib_logfile0"); - fd = ::open(name, O_RDONLY); + fd = ::open(name, O_RDONLY | O_CLOEXEC); if (fd == -1) { From 0f0776b2adf10f559bc5c2bbea95a4c5832a47f8 Mon Sep 17 00:00:00 2001 From: Daniele Sciascia Date: Thu, 8 Mar 2018 10:55:52 +0100 Subject: [PATCH 011/139] MDEV-13549 Fix and re-enable test galera.MW-286 This test failed to work properly because the fixes it came with were not merged from upstream. The test would fail with a spurious ER_LOCK_DEADLOCK error for a conflict that happened earlier in the test execution, while wsrep is disabled. 
The original fix was to set THD::wsrep_conflict_state only if wsrep is enabled (see wsrep_thd_set_conflict_state() in sql/wsrep_mysqld.cc) --- mysql-test/suite/galera/disabled.def | 1 - mysql-test/suite/galera/t/MW-286.test | 1 - sql/wsrep_mysqld.cc | 2 +- 3 files changed, 1 insertion(+), 3 deletions(-) diff --git a/mysql-test/suite/galera/disabled.def b/mysql-test/suite/galera/disabled.def index d82d5dd2023..d61da97355d 100644 --- a/mysql-test/suite/galera/disabled.def +++ b/mysql-test/suite/galera/disabled.def @@ -48,7 +48,6 @@ galera_toi_ddl_nonconflicting : MDEV-13549 Galera test failures galera_parallel_simple : MDEV-13549 Galera test failures galera_admin : MDEV-13549 Galera test failures galera_var_max_ws_rows : MDEV-13549 Galera test failures 10.1 -MW-286 : MDEV-13549 Galera test failures 10.1 galera_as_master: MDEV-13549 Galera test failures 10.1 galera_pc_ignore_sb : MDEV-13549 Galera test failures 10.1 galera_lock_table : MDEV-13549 Galera test failures 10.1 diff --git a/mysql-test/suite/galera/t/MW-286.test b/mysql-test/suite/galera/t/MW-286.test index 08deb317fbe..1b2e322f078 100644 --- a/mysql-test/suite/galera/t/MW-286.test +++ b/mysql-test/suite/galera/t/MW-286.test @@ -25,7 +25,6 @@ SET wsrep_on = FALSE; --error ER_QUERY_INTERRUPTED ALTER TABLE t1 ADD PRIMARY KEY (f1); -SET SESSION wsrep_sync_wait = 0; SET wsrep_on = TRUE; SET GLOBAL wsrep_desync = FALSE; diff --git a/sql/wsrep_mysqld.cc b/sql/wsrep_mysqld.cc index 285bb520b87..9d4037082bc 100644 --- a/sql/wsrep_mysqld.cc +++ b/sql/wsrep_mysqld.cc @@ -2336,7 +2336,7 @@ extern "C" void wsrep_thd_set_query_state( void wsrep_thd_set_conflict_state(THD *thd, enum wsrep_conflict_state state) { - thd->wsrep_conflict_state= state; + if (WSREP(thd)) thd->wsrep_conflict_state= state; } From a3ba3aab5ad62b03d58ab64b0e10679ae9f583c8 Mon Sep 17 00:00:00 2001 From: Daniele Sciascia Date: Thu, 8 Mar 2018 11:11:03 +0100 Subject: [PATCH 012/139] MDEV-13549 Wrong usage of mutex 'LOCK_wsrep_thd' and 'LOCK_thd_kill' 
test galera.MW-286 Test MW-286 occasionally failed with error the following message: ``` safe_mutex: Found wrong usage of mutex 'LOCK_wsrep_thd' and 'LOCK_thd_kill' Mutex currently locked (in reverse order): LOCK_thd_kill mariadb-server/sql/sql_class.h line 3535 LOCK_wsrep_thd mariadb-server/sql/wsrep_thd.cc line 88 ``` The fix consists in calling thd->reset_killed() in wsrep_mysql_parse() after LOCK_wsrep_thd is unlocked. Which avoids the taking locks LOCK_wsrep_thd and LOCK_thd_kill in reverse order. --- sql/sql_parse.cc | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc index 4dd8d9e124e..97d743d9a42 100644 --- a/sql/sql_parse.cc +++ b/sql/sql_parse.cc @@ -7193,7 +7193,6 @@ static void wsrep_mysql_parse(THD *thd, char *rawbuf, uint length, thd->wsrep_conflict_state == CERT_FAILURE) { thd->reset_for_next_command(); - thd->reset_killed(); if (is_autocommit && thd->lex->sql_command != SQLCOM_SELECT && (thd->wsrep_retry_counter < thd->variables.wsrep_retry_autocommit)) @@ -7221,17 +7220,18 @@ static void wsrep_mysql_parse(THD *thd, char *rawbuf, uint length, thd->thread_id, is_autocommit, thd->wsrep_retry_counter, thd->variables.wsrep_retry_autocommit, thd->query()); my_error(ER_LOCK_DEADLOCK, MYF(0), "wsrep aborted transaction"); - thd->reset_killed(); thd->wsrep_conflict_state= NO_CONFLICT; if (thd->wsrep_conflict_state != REPLAYING) thd->wsrep_retry_counter= 0; // reset } + mysql_mutex_unlock(&thd->LOCK_wsrep_thd); + thd->reset_killed(); } else { set_if_smaller(thd->wsrep_retry_counter, 0); // reset; eventually ok + mysql_mutex_unlock(&thd->LOCK_wsrep_thd); } - mysql_mutex_unlock(&thd->LOCK_wsrep_thd); } /* If retry is requested clean up explain structure */ From 90247658e066c3ae5da1fefb988f5564e24abc90 Mon Sep 17 00:00:00 2001 From: Jacob Mathew Date: Fri, 9 Mar 2018 19:14:20 -0800 Subject: [PATCH 013/139] MDEV-14019: Spider + binlog_format = ROW => CRASH The crash occurs when inserting into, updating or 
deleting from Spider system tables. These operations do not go through the normal insert, update or delete logic, so binary logging of the row is not properly set up and leads to the crash. The fix for this problem uses the same strategy as is used for the servers system table that contains entries for the servers created with CREATE SERVER. Binary logging is now temporarily disabled on insert, update and delete operations on Spider system tables. Author: Jacob Mathew. Reviewer: Kentoku Shiba. --- storage/spider/spd_sys_table.cc | 167 +++++++++++++++++++------------- 1 file changed, 97 insertions(+), 70 deletions(-) diff --git a/storage/spider/spd_sys_table.cc b/storage/spider/spd_sys_table.cc index 09dbeff328e..61f4fc104ba 100644 --- a/storage/spider/spd_sys_table.cc +++ b/storage/spider/spd_sys_table.cc @@ -35,6 +35,86 @@ extern handlerton *spider_hton_ptr; +/** + Insert a Spider system table row. + + @param table The spider system table. + @param do_handle_error TRUE if an error message should be printed + before returning. + + @return Error code returned by the write. +*/ + +inline int spider_write_sys_table_row(TABLE *table, bool do_handle_error = TRUE) +{ + int error_num; + THD *thd = table->in_use; + + tmp_disable_binlog(thd); /* Do not replicate the low-level changes. */ + error_num = table->file->ha_write_row(table->record[0]); + reenable_binlog(thd); + + if (error_num && do_handle_error) + table->file->print_error(error_num, MYF(0)); + + return error_num; +} + +/** + Update a Spider system table row. + + @param table The spider system table. + + @return Error code returned by the update. +*/ + +inline int spider_update_sys_table_row(TABLE *table) +{ + int error_num; + THD *thd = table->in_use; + + tmp_disable_binlog(thd); /* Do not replicate the low-level changes. 
*/ + error_num = table->file->ha_update_row(table->record[1], table->record[0]); + reenable_binlog(thd); + + if (error_num) + { + if (error_num == HA_ERR_RECORD_IS_THE_SAME) + error_num = 0; + else + table->file->print_error(error_num, MYF(0)); + } + + return error_num; +} + +/** + Delete a Spider system table row. + + @param table The spider system table. + @param record_number Location of the record: 0 or 1. + @param do_handle_error TRUE if an error message should be printed + before returning. + + @return Error code returned by the update. +*/ + +inline int spider_delete_sys_table_row(TABLE *table, int record_number = 0, + bool do_handle_error = TRUE) +{ + int error_num; + THD *thd = table->in_use; + + tmp_disable_binlog(thd); /* Do not replicate the low-level changes. */ + error_num = table->file->ha_delete_row(table->record[record_number]); + reenable_binlog(thd); + + if (error_num && do_handle_error) + table->file->print_error(error_num, MYF(0)); + + return error_num; +} + #if MYSQL_VERSION_ID < 50500 TABLE *spider_open_sys_table( THD *thd, @@ -983,11 +1063,8 @@ int spider_insert_xa( table->use_all_columns(); spider_store_xa_bqual_length(table, xid); spider_store_xa_status(table, status); - if ((error_num = table->file->ha_write_row(table->record[0]))) - { - table->file->print_error(error_num, MYF(0)); + if ((error_num = spider_write_sys_table_row(table))) DBUG_RETURN(error_num); - } } else { my_message(ER_SPIDER_XA_EXISTS_NUM, ER_SPIDER_XA_EXISTS_STR, MYF(0)); DBUG_RETURN(ER_SPIDER_XA_EXISTS_NUM); @@ -1017,11 +1094,8 @@ int spider_insert_xa_member( } table->use_all_columns(); spider_store_xa_member_info(table, xid, conn); - if ((error_num = table->file->ha_write_row(table->record[0]))) - { - table->file->print_error(error_num, MYF(0)); + if ((error_num = spider_write_sys_table_row(table))) DBUG_RETURN(error_num); - } } else { my_message(ER_SPIDER_XA_MEMBER_EXISTS_NUM, ER_SPIDER_XA_MEMBER_EXISTS_STR, MYF(0)); @@ -1051,11 +1125,8 @@ int spider_insert_tables( 
SPIDER_LINK_STATUS_NO_CHANGE ? share->alter_table.tmp_link_statuses[roop_count] : SPIDER_LINK_STATUS_OK); - if ((error_num = table->file->ha_write_row(table->record[0]))) - { - table->file->print_error(error_num, MYF(0)); + if ((error_num = spider_write_sys_table_row(table))) DBUG_RETURN(error_num); - } } DBUG_RETURN(0); @@ -1077,11 +1148,8 @@ int spider_log_tables_link_failed( if (table->field[3] == table->timestamp_field) table->timestamp_field->set_time(); #endif - if ((error_num = table->file->ha_write_row(table->record[0]))) - { - table->file->print_error(error_num, MYF(0)); + if ((error_num = spider_write_sys_table_row(table))) DBUG_RETURN(error_num); - } DBUG_RETURN(0); } @@ -1115,11 +1183,8 @@ int spider_log_xa_failed( if (table->field[20] == table->timestamp_field) table->timestamp_field->set_time(); #endif - if ((error_num = table->file->ha_write_row(table->record[0]))) - { - table->file->print_error(error_num, MYF(0)); + if ((error_num = spider_write_sys_table_row(table))) DBUG_RETURN(error_num); - } DBUG_RETURN(0); } @@ -1148,14 +1213,8 @@ int spider_update_xa( store_record(table, record[1]); table->use_all_columns(); spider_store_xa_status(table, status); - if ( - (error_num = table->file->ha_update_row( - table->record[1], table->record[0])) && - error_num != HA_ERR_RECORD_IS_THE_SAME - ) { - table->file->print_error(error_num, MYF(0)); + if ((error_num = spider_update_sys_table_row(table))) DBUG_RETURN(error_num); - } } DBUG_RETURN(0); @@ -1188,14 +1247,8 @@ int spider_update_tables_name( store_record(table, record[1]); table->use_all_columns(); spider_store_tables_name(table, to, strlen(to)); - if ( - (error_num = table->file->ha_update_row( - table->record[1], table->record[0])) && - error_num != HA_ERR_RECORD_IS_THE_SAME - ) { - table->file->print_error(error_num, MYF(0)); + if ((error_num = spider_update_sys_table_row(table))) DBUG_RETURN(error_num); - } } roop_count++; } @@ -1239,11 +1292,8 @@ int spider_update_tables_priority( 
SPIDER_LINK_STATUS_NO_CHANGE ? alter_table->tmp_link_statuses[roop_count] : SPIDER_LINK_STATUS_OK); - if ((error_num = table->file->ha_write_row(table->record[0]))) - { - table->file->print_error(error_num, MYF(0)); + if ((error_num = spider_write_sys_table_row(table))) DBUG_RETURN(error_num); - } roop_count++; } while (roop_count < (int) alter_table->all_link_count); DBUG_RETURN(0); @@ -1259,14 +1309,8 @@ int spider_update_tables_priority( spider_store_tables_connect_info(table, alter_table, roop_count); spider_store_tables_link_status(table, alter_table->tmp_link_statuses[roop_count]); - if ( - (error_num = table->file->ha_update_row( - table->record[1], table->record[0])) && - error_num != HA_ERR_RECORD_IS_THE_SAME - ) { - table->file->print_error(error_num, MYF(0)); + if ((error_num = spider_update_sys_table_row(table))) DBUG_RETURN(error_num); - } } } while (TRUE) @@ -1284,11 +1328,8 @@ int spider_update_tables_priority( table->file->print_error(error_num, MYF(0)); DBUG_RETURN(error_num); } - if ((error_num = table->file->ha_delete_row(table->record[0]))) - { - table->file->print_error(error_num, MYF(0)); + if ((error_num = spider_delete_sys_table_row(table))) DBUG_RETURN(error_num); - } } roop_count++; } @@ -1324,14 +1365,8 @@ int spider_update_tables_link_status( store_record(table, record[1]); table->use_all_columns(); spider_store_tables_link_status(table, link_status); - if ( - (error_num = table->file->ha_update_row( - table->record[1], table->record[0])) && - error_num != HA_ERR_RECORD_IS_THE_SAME - ) { - table->file->print_error(error_num, MYF(0)); + if ((error_num = spider_update_sys_table_row(table))) DBUG_RETURN(error_num); - } } DBUG_RETURN(0); @@ -1358,11 +1393,8 @@ int spider_delete_xa( MYF(0)); DBUG_RETURN(ER_SPIDER_XA_NOT_EXISTS_NUM); } else { - if ((error_num = table->file->ha_delete_row(table->record[0]))) - { - table->file->print_error(error_num, MYF(0)); + if ((error_num = spider_delete_sys_table_row(table))) DBUG_RETURN(error_num); - } } 
DBUG_RETURN(0); @@ -1389,7 +1421,7 @@ int spider_delete_xa_member( DBUG_RETURN(0); } else { do { - if ((error_num = table->file->ha_delete_row(table->record[0]))) + if ((error_num = spider_delete_sys_table_row(table, 0, FALSE))) { spider_sys_index_end(table); table->file->print_error(error_num, MYF(0)); @@ -1424,11 +1456,8 @@ int spider_delete_tables( if ((error_num = spider_check_sys_table(table, table_key))) break; else { - if ((error_num = table->file->ha_delete_row(table->record[0]))) - { - table->file->print_error(error_num, MYF(0)); + if ((error_num = spider_delete_sys_table_row(table))) DBUG_RETURN(error_num); - } } roop_count++; } @@ -2305,7 +2334,7 @@ int spider_sys_replace( char table_key[MAX_KEY_LENGTH]; DBUG_ENTER("spider_sys_replace"); - while ((error_num = table->file->ha_write_row(table->record[0]))) + while ((error_num = spider_write_sys_table_row(table, FALSE))) { if ( table->file->is_fatal_error(error_num, HA_CHECK_DUP) || @@ -2357,13 +2386,11 @@ int spider_sys_replace( last_uniq_key && !table->file->referenced_by_foreign_key() ) { - error_num = table->file->ha_update_row(table->record[1], - table->record[0]); - if (error_num && error_num != HA_ERR_RECORD_IS_THE_SAME) + if ((error_num = spider_update_sys_table_row(table))) goto error; DBUG_RETURN(0); } else { - if ((error_num = table->file->ha_delete_row(table->record[1]))) + if ((error_num = spider_delete_sys_table_row(table, 1, FALSE))) goto error; *modified_non_trans_table = TRUE; } From 926edd48e1e67bf9a315b3602638a76c4c445ef6 Mon Sep 17 00:00:00 2001 From: Varun Gupta Date: Tue, 6 Mar 2018 19:59:57 +0530 Subject: [PATCH 014/139] MDEV-15235: Assertion `length > 0' failed in create_ref_for_key The issue is that we are creating a materialised table with key of length 0 which is incorrect, we should disable materialisation for such a case. 
--- mysql-test/r/subselect_mat.result | 15 +++++++++++++++ mysql-test/t/subselect_mat.test | 13 +++++++++++++ sql/opt_subselect.cc | 4 +++- 3 files changed, 31 insertions(+), 1 deletion(-) diff --git a/mysql-test/r/subselect_mat.result b/mysql-test/r/subselect_mat.result index d4dc519227b..00448ac4f91 100644 --- a/mysql-test/r/subselect_mat.result +++ b/mysql-test/r/subselect_mat.result @@ -2642,3 +2642,18 @@ a b sq 4 4 1 4 2 1 drop table t1, t2; +# +# MDEV-15235: Assertion `length > 0' failed in create_ref_for_key +# +CREATE TABLE t1 (i INT); +INSERT INTO t1 VALUES (1),(2); +CREATE TABLE t2 (f CHAR(1)); +INSERT INTO t2 VALUES ('a'),('b'); +explain +SELECT * FROM t2 WHERE f IN ( SELECT LEFT('foo',0) FROM t1 ORDER BY 1 ); +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY t2 ALL NULL NULL NULL NULL 2 Using where +2 DEPENDENT SUBQUERY t1 ALL NULL NULL NULL NULL 2 +SELECT * FROM t2 WHERE f IN ( SELECT LEFT('foo',0) FROM t1 ORDER BY 1 ); +f +DROP TABLE t1, t2; diff --git a/mysql-test/t/subselect_mat.test b/mysql-test/t/subselect_mat.test index 09c6b3e1747..5211f35b48b 100644 --- a/mysql-test/t/subselect_mat.test +++ b/mysql-test/t/subselect_mat.test @@ -254,3 +254,16 @@ SELECT a, b, (a, b) NOT IN (SELECT a, b FROM t2) as sq FROM t1; drop table t1, t2; + +--echo # +--echo # MDEV-15235: Assertion `length > 0' failed in create_ref_for_key +--echo # + +CREATE TABLE t1 (i INT); +INSERT INTO t1 VALUES (1),(2); +CREATE TABLE t2 (f CHAR(1)); +INSERT INTO t2 VALUES ('a'),('b'); +explain +SELECT * FROM t2 WHERE f IN ( SELECT LEFT('foo',0) FROM t1 ORDER BY 1 ); +SELECT * FROM t2 WHERE f IN ( SELECT LEFT('foo',0) FROM t1 ORDER BY 1 ); +DROP TABLE t1, t2; diff --git a/sql/opt_subselect.cc b/sql/opt_subselect.cc index 028bf44bf79..24f35a0c14c 100644 --- a/sql/opt_subselect.cc +++ b/sql/opt_subselect.cc @@ -873,8 +873,10 @@ bool subquery_types_allow_materialization(Item_in_subselect *in_subs) Make sure that create_tmp_table will not fail due to too long 
keys. See MDEV-7122. This check is performed inside create_tmp_table also and we must do it so that we know the table has keys created. + Make sure that the length of the key for the temp_table is atleast + greater than 0. */ - if (total_key_length > tmp_table_max_key_length() || + if (!total_key_length || total_key_length > tmp_table_max_key_length() || elements > tmp_table_max_key_parts()) DBUG_RETURN(FALSE); From c439fdc953e9e4f48290c7fc26cee45392475645 Mon Sep 17 00:00:00 2001 From: sjaakola Date: Sun, 11 Mar 2018 16:50:37 +0200 Subject: [PATCH 015/139] MDEV-15540 The error log redirection for wsrep_recover run does not work in old version. For the wsrep_recovery run, error logging is supposed to go into: mysql-test/suite/galera/include/galera_wsrep_recover.inc In old version, this works only partially, 4 first lines of error messages after mysql startup do go into the galera_wsrep_recover.log, but after that the default error log file is enforced and remaining error logging goes into the default error log file. In this patch this problem is fixed by passing --log-error option in mysql startup This fix was tested with galera_gcache_recover test, which is currently in disabled state. Note that the test does not pass even after this fix, as there are further more issues in later test phases. --- mysql-test/suite/galera/include/galera_wsrep_recover.inc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mysql-test/suite/galera/include/galera_wsrep_recover.inc b/mysql-test/suite/galera/include/galera_wsrep_recover.inc index 090ffe5f5df..d2956ea99e6 100644 --- a/mysql-test/suite/galera/include/galera_wsrep_recover.inc +++ b/mysql-test/suite/galera/include/galera_wsrep_recover.inc @@ -1,5 +1,5 @@ --echo Performing --wsrep-recover ... 
---exec $MYSQLD --defaults-group-suffix=.$galera_wsrep_recover_server_id --defaults-file=$MYSQLTEST_VARDIR/my.cnf --innodb --wsrep-recover > $MYSQL_TMP_DIR/galera_wsrep_recover.log 2>&1 +--exec $MYSQLD --defaults-group-suffix=.$galera_wsrep_recover_server_id --defaults-file=$MYSQLTEST_VARDIR/my.cnf --log-error=$MYSQL_TMP_DIR/galera_wsrep_recover.log --innodb --wsrep-recover > $MYSQL_TMP_DIR/galera_wsrep_recover.log 2>&1 --perl use strict; From f9cf2df077f0f680f807c00b142971601ca4bb6f Mon Sep 17 00:00:00 2001 From: Olivier Bertrand Date: Sun, 11 Mar 2018 23:46:33 +0100 Subject: [PATCH 016/139] - Fix MDEV-15429 CONNECT engine JDBC handling Postgresql UUID type Also handle Postgresql sending type VARCHAR for TEXT column and setting length to b x7FFFFFF when the length is unknown. modified: storage/connect/Client.java modified: storage/connect/JavaWrappers.jar modified: storage/connect/JdbcInterface.java modified: storage/connect/PostgresqlInterface.java modified: storage/connect/global.h modified: storage/connect/ha_connect.cc modified: storage/connect/jdbconn.cpp modified: storage/connect/jdbconn.h modified: storage/connect/mysql-test/connect/r/jdbc_postgresql.result modified: storage/connect/mysql-test/connect/t/jdbc_postgresql.test modified: storage/connect/mysql-test/connect/t/jdbconn.inc modified: storage/connect/plgdbsem.h modified: storage/connect/tabjdbc.cpp modified: storage/connect/tabjdbc.h --- storage/connect/Client.java | 27 +- storage/connect/JavaWrappers.jar | Bin 44053 -> 19192 bytes storage/connect/JdbcInterface.java | 54 ++- storage/connect/PostgresqlInterface.java | 5 +- storage/connect/global.h | 6 +- storage/connect/ha_connect.cc | 21 +- storage/connect/jdbconn.cpp | 339 ++++++++++++------ storage/connect/jdbconn.h | 11 +- .../connect/r/jdbc_postgresql.result | 35 +- .../connect/std_data/JavaWrappers.jar | Bin 0 -> 19192 bytes .../mysql-test/connect/t/jdbc_postgresql.test | 33 +- .../connect/mysql-test/connect/t/jdbconn.inc | 7 +- 
storage/connect/plgdbsem.h | 3 +- storage/connect/tabjdbc.cpp | 11 +- storage/connect/tabjdbc.h | 2 + 15 files changed, 383 insertions(+), 171 deletions(-) create mode 100644 storage/connect/mysql-test/connect/std_data/JavaWrappers.jar diff --git a/storage/connect/Client.java b/storage/connect/Client.java index aaf1b7bf2f8..afa54fa4256 100644 --- a/storage/connect/Client.java +++ b/storage/connect/Client.java @@ -1,9 +1,13 @@ + package wrappers; import java.io.BufferedReader; import java.io.Console; import java.io.IOException; import java.io.InputStreamReader; +import java.sql.Date; +import java.sql.Time; +import java.sql.Timestamp; public class Client { static boolean DEBUG = true; @@ -58,6 +62,9 @@ public class Client { String query; System.out.println("Successfully connected to " + parms[1]); + s = jdi.GetQuoteString(); + System.out.println("Qstr = '" + s + "'"); + while ((query = getLine("Query: ", false)) != null) { n = jdi.Execute(query); System.out.println("Returned n = " + n); @@ -79,7 +86,11 @@ public class Client { private static void PrintResult(int ncol) { // Get result set meta data int i; + Date date = new Date(0); + Time time = new Time(0); + Timestamp tsp = new Timestamp(0); String columnName; + Object job; // Get the column names; column indices start from 1 for (i = 1; i <= ncol; i++) { @@ -112,6 +123,7 @@ public class Client { case java.sql.Types.VARCHAR: case java.sql.Types.LONGVARCHAR: case java.sql.Types.CHAR: + case 1111: System.out.print(jdi.StringField(i, null)); break; case java.sql.Types.INTEGER: @@ -120,14 +132,17 @@ public class Client { case java.sql.Types.BIGINT: System.out.print(jdi.BigintField(i, null)); break; - case java.sql.Types.TIMESTAMP: - System.out.print(jdi.TimestampField(i, null)); - break; case java.sql.Types.TIME: - System.out.print(jdi.TimeField(i, null)); + time.setTime((long)jdi.TimeField(i, null) * 1000); + System.out.print(time); break; case java.sql.Types.DATE: - System.out.print(jdi.DateField(i, null)); + 
date.setTime((long)jdi.DateField(i, null) * 1000); + System.out.print(date); + break; + case java.sql.Types.TIMESTAMP: + tsp.setTime((long)jdi.TimestampField(i, null) * 1000); + System.out.print(tsp); break; case java.sql.Types.SMALLINT: System.out.print(jdi.IntField(i, null)); @@ -141,6 +156,8 @@ public class Client { case java.sql.Types.BOOLEAN: System.out.print(jdi.BooleanField(i, null)); default: + job = jdi.ObjectField(i, null); + System.out.print(job.toString()); break; } // endswitch Type diff --git a/storage/connect/JavaWrappers.jar b/storage/connect/JavaWrappers.jar index ef407f6a9c2d1e0ad1730c4b7194b012572ba8ce..33b29e7685b66ee03d1b3f5921f2ba32d48f76b3 100644 GIT binary patch delta 13062 zcmZ8|1yCMKvo=m}65K<8H)w$H;_eXK-QC?~gS)%C1oz-B!QI{6-EYqS)qn4~x3{-; zXZD$?>7J^t?U`p17N8yyAW>x{pkXi}K79B9Q4wz@8H1_?Q}oZ%1`KE@D5zk*vG|`v z6Gq!JQ;U@+p_j>-$AChWh5AHDdy-9lt~w3OOZpMG4Jx#p zALb177)_;g4`iw|K>(QggUt?H$of-^UKJ7QhP(K1DgZGR%+yn(VISx4Y z-J!G&gEdKs{jfzg$M=pFl=xZN8M9|?bmNMZ?L3teE&ig5Z zc&YR-pvD-jeI8ytrW-fH9;9ZzC=uJ@_K~J&PtnBbTCcIlLSClRfKeB`iMyn^yj+Vk*r^gQ9W&W{)9~ql5!BAoNaj4L7 zzDB-@a60*OQf$eB7}2-)5G@$82R=c3lSLEBVRbjFxq`3CjIyofq4DNr4)}$( znKon+ZuJJ*&A44)WAbkmw_PIjfohsx7~Q!oT$_Ky@SVG7{fM8`GLSm;XfM-4RXJc! zqS}FJXy0oF%u}M6J$+?TrWN3xD>ry?+apu4NN}FlDcyORD@grlC$-+KP1-yV=O!4U zxV{e6ZANfPca73)I6V?|q%gmLbkD_;CFM{KJ)27s8k-X{B8)w2DKZ*o=G@k2##;8Q zP@%hsKeVU~G?dLAPHZr|^ivGTPnSjN2}UNb9=f9d8SaT4sXND>TKB%Bet|_{-Y-^AhObga=3l1wdYI}#D=}g{r-WQ`U zf81CCM24L+IVbvb0#@40wfL1=MuR_jAl=iU)4P`Q3w&5VRkqY{qIm;pG8xfC^!Rjj z%GQXJ`+cF>ra5>ejtM%jS=SPwCAKnOSLyc-#ImB^-XOtkN=N4DIZzF26A^7g z^OWp&+S&x&^1y4Fd@Og;FW$=*4`=()&-2XVk@Ehs>vVt_`xX^-QzNp!MQoE8bsL@# z$Vrl;oxZ{$?12t{9LzGoOX)GcQqj%XNOFf2UhptTFD=@ zA_s3Y9cUeIIIF02)&qD-)Lh;fL=q48eS#5+SI?K71gj?Wzh8hE^p4MKQ3YAlMi$Se7c; z{+d;%QOfsKvN)1g=Hyq9J)UcxnVVZuU#^@}uTN07sxMccdT&j!WKjRQKCCqcjB`J? 
z-KT7$k-*v}rW<4@WuJpMXI3m1L#|3pdqXKu8ew6N0YL)6-AUQ9YR}2kTV*z?3&ruw zdd~_z0eF^F|KY(T#3P=<-efjp8|t*b*;V*KcZN4Ggs#uRiXr7ZImO6l2wb@ zb5v6l(7>FPxccov_c3hpXOa|UXZi4HtRUVr?M`NYqjVcz{y z#e*M%2Y484Vfj>xU6?wjdRH@nuSHNC5hOT?Mar4GePXCxK$c4K4Vjq{7t|+F5k{^! zj}b1Uv?7%wCdrU(sAR<|7w1eq$5!tc;KSs8w1NJD8}bP{?@x7D{+(H!B}M^dUO=j# zbN5zyb`?4NK0dU8>6WycF-uVQahWv1Gb8@CBJg1UL$yX8Hia+Fu^Y!y+%ni%FlvC! zFXyK&jzGjzRUyHiWS`_&`{5T(%t+^T$5;Vs7(Y(ZudwN|S=Q-BxrnolUU>>wJ)K(GDjISpeO&IYZUQOp zjDXID4&e`SmcCOsqWRe8bRomi0>1z#2eXAp(L8({>KBC5J$(5%CJ2rrcsq7I6A3-9_269ELaiNRH!D- z4fvsPcGRvP@xi8LG6>CBp{C$!RLOHLC#^Dm1{R!~{9k3r3JHA_pj>ldo9}Be=!{^3 zV5T$&QzL!9P($#sBDio*B3X<&=%q>7UYt6!?z?B}f>?#bC-D|n^-D^6mRgYrdEiAE zBgtGHua;F~Q*r8QOTw4ON%_0cK(S}IE!N5R*;D&|`qdjee8nw93>E#f&VkqCG9l_z zM{2c*{zD~DML)A+d7c2g5_d;;SY-|2h-M3mfdgkdRaZva13raDY|RNBzEMJBSMPj+ zM?0LT6M4*SEY7-anNvjADe9xsB)}@QQU;QXFbi{7h+!}W*>>{L^c^#1i3O&n zzKU)CHzmPCrCW|x;gxF`eX>PCGY>5l} zh`*c$d532?e%Ed&Se)`J0zmVZ8-2B!$|b{@td|O{ovtJ3`$A==esg1*$My-;KVDXP z!P11YkfZi72=o>JTpuK3?MsnkK1*WLB=m_6 zu+_ijjb{rd1}iTCCnoYQy7xz|DdNMS>vFYshU_Sn=SGwoFI^mN}fNK_&zlg zmz_Atsrh|!#9YC{9>|V6KyyK|ntyso%|uuxjlPL#58R1+MZf_oAnsr;1<$tSY`lki;oHG3otKR+_w7vG_?L2&|F{V}1`k%n5!IC`{3>*+tu+gVWorThpNP9Gb zI`|reP8;oFD;wbUm55=LqoVUb;E$fA4SyB1ixryY21i1Bd-uKLv$x6UZ30mz0Rrt2 zOoOga;zp-6M>tVFyt91aw+YCnD&9bbFV2jA0I)u)KJq&}F8eDkSpf2Hg^&-Qn1V0m z{XgP{y7kf*&C_or40q$T3P%bd=2fLsHl0%TFz-}D8`#nGT(u+zdwEK6``2EujrCu9 zvb6Ok1JL^$xP@Xik_kSZ#3=nu_AJIj;E`B&9#3V1235~N&7rR&O2sk9c9|=Ek6xoHN$qH;Hpxa4h zz;QwqsJW36>FJtpaOYigB_5F2ad&qYwYb*Pka5~z>9e>OFCWaYIBFwWco4i!Ws~AY zw(MS8o}#7FSsi#y?M;8BFMFF|JY`w zTVQ74d^x>)d+;KU;6-O-iI~>_Y#)c7PVUM*Ui30W&B4N&2jkkf=2C%PLvuV)*(92h0`u6`>)oCuM1tZ zXDn{WEjtJ+-{|$DT0=Q^q{x%=?*G77?Xoe+WN^@93`wIx3+^(14RX6oKAsq#~Od0BCQcZEP-U*LK(+IUoa06<70`YUevi{TjNy{xJcikaVpN zSVcbJ&SPO>BIAP_v$JL1wY`m%u}YXyDB^Ja+;14$;kg1Bg*iW3_ShyaK^j$%*%tI! 
zCUp8@OCF|D?NpUX`QBQ|D+02CbwgFk>;|M>N+R*tzfPAyAq`JttKPgvO z|H*#h^+8CVeCF+z(AUm;xj1Odqt6!Cq=}jEYEIKPU!vf-MzBPi_CC%fh#t0mB|@5G z{~cW=na#a%1L9!yCoKy$v9G{kcf7%Nyi?AWX<)?&Xx+~craCfR^iZo@jii%^QAw|y z9hW+;z7UTNkQrJwqbE%oV}l*$6(q>nb!Cqke24xgspzPWpoK(+fQZ3@_`f;D3jqf} zPD<)eyHRCAaqe_a4#cDf();+kDA>v3Z?16TR6U!ENoO7*WPYkY z`BXf+G*{tVseW3jRUdlww%+7&wjvJtdAhB#zvhv6{_?)=-0ooWv;r^V84+Aq;zcYk zDd^Jol9AD++G=ffoXBU8PlLOBga!j}?-&oQ@2wwJB0ZB?ISv1|M1pHh)-1Ps!Dc?p zJgbmxJQDy~+lD(R2+K@{%n|B^U$j=MKTpFR_K#Tq2(>&64ftYp+ic{b9w z0j3ZyvfwYP z%{7!O26G?lCIpe>J?o8R2tf?RKG^(LlV|xofw@%*lvA)O2v3}RRtU`Q2LgrN;4KvO zA2^Igze9w#>;Qrpw*1_hjsc_T@9Jkcd;ZBYO32O0$_#EC1yr=&YLhxbyZBvG`V-WD zqZ1bMa!yD#N;{fcAwO_Y-+5>BCOzD*CJ~G8&_#@1SH+FH(C%)8G5r88E$o5pzQebp z*FprkJ#{)sLWEj8HGJE}T-a!JS;J(?E2)z(y}>&=PZXa+BSpES{3mZX zHxe5+QvVxyI12P=46;*a_H7+$8jo<+-2_ndFjL~(s|b|xXXT7TDQ0tG3zjewJc|@9 zmR!$s!+B-4|8(V}J%-gA3VoyWpYwwAia}-E1qC8rekRP$e}}0p)&9w&JE8aVY2|yQ znH;W-%hAbHC?rUw;&RH*w(E?|@}z)%dv3m|6wBQ;nfP{sY#}G^IsVSy=ri zguw)Vnl>=8w3eKhp^%Pd8pqR8knO6e%+asNUPbXday5lfW3Y_EQw`~PCjO94kDK)4 zGo}q%!`_6#4GNkUcMO|CAMTCR6ZTFWg^N((CRT#U!tXw}cB=^lceroCmJ7Kqhk97S zZH@#@$nTN-4u?=^uV`_VQ30uWT48ysG0L1I?gv0im96|lD5yok9Ew+-zCvR=XL$2r zC0G$3tv7eGDkW2iwi8@ZNpPbfbQH)Ih{IZ-RjeI8T({n?LoOcQ9LLvht&M)laSTvTm*n9DItH7iIg&bJ>1H zFe;F4*LnlbcboI~Ka{y@K>dN)Pdf=VGy{oN67_nDV8TVvwvj#lZv(%DTv)s&I z>)&%-a+0NM4;&7FtrS0Ut%L)CAj`e23gG*j{=>)I_FTr*yL7>#6z+>})I_SqU7>5D z+~_jFy$|SbelEracqepw_;_2i>iP2@*dzs!d3mqF=nF&N zW{7PKNkk+8E7G8cf<1pyI2=1j{ZAOFx%lJq3U6DsGg*Y~3xwL{ws>evyT_2Y9RSul z9m~9Furpcqi8#t$ehggH*0to^-iMLS!cG*1csA=|L3tU`B6d4ffsS1MmhmaufUX3t zF~`p>Xu0w-FoSA;YT#A+aO|D@w4>ma-6t6zixjPy7b;Y@Uw?x0rD?EtC_SgE7 zvzT{e^rq-nbm-V;qFcCTPsbYxk9u zAEmq}T$`fY+Q;M4X%!x!dcDxg#|YP{xuxYvy z3C3GAy^yEw!G}Eb*~*7A|Ii1$XT7m&H1Q$;kfS396(ELu)g;;+8%AkY1py{-HxTrd z{^~{uczDxpeen70t)T?mtxG9KCuQ5a^6LEf_iwQpr}W?uPhOmk_ zCeU9mqo7hu6(V=W+tx`9l>>#k`Q@cr{g$k3v<88x1|xnBN72Dua(MEFsFPa?q9+V& zd+7RK#8LVAI_d2B?S)+wok{J)QhrZwXxT^N1-eC)%Nm$kP~tiucVqt+jGg=>exI88 zV3L}WI6RUEE#` 
zoTy^+lAP}@>m6TyLMCB-kWPeJM?8dV7`**{7YDbVv0)k5zIU?gm)w!d8Jq_%B;m!o zW)fnNu^V=<%4QIBlLojmGKTh!<9VV1Yb%#lJ6jxPcy5o{Gek6}FdG*4176y1UkDMZ zX;_MbRRofSHOQ{6p#MONpK(ei*E}R4^K?_rMr0d!K0%*C*vhWpeEs#bwd%w7cda@U z8Ij!&n>{W+N$wF%V(Vb8T$)WMX6H$U{+_REk1(r1!7`o3?g}vL#}jI8oZtAmqqrZL z4VgNN6_XM>b(>D*i@iXDV!Y6e{e(FERKNhpFh-MdzUzdGDU-mr>`=Wyn5y zQoHUAlZQ0vB%3Q6gg~n%V~UjaF*8ivcY_P@r=k;|9I;Y%(H$O}i~EHvwnuV`CNlhD ziZ)VQ(Izd;$wq*b$U5d4+7F<#?@g(~ffcYD+&OSv+#rz%O2^iK@)mENzm!3FO92}P zZPo0iP_$VTk$BDoiY(I9@AT#9HY%+(BTN5jV4(11!T#k@;A$6@jAbL(E zr9WfZO=pcbH-7r3;T;Kzx8MiEhQm)Xsc-%uIon9JdO&SqTEp;{L_jdwpcKIz-x$|_zNG`W!-G?1-0XN6VStvoVU5~@I*X9$V;qqR!J9= zl{dDr%f?UDW_d`x-NbujfoPHsw5R%Q79=D^WtYdvR4~oN2PI^bR4y^k^sPp9X9ri+ zIQr098K|eRIB4fO94664iSOz6H3bvih>kUe3N_*D7k>${Ulc4CpVFiq{j;yOkmrk_ zBUcbet+4xNs#_z;5*91s7yNm4vT{VU4`s7x7CXYx<)V2${;6Ev1`iccAU;`te0_fI z*yq>r+tk)&=>&-6@7;lF0l!L`>DlmdyJmmf`vbMU)7!PPLXfP32+|z#d2JEATOH7HIL+dlR}uehSB}-~ zbod(384!*v^(Cl+!iKz;C-H_&o~0^-X3oBnVR+QmGDyNFW>fQ4#ToMK$V-Lm)-H~L zg5EPW=Byj#Nd0X`_KjK&ik;_yPu%4Qf^6fl>cR`oBhr{EQeGcsB?_e<4FgVBo`8JD z=BVt&SI}@!x8-%-=2rCEOTyKYr*C*s3{1Ef_2N!KrLpVPhWKX4jzgj}8~faBHvdzq zgxgc9jNfhjPsCfN4_JrCvea|)dWyd52+>MEjG{Foi>e!kI*`5N19{7mIM6y$nw$JR zn*%PuQ<>kf=%&)tcHPrODE1AHV}NbQ7J+f55jj=;bPBz2icRbA`09NXZt3k(%_zBo z7ey7U86FWQr59YO*et3mCMj>@KjV^*zbdJF4t7%GsuAU|ntZ+?)i{{3nMOfeU0)pM zO{&)WipbHOffSfWv+*OWiksR4q|WxDTMe9sX_S$SItA3!Z?Q_a`22`-+<;l`y2hZ? 
z5?d{VV&9$uJ4{`W98oy)N0H?4+ak6(s``1Vg4sH_tNEi8F5KTq6T)-85p%wNRk~Vn zzaozESmrfPai??EuQ#EXh_2w(u9$R}oLoN{Gb=+>`R#29{*y@DG>` zHv39NO&iH5;XE*TP)=wbrvjSjM>(hPl%;dr?2P2pg67h)j7#Pd9uwy4q^C-zw86)7 zP)(zb)f71UfrOg+w`+%r}d=@byEy4VY8Y92F4*hg0 zf5)2j4Ac1c1aUKA&Ajh<(a?qVIR^>f<7WP!5d$H2H4LudtU>fj;=HQ3I{E-Ne zR&P>sVT;srVpsdXVTlv$JeTymatr8w!in3_PgTQZKf7gv%vER}A+-#C#SuL-jpH%T z3$*=)yV}q&Ufg0Bzg!JN+!%DmBLZLQe&E3EXoI$@li?G9e_4u?Y7&Z@gl;D!B)0AZ zi?&(B!K8%WJmDA~`2|Qu?aKT6=1yKym&}O3*oN*`D9-JuDdfdWC8X|gXd(G2L}|Cj z+o3tb)K~5=lQgFQF@JtZ=RiSJrzxsaAg?j<>$JlYN0ju@u{~S< zAFq(WcG2gMA zd0x^6PDg>&4g8X{jo!>M=B8eq-+(;Myd0}cK?YZtv)=7$>(9H307rK8lU*!F*vbML zH)yTASL*^pwRZ5POQQ!dPyf@I$BnN){+TxUqsUHo*?Hb5%Awkaqoinloa}x08^M+G z&I}m$z`OBHFmQlrx<6+e=^B-YWf-BepR=Fnc3@Z=l(^HmC^NyNTxPuGJ{7K<89|KT z-?O6W{WCC>8?I%I@Z`yX{`HUWXi)KlrF)O{cPov!s#6)gE27~FSJhnKX-k4ES!8gmkW@{GyF`V(3Ow|UUBCeTK43Z3@4;Rt=T8NqNo`X}=D z=M9?xb8G&>C){v1g6?b7=G3#^KP?m2;>}q+yJFBmr(j!E0-xK}F!$ebxHM@QDR9U` zY{C@8Pr}-(fAGVf;a{hJl&x1^X%|>TIv*ctyK8h+nh@r0?Ukrs?y?8{VX(R8O;+$0 zLK+V`hy$KDy6;q&qP$Scb@-m!7A()8o9Oj>NpIGp=*}Q_>d8_&6=N|33Z$}Q*QVVh z^?L8?c~QCoNr}m7+LjC?seAr7sk^3#hwt9+$g_pv)XI=Xq}5f_m}2=%hu5TPKXt-l zs(~Vc>I?0oEtB$3^5$?R zg?;jC!&?Qg_p!M(^QB~Um8j46?mpiXc7OQRFQmT;m zppb@{(Z6W_-T!@XZ<;`%Wv^XPqBvv9W)EpN@9xSRP`=NFma2{@To6$}1`F1w8JmNx zzXJG+51}zA(`Lq!^hxP{#@@jVBtFN(6C*O?B`NmA-k{O&jq54%!rUaU{rnV_i8~3~ zS1j=La;>>BIEp>axc{mVQa@xM%g;uhE% zF2sxJG%p2D`HVZiHK^ebUIeyCuW#eZ14za$`C?Kl-Vp78p4Ii75P(m?#vldITWKi{ zUKf;p<1^@xC1D0FE`0r~YHpt~mbJO>C~zJ?XG2s2&85t3$<;|Vm`@A8;Kr_2bpbkr5A`1z~PcnR< z7tRy|t=p$bHH{V$65JwjbR`{uPkfo%P~R!Zo$!&wVF$|`&SPm;zWHlOcFGu(#BbF< zo{wYqel-o(bl-hGwX~!fE-{B;wnn*TdG}j1swprOwCM}BYKp8$Q5>|3lv^b$GTatp zfEhHv#hJ40r~H;X8GMHLtNa|0cb!c+BV{zRjoRXz{F$4POujtJp5NHFFVd@Hf~ZS|T3=X$*9{UNT)G5~E8o zRg-ujvIJ6MFUk?}i~dVj+mdYcUg21kj6MmU95)b6ty@Ho7KwI&m+K(VBa9$%X@Om7 z!dz+gwYa-XA6bzxV8t+zZ`!>nWsqf##~3^YcG8W~(u>Skqyczzk3W-TEHF1LNXYj# zm%6)Vq{D!2Q>wbs8Ga?P7D!%jz-v*@E_^JeM5RJbVY?#J9``JzQryxUdoYVB%z@fldZkX%W 
z@;xm_g!3FaPYNqwx73tVM~cX^oQyixnK(>+vV!4D#r1+R4fRNCn#`Jl>PTlt8LTp& z<6G;NlNszCFe75~x^ME`q)q9xn*MA>1alj#KD>yf&J#Q!GzjLH5bt!PPm zEzRf1Ymbzg)k8Dx6vHNWJRaOUgy~eDu77N&vpzg;UaQ7gXjEs##+)fBuI2Lc@@sSh z4?WT+ybJI88gu`dP~@l~V#X?i5KCkwI|8%XQ6JwdnotRG zfz`6MAR-D_SSl8*3eG!IacIQzmzJ$~7B{!5aN5kFX!9LPM--5<%`3>9QoLQ@E{v~+ zrXKj#UFRNJ2D5#02%nwV!}dbl6?yx4_fnAIjqJ~nP7eMF4$2Fg)rR)NHO9sKwivNa zUg-nX(!jdo$>_6{)9LTo`f>Rrf)HaL?=A3**({*fo~YGSSRqoA^7zs7vY6xDts*6O66u=fs` zyFfDqaNdLOu7({^-fS0n7AENYZn3?a@q&embZ^oFHXLev4Kpnd6 zz;}6-#kd3~I zM;Obc&PFoOH+IKrzTZI>@dnMg!WqdHxrkS;&_Xs1@&L@wb=0jS71W7pwCpQ)V`c7K z2A`)}3`_Rac-&RZRtuw46)yV0XOGcz51P#0aPMt6eS_j&UcGY<@|fc=NpdfcFZTdA zLV@KZZ--!MCivJ<$7sqDlg7g9pILdZ_ob z&OpJ#vE!p}-Vd=~--qOPC)V7+;t@dp^(>`&QU3&p89S9g)XJQzz7+9DHeu$MIbmw( z;y-#+_r*J|{MQ|()NYIhc=l9wdi!p3kK>rE*K`^qD2>=9{1C!zCFsv7M=meGRT|kC zc~GfvrF58oEn5hR6_qOd#2oNrR*^$SDlcZ}u}Qx!plPPt#2a-GWY`-YM ze;#Do1$}lyRo?xg)4^Ct=$JxKUIOdo_ zIu-jJxlcpKx8AvC1l8OF1(wb|z#E$0ef(Yb8YG6%>7_9#he1d(D-e|TQ;Ls(bl-9o z#&nochZ%aw9zW+>V6O56fLlKVX_Jm(ldg1|ktDC~HmKA%Q}Q{pZ%=RQxO!+E@AWd` z8>d)PZfB~txAdVUt!)RAR_YmtDiK#M-ds=A4(YV}0|GIjF{7J#E#{y1b-Ma91uNY})I?I(QRifGN#+tiq3g)XJfNEMYA^mG+bY$wc2dX+k69uaiU z$h#V-?*tU*LbSm4D^G$r>BRi4UDc{?4ZP&Hptd{tjAma@35{85^B&Z<0;PnB}vB1ZDa+XjoeO&K-%-s!_OpSJL zj`aq!_M+Y8*`TUKeR!ROoP%Ld-wHjNZn=!#7vs`#->rSVOJddI>#8}gcb|IA#QJy@ zBQLfaU{U&}DU|s8c{tvKEeA!lC6Spx`5AnLq2OuAK_`D%x16S{)@-qyaV>o8e1KI79H*H)>O(w`N9%p8g%cF{)gQaZksbcAyw=7p)E)VK7i zDgQ8hx6rFg&Zy~tu#$(We431Wi`e|*v6P$Z&#&Ck-jMk^Fo)-UHxi#;p`*PY1*0nj z4w=ypLsD;gA;n}KtMZFD#Iv%hv3LA@z~wA|RxMr( zVn9vKuDm^1jQ#AuJBT8YYQ3m7mGoItpu3fsa3uh*K}NXnZ*!-XNN>j9^Nc#j^vF9{ z{aso8s}e@VBK4T|MMbSM)#7yDEdVR0k0l1z6V6F*X`ozAEr+OIMgudUYG+rUTq@Ov z*_f@l)z{Q_7eXmK>J zD!)qgvMv6+lyW+}0BT2C;>8Go zaiSe#i%9n>NepxBQpba7Q|xlx9~$eN zJSK*K$QS;*DPZGTgzsT||6%G12#LX;4yRSx7*EB;2vLY>OSjty z!2g!i&fg>2Arc(?5iY902||Bd9SL7cj-cUVpeVblFmK=(coif5$eXQGF?} z`$ED~B9IUe4bcA>2?>P>@jquWY5=G+0tg5QyuUXl#9wmqUx`C6`iAh&L5=@MSdJ4d 
z5+(VEk!;aw%)js#(f$Df;;+m8C8=@cMRfn+DX$gbL$xEuEhB!87tm{NFulvgU@Y{~bxw-;oggFQ6J{+$%fD ze*@j!<7!v_0{i{~N&Xkmk1vk+7hRm+FJkqwzjOc1*51b4$iVU6?)hi7|Lr=&|J6aB i$lq25mU<2jwt9}HvJ$Xx|9nRLdv!rUK#+?4Q~EDlvRBjq delta 37949 zcmZ^~b8P5O&^KCJr*?a4+qP}nw%hNiPi@<_acZ2}wr%$m_xC*aP2S`tcXzUzY_fmM z&SvtN`OLhgfTpa1At}j%L!g7ez`%g)L%hi(An8NI_;1Q2II;ouu4wA$0S;Ma3ugaB z&U8bTNgS-11p;$BMZ=FqR@9Z#yGYb$HKPg9Sct-1$hs4v3jjXaXysjjN@@e z69h3n7zbj7=$=jA7sK6Xf{6A_e_?lU*`iwW& z*X*{9r#RC=xd1yl1Vm>guBA(c-IE-#+az7)cDazLpOFGFlf_9i3`C|H#E$}E0mh>m z>n1BXdMuVOSLGQkHao=)?XAoO0@Z8UfnRM_onZcFnvltn`9I|4-5TY^d0Q7er=K8r z7G&Su^>v@+WO-}4qH>4is87*X4Nk~{klZGyM8QY~cYto8hv+iPy4)q%6tBX>t8{%j zTy37y2t#d8-WDsnxL7ddr{aHlE2p~A{abjt3tim4Q zHOt`q>#}1p;hI6-D1+Edr{biRFB<7+gCL4WIfIi|8kGFYg4eqSv!r!s9l_dTjP zpvEX;mS!N=Zb;zK`$FrYlDG|wWSefBNl%TrU$2(+>^33xUP4l7Y?!V{AMxIfp*vcN z^%iE1#F?aNZ=ZwKjZ>f8Lyefb&>mbckA(2y{Fhu9eT|DZ+H*2NbLzW~=U^Gg* z2$)a!Rqvzujc4bwtOAhV1GgTF)m|N|-x6~VbM6>8W)au#odi#hWYq;Gpxz3}ID=VE z*+E%peOdRs&aAet@_pN?Ml9m&#Sdra6+u;aYil+myt?xd6AL^qS!M9G-dTe}_0S{^ z%MJ9l{yc142))j04To%1rmOL!7Mh&~fM}T+@7RIp+4tHK>PpGOwVQq_AErc$tij*7 zpr9bewrMg!EmU1eL2DTke)rOAVe7$)k-ndTF?!A(_Z6?-WQcm81!B8D2CDSfiN`HN z9Gx&i#u;B9T4tj$d&otKgN#&E&ppUJ_(U$ioB72b(A@L_LTU}?TF-rxe_z4e0&6l8 z_M2@*Nt|jVo;A9f@kf==%qG1f?yN(uG#peY1D;*ryenIgy|61{YdQx`Modh}2$5z| zWGO?qKE0BUpzj+mDL)j4g07$^yrPVUgUs>Yz)91R3d*qkgUCqvQq@c%`M3nI@!!$r z(g!?oZVFVY=YJNvwJ?>9sZYSQzGie2Gd*aLc!W9^|lIj0A% zSkES$Cl7?N!+*FUKJjIo^~8c!{NvN^K^U{cY`LO&7vG^IBXWlLi*LbWfw>9iY9sXC zS)UYDH3}fm+zhE|%rH1K$ncyU-S}ULwrIp0+kKaeU6wcUfdGCxojJPT;2;ZL5v1TA zk3&7j%WrSAw(^#NC40YXR?s5#+$6E|VKpa77=d0Tra~q$1kScYbUFoUt;qRL{a)nW zcPu4YXc#houGe!25Rhva6cCXA>l^>aCnin8;WYI_LPP$qhisaJBZP#&4f^l=8krtY zw4iWg;S)(GlOncSO{#|wRu&N=hQ&re8BhV&%9bNEHex~Bo~&vKw()E!{`O{FGisLa;q>G_$@NYB&i1`&I6xMJ+C|{r z5=u3i#pIpcc35kOJa_sVIfU2_>5T_~Wr1Vt2^F`PNIyW)Vq2Uf-4E?D_kp*@m|;VZ=v>+BeAW32dq|tr4RuElw0DSX*BY zvdVi;pLd<1@}eNTY`Jwds=-D2YT*_(cvaay?bwGkP8BY&fDeV!$UU1N{ZTRv=g{>q zkBBqQwk*VDNAR!0SdoGovAmtEY)m@oPFLKmwW_Y8yW7{3@Di5`^=SZMI%HV5vH7$e{7e_wD 
z`-?$Yhhddvy(<#-CiVk(KrL{JGWVUM!)~SffuPf{+9}4>T|<_>zGg!mA!)s<(@bSm ztq8g$b7aNdKqz-(hp1mH&P0uNA+2gnU{U!yhGO7c+>viR2jxdI-fxyHRxNXrq)M~q z3pK97>!^(1RUw;@8h+L!xNRyf-K%mHvZl^mW*TE&jIE6lF`8dMl$>6*wfUk$OHf4n z(QrvnV2lQzv80t>$Ytd!)>13uk6wfYOD&~P#;n!Y2}thE-nmHf0q3&BC29rF%yuwM zmgZR4Ewz{r8OEM*7befG&g7Y^(^PEF=Y1QSRyO0c4Rmw$_T?dW8h*`wWwgkWqQl{p z-!0qD5nDCA44rep(8>~drAm!`+prEs;0}{Bt3$!-RG`yn zBQ#-RTa_Avxt_0?7GSjGE6KOGoR?2=c0@~PYnf3lh*LoJ9>kyiDBbFX$V_z*a*;&M zJ#Yaej;(LZ`}4q!vn0PQ;fT~{o@`h#XUMR`J2B|%%a1zM36~y@8R3?hsGht;f2$sj z>4hCGlx_LU$krFGYeaQUH6reP)|Y2#U|A~`&m^E{NLFN#fhP94EV4 z=2F^ugLMwlc>^>EWo*2$Q7x<@sRlKySprRY?6U96B0G&%14yWII>yk&wRK4;7^L17 z9c)LtOI@74qEE+@%T+HHt%T1|ePlaAvC}41K=5XGyf1`zIFlHX@hPZ+xiL22N&q;6 z?^(=*3u9j^^_dUzi_olAVk^EGN1y6vUdd%nqp}YFT2LC`2=|GVq4(eyom-R@aFh4! zD)rS{Xo`_!?wr^CT6I8A#_B91>|Z^*M)IO{{i}T16L-PM4Oh;mgzCi8+wqZl5AS9o zvLJj&I6{Uy9~h<}CmmD9bHJUGo(kaRokf?qnfTcFwX!mt4Mi_{m9kD>h^gD~WgU>z zO#rbMFvkqE_nn_DXSt)j#9I^3D;+H-f{K(hF(e06Q0O?#1>iQhfbxx>azeroswV2h zJ&9+Z5}rPi=Cklt`K)Ss&d^C^*iPi3aP{@jIt8%H6twAOgL;v;8ZU;&4J6gm#yEt9 zuqa*fe5VrTBGIV@a!cD~ElqjCzC|XziV#t!u}^Iy?))ZQoeL~<=&5@=crYwk=TtxD z+iKa-c9mD>Ja{t0-yU2IY_JX*^ceiHDb@*Kxu2W!^kHKfLHuyW6E6MD{X630361-0 zNb2XF2OM|gU)E?ZoY)2~4B#XYfGi3SU=+g~xsBySf{h;4=xOoD$JbODkxviNg_*c9 zqItfnc|e)&OX5PCxl?{XW&XyT9`Q~83C^S%c7LZ5?RqFTnWCLeDesqBEnK7JXQ?*{ zJ)*EU#?Ga9z5gLVmq3Pls7;r0;w|_OWP-rWP|Odlzn!3K4Qbf69RS}6QW~-I%lbpN zrciTZHUpW~9oMz$N~*g(ymjdvPkN)YV)xZcMtZC7#$*XSmen49-`%_!>g!C@v1|nT z9G^-0qMExf___HFcj)O%GTH~8i8By1l#AUy#cvb;m7&j<;4=m#!TZUrvY$z+Zxs?} zdM0|nI6GWcEb8?24*Y;4u%1_34@rkBNZDKP01yhS#To4zV5*ThavhdlIELk{X6*VlCU>;eeIKRKo&6jleb+3 zm9qZ$`n*5qn;J|3lGR8OmixQm+Rv-o*CS5#&8$+Rqqep-4p_Uk*cg6JbPMblqArQ| zYjp@%DUOUa<36HicP!R*>||fKxaT4q^kDBb{aC);7`*`ta8#aX#+^|qLfTy5y%Rwd zyy%z2m4Zfa9*N_}zwr#;c8&AMaEtVfP@2OPOm*QQ)J^%9IjKZb;Og{BW8oeBtJOg-Da9Ib%ol-Krn* zIV!VmN@4h^#^L5eCQzl>UYW_ss4RzSoe$=Vg%Q#?P4Hp2b{Xw?y1BA#oKn_2W~u~z z*9B;VzYJ<5QHse|XJ~xlfBMFs*d)>t9%m{n0}+zgm6Z5x>^yCe`N<2gPCjVaXaEH5 
zrr5N(vWTanvZ5n7RH?74@?YN#GMhOm&u!YIx2jq?Q4i@_4OY&aQ4xv&Ocv^^!f-vF z19ajYe0wpP$Ht$kU-17E6p#nDosPqUfPAC>e`5lW|1*+!@L7PscoY##C$lLTkhQ_w zdI(Vz5@OucGm zK9;(Qe39_Z^JOSVE^0m|V<#DXZJq#LeEq_0B6t0@*2zA&cVh?jDgrXd9sVbMk@4F6 z6K%Q|tE$4EUS4!ZST_jXPq~Z+VHNA0txHpjG1!eTo7^RN8(SReX`# zlz&35lUAPtTAtPgd3xlDxhkIhixSfkl1{1c)nZ_LglDK|y_h#Hehh{#6sM0hbhazB zpK?ez;yD==BWrurC{Zl(Y+**uISq$z1w7DE$n26Lh;Q=%+MbplmKf7uRjg0wzfr`T z$JIS^@4*PFDsf@}k)Ku@l)*#pur4&dga&W=rz$9+mCCIAI;Y+dH=D2%S5{_HOIu=- zeX4zA3gforA|n#YgZ>IObOU1iUwR}SX;1QN)`@LYXf50brrCnPI)&gr#LfsLkW2vO zH*xr4v1_@o{4$L@N#U?8kqJw^hNoR!9XmeaB|7##lpS(^9h+5959AzB6s6=Pf1TIB?P}KID!EI;crBxz+5eaie)=|>>P`#X~P#BH$nbiUP5#i zkjCTp{Zt{#Wr*(xPb`SC2pIdqTtauduojv*AfMI3lBcdt6I`X@#@+lCO|s+R>3R+Q zvdI0AzWenJX@ny+b+(>(kGC-V#|4rNqLeSxSFlx|KMo7ih4acr3xYlV;; zxQON{z&&Jz??S5Gf+|d`WRcR3N*Mpf+H<}RLEUzg?YZ_C>Cpr#b(5~9iW(L&`krQp z?aN`9&T5`k#V_P;{>T_f*335F!902E6sA1WZYORuj@mAfOe1}sw?Ab0@D9eDCqe%6 z_Ylvh;MnTr-b%4}YC4?o9f}UfMsIE%yH3pPi(8Dkc=kHA;S%5^Nf`leu;Q{SwZwpD ziorXD3dT(6{299^E^m!hiB{sjlTS2@{KFr#-OF>y7Iv_8;JLVQ0_4@6&~Zze}r zR&^)YS%$QmrRw^%qitANYrB{+3GHEuW3A}8JBRW4CwKSkh;v42K(TjBRJ3qykhzEI zSSX{}+T$O7B&E-yFL`NX+^2;XeI4E&9p9n`%X&9c8zT3;UcCZsn|eh**MomGQnIHf zF|c{z8yYooNE$(LKsx*?7;+ZPN%Z0Anv5tyd&fXr_fHL*K&;)BE(KrnM@jJ^k(w4s z9e|rEkH(L*N0)AaQp6aBb3F_1_~DJ+Yp2LGYucF=on?G0cXzazzt+=Jg*Yd%GS zmZKsu&FsaHViE)VdhBy7(X)aX2DjLMy!ZQ6{UQn0@1(_X=`}!$g3~>_E6&2rnh)*e z4GU(N8lS*S5F|i6wda$2@MDU+DTpB7pkeU$P%-+QYrJyD+^)7;RT`tmZ=_Xig1bT2 z?r0NkUumw(1u~-&e6x$CFMhm_JwjR)JXbFQ!}A}bG=wa`0baL3Ge@KTOl@44FAwS6 z$Zv+<=3f%QjIw6fIKQa(th&@F~1k}SjX{AbD34c?bu)8a8#_2=Jr)3 z?O0cDP)taqkpge}=)(dJ*K^Cs>>^|`u-Lr1l0S-=>lzSk5bTj5O=E>u5j~_gYvU)& z&!iosR1Gr$xD9J3t<-S`&*?f|FQ?LgR<@B6<|80bj(P%sOFP-@=r$aUjW&r>0c!k8kzu)4c$1 z>hKJu?;F&rPb0+OKg6Dd7t?BtdC^O%Eb~jyV8M7`*DkE*_|iQ2EVqSUSnUkOAD(5+ zERVd_K9{kOvo>0P-tl~Rhziw_^A)LQVH4l}aG%E-$NQ~lwQyw(wr^!G?q7Gbi~wE( zEeBPcrck=LF6sR}|6g$F>n^$U##iHD!4ay>n0!;e_of{bCxO6pd6lPOUog<@{x;HJ z5ru1j#WL~n-v4(VSuEk!dC0>rvX!>(4{NH`?1Lt^NPY_fB-kmcmEkBNw$~;X5Esc= zkg4X^Iyry 
z2szv1h@3n9l6u*(TaPluHQy8Sm4Es7?^2!t^wB$nk1B zjYk5SUqck~RJ%}XIpV}LXvQWKrl}WFP#Y;Y4wao!Q zob|jnLCldjUL_%-l*RY#E)i_tq=!DD^n&6n8Qe58JU+ayPgHdxY#r_g?)QKi|1q18 zbxvWR$p$yi=N3WB7(nVO0 zdrsT66lkO7v$kuSs$zMxLHzgI>^2X0V=N^=JxUvjx&Fv<>tPoI6H7^`sp@WGXw*CW zSl9*btQWSb2b7j){cmJ;GJY-#4V%!#NDK-Cht*Y5$+T$&o2W;P;W|mh+^U%gGfnl9 zkZ3)HjR+|S*Qi!kkqGT}f0+)#RB zDtaDH@we-rpLKz%Q8CZx!1v({Uf42&DZaq9gMFY?+-~7MVHanVC>1l}YH?_iIr9nxAxl4-nx8X!+DQ z^R?sfrlX&quvAAF_vko^Sg1@Sx@Jk5J@|WL!IYttiHbZtNrzgR!7fkk*oZ4De7FU5 zZ)*8<|KVOkN#%r|*iw@Z4N(o)6cHDkT#9{~Z}VGIw&Ndv79oMxsl};|!v*?~GWaWM z)>)E287Z}tR(dW9t%TZetL8qr#<2saNp!Gibg(Z*#nCycZxHgeZu|-u_p0g{h7Vjz z8utfm@&OEqc^UVtx&g=Iiu&f%5|k_1vFg7^67gvM27!eMLy{K!GATEryB-mRJW>)u zRAVAwwE)jhtP%t2{tI-vAjxiUw(c|ML~!brQq=--X^@n!i<;Xj<5FLgRmp2FK-8Vu z*<$@B$*PY*M}Nd?%Ow$9B(Gj1&-$!`yx~2`5e9BdoSNA7OW5}7(P?d%*%Y0(Pr0Xi zM9ZAE;W8*4#OK+L&HS?F+>`dQE2Tf}!mn}QtDA-I28RWLSk<2Bjs&D>IgT9tgK9ri z)O!S*;=rcZYtP;a1lGNOH`;72potc;6u*;U6x87U39ot9pErmY`Dk~TO4}gxzi@s^ z+(3+?z+i(t{+SteR2KXf0VEF+V@hH|kIIj*oiQJYYNw3aCcbrEq**s9v5UW5HGNCO z_sr(+cDXrViNq^k#P24rF=v5%hx0S0Dt!I%yY^=&r8k}@y}sqSj=z*7)*VKzw!=bC zN&QVyv!m)4qT=koD!qk&ry~OIzqE8cPx5)cgT2I7q%&)wH~p}4vv}TGL6e+;pzBpG zqeXp|@hdGb-S;MV#q>r zuS!V(gkzTLQEgV{b(lsc`mdI-yc;?Df`^W5PIIh7q3oH4)+ZDGXOq+Yfzv~pHSK3& zZT^nYVb)<;TSbbbYCNS1^oq)F%UPscN!|vZ^fVR9x?&giYJJBjsS-E!T3TRdZp ze&Pc+PA~GW*cV-nb8X~{PA>$BH)ITxKCt$HgVcc@}K&3(XvebW)kS>fW+Ju6Lz!}Rg30(zeg!z{JrBwQ8t-N z=7FbwKWYV^7hd!hH)SC$7QR>IOYN)BT0ei-=`JV=8lM*#9)9DN4li81kDaZVq8$K$ zW$gDKQ!(nHrR3)YA`C?AkF@?^+t`i09wkR<-@aoTr%tH1Tb}eMpG+CfSr#86HCJ#1 zFH-xW&r|+ixybu37U|EJKJgBo#et+AxLk!Xeec)2K5fd_v{^YBPzd8(^23DhXSeI{ zRm|6pkEtnId2ROwp|*h@7nK1%y8U&4<%^24t9i`peUJgmCouxw{!8gZ^DW|1?(g-^(Uck!n+83)20 zcEZ&Xp{WXvd@bw1gq#UoZ<$8Fc6YizuJvYd%S@p4P6YO$9#L z1_rh7V%gj?j#e2~6p{03$84DPUk2Hk(97jB53b*2{oYE_pt4XZHc*zO!z#-&xet9V z|KT~&cV{E-%WSR2mhrHm#K93IEteVa2{bES(6i6J`^An1r|q@+=v_bf2}BeU1Eu1I z*Y}P-L^|$=&6MHmrhIQGd&4jw*dJ1NYlXln(vD4>Bap!Wb0>eu%(pLYcGg_V zYKKby}0j)7vhRG)$ zbPb_%1kA+5NNvg@4DI$Dg*xqDy;hfVfDYE(j_Ze 
zlX_|us{g^~41ElXbid?dRjRcXDrIyE)ha)fChR{(%$4riArx657OZFB(u^@I8STO} zl+IP$=E82UT;|qka!#3Gnm^2GM_{Z3h$>osvzDXdEJ7oot;qsOTv3(3{)D*_blcDy zz#&YD%2Z6*qFzJY(%|&T5+0M&4LfRe=PqT=fCU^(SzqfK6(K+3f?JhK)# z*H(c0UYfXGZUz|8n9>uN6{B@cVs?>;S9dkWY06mh+4qI0ABR|fZYHzzU*I?Ej3*T- zDPv}vke~~onePKSdTo1FPV}{&CV(%Drer|`c-npfQym?p@izH zB&#?6)X7~l!jUQK{2))R99~B@$4{TOpJE%{aN{Et%~-;&1SY$mem}M#W;!xXW^-mA z`Sm1U?!eUS;m@!c3rCeao}Kymrx4u|(`*MB77O09?N@;$o=a>+gNgbJJ2XSfHiD*` zp;gn8a=POvV~ZQAx2ID>$BHikq^i0-S-PqTLWWv&cX}%Rpo^4Ze`U%L_!d{xrM(@Q z4_#ye!^Iif3?)>079x(*@U!DVich9Fzji&rXbz&Z^58tG;jsj4&u*V~(ym=;+^0CwZb3(6?9%huV zr4NB`5Q%M5ji8WOW!QksQsNB+{o< ziF2SKRt1i398h>l>NwY^O{fb)Uxk74 z2blZEllZK-pW^tE1_2?McSX%m8#hqbMO%JBwR-`bfxW*z{*e*vmYre-WI7yJ;&VSE z>6Uo*@g477a9ofv$E02$E`_35?PGfoA7LM()nyNrRb>LNS~}{Zg(7KW$KyP-jg1~<1_(SK6Jamt=`ibGSt~+j4Pu{@{10(u7oxkCX1FI zblLabxWkOIUI9PIM>wsZ0JN@k+2$0(Fw>r(oU5+I{vA==PXUj1BJO=I3(IDPt5`YR ztIrBkCu?`_%=vSM4RT%jG(W3)+3lQZa{4^J;%0^g$#Uw`d$wz@I{H&Tc0T|U;QuZ9 zP3`{&_|^M`fE$4E^^UuFq&a(8XAS!erMre#P81Lo|4l$}_$_XK3*DLaE%ow{^$7AE zL3k**aPsOHi+&q_RfoLx_0CNTxoPTq22O3j6STQs;;5P?y7O&+(X~47X;gGySP334o>CCESI$llM&B2rtfhms;50@qPe~a!bv@nz+aHX-zMLLx*cnucffHtNyt=GvJkhMbw zoyn-0MSlotMp&tN=?qw@?e8i2tCOP0*vy0*8DgBk2OXNk2BG~TKlh&)<<4w6G0=r$ zv8cn|aDH0QZh-`|%METhkf&UNCZO-XA*~W2kmn<62Z)xQiKyj4UcALe<(>TW;0c0x z*g@^9{l-<6erbq$12%B^OY>Un7?GJ0Xp0EY-O?Q0(i+a5!ZMIYlE)Gp8JxWm?{+FBQNF7|sQP5qCc_5Lk;1RWW}{7UXcHtT=FTXmPKjpMmw z=QOT!wDtw$={_Njw2{6E$m;~?4*n#L7lWMILUXN}8#ThpaSR&hmTTxMs4i(_R_He4{a+CBMi?8N2~eGzfpeXl-qt<3$rZ36{RUfaL{-dj%)0Oob@8}kc4 zCJ*fth|C!m`-X@2A2fPgAUFlC#lB_5r`Rc}yMWR-AUHYwDPEfV;-`E`?D2-$$XFTr z5+qHR5IGc$v(j1VTV*(Ad6iYg`i0anB1aBXmO`+2S*pw1%POya`BL`KNP;B7U|n7* z{s)7{gr;D{tO}Ak(C6?kxB96x>BbO)-%bVp`4(ll6AAq#aQ`Xa3!JJ4dD%y>+I;~q z0_ZgdbQ~XccJST-uDRZ#)AR^|lU0;cL5N3A@K1W7Z{z%4j8jMOciwOC$8nLm!|%2a zgIU@mp*ogn0_tzbM^DhtEg=ATmI&;9PzbpB7A>ZVBRHwk9?16FC>TW3}-Xwsm=M4^RG*HvPt_~dM61$%_9dtI>!e<6ca(7jS9U# z5PuH&66;5pSW?YnSWdQcVS!F$0gD2$U7+jwKxy#tW5YliujW}J^%_R?nRh4 
zYo_^h=~Q5ruu=BI{ln5vwW`=2@rzAPPc9&IIqn=f9Fr<%&23fcL*$eOuuv z6icwiJ-Q zlC|@L=v(_Jb)PL=j4g~u{4E_AC%I(qPdLYDE-+NiyOL{q!g=nY*(5&WD;E`&%s;ci z&ec0L{zwoB{mVJm9P5?rGiEI)4!-30CJaO>WMS%E^%J9=4pkkRsssnzgR&w|dDL8Y z!Zc*8+P+4Tq}AOz)(boa$c$OOnI`aetpAAXNUjQu#!L3@wnOC>;yMMCu{LLPQ`-}w z2gwl5{M{)=Ja-u*-Mq|WL*R%MYE+r~2LB%dcuSYizk&b+WJ2Wsj{yFU2X1oV#rr?C zp$S6dkPv4e9x_P{5>n7PY2Jq0ZtIDXGx;3Q&rlcUH??c!(5-mU)Z9T=E!B2`i9`_X|n(l3bvp zNu`p+T3&5j4&ObC;-`i z$}zZ_ySZ6ASh_Mgx>z!po7!1BxtcTBn7J{SI=YxMIJuZxxTeGlMuGoC8U82K6B*AC zBOe*eE7E@Mqq)Q2dP=2;c(^f2e%7?{m-r#;1zJF+f+;jzxLlImvtCBSmxquAEBIv` zuo|&6;j55>7)7ZUim^f;Ry;bI1L^~G%CkrYmcgf-h5yCWPGyYcPI}X>wP=ReJ~ON* z6a;ztS*S+rODxmNfT|i!;@&PVK&=0S?|fE1Myy@_!Z)WkqttaR8%?_efq$5l9$8Wr z|A#b7(S)!<%-S|ToBQ0z*iu0%q(yS@ggNP-V2hK{E;hg=C~* zqe94p=IP>pHiWGESRPxnGi7;R*Du=W>(sQ`EOnwZtXB@Vv&UUONs#}2&_W^#P5-=1G|r}u`x4g8S$G(O~mW%Kuw?ai7>bE82*9on zoMnxyM~kxDW z)$e!q7`KItBqR-Fv({X0U!js$Dw@i|6c5|$QKgI|4%p(BamiGzAC1j=cuaXRqKBD@ zMB_|7{TqMJ*+zvPd|yloPpW8{?=&+lWqo@Qx<>S95=%APSwRuO!iT#ZZO-Zs;Ci9n z!-6<6e@`Av_85%%mphE~g|Q9tQ~1A+_R!mx=Vo>dXzT?Ef-EITeAH6XK~ycGO#3cD zKHr%*dayvuLFPh z_|tLyfgBBUWu99O>ITtKh08=#U<9^_9Bra(l8u~3WI>-c6-(cj6@H(PmQV;Sll{^> zP@Y-Z4z;wV2(kv@qc~wQYYTkdYr{?dz>c^0?vn>8KZd$wuhdJ=$k(cMX)Sl=81^Nt zf$U~>E>NK}eT+3^e5{dIiaNZpG(p9p%N>8{XJF%J>4MFFud_e$YZMU@KoPDB8?4?( z?R0zGqmO2zZA+}K8%L#1O>qvvIcHHRC6d%8O-Me%e>kXlY@+V};|rk&O}~@dm_izE z063|GFsI}0v&uA*Cs|&}T0s|Qq@ML)TGP~<^3Y{WvSdtBY;a6NIDt_kN(bIkM<@*b zw8X_Bal||Gq9}Rq9&WP)=oj-AQx^7!XSDMl%P&J@%*txiSU$MKmqKVPj@hy^m~uU< z%?N0-wp?*(KilKl(FIAp(!?IV@R+idq!4yQ^B>k;#R_^AN5Q?6j099yv!yA3Xv{}zD&G1a@b-gtmJg*Hxv%p{Lu@Q&v9Hn{ z#*VNubLUugh*T*9gA|;K*qriDxJG-8p77xGIk@0R;!-+_l{{deRYwGGs_^caiY~ zhQWpxUg{e07Dqa&+9%OrJ92n__FFY&rWUQM79_l^pGhqwg>Y+nsnleRvOF@Tq})?D zKNYPh=~2dj!>ja;nZ%RN0c*=Qim|vQKBFysRfpFj`;aQ~dxrit!)>zHXD4=Kp10$N zszArLwmw(TBT_9d(5PWZI`33n2D&514i&8YRlcuttx~G|)q81jcRyM7ZNxz>X7Y2@ zEOQw#MiwKC%B5|aYHGNy@W`(Y4XZSz1@1_9$uK2AAo}UCaG79pmr&Tgco}U|PtQxd zczJYGAHWEh|F3{uR?*4U>^BLd-A0FOyga 
zTR4|_!7eKUPp(m0`hjgR#5##OLWOe&YsdU?OudzSDce8r>x7;|Q0pC0y!vQq{`63e z7^wRIiW=S)7x=?xq=W5kb5&Z~%189(t^THPom~= zIavhKphVOd!lp%v@7JVocqbylF4l>%@B!W9oX``dai$&n)Vr)D_*A=_#HsT2P?Y6{ z#7}(3cdW1C40-D0^ik((WvTxH7<9FOO!cryh!d-ga;T3Bdp(DzG30aO=DN0x zK3%JUO9uX%~o=x zPDlOj&l{X$CnbvC0}GoE^Y^5$n3@9X1bwV}*7502BwP|)o496Khz$v&*j3@O{gO$4 z^jrLf0j50JhLI=7Y9Dah_HCUvsO39A|3gQgU&8vOJsB4|OKqVE7`B%SP4k`mN!Js! zZf>c}9*N6ww~u%&>xAWGjJ2s`=*?v;@JN6{;NKv}6Fdave!4dl~Tpf7_>x|X{_tq+c)^U}~jSCC-h z{NRb=E9(Nu9@3zuzJLVQZzn`W_*^ov%NFLuEthcQ{tZ$?FhDu@zV81;nUU=@;AAi$ zAd5)<7kt2gY=MKm{crCPAn)j4>Bu7O;AZY(VQgyt|ALJYZCF3Gr6s|Wse6hftW>(b z`q*l1X>m|kh_L}wIT7+-V69Bj|J#0KK|x2upzge)(!$=_;=r!cP{IIi)uKV)zTBc> zZ(pM>tGmKr19|i>@87nZ#Q^d1%Ny&>R>#e@>&#o;=lvoUfXpW+Mri}?HlH459|_gj zIcZhhNR4{^Th1D0bm}xyD61kV$$GZ4Z$!wP3gx<7Z?SXQE{e6|{A8ISXJ&{tebcpQ zZdwakHNbRdAk~Wv`#OE>Fv1QYBojP`BFsHO3A+e;LCDtHlMlPnj`=_ znGQeg&3!omaI-WG@2cU&jS|a%!`mRqNy!=T_O&(7$yDDF6=p@S@W|7*7lV5|qHS%(YHxFGtFfx(H>`-}od{Bmk3m(68P2+&}{kDeKc-A^=+ zFJqW@@PS>$WeXcIE%XYVM7Bc+n>xOp)?n=+;bX}(QHo{fnImRA8n?<62Di$O zR$#FLP!LmNtLFjNwvE%4Yv!p5y)g$JvZYlH=C)f{J?aDNtgL&X%%)z51* zFtX;dEf(&R(pQ>kBjt$5^RQcB`%R>3ZC;w;`#Yq zu~@iZQW`{;ho~chDCd(V7dk+U5jE)YptBzcv_8oPGTpG8k?ZCcQ{2XQjem!M* zRy%Z~={Kj<668%K<<=H0ePHZ(KA&15FKKc*jXNbHK)E($o#D=!aXtvm6Vf>5G6uG^ zd+%iPZz<#*xK_12C|4`?Z2XNJH z;l+ZZOVk6T2bT&D!V%xe`N4Dmf{bPg08(~RDr~xb{I26!n7*G?K6Fro@iX)JtQl+W z%4!4ACx{SjjC3opj{hNO@0lkBpfooo?TVl2n@;>!`GMCtb&8q(&rxWqm%=J+-SqW| z?-#FaRhmI@n!69|uM*bc#+!ju@hPa!RD*^~X{B7R=vh-r%bb94nS*kmp3v5hcXRCGV0=&_Ay#(W0R ztN9-_?Pk&|?~7)cMXBg2-QV$|8Syj;-=W=-^wMRaPx*tlv33cV#B&5f9?Uev4c;j~ zb;|(en6LZu(fse((ceKpjRv|g%@yTVg6_zRc(&phYtGjMTf5@0%8^^T(`>LCjGOYT zM^@Cb)VZxx8-y>8m0zH`8+Pl%k^*}=j=GnEE%A2i@_qLCK3zy}C;FvX+jK1gwGC-P zJkGBs0yBNDncrQ{lEc2S{RY!1*hFyR96xJo3~?`JR9rqaEvUnR2e15T@N}8j22*9v z?ke8x5TB)8VLt3s%{DjZ2@*98^HH8^$12p~zFiRp5o2`eDOP&=FQW1fWRC|(WOgs? zf4j`%x9KB<3ztLC-XZ;@-sp=&QanFsR=()_Cry=U#FDs42*?_+C+pLKhnxx4i*}dEZ_MI%IOoG3a-0Bu<3^< z<|%L+hfHSo&6zr|77fhqfA55)6Gl&uS@RQ6o6^}}WHz5w8ly?ROwG>@? 
z=J(;LSA%MZ(EzZzl-YI=Gj?O2wR2{3;bTO3X>?2y9L#)V(y$vV*qgjx;I+wxYdE-w zviMHQQ>8?g+UYY`7hYog$D@{zj7(#aF}aCl%Tri7)tO-a$<#)}!n@b#cNL_%`&*tX zHZHm*m-O32!?IJ7hrE6N_+*IRV{NRGu!P|wH5Q^CCV=>?KH;hO`thON)<3=|fO@FX znW?N>d0V01lS|)ahpvC#$l<#FtQ$1Fe^+vg)S8t=&KdE-5#C`g`LjnF-Yhh%6*QE* zZ{GfN2j$x-lEmJprn`m)8BSVl@C!>tTZw9HQ|)-yqErY%Dh7rpGUTGP7WNtzipKX| zXYqxuB7lF6W*J?X=ea>IVa1&g%tZ)82Ku<)Sc-NauRPze7b>>B&)bW(u|D>6> z$3l)i^59WZdw>4|Rcp8!|{bo3rviGQLpYSIWe!>udKwl7vdm5v`(_*Mka_b z!N#abTP&bKA4w24H}>gz-@{j~-hc z1xRGeHLrZRH7B-a_Zjrnv*s-f2bY4#C84Ww`1bd^AOBg{UK^JV$n+Qy-e2+{gQM*+ zNW)f^LL0b7PyZrzPy885WP2VCpbyGZa^+V zKU zB)Xk1TGO0YoI}n0U>6LB0;=2v(97M4(JP*iaVob4E?TGkkjlPO|C7FWVnsK} z?aT+mcU&(|s6HlLn&rQmk>iip~s@vTrdh?=*_UbkvABy`?j#XebQR%njKNn z()dp*T9$r8AF5hoPxCjoTtWmYhs=5etOv|01dF#^S_JI31W?oSLpXZp>@M8;PDyW@cGtoG8ZOKoRfI1ZmdPq6Q_Co1h~-(M{OsELd0@ z)|Z7|h1b)c=Zx*Goa3z=*j-ZcTv95lnn$^xq=S|p3Ya$pp%l8uwuDAhz~{;aTUc}7 zF@#?yN^&&}@--wtthQ-lQ*B{|yaO>yZp^=0W$HBVlwOhdLK-majDJ;x{9WL0Ld_|30#0lgQ%Y;vx3$4Pu9Sa) z=5wT^f-k_WvKoh>n1S|nj6ylLzy_W(<3go>4-R1n4y5vk0F96N29kw5!7&gz4bk`0 z=r6a!tuyEi;OYMa2AR3Nw*ne^jc(=ykGPClJrfx0mF zb%9Ll^Xa*%bN6hoDNX~h7bpnYvfd-iumu?P^Sk!riIhB_AWpGFssdaj^KNLzHYvROTT6|+F4#}6K z-9C)$X{ndw-PLKjFVmN$m;?nM-#*g`4YYILwMa)M2e(I`tE6VCn6mW`k`8PF^)*;~ zlx5;MfrUEVaZFo(%DE5_7theedyq=p>%*{b>zpJd8xTLcuu~o3@Xpk^7HsSD)^{Vj zSG*}zZ&1s37}^dL&?!n%>!~4a%6h3FM^64VQ7>j#vqxE#3_!&2Qg!RPEO0+5Cvnu_ z?HV5L-NtHr>Mv>5rfauG>#JBo=0R6fKDVD{fKD8KVM58Z4LL$?w|PpK*Nyfpe3-|L z0r9{)R6fz$3Iu)p&m#(tHb^bOEH+EqvDwn{$X)$#7s8&4{tbeaJ#haEA?@HNV^g0b zfqy zpl{PS1igqc_D8rcn}MuJoh{ghoLJ(_sCUK5wcre&xau>4w2mCAJ&f)fj)9|`D%Fwtw7qhLIjFZ$pTlxocgdbVp(1{ zHfdtVq85=~jSwLc%Dp8&^c6Wy*car1ACM0`lLdmr3q1jN_Y~?qkOf=fhu8Aj*!5!= zCoVc+*$cZunJSv#we-_dY-j3v57;MoqGYdpm&v~^%lz*GxC&Cw>85h(esdXmt+neO zHWZaIEL3d>Jk?nUvXoCFAd4l^0Qf4p_HvHhnYYh;-j?ChtSrCpDmO|<-JQX$ZuB$H z=bw~+DL^NAw^V{M3DR-dAv^yKR3;Xzjn$T^L^%%eLiLRo!(k26DbN)Bhg7 zF-wwk6-Ppl@!e%bd6zrihh2TNM2e|)fS#K((m&kaIf=6>8d2F|(m;xSYB_SWUZ$;ltY2E2x{$ftxr-`3x47rPlHlT=?w4umMYiAI6ikaWqxAC1kek!4 
z3@zRoNuKpjHN*>$01?0b*o;zZy|qwNfx}sgtZ=25UqMifNq-unN`b~Ob4SF*#xxx6 zv{VOz8u8vn0`cwy=vj;=lJ$1~+GAm_;INr}zU15;qkH(b6Hfwp#yd%`6p7jMd_Rzg zJw37@g=elMWX=t~SU697Ca0$gfcU2ZHH6g?1dZIh)B*N^9_T zb=)b}HLTj@KR3FzDAV($*FH6om==xgLe@LtH3?Zwt{c@MFRHfSj7!EFg|@q9Yw7}W zFHdUMUTtrz+zXIq)W?(1?U*MVPCVnISLyjbm)T`h@q zLQAs+A&@@w24T7lnW5M2FW$)mxE9C;)BIQuiZI*$_S`4(V}B5ChorPt7Jo-f6%v#$@$ zV2AKMU2~+MTOh60X@u@84e!MyEE?ZQmmZdIupL+Bd&gXCJ_GYekDcm}seT3qy&gxV zeEY9v4(nqOb}2JPM^sMdl=;XMPcuj#l|cJb;yBTXoZEWg(&$X@OUvpG{@?DZ?LM`K zYTQT)cU+~X-Dr=8Xs0U3W;x!L^+;QdS?)xk91kRh5diI_4b8U1l|HPdk%KA?{3o8g zS}wu!*kT6w*zR<{4kH4Mh97SQo}cz%m*O!=_lWb|?q#>C=An6ZJSFi8!t#FUZ4Stu z=z?b4b@y2}V@mgQ0kgk3y3>r6|97Gsozd~j6Af9MA!`d1cSZ8LGE{Ea#SO72BZ@M| zWNmJ73SgLe5VskXA+AJKCfzl{86HQ;PbU$N(?_GK5C8T_x13T3bw^91pC`99}KHGQj4HA-&J=*k}QCi*7r8F>>Hw{85W; zPJ74g4Vp02%ohgXk|}t?O#uF5%d_0wss0w!Z-(Pw_%hW(ppxNJ40SDY^^j%&o_<=A zX%c;emUmoDAYNe#LJ%H_VaMPhvD2S$9fq`nsM|lCO7qT*>yCZquK8QXazb9O3@S3% z81Uk?u*5l|%jIPNDIi#q_R^xip=xqIq`-jAQY~(l&(?uTv>C5W?WpK>SJ%FhY7x1~ z<3&*XG46h|^||HgoayeBxE$COmskG;;D$-!AJYcGC5v28Dxl)Jr@m(U=7g!nds45f zhY)Tfa(i16Z3~I{P$C@SpXH;{8&j|P0PoT0s?1*M>_#jdu+E4hhf3^~tXlo-tFNze z7^iInbNtH&9XVPh-nj;ndS2RdF|Q~+cq>PkA ze;GzPac{2z@Q`M((w7|H>y1rXIw%R!5`FP{jmHHTG8CE47(3(mXh1co4iZHa0gE3H z7-Wxkakf%dp?Jzg7&0;kKH-RT?%{v5QQmpORpV{Y=5fU37Dq&qxgbx%ScW{KVV@nt zZ>(`fy%S;{g_0>1Dfy&OUozNO>E$C-CYGrjLrd2Nn7>&IO6uAAKbRT%_AIjB5k9-V z5$wUgL4Di`f11dBg};Yd?5 z)HGip=y|li|CWr5)FCle3ZZ5!Ft42+%mA$og;=%_JhuXZAyaJaW5>ten+%F|{a_H^ zVJ8o^XNCP@Oehz{gwO<)r~a-}Y*-H)W*u|CxeT_MuAT!+jTR81UpL97DRl079m@N%iM1eG>rSk!k zoX;xS;JZt=Kbr!Oz@9|Pl?BTh*_1sP)xVG%*+ZVxCoR8Beq-SNmBuxrZ1RHrf^In% zR@z_E&D3qIl_Mr3jVrY#z*jfsQJD}>SBX{ptAnYInMtzBpCnGkcik= zn%VugAA$MjN6w!^i+&MucF({c|&9hG4(^_4Lj4L!py0cDp%$ z)N5B*=;)l+LRvT5DOI)n$oWCtOoLG!shze2sbSO1;JU0$_W3lKXnXD4V}G>iJ`GIs zANT%jzq>yi&IPHLOs2m67QOZJ^t8!nl^p?QUXrI7)+c+j)Q zoKAn(t*C*IUPlHRmO*=wrGHJV8O1(r$gE1#?zM%FW0z`| zG)M8TH8n8_4)LE8^NKMzPRN{ir-^;lW4qq=bvOcL#-M7oHF3debY#qWPWzgGL~$tv 
zal}eSLyqL-+zVW4r6yfBhm8bIqv`+)9p;R#h?LVDLg*X=CzOW=E!%<-xdLYjv4if7 zKL_eQvgL^$Em~J*8Jnt*?;qJgc$p!bKZ-gOIP5l^tVxnCVMXn}F`6q<31Mm$@pv^Q zuP9p@%`$^t2)GXTajGae(Fq-Z@{Ey+C$73AIxf3z97Sk#(ijY6Z6%$MEi(N|wIM2a zLvSOGJ}JH0oD)q#a&e30N?;8bDF>Fk2u(u1wK1`AO4|L$uATkYFm{BaAh?`{vc?n zE@#2~sq{G~YtO1#sND$VWlvu@1WROYH#wc5JIvS3otZgQZlEA&jc&WbQVxqmb8cO9 zU{OyD+cpv#pxwONCdIEW1}8V}{(}G6KO6sBKXcLQ!lR?RNI2P6GT#kv>)cGlmr8%GI@DC{-`V8F;TyeNhro8O4@ zPZIUG#f+`U5VY11nN3L~(ro@JjQ|}oQGWqE;t-WTX%9 zm+mIR_bA!j=vuG6@RaVcO?Q0p+n|%^{&~bSvk{~=>AV5RkJF@lRw5hbUR`LXX7F_i z$>2Qb9<63a>@j;UrnV5d2PiV*o|Q!Qa&(JbU&(@$|2pvkW-!fGJO-HGwv(8v?UXmD zgCftn7A9@5HVixGLnwGnu0vf17VGw)NZ0WAJ36%#I;+sKVfMJ+C4VDX-o)qD?G8V~ zoc?T%v0bwDHW)Lu(MSou#3y$$mk5I2%o_{6W#v(6FB0O2KfZ=T=pC#(sZh;2Zyx_r zNEuLq^^+3^VBMB~^6(;zhmV%C&_OcR6JnaijJi;4b2}NYssL5HV(+nG)OENY^sXSO zYI~mVLOeGT`iwL(!o>!dH~+rZ8@u?>h1z3uJ>fnpckLUDUN%F?HQq&7W{dX>z~Xg3 zbOvbcsF^NZ^(@PDegjZVJ%8lRA0=X3YKtP+dhr>62)E2KF%(LUl+u8o4ADYc5444J zE2(3eNADs{!vUW&UimtxVhc0URi$5yM$y&GrU?VA1FaPN%M|IW%AsqS!FKrtTX^3${ z-a`-_UuT8Unf43|o21G$l_thhW|RFlT=QISkuGhWylCMB!Q&Q5n6$ zPFi_0q053C3U;4hpK%5b{4sJ;DA2|PxPbsTvZC@b>d*##q{@XcT1_ZB7-| z=X^~4p}POI?9K>yJliNRGJvx*YsyCA8M`G?bHm0)9mMIgl9D2W*{YbFGmIa zw8t9gg;LX`*M9B-eb^Ic3+FnYC9cMNG7j>`>oPupqD8@6&(j<&JeM)K-x{d`+5PaLH_xkE z^VjA#HjE5RJ!ucq@7Z-~We$$()O-p%_Fgw{`dthC5a~HX21jN$?qCbyjQ7(=?OI(v zSdk|I_E)4EH$0)Y=DH-8EC^Ow-M;}gvlY24l=QJ)N+U96Ov|;OMhJMdE7R1$$M@nFWx^qS?tK;i zh?CZ^;=a2K_F>aRlq!!yTl`yR{+ug%-8~=11`^5CHI8Z@?bhkfJ`~H94sDcklgaCR zqHWYTkI#o6CZUx6TVG1Ra6x*;=veA&&`vT zQ2*$B7LlB4iYsDW_|103hQ?Io@trBYU4YqP%VAC(KC(^bLkMueeomR(h*m==kk1QC zH&16v8Dmm(>xquDe1!I`@#VklamXSfk|#X7hT`xZ@Ox>2&Y8{e^LPQCkLHIKQINPT zQm+MQ@jE9tsAF{PGU0|T5K>ug!cO1ExdWE>R0dseYiNBtRDS59i6 zme@+oNS5)uZRsxt!=bf*p!lbly4W*SWl;}oo2AIvq_(CwDv~L%n^LKNqOlNI6>14D z8qb+#2Kw8lDL#npGaCSk96Ve&@ix8MB>fj**4bda|Cs{bfy74=;ySiIoV3Qzqnp+2! 
zt4Q6C%#kkh-GaItuLfXb&+_Fd6j~YA_M$|)HVsn@IGmdj`%8iGV!Hx236UkL>l^YW zoL{UCI;_E2;2h>Gm(kzh&#HM^Y;<7RvfyEV4@k%_JiUPD^wej$4ZloO(et~TsN{qN zqJ3XpBeM6UxGa#_+1U#h`lgdIW?U3%b0xFUgmk65rdhY@hV6pA@yd3Jk>zb6CBQ$FgORH%N~{n$zpTx_0_9lxVN8bTj-ue5xGLYC*<_VV}Axq zEzctmg&po`;Tt6>uOihzp}cTrHsvy~oE)jbQcgrFubhWS?aEQxguSAQyX{e6bunK2 z%ygg#5u!I^ft-xT#J;CCC6>_!ib=1yN3`YaI-;nnh>?Lv!*gZY5A%O7YsjE0BN}e! zNcK~G9=I_V!}e*pC*oh^T;$i7*}WjtkgKv>cZwCfawJ^aat1>0@T4{LDFpcpK?KEN z@I29AH%3QHr$(&$&jQ$1SONGVilF zj!+1o_7K2SJ(Cs(|KqHvNHwHuU9yo$%P3HxkmX(z{)mv&BNZ2wA?!Q%(pCa#G2=TP zjb>5YGNLjvcH`Py()s4I>+5Rg1+^jF`QG#6Cq%&~zSmMd>L@yKZNAj>tUG-jCZ}Zc z<;0OvRciP>WBNTwHw1dglTZZ>ZedW|rBDV0#CO0pZovYfV7<$xd@SZGnJ?DWiO4fPDC3Wp{G3XUiimVapU=kRUxGcS(*^+*Pl}+J zm(87GH7v4_)KRziX@`q|JLbjoTUA%&wY60BLnp9u7E_2*F}EK|Zap(oSqyzzn3y0n zjl&z_s|zmPqd-JqixDc2CMwf>Mbygq`x zJhScXYR6&d^mz`yZojphK4tC^>~rAeY)*qg)MyRQ-23+v4*S|@Tc#JhEr9CS$XXVj zLl}Y|5oXP!@%M4;pyqTWi2JwBJJ~IsCb3B$>~-^7NFO7gsLYp&*7A%v`j*D!9enDR z@(e~DzN=lt(1HT%rFfXkgv&umy}cA`6?_?@LHVQ)lJsH1U$RRJ)B~Nehz>}h-4RBm zg$a8@Wqs6xod{R>t29F}L;zV)Dmo==I0-z-q7+>r7mEX-jV}&L!Z}LP)7oC){$2Bn zw-1X?#pv4$3%#8WS4i6p^@ZyXkdKw}sVNFc(vQ4-{NE2WNl3SgO!EYe1rK_OTww*& zTtQ~gt`Str8SgObMYtlqR3I!KL8EyGxXlteLDYO}nIJ`3FDg*mEWlh*jV0)YGwCnJ%lt>et0L9T z9hBKU9K|;7@Fqhzi%;I@(_I?f#^tVR+ptVI-6`kGxEtAuWqD+;)oqZErbJ+lxP(|B zfdL6O?e?U_HU5@^CXhJ2?8tW@k#1gUWvOyNkbPS4WkNyt{exBUzEBPWiew~zR6>jk z)#{b!oqww@yFJQu&oL?Pj+1FuE)#2aMtgCP%+yyH?^S0Mkw07XA4T(&pXEs}?310g~^@P$IK)k_PtnRCp;S zjCW55$sIyhDr<}ce&AVDDu`U%V}CiggP|TFiqE9+-CNFY-c`fTtef3z6MTy#=^BaG z|Kd2F$X%4L9UT2ML1+iGz^ z`aj7|JASX8#}oz#JC(A%vG{7=NoZ2LPdQ3TxIC0~0}zSc$cIcXwx0+KDwZReVP0L1as{UcalG!oT#fSLw;);G5yrc!kFH&YRUXr`NC7vtYd-!o+8Bx-^{eddJ?Ni zlP|GYK2%zn@&QjqP&GoJ*m!U?(>b0-1;RyU0!)PbP06`V8lv7ijDCWTNJm-LvaGz( zh0Co3Jw{p_X?iazy`SGY=3V2R72$Qj0d*qr>bgO1-+UF3$*pv@m8PcBs4`b+E>|CQ zZ*bAol1@12n!7^K+4RY$#<~ePbgAOVv`Vr{I;#v@g7z&NWzD)QIev6IBXy82Y_=@B z0KPV0c}pK2+?G;179m%BI(z4f7?gS8oB1DVv=qwcG0XKYk#;U{nN$9CFWojLn{Fr4 zM2c|Vlr-Q%6U}d$?gB;9Dmu`WRlM?+6!z=)W!?F{E*}(DFi+t`tLUFO&f>`JTz_Rp 
zqn0nFda3?^Lljni$o*bqI#tR;g%y)9?#iF-KSG-p_Y=%K-G9bQrJQkNHz;Y$08kfy z*Rn}{CX=JM7OTsR04xvYd^?5gKCmXgw)rS`lnOguyEgU*1KK)@2(`|)$-ZVnxjnkmc`gyME)ZCM?RJ@u z%cFzCq&GH=a}1o+I7~iB0q?A~pO%W|Y3kwBKlm|pdcHM8xU}+|(aDvHB<-VZWUli{ z?!pdlV2L?D?J4ahFJtM!qQ}jD)}-SUkLwscgX>g5oxU+^y^S~%z zdt2wqZaR_e9C3Z2y!8>seN*j**IoM*Yz}5PGKDQED-spvSb`q&0`qv`je`V+Z@VmO zgm@t?MO?yc`?vk808PtA6KZ4&E};?(W9%PvPx7EpMZ+g|vg$6NC8(;oqIY6ZP~ls3 z;HVzW$r@mFK^F`H)g^*iOQ3b8*^egoAP%os5M5vq$I@g;RmjoHL zyv**HiCf1B3|FN*9h_3HhcL8^Wkoz-R`t>s7E*sHh6m~EF{&koPaJiwo87$V{ViN6 zEu!D?iuU0rf$UQx^1$_yh@RwROSp?mh6SOrTXinxcL?9{*fv<+Jw#1^*$D94Vg$*n zc@I^J_zEr|F?5O3^%^iGy+vj({d%n}h1J7U(QT8LT0S8Y+PFC+wpFM2k2s2XgW(f| z0!xrwb&pjKc?la)3DHFj{|OD|J7hF3tkY-fJ|};5U}m`@gdpT6ACv)CV6h9Db(|96 zOh@f#LL|$6JZtd1H-{m>N`jDQ1Sr@;cOu zT7S}~75Fkof6^A-kyN0XjMnpx?znF?T7k^%r&MY z5s@1JSZx29X#DngoWF2aF3ISBB^JCYQ_j0Bg8v$z?kg~gZlELlz~QA+B>>s*74un} zq{aLZ^a;cMFW^;dw%4Za*U?rZF>)9@QKIMu>Vf2IDC0FuPcBD_%K=6&m&6rOFdYvO5$4=o4WYrS-3kY z8u%tqpOo0RiE=5J_?|ib5T=?#Y5C(7(zaW9-OKZ$X>70(tJC-bTN)Z`BpLC5S=%Gp z5hQG6Qdvi^0@2!N%|eD`UhB^*`k)VL%z};u%~t4i4|qNhg>&Pr_?a+O!C}zHpUL&Wp#FP|(;+5U4 zAmu4d+zXcKm26*n(~=!>&O1n)T{l&v$YKNCTt-$UhbfnCqr;}TpW#ayzeG5{6xhlr zy5h>VNM_?u^egA%LcPf5;Kk;xoJV(t)X;vPTQd~-QIfm2#%5|(RG0@Ja4b~|RJx+K z=N5t{pORqW2~{gu8-`ca5Z5k=T#<0jxg>ZCM;|YAj8WnpzF0K9S*j&C298C-gVg*g zmt$m!oTHqGxgm)k#jT%B9y(N?p%a=JaiYC6svO(ZG&mgWWqWtt&HHy7>|e<%8lr|s zd30bc7L9AN(bSW-oicqS!d3u`CD^Vso||+@c~}LenVOCh)6_b(UB29z_{=KVwH^^B zCfM;49{V{)2QkNL-f@nN|v9#a`;WyRvN58q=Mn9O>G9Ic5osruE$ z-N;G2%=Z!Td#gta9&ae^S2z`~(q`rx1svP4RAo29^5yt1f(JL3~6%3;(sR(k;bWgc5L&^ckXWirBH$rUKz5|89U1 z4J3+g0;;T7fi-{M*s1TuK1pbC`-WK`jlB;Sm93DE8_eF zU0>Y(*2ybPW5&RfoVP8vu`R&4J)!KDCU>aHle_Ns@@iLZ*0YcP_-0q^z(&7!TMCll zJ;qj_`z^N*lKsJ(2ddzN?K@T8cu-eH+#QBr!1O-ht-2pV)%G6T%e2q;ubkF9@2;S) z0sX@xx0FeInmmAa5&p4ab)y8sxwY~*r!vF6f*wy1(;==SJ*YBH#ES4=bggG=N8rF2 zlf(kZU!}NP^0RM$)rLKLUoi|ylSYiVZ>!4oq##ar-`*R@zIUaZCIxqW_!}gc2DK6z z!_)ur3jBh0u}rQb*dx5Dl9Xj#z<*JkcrN#tOfY`)}`S6DEL6hI}S;J#oQDS7szHmT#IpY=z2S{XrcVgSw 
zmKx)1vkDOcr=*5fPz^mxQVl+yaBP&Jl&jo&FxRXud( zmQSPCmz9{Lv%}UbPpkA({osa|`E!%lX?)>FNlL|GcXJ*rt!pX=Z9=hi&IboVY8URl z;BqN?iEWJ>Q26JvocWbnGg;qoLaf0(H$R(1^bAn?#8vi`5%jDiL&0oT6_Ba*yVd5O z8WfGYV+^)3a|$vUjaQ2>CJrpjY8^~Big29S0W0eB{ac~%pyF2(^-H8w6%122U55r5 zM3(e*I_+iq^{H8u)CB?2eGqzfMmOfw2LW$K8*N5Gk=a0@#0MPgElG8fQyF19c&8@> z=LMKw=+7%~`>cKsNb7&+KHC@hcEx)N+Y?p0*LmvI10CSY$UkBrYPc;Ccl&WK_ld$E zx?# zDb&q_GB&a97Md%vOo=!4Mma?99(FCM!~tN&l#-M@zL<;k5j4va-oHTkrv`|Dfr$8Q zEtBlB872>9Sr+R=71d-b?TtkehjwXa`QL|AeIhx5sTB}?eZ3k*VX~C!w_lFG5CK9> z&dBPSQJTd-m38lw4#}UY+KZm~%4vtyW1`%t3G_?hdj4j`>o~t;YNPX)Fw_xK)`0U^ znxEEcve+!{HDb1D>excR@HDVhuF_eMl)Q4st|&Ef$5JZ1=`4$p$0%ub3O!t_@U%9x ztE%UYsvBr@wABI{i6+}gOK;6=CydDtWd)Rm=dc(nnc8Q0iQKdCL?U@$0e*4rcP{iB z$LDj-88{&6=oVFVcssC8AVT;a4X`~btBA@g(0jLiK6gKH5)guZ(LyUcJ|XyIc(}W} zXX^c-a2s`xCzI?Gr+@r+;7h-B`vytTU%%ifK6G1RjoeS~?QGQvDzL!Dz|8EeGD?1} zXAN=|sEj1Ka}KAFYDV;0m2Z~M&SY_^l$-a{lGNe}Q9nF5Ks%1ENi*lQ2BcDPk2PMR zk}u)j`t)9>IonG1v~uDyzaKNI6Db@xj)Et9XKUEj{5a+;k7T|nXaBVWO2ai}%kqn; zlmfm|D!p?vvr}$*lg_zyXdzjM9l`nSA)992dEKm*v~&zw4gIJ^$GOo=-pp8jGPa^U z$O`lbr@B28y8MKTFupxA9ngkYzD24ePtT*dYX?VC6o+iUf^woKt|TPlE6-D0=ZlDDBfZ@b{*U8f@xkZ8U8d#e8lj&Mt8 zH05qIB}lyaj(vJ$FNN@q8-aEQb#^iSN^rrhr&9BlVzO@>Bz2w-)rfGM)| zpRumCW^=7w18j5}XCIe?UaZE)zQ;taRsNk*-JKbtV;fglFQ)EC(#|eA`?p#*)@1CM z2urPwQL6fBJYz6p;zy5L%|+QSN2HUo=x6Iw5o9+UNGb@;gQqzNI_C!^Tc=nV6MZ1_j;w}Bbtl=>YaBH@!rNDft@71IRAVjt3SBfxDHO74{~mvE zE!X4>MQm;-rC9hjeXUM8--C!eQX?Bij57XwCM}JZiVJKA{f8rZdjbzdWuxWyJ+F3c ziQ_7aHK0Iyv}{R5(KcV-74cHZR!XRuGhk2OwfSSXr95WOBipe53@4gy&SNZ#G2rK4 zCO*ofb22%I+(#p!qAAQ%J;1EGrQeb4&LeMB7^<3#76oCk;=i?Kkvgs4> z_U8U8YfF(!YeRZ)&6*b|){9p4Nkmq**0b83I%&1)I-ToQmwV!`+q_9BQoXk!o==;N z8_u7N8+W-pFEd3TsS##4cmNprn+xb)%iLL>js@ zOxtp;PNQp(|H{-Calq(gBqd6nb>wj3O?k6&-zs|Sw+Rb^rB#$T*FR}cCe-6wmZ4-( zJv3-BgCrH=P8w?Vs1EEy1L~j<6xa>}`$ML(vpcvZB035zX0EvDk$^HNeKNm-p|siq zwYa3*JiNus;h23si2j*f>|nLrzs-r`Ju?^a4LwI5BhhxOm*GCswqEMg^IZ*xdpzygdPS{C^ju7mmW(o$&W{fTug z4bgG1_B}GAO9qVAgmclj=^MRELY=XyZeW*+NoVWL&NU}xtpJ%YVRe>vR{WSp)!>F> 
z?cGS#YzwwRDU#;YvpYk@ZaJrF&U(;e=6aMX&2*3>6Mi$=p99hr#zg^lvlD}ILye*V z6>6R!kH)Ai7Aq4?6Z_QxaK$Cs!ggXNC<`3mn8sBo(c;8UwaEyw`m!#081{af zXwtf6CX}Nu%>q+ac+EM{y5^%U6@!T2e}{&^Lb%&GlvOwXk;QI>KdF{Yk^G_{ z&qngZdd(IZ`|!f@8)F7HoRdL@Ky}4tB};p-mQenCGtmgTX!&uDc!+sPg><*My)B4;^=HYnH^Rk$pQ|w)UCxvN=?hcp zUK+MXZ{*n7l>GV`n@Z|J!M+nmf;w$Vi^)VUBCqOfzVfvQ$^eoK{2kh+B#zo1DOBws zsBi{?SO>3o9|aP*G7oyhQnOY?N=O^dvc)qM0+tBVHM6cv841L!?2xUNPmx(ryD!Z% zgh0t|5@4fMri`E)cCNL|YzkG%iP%)=ct3PEBC0`p6xrQ%D5e zKg~lS?jh5c_MVffv+>D~1~-@SwQctObWF#%U`fhK{w1qzK6h;4^HZ9mq`hsv@7SzZ zy5SV_B6`TSDq>P@7;}}CTqh`2KOj)jhLP881Gt*KF%vh;v()L_J1rKw)zGX>&}2bJ zx~$bx<&~XvWIQbAOpPq|8hKKKG7YzPyv#*Y5Okk%gu|E}q_09N{kXZ=dv?aU)Ipr&4=i&?jHb962}@!0CS z!%8-vX#5Xxvt^48R+j9#o~T79!f zbKXJiLp8bFZ_w@#L7q{qC;;bmrf6$CPjR5!y+-P|Abb}w+}yxUi0uFC>#T#K?AkxR z5=(c7Al)Ec3rKef(jhImk|H2WNwWw`w@4$g5&{xRtw>1-f*>qiQlhR3@-F(kkIOsr z`?;-_LjO*gl}f3;}_}TbHn_|IEz>Q^X`z(d_DdTKOQqRzD10sxK0TH`y1owvQz{;Y_9RSY8s>| z92^t8A2aQYx!;siHOX?Ct-GX!>(kJryF-~H<8q(jRljnB<#@)6uf0T?VaH|2z_dy% zOHEzkJ>J=H3<)-K9mF|*N0uXmL4x%JW z<0Zh>An!Oq=#{KJnK_0Ivtvh6>wH)-r@_`SQAF*wU-+g;_LO|FPd%Z-qlS2}04n30 zyB%5@4B*l<=7u`{z+UjX4TL0R%8A%eAK&zKBQc`Z- z$;Qw^Y(-X@{jxeB}-}U2NEt_?A1f+zvLV1{6)^YRIR=X&@4ab_^Q( z(QAjiDP==(_va$wZsv`WD!SoQb;O}>BqBaH&16Lyco+K%%I9&?F%EJziDTG-NcVA}`I`a1 zg+b6s2_c@*DAG@He^BKF-ob)k2kCFSHByV*X0WTsGPmq#IO>EvCnx8!;5t230gCOWigIc&NzY)?TW0N6{1%mB z`j_Cgm8WU}b}Kaz^chpYdCrpN z3B99fXS58qM$rQ{Z4;vc4)?YYV&m0*sCsmAo9Gq1j~Z0xK!VRPKORu!tf?C-QoNxy zR2$(hczQ_DhZ-qX4I+Iz$1PlMGD~>9L}`ztP3@%+7K>ZgOp?5%jAVJ<(m?mu%?Rtv zi6tzSo}W0bKOu{sU82JmzZw2Q*;;wG3yffOSJ6@Q3|ofM=lc&xD_P%5zh8K;beiTo zQ+%M`5>)*KT~wOw-g`H1B46`pxNPK8j#}oNRn};vYP{6$>t(L7(CJJ*&tL^x+@OhK ze1#oH63zChQJoaH9M#iR1?Ew2uNAiwTrH%lB6#&P`6NW9m#{ngl)zNPIN(xUC)q5 zwV#-xXo1q=mz325QedT-?XZ~4aP=E~I8-sC@MTmk9#RR~j#dOoj%hBB;MdtxQV2^2^X!kFKA6ze&^i3O(b5LP8{-bJ+#?HSz|;VU;m`HWjVYk zn8m75eR^%#rZ)<^zJ$npvI1N1BN||6PBrLxXQ{DqzF7%Cg`g*emgPpg!u}=%5iR`tRiA0FmO}c~u95WU3QNDs|yDQmIx=v(l zVUp3D0)Sg@LdP@7*s@+5YY3i+Tr?Pu72;mw5CZ@kG?)#>X2SNR2IK#L>HgbeV8V1E 
zS1I!vfmmWNAXa2LB`pDg?j+|8v}4QkjJ&usGJUSoKd#(tcXEEltj>y#ytdBD#*-<4 zGGW?^>&`8~*7**w(&N?BQe8D%xb=$?rKo=K6=G3!&1m?bRZgzM9M;?F5Z@?s8DHe}+|>ZK$_g5j5ab zzMD5y?Iq{nLwA=o)f!88zValf%mjOTw2lwa{4*uT`RE{?>uJXN9{WAs1&(ER1H>pL zzTjKrLy?I5MlIDtIac<{CTQ-oajf4JA+tuFMqcWo^_b~^CQ_2{_L^zqCUb^9iUO$3(XR+=&J%qZv-7fMUc|$1ZBN9uqdc;X0pdM1y z@F2Da>Rk?OjSH1{+O9~h{`QbIGO_&z;cX$6$&j3R)h;B9k`LSIqHSHutt*4gb_n1! zMckFCGzWsGM5C>XOhsLr2yRH^jU*(!F^8v=*>`v@Iam4j;AVKeFdLg})|gz%7ZN6E zNtADGaxqdTn=xP^)*GwCFZ*t#sUSyw5zj?Z*u4FqhF@5NsQQxh>o2Lm|8VGXWjll1aWOksT zn<#&R2X-oBXwSU;Ad^lT{xtc?h*4#l#MzcpiduS>|48cdsqu;{`-?)$a?am_vY*n+ zoxq7<Vm&alyPP;9KvxxE_!?8;IC z3g(0%<^9oeEWLfnZpE32x@jvyve_sOIhKLAx!jx019~}7%dJx>?XxN<;t)>Y;jlkK zK?whjgFEZ2PlMnI3?nSAFt@FsIFK-u<)^9Y#Qza>zKVrHG+?yq9`3cxLFX0+er@Xitb$@eXFiMh*LYYq0Roj;$!kF zSGIc+MbM#b&@hcv3Zqo}{ob~1Up99F0fE((#4HXd1=HSjF*K$jSS|w0(SVdMIG1Lo zl8Y7X9qPABVikKf2XoIS>{U;nftcfDbZ-D^eE;xtcRWeIX<26nfKm^re5+fp=RH3w z=6CaVM_s{NC|e5kJ3FiJ9&{1bx9%g8ZR2sTtJd>z^r3$3ciVPGG*&>(qR3$CvjaSHQ*7Y;f0+K!z>Xh*ZD^~2wa&%7)z}XWxo|{2^DByG{3o$4WUfv3-9E2MwqY8 z%Gtqa$}H!)hLZiQ`oONl-80dlh65vdk4M!uew-M-ON{4tZepl-^xCYM4x!kQiHp4BtRr~Y<^G{THuMWzE81ItKv}qbKRe!;erlqx`sQz-~J8K_Vd=fG_ zJxR4d$xy1rVtr3gL7?8ZP}7OWemD9Bh~s@B0@5vRaXm&#eS74I?!-R+*D>XGe`H7d zz&?MUyaqX1d*DGim1985xkeiUpSyyw_Ad(Ht==v@GCS0LRY3I z92TawZV4XLoY0DYXjM$T_gNnG5%dv4E8@?m#=;6p8n(H)8>hpfBrTr~QoJb`HKUogwy8IY$MgQshP=IMG6=c8Uc zDyLrS5u-Jiow+Tl(#FtIrKZr%Xpt`uXAZUS@fK_`ptY86*MpwkbG*9G@r|SEmFz-P z!)*oOV->mBls27hb2tMSatNV6S5puOs^wLWw&)FYl59=Vf6o@=n0-eXwzW>vz&hHy zk+N^MM@5E=2qyMk;adZYCLKM)P#m1@U0W1vdaeN{I>QfLAQc48- zu0L4}2MuQe6D*~pR%80=lw0S>neG?BL)xgrB<4P34riU?eQu|&wLKqvBMr~n6w`%aEkp!mn$nuLeav1pp^0Gg(&`43L_mYe|^rz`mfr~AeC zADphxC7kYeg(yR=e{s53u&4pl0DOFWzzPnUrz@(ZLXh%cL5K5!3~C1!rloo9;>okq zRrx&*0MLyNV?irm{*HWHSS#)|n4vZ?>%~NJFAwz@^h5w!!*h9J4@_8Fh?OyFL5C~y z`PLgE0N|V&06-T&7y}6l(dPV}5Yt$K8Djf3*Ae&~ z7!m?&T0n1L0KHSzOMyg6u;LqR7l8y94ivu)9bi2=G^7%0Xf@GqgMzyb0e4^lI{a7Q zuworLh*)=^4yN$|=cV;7P*`}Y-JXx%Ys#up{t-_fRd$vr{uf9w-5 
z7g|KKk1^%+m)|v~5ynD)=*X}e+Dwp(^q2~(i!1EEW5gXJ>XPstGxLQO3sVerp~d<; zKKTDXFGWN6zsfAk1uryIm^bd1kK!MIv8mrj5&wOZi`#h2GWzmSq|-5?E?v!IX1*x6 nG5-vgzXqfTV~xLF7JtfbI)5WwT;RpIDbYVFTmWFN;^OH)afMpy diff --git a/storage/connect/JdbcInterface.java b/storage/connect/JdbcInterface.java index a1b1360e6ea..72ee4ab0d39 100644 --- a/storage/connect/JdbcInterface.java +++ b/storage/connect/JdbcInterface.java @@ -1,10 +1,22 @@ package wrappers; -import java.math.*; -import java.sql.*; +import java.math.BigDecimal; +import java.sql.Connection; +import java.sql.DatabaseMetaData; +import java.sql.Date; +import java.sql.Driver; +import java.sql.DriverManager; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.ResultSetMetaData; +import java.sql.SQLException; +import java.sql.Statement; +import java.sql.Time; +import java.sql.Timestamp; import java.util.Collections; import java.util.Hashtable; import java.util.List; +import java.util.UUID; import javax.sql.DataSource; @@ -223,6 +235,24 @@ public class JdbcInterface { } // end of SetTimestampParm + public void SetUuidParm(int i, String s) { + try { + UUID uuid; + + if (s == null) + uuid = null; + else if (s.isEmpty()) + uuid = UUID.randomUUID(); + else + uuid = UUID.fromString(s); + + pstmt.setObject(i, uuid); + } catch (Exception e) { + SetErrmsg(e); + } // end try/catch + + } // end of SetUuidParm + public int SetNullParm(int i, int typ) { int rc = 0; @@ -481,6 +511,8 @@ public class JdbcInterface { System.out.println("Executing query '" + query + "'"); try { + if (rs != null) + rs.close(); rs = stmt.executeQuery(query); rsmd = rs.getMetaData(); ncol = rsmd.getColumnCount(); @@ -708,7 +740,7 @@ public class JdbcInterface { return 0; } // end of TimestampField - public Object ObjectField(int n, String name) { + public Object ObjectField(int n, String name) { if (rs == null) { System.out.println("No result set"); } else try { @@ -720,6 +752,22 @@ public class JdbcInterface { return null; } // end of ObjectField + 
public String UuidField(int n, String name) { + Object job; + + if (rs == null) { + System.out.println("No result set"); + } else + try { + job = (n > 0) ? rs.getObject(n) : rs.getObject(name); + return job.toString(); + } catch (SQLException se) { + SetErrmsg(se); + } // end try/catch + + return null; + } // end of UuidField + public int GetDrivers(String[] s, int mxs) { int n = 0; List drivers = Collections.list(DriverManager.getDrivers()); diff --git a/storage/connect/PostgresqlInterface.java b/storage/connect/PostgresqlInterface.java index adce0616a1b..9f611eeb23b 100644 --- a/storage/connect/PostgresqlInterface.java +++ b/storage/connect/PostgresqlInterface.java @@ -1,9 +1,10 @@ package wrappers; -import java.sql.*; +import java.sql.SQLException; import java.util.Hashtable; import javax.sql.DataSource; + import org.postgresql.jdbc2.optional.PoolingDataSource; public class PostgresqlInterface extends JdbcInterface { @@ -19,7 +20,7 @@ public class PostgresqlInterface extends JdbcInterface { } // end of constructor - @Override + @Override public int JdbcConnect(String[] parms, int fsize, boolean scrollable) { int rc = 0; String url = parms[1]; diff --git a/storage/connect/global.h b/storage/connect/global.h index d8d03f606ba..472d09408c3 100644 --- a/storage/connect/global.h +++ b/storage/connect/global.h @@ -220,7 +220,6 @@ DllExport BOOL PlugIsAbsolutePath(LPCSTR path); DllExport bool AllocSarea(PGLOBAL, uint); DllExport void FreeSarea(PGLOBAL); DllExport BOOL PlugSubSet(PGLOBAL, void *, uint); - void *PlugSubAlloc(PGLOBAL, void *, size_t); // Does throw DllExport char *PlugDup(PGLOBAL g, const char *str); DllExport void *MakePtr(void *, OFFSET); DllExport void htrc(char const *fmt, ...); @@ -231,4 +230,9 @@ DllExport uint GetTraceValue(void); } // extern "C" #endif +/***********************************************************************/ +/* Non exported routine declarations. 
*/ +/***********************************************************************/ +void *PlugSubAlloc(PGLOBAL, void *, size_t); // Does throw + /*-------------------------- End of Global.H --------------------------*/ diff --git a/storage/connect/ha_connect.cc b/storage/connect/ha_connect.cc index 2efed93ee2a..c878bf2a7de 100644 --- a/storage/connect/ha_connect.cc +++ b/storage/connect/ha_connect.cc @@ -174,9 +174,9 @@ #define JSONMAX 10 // JSON Default max grp size extern "C" { - char version[]= "Version 1.06.0006 February 02, 2018"; + char version[]= "Version 1.06.0007 March 11, 2018"; #if defined(__WIN__) - char compver[]= "Version 1.06.0006 " __DATE__ " " __TIME__; + char compver[]= "Version 1.06.0007 " __DATE__ " " __TIME__; char slash= '\\'; #else // !__WIN__ char slash= '/'; @@ -288,11 +288,16 @@ static MYSQL_THDVAR_SET( 0, // def (NO) &xtrace_typelib); // typelib - // Getting exact info values +// Getting exact info values static MYSQL_THDVAR_BOOL(exact_info, PLUGIN_VAR_RQCMDARG, "Getting exact info values", NULL, NULL, 0); +// Enabling cond_push +static MYSQL_THDVAR_BOOL(cond_push, PLUGIN_VAR_RQCMDARG, + "Enabling cond_push", + NULL, NULL, 1); // YES by default + /** Temporary file usage: no: Not using temporary file @@ -427,6 +432,7 @@ handlerton *connect_hton= NULL; uint GetTraceValue(void) {return (uint)(connect_hton ? 
THDVAR(current_thd, xtrace) : 0);} bool ExactInfo(void) {return THDVAR(current_thd, exact_info);} +bool CondPushEnabled(void) {return THDVAR(current_thd, cond_push);} USETEMP UseTemp(void) {return (USETEMP)THDVAR(current_thd, use_tempfile);} int GetConvSize(void) {return THDVAR(current_thd, conv_size);} TYPCONV GetTypeConv(void) {return (TYPCONV)THDVAR(current_thd, type_conv);} @@ -3196,7 +3202,7 @@ const COND *ha_connect::cond_push(const COND *cond) { DBUG_ENTER("ha_connect::cond_push"); - if (tdbp) { + if (tdbp && CondPushEnabled()) { PGLOBAL& g= xp->g; AMT tty= tdbp->GetAmType(); bool x= (tty == TYPE_AM_MYX || tty == TYPE_AM_XDBC); @@ -7243,7 +7249,8 @@ static struct st_mysql_sys_var* connect_system_variables[]= { #if defined(JAVA_SUPPORT) || defined(CMGO_SUPPORT) MYSQL_SYSVAR(enable_mongo), #endif // JAVA_SUPPORT || CMGO_SUPPORT -NULL + MYSQL_SYSVAR(cond_push), + NULL }; maria_declare_plugin(connect) @@ -7256,10 +7263,10 @@ maria_declare_plugin(connect) PLUGIN_LICENSE_GPL, connect_init_func, /* Plugin Init */ connect_done_func, /* Plugin Deinit */ - 0x0106, /* version number (1.05) */ + 0x0107, /* version number (1.05) */ NULL, /* status variables */ connect_system_variables, /* system variables */ - "1.06.0006", /* string version */ + "1.06.0007", /* string version */ MariaDB_PLUGIN_MATURITY_STABLE /* maturity */ } maria_declare_plugin_end; diff --git a/storage/connect/jdbconn.cpp b/storage/connect/jdbconn.cpp index ff84c75b67f..33414ca74c2 100644 --- a/storage/connect/jdbconn.cpp +++ b/storage/connect/jdbconn.cpp @@ -1,7 +1,7 @@ /************ Jdbconn C++ Functions Source Code File (.CPP) ************/ -/* Name: JDBCONN.CPP Version 1.1 */ +/* Name: JDBCONN.CPP Version 1.2 */ /* */ -/* (C) Copyright to the author Olivier BERTRAND 2016-2017 */ +/* (C) Copyright to the author Olivier BERTRAND 2016-2018 */ /* */ /* This file contains the JDBC connection classes functions. 
*/ /***********************************************************************/ @@ -116,10 +116,26 @@ int TranslateJDBCType(int stp, char *tn, int prec, int& len, char& v) return TYPE_ERROR; else len = MY_MIN(abs(len), GetConvSize()); + // Pass through case 12: // VARCHAR + if (tn && !stricmp(tn, "TEXT")) + // Postgresql returns 12 for TEXT + if (GetTypeConv() == TPC_NO) + return TYPE_ERROR; + + // Postgresql can return this + if (len == 0x7FFFFFFF) + len = GetConvSize(); + + // Pass through case -9: // NVARCHAR (unicode) + // Postgresql can return this when size is unknown + if (len == 0x7FFFFFFF) + len = GetConvSize(); + v = 'V'; + // Pass through case 1: // CHAR case -15: // NCHAR (unicode) case -8: // ROWID @@ -171,6 +187,14 @@ int TranslateJDBCType(int stp, char *tn, int prec, int& len, char& v) case -5: // BIGINT type = TYPE_BIGINT; break; + case 1111: // UNKNOWN or UUID + if (!tn || !stricmp(tn, "UUID")) { + type = TYPE_STRING; + len = 36; + break; + } // endif tn + + // Pass through case 0: // NULL case -2: // BINARY case -4: // LONGVARBINARY @@ -192,6 +216,104 @@ int TranslateJDBCType(int stp, char *tn, int prec, int& len, char& v) return type; } // end of TranslateJDBCType + /***********************************************************************/ + /* A helper class to split an optionally qualified table name into */ + /* components. 
*/ + /* These formats are understood: */ + /* "CatalogName.SchemaName.TableName" */ + /* "SchemaName.TableName" */ + /* "TableName" */ + /***********************************************************************/ +class SQLQualifiedName { + static const uint max_parts = 3; // Catalog.Schema.Table + MYSQL_LEX_STRING m_part[max_parts]; + char m_buf[512]; + + void lex_string_set(MYSQL_LEX_STRING *S, char *str, size_t length) + { + S->str = str; + S->length = length; + } // end of lex_string_set + + void lex_string_shorten_down(MYSQL_LEX_STRING *S, size_t offs) + { + DBUG_ASSERT(offs <= S->length); + S->str += offs; + S->length -= offs; + } // end of lex_string_shorten_down + + /*********************************************************************/ + /* Find the rightmost '.' delimiter and return the length */ + /* of the qualifier, including the rightmost '.' delimier. */ + /* For example, for the string {"a.b.c",5} it will return 4, */ + /* which is the length of the qualifier "a.b." */ + /*********************************************************************/ + size_t lex_string_find_qualifier(MYSQL_LEX_STRING *S) + { + size_t i; + for (i = S->length; i > 0; i--) + { + if (S->str[i - 1] == '.') + { + S->str[i - 1] = '\0'; + return i; + } + } + return 0; + } // end of lex_string_find_qualifier + +public: + /*********************************************************************/ + /* Initialize to the given optionally qualified name. */ + /* NULL pointer in "name" is supported. */ + /* name qualifier has precedence over schema. 
*/ + /*********************************************************************/ + SQLQualifiedName(JCATPARM *cap) + { + const char *name = (const char *)cap->Tab; + char *db = (char *)cap->DB; + size_t len, i; + + // Initialize the parts + for (i = 0; i < max_parts; i++) + lex_string_set(&m_part[i], NULL, 0); + + if (name) { + // Initialize the first (rightmost) part + lex_string_set(&m_part[0], m_buf, + strmake(m_buf, name, sizeof(m_buf) - 1) - m_buf); + + // Initialize the other parts, if exist. + for (i = 1; i < max_parts; i++) { + if (!(len = lex_string_find_qualifier(&m_part[i - 1]))) + break; + + lex_string_set(&m_part[i], m_part[i - 1].str, len - 1); + lex_string_shorten_down(&m_part[i - 1], len); + } // endfor i + + } // endif name + + // If it was not specified, set schema as the passed db name + if (db && !m_part[1].length) + lex_string_set(&m_part[1], db, strlen(db)); + + } // end of SQLQualifiedName + + char *ptr(uint i) + { + DBUG_ASSERT(i < max_parts); + return (char *)(m_part[i].length ? m_part[i].str : NULL); + } // end of ptr + + size_t length(uint i) + { + DBUG_ASSERT(i < max_parts); + return m_part[i].length; + } // end of length + +}; // end of class SQLQualifiedName + /***********************************************************************/ /* Allocate the structure used to refer to the result set. 
*/ /***********************************************************************/ @@ -519,7 +641,7 @@ JDBConn::JDBConn(PGLOBAL g, PCSZ wrapper) : JAVAConn(g, wrapper) xqid = xuid = xid = grs = readid = fetchid = typid = errid = nullptr; prepid = xpid = pcid = nullptr; chrfldid = intfldid = dblfldid = fltfldid = bigfldid = nullptr; - objfldid = datfldid = timfldid = tspfldid = nullptr; + objfldid = datfldid = timfldid = tspfldid = uidfldid = nullptr; DiscFunc = "JdbcDisconnect"; m_Ncol = 0; m_Aff = 0; @@ -535,12 +657,84 @@ JDBConn::JDBConn(PGLOBAL g, PCSZ wrapper) : JAVAConn(g, wrapper) m_IDQuoteChar[1] = 0; } // end of JDBConn -//JDBConn::~JDBConn() -// { -//if (Connected()) -// EndCom(); +/***********************************************************************/ +/* Search for UUID columns. */ +/***********************************************************************/ +bool JDBConn::SetUUID(PGLOBAL g, PTDBJDBC tjp) +{ + int ncol, ctyp; + bool brc = true; + PCSZ fnc = "GetColumns"; + PCOL colp; + JCATPARM *cap; + //jint jtyp; + jboolean rc = false; + jobjectArray parms; + jmethodID catid = nullptr; -// } // end of ~JDBConn + if (gmID(g, catid, fnc, "([Ljava/lang/String;)I")) + return true; + else if (gmID(g, intfldid, "IntField", "(ILjava/lang/String;)I")) + return true; + else if (gmID(g, readid, "ReadNext", "()I")) + return true; + + cap = AllocCatInfo(g, JCAT_COL, tjp->Schema, tjp->TableName, NULL); + SQLQualifiedName name(cap); + + // Build the java string array + parms = env->NewObjectArray(4, env->FindClass("java/lang/String"), NULL); + env->SetObjectArrayElement(parms, 0, env->NewStringUTF(name.ptr(2))); + env->SetObjectArrayElement(parms, 1, env->NewStringUTF(name.ptr(1))); + env->SetObjectArrayElement(parms, 2, env->NewStringUTF(name.ptr(0))); + + for (colp = tjp->GetColumns(); colp; colp = colp->GetNext()) { + env->SetObjectArrayElement(parms, 3, env->NewStringUTF(colp->GetName())); + ncol = env->CallIntMethod(job, catid, parms); + + if (Check(ncol)) { + 
sprintf(g->Message, "%s: %s", fnc, Msg); + goto err; + } // endif Check + + rc = env->CallBooleanMethod(job, readid); + + if (Check(rc)) { + sprintf(g->Message, "ReadNext: %s", Msg); + goto err; + } else if (rc == 0) { + sprintf(g->Message, "table %s does not exist", tjp->TableName); + goto err; + } // endif rc + + // Returns 666 is case of error + //jtyp = env->CallIntMethod(job, typid, 5, nullptr); + + //if (Check((jtyp == 666) ? -1 : 1)) { + // sprintf(g->Message, "Getting jtyp: %s", Msg); + // goto err; + //} // endif ctyp + + ctyp = (int)env->CallIntMethod(job, intfldid, 5, nullptr); + + if (Check(ctyp)) { + sprintf(g->Message, "Getting ctyp: %s", Msg); + goto err; + } // endif ctyp + + if (ctyp == 1111) + ((PJDBCCOL)colp)->uuid = true; + + } // endfor colp + + // All is Ok + brc = false; + + err: + // Not used anymore + env->DeleteLocalRef(parms); + return brc; +} // end of SetUUID /***********************************************************************/ /* Utility routine. */ @@ -770,6 +964,7 @@ int JDBConn::Rewind(PCSZ sql) /***********************************************************************/ void JDBConn::SetColumnValue(int rank, PSZ name, PVAL val) { + const char *field; PGLOBAL& g = m_G; jint ctyp; jstring cn, jn = nullptr; @@ -793,6 +988,11 @@ void JDBConn::SetColumnValue(int rank, PSZ name, PVAL val) if (!gmID(g, objfldid, "ObjectField", "(ILjava/lang/String;)Ljava/lang/Object;")) { jb = env->CallObjectMethod(job, objfldid, (jint)rank, jn); + if (Check(0)) { + sprintf(g->Message, "Getting jp: %s", Msg); + throw (int)TYPE_AM_JDBC; + } // endif Check + if (jb == nullptr) { val->Reset(); val->SetNull(true); @@ -818,7 +1018,7 @@ void JDBConn::SetColumnValue(int rank, PSZ name, PVAL val) cn = nullptr; if (cn) { - const char *field = env->GetStringUTFChars(cn, (jboolean)false); + field = env->GetStringUTFChars(cn, (jboolean)false); val->SetValue_psz((PSZ)field); } else val->Reset(); @@ -885,6 +1085,19 @@ void JDBConn::SetColumnValue(int rank, PSZ name, 
PVAL val) break; case java.sql.Types.BOOLEAN: System.out.print(jdi.BooleanField(i)); */ + case 1111: // UUID + if (!gmID(g, uidfldid, "UuidField", "(ILjava/lang/String;)Ljava/lang/String;")) + cn = (jstring)env->CallObjectMethod(job, uidfldid, (jint)rank, jn); + else + cn = nullptr; + + if (cn) { + const char *field = env->GetStringUTFChars(cn, (jboolean)false); + val->SetValue_psz((PSZ)field); + } else + val->Reset(); + + break; case 0: // NULL val->SetNull(true); // passthru @@ -1055,7 +1268,14 @@ bool JDBConn::SetParam(JDBCCOL *colp) if (gmID(g, setid, "SetNullParm", "(II)I")) return true; - jrc = env->CallIntMethod(job, setid, i, (jint)GetJDBCType(val->GetType())); + jrc = env->CallIntMethod(job, setid, i, + (colp->uuid ? 1111 : (jint)GetJDBCType(val->GetType()))); + } else if (colp->uuid) { + if (gmID(g, setid, "SetUuidParm", "(ILjava/lang/String;)V")) + return true; + + jst = env->NewStringUTF(val->GetCharValue()); + env->CallVoidMethod(job, setid, i, jst); } else switch (val->GetType()) { case TYPE_STRING: if (gmID(g, setid, "SetStringParm", "(ILjava/lang/String;)V")) @@ -1274,105 +1494,6 @@ bool JDBConn::SetParam(JDBCCOL *colp) return qrp; } // end of GetMetaData - /***********************************************************************/ - /* A helper class to split an optionally qualified table name into */ - /* components. 
*/ - /* These formats are understood: */ - /* "CatalogName.SchemaName.TableName" */ - /* "SchemaName.TableName" */ - /* "TableName" */ - /***********************************************************************/ - class SQLQualifiedName - { - static const uint max_parts= 3; // Catalog.Schema.Table - MYSQL_LEX_STRING m_part[max_parts]; - char m_buf[512]; - - void lex_string_set(MYSQL_LEX_STRING *S, char *str, size_t length) - { - S->str= str; - S->length= length; - } // end of lex_string_set - - void lex_string_shorten_down(MYSQL_LEX_STRING *S, size_t offs) - { - DBUG_ASSERT(offs <= S->length); - S->str+= offs; - S->length-= offs; - } // end of lex_string_shorten_down - - /*********************************************************************/ - /* Find the rightmost '.' delimiter and return the length */ - /* of the qualifier, including the rightmost '.' delimier. */ - /* For example, for the string {"a.b.c",5} it will return 4, */ - /* which is the length of the qualifier "a.b." */ - /*********************************************************************/ - size_t lex_string_find_qualifier(MYSQL_LEX_STRING *S) - { - size_t i; - for (i= S->length; i > 0; i--) - { - if (S->str[i - 1] == '.') - { - S->str[i - 1]= '\0'; - return i; - } - } - return 0; - } // end of lex_string_find_qualifier - - public: - /*********************************************************************/ - /* Initialize to the given optionally qualified name. */ - /* NULL pointer in "name" is supported. */ - /* name qualifier has precedence over schema. 
*/ - /*********************************************************************/ - SQLQualifiedName(JCATPARM *cap) - { - const char *name = (const char *)cap->Tab; - char *db = (char *)cap->DB; - size_t len, i; - - // Initialize the parts - for (i = 0; i < max_parts; i++) - lex_string_set(&m_part[i], NULL, 0); - - if (name) { - // Initialize the first (rightmost) part - lex_string_set(&m_part[0], m_buf, - strmake(m_buf, name, sizeof(m_buf) - 1) - m_buf); - - // Initialize the other parts, if exist. - for (i= 1; i < max_parts; i++) { - if (!(len= lex_string_find_qualifier(&m_part[i - 1]))) - break; - - lex_string_set(&m_part[i], m_part[i - 1].str, len - 1); - lex_string_shorten_down(&m_part[i - 1], len); - } // endfor i - - } // endif name - - // If it was not specified, set schema as the passed db name - if (db && !m_part[1].length) - lex_string_set(&m_part[1], db, strlen(db)); - - } // end of SQLQualifiedName - - char *ptr(uint i) - { - DBUG_ASSERT(i < max_parts); - return (char *)(m_part[i].length ? m_part[i].str : NULL); - } // end of ptr - - size_t length(uint i) - { - DBUG_ASSERT(i < max_parts); - return m_part[i].length; - } // end of length - - }; // end of class SQLQualifiedName - /***********************************************************************/ /* Allocate recset and call SQLTables, SQLColumns or SQLPrimaryKeys. 
*/ /***********************************************************************/ diff --git a/storage/connect/jdbconn.h b/storage/connect/jdbconn.h index 56f318d238b..0c36cccadcf 100644 --- a/storage/connect/jdbconn.h +++ b/storage/connect/jdbconn.h @@ -29,6 +29,7 @@ public: // Attributes public: char *GetQuoteChar(void) { return m_IDQuoteChar; } + bool SetUUID(PGLOBAL g, PTDBJDBC tjp); virtual int GetMaxValue(int infotype); public: @@ -58,13 +59,6 @@ public: protected: // Members -#if 0 - JavaVM *jvm; // Pointer to the JVM (Java Virtual Machine) - JNIEnv *env; // Pointer to native interface - jclass jdi; // Pointer to the java wrapper class - jobject job; // The java wrapper class object - jmethodID errid; // The GetErrmsg method ID -#endif // 0 jmethodID xqid; // The ExecuteQuery method ID jmethodID xuid; // The ExecuteUpdate method ID jmethodID xid; // The Execute method ID @@ -84,8 +78,7 @@ protected: jmethodID timfldid; // The TimeField method ID jmethodID tspfldid; // The TimestampField method ID jmethodID bigfldid; // The BigintField method ID -// PCSZ Msg; -// PCSZ m_Wrap; + jmethodID uidfldid; // The UuidField method ID char m_IDQuoteChar[2]; PCSZ m_Pwd; int m_Ncol; diff --git a/storage/connect/mysql-test/connect/r/jdbc_postgresql.result b/storage/connect/mysql-test/connect/r/jdbc_postgresql.result index 6d77d79d5d3..7969672dd66 100644 --- a/storage/connect/mysql-test/connect/r/jdbc_postgresql.result +++ b/storage/connect/mysql-test/connect/r/jdbc_postgresql.result @@ -1,9 +1,11 @@ +SET GLOBAL connect_class_path='C:/MariaDB-10.2/MariaDB/storage/connect/mysql-test/connect/std_data/JavaWrappers.jar;C:/Jconnectors/postgresql-42.2.1.jar'; CREATE TABLE t2 ( command varchar(128) not null, number int(5) not null flag=1, message varchar(255) flag=2) -ENGINE=CONNECT TABLE_TYPE=JDBC CONNECTION='jdbc:postgresql://localhost/mtr' -OPTION_LIST='User=mtr,Password=mtr,Schema=public,Execsrc=1'; +ENGINE=CONNECT TABLE_TYPE=JDBC 
+CONNECTION='jdbc:postgresql://localhost/test?user=postgres&password=tinono' +OPTION_LIST='Execsrc=1'; SELECT * FROM t2 WHERE command='drop table employee'; command number message drop table employee 0 Execute: org.postgresql.util.PSQLException: ERREUR: la table « employee » n'existe pas @@ -14,17 +16,18 @@ SELECT * FROM t2 WHERE command = "insert into employee values(4567,'Johnson', 'E command number message insert into employee values(4567,'Johnson', 'Engineer', 12560.50) 1 Affected rows CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=JDBC CATFUNC=tables -CONNECTION='jdbc:postgresql://localhost/mtr' -OPTION_LIST='User=mtr,Password=mtr,Schema=public,Tabtype=TABLE,Maxres=10'; +CONNECTION='jdbc:postgresql://localhost/test?user=postgres&password=tinono' +OPTION_LIST='Tabtype=TABLE,Maxres=10'; SELECT * FROM t1; Table_Cat Table_Schema Table_Name Table_Type Remark - public employee TABLE NULL - public t1 TABLE NULL - public t2 TABLE NULL +NULL public employee TABLE NULL +NULL public t1 TABLE NULL +NULL public t2 TABLE NULL +NULL public tchar TABLE NULL +NULL public testuuid TABLE NULL DROP TABLE t1; -CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=JDBC CATFUNC=columns -CONNECTION='jdbc:postgresql://localhost/mtr' tabname=employee -OPTION_LIST='User=mtr,Password=mtr,Maxres=10'; +CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=JDBC tabname=employee CATFUNC=columns +CONNECTION='jdbc:postgresql://localhost/test?user=postgres&password=tinono'; SELECT * FROM t1; Table_Cat Table_Schema Table_Name Column_Name Data_Type Type_Name Column_Size Buffer_Length Decimal_Digits Radix Nullable Remarks NULL public employee id 4 int4 10 0 0 10 0 NULL @@ -34,13 +37,14 @@ NULL public employee salary 2 numeric 8 0 2 10 1 NULL DROP TABLE t1; CREATE SERVER 'postgresql' FOREIGN DATA WRAPPER 'postgresql' OPTIONS ( HOST 'localhost', -DATABASE 'mtr', -USER 'mtr', -PASSWORD 'mtr', +DATABASE 'test', +USER 'postgres', +PASSWORD 'tinono', PORT 0, SOCKET '', OWNER 'root'); -CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=JDBC 
CONNECTION='postgresql/public.employee'; +CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=JDBC +CONNECTION='postgresql/public.employee'; SELECT * FROM t1; id name title salary 4567 Johnson Engineer 12560.50 @@ -60,6 +64,3 @@ SELECT * FROM t2 WHERE command='drop table employee'; command number message drop table employee 0 Affected rows DROP TABLE t2; -SET GLOBAL connect_jvm_path=NULL; -SET GLOBAL connect_class_path=NULL; -SET GLOBAL time_zone = SYSTEM; diff --git a/storage/connect/mysql-test/connect/std_data/JavaWrappers.jar b/storage/connect/mysql-test/connect/std_data/JavaWrappers.jar new file mode 100644 index 0000000000000000000000000000000000000000..33b29e7685b66ee03d1b3f5921f2ba32d48f76b3 GIT binary patch literal 19192 zcma&NbBrfZyQn*D+n%=lYumPM+qP}nJ#A0hwr$(yneU$Do_+U8c5daZq>@T%W&QKi zi}lD$fq$h>>Be+2wXUJ3%T!4LQv=ijHW{@wEb?*e82Z2_WxZ`~XX z?Cnh)o#^H5oSe-ZO`II8C2XBd98C?3Oz4cP4V;`(RJ4?o)KI@!gD>N8;~iP3#H-T~ zs}w8~$RbsRheU;us#Z5793dg(9QQdO?3(|M@NzBG)B9LuOs_)F4%bgRzbl))BfJ|` zd_5DACO0k;Pnc46VLswy9zBzlMFo|E45yk;0pDc%B1iN zWv(@Yi}2@ndm7HrCevDi0!7FYS-?RvkA|Cs1|7c4MhfL`3?&Pu44o}ia-F0@kDYDu$X z%yKo1G{xzdqi0hq>)Z;vRx%}27dUt9sd+B&iOd^c{f|VRxp|Ay=ufwo|8BIji z5NVINOAWPbvp^q7Xa>#MDkSq>GVojb`E|Q^cY!iy?I>vleXj`vj9R;)) z-7j7!Mq7j(Tfq&gGmzy#L+YtF;K0Wsd59gGm@~^zV~jLj>b%lrz{N3ilpMN{{yQzP zVpSZE?k7SAl=zij2-|GUOnO4o!)CdJT$NG2(=syIvdRg&)IQgaSjwZ>NVgrc4`50{ zSM$^-+8nB(O~mNQW98m1nZSSJnJ1Y%r(+~@<<(tf0Izn+kw$q4+R}a8ZkZ9s;vFcL zv7iM0QM=2B*&UmSMu73LL*dEWUPBl}GpF-oYu4ojy*$eh$4x%ov=_rA+c!nM>+*)z zlfm*yrhg-mF0BA}=G|VA(%PPy6JzS#NSf2Sxa_gBINfn<0}s-N580tH(o(f_HoMF4 zIZQgDxKI^qAQYRvb><0|qj*)k2r>WDScz66b;5JjPV)y{v{_1bJWg0(ep z;&`nfq#C01wz6Btffj+$f>0BW(1en+u5a|U-rRHVOF7~O((bzGgljI>?2ul_W|yUo zfNIB7IIoG33bdsKwqzXg9J$s5#<5(=@))N*s0x) zjwlVjjpdHoux}`u4b{Oe0cKk^EbqXn5((DcUU9T!2|Lp&;E|k2VayI|1Ux83DZZ?} zTpHZR-@Gn5DQ$ml7bP3XJ$Q_LONO@(%;l8#jp;qHz#X70T66VguaA{g zG2#~rq7(2@ycyN;3J>%BB1WT{*mSxOd*1{`*bozMNBy22bk*4g((%e?o_?WlIV>^A zo(yh}6y$yF^+xe>*LO9-g8l>#zo!*D+#$Y)4}SoGTaczevv7}rI{*^>HkN0Gl`&v> 
zuclwHo8}27vg&K0Dt}LGS-iwD4jg^Si`zC6aT-0Q_j)A$lBz;K{MtbG*QejeR;+2LyS4XvkL|5%Y)@*DpL&uo6vyj zk(3dspPMAmA<5QSJ0LZ+HYRl4N{#G1SD13@G1Os=a?TzrrXL9&N0dLjvk>_ggqYU8 zx?gTPPuTppKaSLXk>8aF$MBCtmS8}KnuwLr@T9Pi-NzbCT~G@36VIyd<;syOq2>D9 zQ6$Z<4*)msb!!04glDh>4j zP94yhTY9?(`8ZGz^3h#f0Ci$Q%+K?->#m@hC{pjVJnfgp4RcQIU8fYCt-_VP+f>=V zG-qqLAEClY{G5QF7GiVT9R%o;eV{7D#U{PC#pn3NUOmTzRcGjh%kZd^^upyPpJXWX z+5w$`@%}HDry0}-F6`m>5o%zWuI8I9c*JFUi0}($P97U4(O6yLfUxw?+^CN^>RFu@ zDdMfzH62MoH%+%k+YRj&Mc90VFXV*kTh}0ScQ`RHV}XPQk4)&EOYfa30tNJ0=RTFG z?OA(Va?nrWDADublnJPSZK}wnd}YC%MF#)Bm|KB;r?Hod*^+dOneICiBoRu$w2__I z&32|mW>^PJIEBL6yZU1tr^r@FwCScTn}{2RK!dY2_ygG6stO<00|hsck2RspwnV67 ztTZBAl?KXlfyFwDHeN$b^Wz^5a3>4`H!u~fK`Na;dUv3V;V%FZ`vANlVQaK`W3=@F z6EL(>R^l2>bIAb_?S0T<@!H2>m_PyPgUG>2)g`w^mdSl8kfJ4+(4tCIDCg^@KjIQw ztSzfGM^@O!_Bp8wW7?YfKRw}s3OaS8s@{%Tdji^q&7xK^A z2=rKK+q3Xe>nOTt3BB%UxUlU0!`8>JOI$(n>0J{)G3Swi5KgJtGA8d24h%#~tKcKWL ztaH!Inaes+(DhjOyf}Z_!d^_99P!Z{wIn*Ct7P#Yk-P|Io)Bj!)rkp%tz7xwlP4qj zpwz_HMX&tJcAU76(HQf>$40?9dqi#t+q2?Okz#{8r$!>(b8JI8MY2EPf>sQ>%P@Td zF?nz!$KsWv7rwR+dWbO-y)o`&duqKO?pTo%8S`Ke@6i}tS&;8}hCKqr(xYcL4~?`{ z(_V-ZKypO9Zz1!l3aF3cf zc=uo7g_<0(tuTk>7IjaimH#&*CGXkdMW$(=!0#Eh=B@Mj?JVDP$XmubKkj~^TcS9XWyTnK6=HL-(DSAqOSW8RmBr($=$6 z)}t1`dD#;9-m(3Mkyf3i0;~TR+5P{YkplJxM&|!FBR8B-R4{*XSjXm#>q};(#;Zkw z0V+gbk}{TP>x!B}ZOQ0bh+EdF&S$1wy52~ncqYSlQ&>Nw zV#7OYO=lsefQ-Y-H@co1@17g3o$sgfzdJy zRtQQp!u+w=VYw0{yJ!tbM#m*8;TkG5@}dN%eJYc+%KZ~(+0A;Dtg|%=%iP~WxW#0% zK!9#8{WyIuOe~ulwoO;pDBDS)45&00Q2xz?%FZf~uOfnp6mVQ+funR}IBica&k;M+@I=sYVJ6RRhnpgg=Kv}Fj*MXLjv)WKN(opdbtES74vvhy5 z;uDzgn%!2{)Jb&+lq^F=V%e-tmQ`z9E_AelJ&ZEoio~=z9iDnFLEYijR^tzgh-k|k z7#fB{M0JId?nRtTD5LgpRba-_xSeukYhYXkW>GG)w@d>?otuv|<+w#|eFZ$xOl51N zRdn_Y+5>RM&xkhblnYwkJ~QiTYhNri_)3tQ!Iahd%Ia0<6~rkA8Yd*qJ@AIk(~HKDkCA{lk-Y_)p@vuy^E{JTJvGkpM%wSa#P0(qXc|>1Fi;_8M$T#%cq@W4z0+l zv#66MMTT=!?|O$>P_zBF%7W?i`_B&6sdnP|T;?c=JFtNbdNXRZkPh%`!A}-}WfD;d z0+V|bFDMsVEW6KWJU*~ct~dHE{s)}^;lSoa+&Cqo<^*GNEE_A{xn8E>e9PT3)#}-Z 
zS7^Zz=za!T9N^%siPQDyS?S=^Uj<#3&4Iu3T#XPIS3g(PhLEKMGWT4dc3%lFv!%qo2YaP>X^3-T2Z?=RzR-;Kkt`S{ zZPGWCw28IFKt|k)1MjFM+PoiuPm-b?zSX2*I(xO5+RW_0@;d#WVL#?2nQD~vn>w)} zd4!y7K4$pKX&;H1&-+(i!AM$=1H`@x4_^LvQ4VZI<_ttXv@6)->f_oe^H%9{sm-AbSMU z!7c`~~$tWszwAHH#FowlJ}E{@)dM zj2e`?@)FuN0&@}@Gq)fvBoin@I6rdgFDh|JND`X9x)2c%bv&{-2KEdnJJVI`N)`J* zOPbUw#ephT=ZdOa0!s21%k7KH%j=pOwac2#DVjFT)td7^of*~)n&dkZI@5kLJRe;z z83zaiVD_omM)_&^H)LFko7QU)_Z8-Yk)&{~U|<(QI6_Br@>TWTbD2-7?3B04Gr7&) zHTS~g zpzywqb+95gqL*eat3Nl)V(Z|P$Ak&Zp%HQw9$XshmJnwW(801WVv-F>)kKjfub@N= zt8B^?h)Xl%8>`r`DI~d)EVDN|hXgQtp6?=lVn!f?6phwL6+c@vS)-It6oq68x%Tg8 z=hu-yoM3|(neWSbn6ifTUsTEBd@y1kD8D*Nsy8TtW$-6C_hVQ~Sckg`#f`8B75M98 z2*%9UmEs&r4@qBlpAm7P#=7n}Ckj%525}Ka9Yh!x**tC9MD~}?nYV!q#z{s$7=l;J z+laAhx{B22*=Cy*LN7V{6s0K3L5m*MO^(keoph!T^y=uUX(?O|aeKOZ2xfRP>h0>` zN|LY+U4i4RBz|NI8&{SDg#bHQtj3BJVPjAY*we7qgSrH6kGb1;Cz+^)ASW03NyN&a zbx#gDGIT!lUkE-=7QYvkHc38p^a}H&3wEhEh1yI?)C9B@mYbSb#Kb|>uMdX20yC54 z&X=FUz7GkLMl=h`D8Xq)w80-*iyoqxwc?OCOJ^K zBVhx~tK^{C(IU+O_3+X++%7s*0t~DekHvqgV3ksaNXZHqSike|o@5t9hl`vJNGnGcn@RkWCV#6gZwn<8B-{4A$S2wtr3rWIL zBF2t>l}k+275tma9GlE$6`4YU>4vLPp#t%(7MX2dwQ&z4s*Fa|GPJ0xn_6kbi9LqX zF)Or=byykJYs+pE9e-)ybi{^C!QthUK_v%PL?dv!kSnp3aStvM&8f*lc|5jnWOT@Z zf-9DAZgZy1WWTUkVM6@ucy>@!m`HtuFrm5n)lY)>9nNB@^W(fRh4)yXe52AH+$C#W zTq8V)YU%yCe4>aMJ-qoXfwJDzO5~fq+hl_{3S*dkFF*Cr1!JCgXlCY@_<=v?z5$BO zE1^q8q?nRSeMkWrd7h&%YK(XC`YYg*faNY-v~489F~>vuVEb`jLQpdKZh_1sk7I867fX1$~5*GIt(2VE=}qP<*LD%r z(jBVM5wpr4Rg3R}`A;RGQqIP=FtYCuzlYOwv=bQ;)DLNN>Xad|@j~qtU&H+#_USF= zb8lTalav*h2~uNXLrHpCf^AOf#k$$`s-?1k(pen&xb&3^DW>_wr2N!5E^Vf{NlPU! 
z$NZ#I1UDF)mG{@oT&NAg_{W6q(8HuJC=7rS^dahc_)=HF!6#bsF}t|xk^MbYZvH?C zL6XWlvw@_l%G~3ol|Ntubc(}drnsT2l9OYU6QCMk?EUwk*;L{0Jndssl>=;=d1)9;SLcbU zk0pW)x=U@#cgbBcp6RqX+)Yl~?>Hrj&^u*k3&nxTzh2Ma=Uds;YIeugJeN_H}otZ?Iqr?}Tq|F)${FP$v1ISP{_lekR(Vqsvm%#x{#<4A6 zyO6kW!51Ixo<1c0w}5Pas87>-t$zB&(77jwgq_HGAY2h9`uVV=FG;gq!)!B7Q< zt^y%g6X|#uWkEe__#KcLE=a~IIP88e}F zUUn1eH)>#QCr}6CW`m%;%bC*M-T&hJ;cGVal!Dib14VNV+M+L#zne1AkJTv> zD-2y!mr>hxMKQo~SPx?4Ks|8Zksj{jEyEMsc*{OL{NT;nHJI)PaO!VaSRyAr-_a4(8C!bU)Pq=W%-5h%JQ75sWNj<3(*4>4><)LvZ^rAk< zLWu8PQVY@%X+nmaU;PuFAIKI0f#R~MA z?a#iVu08NNJSjP*+l=}M*Nkav%~Q_f=J%CFK(PO*oU6@2;RV@QnM6bSgdpYxY zGHZF5ydc8CynZwjueS@RmO??-v$hIzfwRbaOz1@qAo_)*iB|=hr zo}ZuN*0u*)a;{pe1J;g{6~j5#rtHK@Ps4X8?J_)wH#{4wGj!B?>q8%?eChACrfO4m zLOx>?w%#I(nlv18i;QX6=UMI=0lssA5bb2Tk_T+Gt1PTspI6V{uRbI(e29#!F)Ld3 zZ|O8=GaB<7Bry)+sBHm~ z*7NAUV@vAlfuRbMvLzTd*zWNEVzC)gH$F2_9^g3A)WrX3RV+2?Slp-57$n~tYE{L4 z1J#~^*ePk%t?7kpz+)Sy^o~;SQT#OScAdg{D|)S zteP*^!iEykd6FYSd2YVurBS;TODmP2mR-9vBXd!ID-j>ngnwni{~6)^_f9nKf2|=TjSY?dTOYbw3(8x0sqLqmIcbul zjf9Xem}a-mjPw!`$QVkW0hExYfpWlD9Gr~VfXo2tZ&|pD)xSdG(WQQ=5S7+4M%ePo zaPGZ)X??lIwN~@0Ql~lc{(Gm*?Rry!%>U{@?PS|4_2%$RX?XV*jzDQ=b7xFuAx$fwR${qF^^v*Afw$w=$7$Pgl%r*;!J9dEJLn!cSQ zy9n12;J){^_|&;N^S*OZ>CRkAvw|LKR6D392(C}*j3~qUSa%#H0Y4AvYkmN7y+Gt{ zi21Kic>vK?wHBvl#(3gZ!!^1ecPq{6QJNPP>dpDDIl?gkTAlA2eKCf4(oxQ->wDvu zp?du?K6Gmrc%^#x$ncatcemC`QVO4Ib#3|NvNM<7+iCbs~QYiin9Op2@hI8pna)8l%0Y|pAeC_e1 zjz8c>3;JzYw(>w|<6Cdwa0NtdSthgcRH+?n;BZCA>fTYWignZm#|CKdT=eUBkgk?m zsx@PUZ%wm8FpA#ICUUrB4CMjn0ycBk#RH**bxIWTU}{kBT>LgrES{%=rTu_?I88|m zMw7o0BKr=0IE(DXg$+F;CJRiO*9FJH>5D3`?dhrv9-JkVG`<>hdcsH8ee;I1RR2(h z#VL*|px1%+ z>-W~{C5RC2@YV|K7I$N((PxX2tGY#&9%^j(lhWczk!}e=yO7UnDKGSKFw7kIC zh6dXrNo;4?e3Bggrm=HNsyBJA9s=mJB!@vMY*2<;ysh2Hpz1F(rbUck==nPQM`_<# z!9;!WV04Pfpw2?Hm9`#~UPtDiR=btJYx!1{{7(!e38TR@WWPr5>iXGfeQQ}(9g(af zvpj*71z&y?V5GzNb!Ygc+A+-%UEwD8Gf^@SE0>|?*OuU-fUCm8EV%X%+Yn^II}yAb z9WUqk09ih8J*&ufEy5uT$d7|N>-HSp>jJKL-^2-;Wc;A(VmW@`$IZ(df4*oQ7?IFP9XSeM@XrzXNIUXi@#S--N^#R}I!y5L%9B 
zl^)>`e0UPrm4+}MW!}*bn@HV+OZU)H%vS#nd34*%LV1GIgBZiXvkBMcVq z)n(+W(DVW-YH=R5gwI3SLowJ&bjo#;pBn`DO7nid1KZ!#|E7)qOSm9dgN5GCxTIKhz6w(t0#G zw5Lh2e_`KYzR>U-#KKUyV~}n9!tgVLIuwrxcQGt=B=OAJW!8P#6&C;P{io2aAYHcc z)alf3KO=}lC*@Qy%=&o0hUwez6{)bhka6odTc|99=a!BNPrbY^a$AfCQ7(M&74bXB z&D03%lJ*!IYoA85cx9JHlqCaeWeXD~julkV3y_MtWf(in!X5`#ywJ+>>p)8y8b;8D zFzmJDIG7L|!vV+;86~q2dqz>|d*6OB54U?2SJ%=W3xWCQ0tmAQ?T40i#XQ`VIR8=t z?zlJsJZ}F%difY~vbVGsjv<-dwp>V2POOZ>L0zz?P@rRG-ae!+g?rlhcLzeDq8#X$ z#;BsYfs0$YO0d}jXzuAA=d1EPWRs_hQBYr!JCilyLs98zJbJJWQNdc#xyiS3P}!Yx z^cHrMD@ioFss4%NH2UWW4tJ7SM!R_SrXn7d0^=A^K2B>vQFoxU;uOUT?)E&z{s|Vh zUZ=<;<=3r2F-o*v6Bp(wtC9FHEt7ja=xKibT6cRR8h+$S$e#ITEC6fG{8o`_02}Bm zV80mL5^@OS*mn2GWcE|gj|34mtOPoOTpMq1dIGLnoy-jU@fUqfCxfE031}U~>y8f=$#ph5lg!4xV(r6t*|n zzDZ`hx>Ucox>9G@nvI>tC^XY(GRWyXKDOVJp9ZeL04l7amg(U3?2UVy)s)=|Jw z#7)_i&_O)o@4~K*V=PvvM@+T6k+~HGrW0&G`d^{MIb?~K%-mPA%)&M0RWxwDF|`aa zMz7D3AJYP-;m>UDf~ddRLCC=h$#Lc4Y=bqNj<1~E+XElj9Bn%kK`&8MPBzdm^%ul6 zTM7IC>)fp-uzkT-vLL%-t=aWdbCdSVox?2I$}>F)_o~~c`;kj^bUuQcqYZsp?{|=^U-t5w803H6_qPK0{~dvdgoWk^Lgz>-PE&Y8klH_8 zu9jujOE`R&qkrM=JI2i`QL@gacDT0~#uDyqUD+i+R6dE#2g+PROUQ^_mik$N^^NR_ zl=!A~Fc+~tj}&nekQ(#HU@rS}+k0j~GI(rGH09d60W?HU=+M7w_L?P|W_ND~6>8IF zN}ACz?XoH(@LmiRU88P3 z94gRnRoQMSHcFB8Yv3V!GP6~cvK8GX5j|8rA1_)RR#$k_}E#Ed-D^kDaN@`kMK)hxNw# zt4%S~k@Bxan6M0`LlGBq$pSM!u&_y5wbTkTU4#1JA*Q-%{F$w4Gqu%eH}}~Dfj(UF zz;K{B0QXUBx;0X`4coAsD8g|~s9IuPn`UbCL}Rrm5K2#>B$P_&Xmq|`E6o}VE#?oz zO@6v+Ond-EyI39v)cM_-Wij@ZLeVZSC0-~t@o;i;apCmuubHRJ&JEd=U+GN!p&B87 zD%$DUu?qW^BwvCX0~Zb&mxO`X#-L<575PsS#LZpz?1(c=%g-!BGo)}KLic@pCm#Z( zGboizb6kGJWye(;!S->@zRuO9TnfZT8869?$uJ??JnELj&FUFy%KpMw*V8y5@3 z-NYD<*>;Tnq*$hiHIwM>oC2*<^qYR+vn^VZiWS(4^V-RjWAaBUeSWG>_LM;#TsSR~ zT6T$5OL5#t#1*iLe;uT{!LC^*wNDn&$-vHMrxLPYSu#(YX)dt}*`zJqJJ)y9Vrzlq{BmY(u7DR3G3L9wOg3)w zu2dVda1E4>!lJU3gAuv3m|w9e&r#%nT`%3Sb#o3C@?EsEWZSKV8SXlFY}Ij6?!5_p z=c$GgVxLLW6j^nhltopO@qt{DDwTaR4!PQVFJ^3y%U}BMkm*wsPV`iRBFd5lzb|yp)K-?x~P0&$|UAd-ZRVinvTv$ zzx}DD8aO@7Osa=gKx+%2gK2QGU^kEZb^maCQ8cIC94M+ldrhXqGL?@VV^iMN9U^mm 
z9N%f=GC{2hW6~?Aq4|VX!Ob6pU*NIC)6^Q4Sz)gORUSA{;()48rhpgCf+U(A{Zz)j zOxe6bS+djwe!p^_!HxMhZB}GCFlIS0s7_xe=}*jg5$lTf73M<0&chxsGu}Of#yzwC zx{Et9{)Gj|J8R)m%n7Myzzd;(eP~5qfxw8xSbLyM+=7Xm3dSq57saghMW#05dBGI~ zMddON2O|lUkfp3VzGAZDV9G}D8+54!$wBT3TkxJB*Vz>Ot{a!>)NnAu2sj zop_w>5VrJk0z$CwDlsz6A~Dkt9fXC&cU-^__R2V!Rj}J?oxf(oe{TsL5hB$K|ULK)2gVmN$dw}Q^ec6^6Yjgwl+*-Zxd57Pxy&eODv9EPW z-b4@kt8R*>;LbE4&(q=sF!E0z9)&imdvicNL!W2%0H>(tC(EX>?s2JT#xZ&)1t+N< zr^bz8sfWF5a$>k8N(f)2yDtp1xw=bovNQMzm_K4ppINa;CWvZU%Tj)-i)Obw?+7(K zmSG>-U8GkaSzo)(AoqJQ3>Q+J#u!3Y5NMMtk7&Kft$VUJs6T%sxo8=kXqi zgM?iH?A37sp0=Vq|0-ZoXXRvo!;Z6ykmA3K=x&W-M}I(kEl5`F)ZgotSjD@9X1nZ_9`c$2$sm?CvGozNE-~kH1omrg%aWuH*~EVNmC7sx@fv*NJJmK94fL$ zVKmAS#$+|sG?=3WEJQbC>c01ap=yxDgf*8s##^TqUn(FJT_LHTD9~&l%sM5&HHPb? z=0>enFWBIRlk|DWO8`lMD_Vk^l@2LxPvkoF-h-~k3cafZ9Q|oD99yQsL6m~W0WrXG zhF9+nX(=Q8+UUK%T&uVL&ZvVu28NknK_t@sH~D?_Y@3Cn;b`1cA-!hK=ZI*z>F>)O zQGF={k*SXIF2OK;rmt~!-$c*AX8nD4XfREPQb&l z28+_rlG(g;4BkplOPzFuM`&lXFdwSRiVOtB2jk6+2oD8tWSv!gtGM@aUY->65PfQ4*rmHc05c>G4$(B*k1#dsyV9;X~> zuaq@VmBrFnecr^+wZ~2(O_88WQZ7B4-fKhOqSp^)7i_b#CDNxyR189{$v=kYD_l7K zy(`B+y>x`535H`P52QH907);BD^9lKm?hITRf>!A1jE^vb`H1dtm(uS~F=VF&45L z3b$#CZOBj_bBI;gA}%vN5N7}#Gs47}w;!gUE1V0zhNdgEq;q*yuO6BdQQ>4}Vuf>J zR$#wK_f9@mBFU0)C_$*@o^s;5P_1qvqG;mW2v|dht3n7 zY64!FVu#WRxVWN-!XI2xP%C;@Der2OFFJkzQ0-yamp>G|X2^fiWcnj!hgP_=&XkGV z-k93vr*wud-y3R<#57@LS|5;|ADSvpO(;<6Nv{;Pbmgke$#jO=o=BbA>WaPOqw`MU zoDpsd#}x`*8}RXrX`O=pwH%mEksm6}WVnRFbB!jiHCat*iKuPbz;C!2TqQ0JrQ3_r zH@%U6dpgy#%lMuOT4FO&SvXIu7)&Xw;V=nv=!zDH=q3qHSm%g1cj+hAgu|0NOX=Ac z0do&{n=K_E%R<Kz`XYFCmt=y zQJryDsFye^D#C$B!%afZ4np!FuTIjS`0(v!jSt|q1`bj6R-oU36Vs(DydrKXzf z%#zzyQlISYsRC2wcV4T-eAWU|n3N%luq`FKBq*IwAy^88>y>?-lb^#j!rl=F-vQ-o z$I@C8V*%t*j<;AL-7G)Z@E()UmMvK1!Yfh#}9^>ACP$h z!lo1XK;}q2TCsv<`tV1!e4u zN^<9wpH_KFlj}jKM(FyRJR_?>4<64T@^c5+Kk@rwpMD?R2{C*VkDlulU|(XueX`r^ z>b^dp+^!sm^NUeeb~5M{atFFQB1>$Fb(kSb3%J0jAVW}?n{2-O}(Gp z?nDw2@z~$~O9t;b%5$9X#=3&b?Dk%DU`cp^!ORYfac?ut#suQopfN10hzP7a&d)P|7%Xr8~EHC-Q%5Wbc#HIC|6AP 
z8(*iFe%8@9V)+)q9OEba`F_F~?g#Vjg}f=xPm=9n=CtTDl6Yj>wDOVFy~1Ppv)si# zQ#k4=^t?ZS%i(-8&++^*LM`?{shm}IpkyJ3ckd&js~I#i^)=4Gj)mn#LzZ);^l_SH zWAPBIg{()v+mFesU_`KchIJ-Gl2YWtyofiawun_Lt5LoX#LxPFVLebr|11^?;77#frC@floxRYaWu zO;h*;`g{SzC{b)CN0m~uv6RDV7BE=^-Or<^h^SRsx2wSweyGs#l;58srO3rq^f^nT z%{r(Yio?mR`+gQ*jf_+;+V@;_#g)3-1)Ozx*eR6_Ad1-tC98*qKd}uCfSiPisR(wB zo%&twMEWl1jg-7zIKKrJNs9kr8dp4;-Sz-T#1ua)QD~M8&yumAXX1$2SaLU4LZ8d0 zEdH^?&5T_8Pn0c0#3{fSAFcdNxg33#p&2nemE98K z(gF2RyuSdqUB zKMvuE^%AK01ce?8$hsqT0bOXJ>MUmSIFMa7oL#ocQ%;(qrpK5{>te<4+@WKG{fqkX z9jvdr7&_U0>NVYa06=7@f>(PIWx)LZFr1*OXsXdPL?TLJM5AC5*tFL6>I@-W{ zHw~u5vDo#;!TktoLzZxsN2$SfRLsO>v~qqX%8f;pti_tKgPWR)Y}bd zrtgG1>gSqNu2}nVdD_0}x$XNcJhQJP0Nvhzd|w3k@!jtH`Xm7Q#tHaI^@H(?9iD-D z{F(4;nf)0S+?gf#GKP5nn|=CIQ{MIrYO?ZtyCEK-fsm=(fWN4GZqXuRlI*Rh_%~qu zD&G;J+#6T zr#2_6nfS0juE{>OMdCBfop1wwy+inaGtBxyoyPt83gZ7koaOr@$XA1U3{vR}6Vf{s z=6k$MxW}A*`So?c?+1J874Qv}?|1)0sJO_$HLcg|=xF5k$c%d`m3d6}J!)Rqha`->;u7pPbd^z0_+QR?>>NWAZx zP<)Nx84KcBMCRilkht7iU2z$wL|z_=j^A{#ANV{U#F?J|BlYhusrYX+@gMABKey}R zCG8sg_(y9Q&*xvo^zsk9i>y`0ToUx0AFQ9)6y5NVexO5a{C6^-CwYk3c3)x}bvfAd zO%srBD8O?|o8PZi)BZhJr*Q-_owxPo(gE8_v`=!g?l^#LpcprS1D?zZ*}dc!-YM6F z0YxX9fBn*bA!Sl7+KlR0R@S*#FG0&>W#fWWVe~NTn)aPVR>-B}6!*tuWHD0X`X1S> zR&#=##Wp*4FEiI8oKtF3B%^D*_Vl8{CAG}+l@}w;{qV+oaT)!KT%i6Pc5 zgetNq3vea=2_zK7Qmf`NWon?tcO zd86cJj@E7uXqV6l*q@Raawd(u!N^ZhTrcS{H&JABjL{B7Aa^^TjYaajG$qqeo&bbXS={bFZehz;-cIZ*Vl% zLVsO*Pa}nw{sc%-!4hEOMD)lA;jEvrUbF(|oOSvt$2Pj*1lkWZO= zLbRjx{l|7?Bc27Eoew`bbZM6$D+Iu6l_Xls_C2pQvtWRA0n*Z)ue2@bSXdX>N?R}I z>u?kTde0J+l!{_jgSwDwp_^rY!v6$W@O$<3?`f}Ex$wSQ+yNh{Hp3bIqwb)2Wk zdg@fVc{|)2wzE}sc-9)xu7bb46zCCk<8P_L!qx7@`_oV>6bQ@Jr?tJvgvI|pDjct+ zTE)SR-7V)gci#ro$mvMJ;8r=+;3rC+WMPBfq8nu~V{HUXkGrF@`oYs z$i|EA1B9|4iT(+%myA`0{lw`E_YvF|^~>-$rUoQ))%ZgH3@!l(yoH6<*8bj8mrZAv6rzCKDc%%{ zBCg~1^#^a20oa;3Csw6N=*w3OrzH{s%U4JO=CUHPtm;!8*zLd=rB2~Xr^Z9i)AmqU z*g~#c8!;X(Fe}USA>6Rx;(K|sRRFGv69T7U@KSQc%D~b`q+~nm 
zdxlq49Dd8C>|$7?nZe5EPEeF25;@N)pBGON0$KmM1as2IJWwx<~A?fuIT>Dhg;BKm9lT_w>x?hE=;R&nE>!*%n>r$5T`ABSt(%P-EGMZ$yr z@@i8AX`iz{Jj-GsLdW~M&-e1^b!!BxmB7qC9)%)wBK*Pjyhe6=VDD@q#0ajn8zY%5#pN$bR9dUu!nq zy!PCzjh>4vkC=R^QEE8M^jo_ubIEPP-FJ0*=Da-jqVHbr?9*Yh1it`F%3Xr@%S3qd zA9OV6Xa%ivh@Loo@B7)D3T_K)ERV0s$$LkPj=D)anIgT|#pyvqrtT3fy<^iNCKdVj zEHQi9)Unvvw#{VE>PO#hw;AU?6?s|9n|>!mV^VHJm)RoVZda!zzOS~`p59_~>Tj{s zx#XUA;-Qsn**pI4XW~i}kk9Z)n&W%aC+Etg6j>`JQC88HYRU_Dy;uZXX8cHGiRb!l z$N3HlEHlt8OkMo$_D!j@f~k!cU+sAKLd%sYHab@;$ZLBq@3!p9u){LzE*7axJ-^~- z_0}D)CPB{xC}#Vu``(Y;Ju$d@@{u%iuDRpDLdp1-_z(d^cJlASuiTRKZLBcFQ3+&^)y%#<%9P+f1QY1_;@mDbhMC+pu} zcC^l&xih9x^h#vO{mnT~uiRU;Fk-r5TJ@$d{>iT|itbkbdB1nff}C5gJ+ATe|!(<@)Vq@sXV^{qetaiK;FOTN}~o zi7O?R|E@j$&qYJ$r{2T8b@fl5J8j6?y)EwB|wCvT_`HcCqzbiVwJGA`=U-!G;?mD59wmfOO zdoo&rah`L}j)_LOH;zBA`|?O3ynz3Dg8sQj2Gavr!UdjId@ttcFF262qw(<$_m%kt zlWr8+I562i5Lr9t7+2c|?yccFO+!CDk92#sVb&khClkuO_qH+lY&Xf)xFpTLfAtZw znco@Y4mF((YWrX;qqEsi>Z5SpBeCqy4KubEwg+k3+&%g{Pi4XOPpr)x8)gW7Z+Y;U z8(2_^8(z3DV`9Xd2@zZu|I}{h-~S7IB0yEu@n6mDAe9+A_UUc8WD`}aO-KDP_p0-W@d5_g<2$WvTGy7pz{Lx4v@_=&2S)6X&-t--&&@Pu!AcvQK24PkQyZ2V1KN{JeXn=iZE7 zZ3hF@wc-Jv7`m>_I-ATO;m79x_hZJj-O{{jPnP??Qkb)hYsZP#Oj$o8HD!F5ugRl6MK)j9dk8 z_x{`zdn+&}yC*6&CvPKgnB>Oc9j3Yw2InSsS$S6WowkUU*)Fs?=|*veY-7x?@Yek; z{9jm26b|pa(q6o`DsS5Fsr|C4UIM|#WaH)jZ8EhJ@mq6iSJz{|&ByjXHZ0vxdgDXV z{p{k|IrRt1L81TL#r2XKFyyx}gF>H?NrVA$L=f`XKA2r!Ek$vmuwWuTjdynPbYtbM=)2X`CRP37ojAupo_ zZMZ~$2mDB8A#cA#Hv+j+30iNB04hK(+&NfQU!$9Z+~@*zLJ=U+8p$MVT~c(Tkc%Zy zD-8jf9gvN}+GayG3{;XKXLe9Yh5&1T9cfs(hO7%z5FjT~P(gqIi`|fmg#d3>Hjo?_ N5FTV=U{Le`@c?>fD9iu= literal 0 HcmV?d00001 diff --git a/storage/connect/mysql-test/connect/t/jdbc_postgresql.test b/storage/connect/mysql-test/connect/t/jdbc_postgresql.test index 1041ef468d7..8036f71020d 100644 --- a/storage/connect/mysql-test/connect/t/jdbc_postgresql.test +++ b/storage/connect/mysql-test/connect/t/jdbc_postgresql.test @@ -3,25 +3,32 @@ # # This test is run against Postgresql driver # +eval SET GLOBAL 
connect_class_path='$MTR_SUITE_DIR/std_data/JavaWrappers.jar;C:/Jconnectors/postgresql-42.2.1.jar'; CREATE TABLE t2 ( command varchar(128) not null, number int(5) not null flag=1, message varchar(255) flag=2) -ENGINE=CONNECT TABLE_TYPE=JDBC CONNECTION='jdbc:postgresql://localhost/mtr' -OPTION_LIST='User=mtr,Password=mtr,Schema=public,Execsrc=1'; +ENGINE=CONNECT TABLE_TYPE=JDBC +CONNECTION='jdbc:postgresql://localhost/test?user=postgres&password=tinono' +OPTION_LIST='Execsrc=1'; +#CONNECTION='jdbc:postgresql://localhost/mtr' +#OPTION_LIST='User=mtr,Password=mtr,Schema=public,Execsrc=1'; SELECT * FROM t2 WHERE command='drop table employee'; SELECT * FROM t2 WHERE command = 'create table employee (id int not null, name varchar(32), title char(16), salary decimal(8,2))'; SELECT * FROM t2 WHERE command = "insert into employee values(4567,'Johnson', 'Engineer', 12560.50)"; CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=JDBC CATFUNC=tables -CONNECTION='jdbc:postgresql://localhost/mtr' -OPTION_LIST='User=mtr,Password=mtr,Schema=public,Tabtype=TABLE,Maxres=10'; +CONNECTION='jdbc:postgresql://localhost/test?user=postgres&password=tinono' +OPTION_LIST='Tabtype=TABLE,Maxres=10'; +#CONNECTION='jdbc:postgresql://localhost/mtr' +#OPTION_LIST='User=mtr,Password=mtr,Schema=public,Tabtype=TABLE,Maxres=10'; SELECT * FROM t1; DROP TABLE t1; -CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=JDBC CATFUNC=columns -CONNECTION='jdbc:postgresql://localhost/mtr' tabname=employee -OPTION_LIST='User=mtr,Password=mtr,Maxres=10'; +CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=JDBC tabname=employee CATFUNC=columns +CONNECTION='jdbc:postgresql://localhost/test?user=postgres&password=tinono'; +#CONNECTION='jdbc:postgresql://localhost/mtr' tabname=employee; +#OPTION_LIST='User=mtr,Password=mtr,Maxres=10'; SELECT * FROM t1; DROP TABLE t1; @@ -30,14 +37,18 @@ DROP TABLE t1; # CREATE SERVER 'postgresql' FOREIGN DATA WRAPPER 'postgresql' OPTIONS ( HOST 'localhost', -DATABASE 'mtr', -USER 'mtr', -PASSWORD 'mtr', 
+DATABASE 'test', +USER 'postgres', +PASSWORD 'tinono', PORT 0, SOCKET '', OWNER 'root'); +#DATABASE 'mtr', +#USER 'mtr', +#PASSWORD 'mtr', -CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=JDBC CONNECTION='postgresql/public.employee'; +CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=JDBC +CONNECTION='postgresql/public.employee'; SELECT * FROM t1; INSERT INTO t1 VALUES(3126,'Smith', 'Clerk', 5230.00); UPDATE t1 SET salary = salary + 100.00; diff --git a/storage/connect/mysql-test/connect/t/jdbconn.inc b/storage/connect/mysql-test/connect/t/jdbconn.inc index 05122f51924..81ec80c13d6 100644 --- a/storage/connect/mysql-test/connect/t/jdbconn.inc +++ b/storage/connect/mysql-test/connect/t/jdbconn.inc @@ -22,10 +22,11 @@ DROP TABLE t1; # 1 - The current directory. # 2 - The paths of the connect_class_path global variable. # 3 - The paths of the CLASSPATH environment variable. -# In this test we use an executable jar file that contains all what is needed. -eval SET GLOBAL connect_class_path='$MTR_SUITE_DIR/std_data/JdbcMariaDB.jar'; +# In this test we use an executable jar file that contains all the eisting wrappers. +#eval SET GLOBAL connect_class_path='$MTR_SUITE_DIR/std_data/JdbcMariaDB.jar'; +eval SET GLOBAL connect_class_path='$MTR_SUITE_DIR/std_data/JavaWrappers.jar'; -# Paths to the JDK classes and to the MySQL and MariaDB drivers can be defined in the CLASSPATH environment variable +# Paths to the JDK classes and to the JDBC drivers should be defined in the CLASSPATH environment variable #CREATE FUNCTION envar RETURNS STRING SONAME 'ha_connect.dll'; #SELECT envar('CLASSPATH'); diff --git a/storage/connect/plgdbsem.h b/storage/connect/plgdbsem.h index 6a0a8be8ff8..5446e0d2a07 100644 --- a/storage/connect/plgdbsem.h +++ b/storage/connect/plgdbsem.h @@ -362,7 +362,8 @@ enum COLUSE {U_P = 0x01, /* the projection list. 
*/ U_IS_NULL = 0x80, /* The column has a null value */ U_SPECIAL = 0x100, /* The column is special */ U_UNSIGNED = 0x200, /* The column type is unsigned */ - U_ZEROFILL = 0x400}; /* The column is zero filled */ + U_ZEROFILL = 0x400, /* The column is zero filled */ + U_UUID = 0x800}; /* The column is a UUID */ /***********************************************************************/ /* DB description class and block pointer definitions. */ diff --git a/storage/connect/tabjdbc.cpp b/storage/connect/tabjdbc.cpp index c0fda584381..adbfb2168ae 100644 --- a/storage/connect/tabjdbc.cpp +++ b/storage/connect/tabjdbc.cpp @@ -605,6 +605,10 @@ bool TDBJDBC::OpenDB(PGLOBAL g) else if (Quoted) Quote = Jcp->GetQuoteChar(); + if (Mode != MODE_READ && Mode != MODE_READX) + if (Jcp->SetUUID(g, this)) + PushWarning(g, this, 1); + Use = USE_OPEN; // Do it now in case we are recursively called /*********************************************************************/ @@ -970,6 +974,7 @@ void TDBJDBC::CloseDB(PGLOBAL g) JDBCCOL::JDBCCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PCSZ am) : EXTCOL(cdp, tdbp, cprec, i, am) { + uuid = false; } // end of JDBCCOL constructor /***********************************************************************/ @@ -977,6 +982,7 @@ JDBCCOL::JDBCCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PCSZ am) /***********************************************************************/ JDBCCOL::JDBCCOL(void) : EXTCOL() { + uuid = false; } // end of JDBCCOL constructor /***********************************************************************/ @@ -985,12 +991,11 @@ JDBCCOL::JDBCCOL(void) : EXTCOL() /***********************************************************************/ JDBCCOL::JDBCCOL(JDBCCOL *col1, PTDB tdbp) : EXTCOL(col1, tdbp) { + uuid = col1->uuid; } // end of JDBCCOL copy constructor /***********************************************************************/ -/* ReadColumn: when SQLFetch is used there is nothing to do as the */ -/* column buffer was bind to the record 
set. This is also the case */ -/* when calculating MaxSize (Bufp is NULL even when Rows is not). */ +/* ReadColumn: retrieve the column value via the JDBC driver. */ /***********************************************************************/ void JDBCCOL::ReadColumn(PGLOBAL g) { diff --git a/storage/connect/tabjdbc.h b/storage/connect/tabjdbc.h index d422ed26ef2..078129a14e3 100644 --- a/storage/connect/tabjdbc.h +++ b/storage/connect/tabjdbc.h @@ -101,6 +101,7 @@ protected: /***********************************************************************/ class JDBCCOL : public EXTCOL { friend class TDBJDBC; + friend class JDBConn; public: // Constructors JDBCCOL(PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i, PCSZ am = "JDBC"); @@ -119,6 +120,7 @@ protected: JDBCCOL(void); // Members + bool uuid; // For PostgreSQL }; // end of class JDBCCOL /***********************************************************************/ From 05261f97c8c85e8c2cf5102e70c17dcaed4bfde2 Mon Sep 17 00:00:00 2001 From: Philip Stoev Date: Tue, 14 Jun 2016 08:14:37 -0700 Subject: [PATCH 017/139] Galera MTR Tests: Modify mysqltest so that if a --let = `SELECT ...` query is interrupted, the test does not fail but the error is communicated to caller --- client/mysqltest.cc | 9 +-------- mysql-test/include/wait_condition.inc | 2 +- 2 files changed, 2 insertions(+), 9 deletions(-) diff --git a/client/mysqltest.cc b/client/mysqltest.cc index 844a2d7bbf8..2897555da3f 100644 --- a/client/mysqltest.cc +++ b/client/mysqltest.cc @@ -2678,7 +2678,7 @@ void var_query_set(VAR *var, const char *query, const char** query_end) init_dynamic_string(&ds_query, 0, (end - query) + 32, 256); do_eval(&ds_query, query, end, FALSE); - if (mysql_real_query(mysql, ds_query.str, ds_query.length)) + if (mysql_real_query(mysql, ds_query.str, ds_query.length) || !(res= mysql_store_result(mysql))) { handle_error(curr_command, mysql_errno(mysql), mysql_error(mysql), mysql_sqlstate(mysql), &ds_res); @@ -2688,13 +2688,6 @@ void var_query_set(VAR 
*var, const char *query, const char** query_end) DBUG_VOID_RETURN; } - if (!(res= mysql_store_result(mysql))) - { - report_or_die("Query '%s' didn't return a result set", ds_query.str); - dynstr_free(&ds_query); - eval_expr(var, "", 0); - DBUG_VOID_RETURN; - } dynstr_free(&ds_query); if ((row= mysql_fetch_row(res)) && row[0]) diff --git a/mysql-test/include/wait_condition.inc b/mysql-test/include/wait_condition.inc index 5fbde6950c8..d40b0e4d448 100644 --- a/mysql-test/include/wait_condition.inc +++ b/mysql-test/include/wait_condition.inc @@ -39,7 +39,7 @@ let $wait_timeout= 0; let $wait_condition_reps= 0; while ($wait_counter) { - --error 0,ER_NO_SUCH_TABLE,ER_LOCK_WAIT_TIMEOUT,ER_UNKNOWN_COM_ERROR + --error 0,ER_NO_SUCH_TABLE,ER_LOCK_WAIT_TIMEOUT,ER_UNKNOWN_COM_ERROR,ER_LOCK_DEADLOCK let $success= `$wait_condition`; inc $wait_condition_reps; if ($success) From f1de725fda1a7a6298ca8f33ca6909f815e61987 Mon Sep 17 00:00:00 2001 From: Daniele Sciascia Date: Mon, 12 Mar 2018 13:41:47 +0100 Subject: [PATCH 018/139] MDEV-14144 Re-enable MTR test galera.galera_as_slave --- mysql-test/suite/galera/disabled.def | 1 - 1 file changed, 1 deletion(-) diff --git a/mysql-test/suite/galera/disabled.def b/mysql-test/suite/galera/disabled.def index d82d5dd2023..d77e91446fc 100644 --- a/mysql-test/suite/galera/disabled.def +++ b/mysql-test/suite/galera/disabled.def @@ -53,7 +53,6 @@ galera_as_master: MDEV-13549 Galera test failures 10.1 galera_pc_ignore_sb : MDEV-13549 Galera test failures 10.1 galera_lock_table : MDEV-13549 Galera test failures 10.1 MW-284 : MDEV-13549 Galera test failures 10.1 -galera_as_slave : MDEV-13549 Galera test failures 10.1 MW-328C : MDEV-13549 Galera test failures 10.1 MW-328A : MDEV-13549 Galera test failures 10.1 MW-328B : MDEV-13549 Galera test failures 10.1 From 12f9cf075f183c1c681b89f0f9b7f7c07d84dcbf Mon Sep 17 00:00:00 2001 From: Sergey Vojtovich Date: Mon, 12 Mar 2018 16:48:42 +0400 Subject: [PATCH 019/139] Removed unused variables. 
Based on contribution by Daniel Black. --- storage/innobase/srv/srv0srv.cc | 10 ---------- storage/xtradb/srv/srv0srv.cc | 10 ---------- 2 files changed, 20 deletions(-) diff --git a/storage/innobase/srv/srv0srv.cc b/storage/innobase/srv/srv0srv.cc index cb003edfbdc..561df0bd060 100644 --- a/storage/innobase/srv/srv0srv.cc +++ b/storage/innobase/srv/srv0srv.cc @@ -421,16 +421,6 @@ static ulint srv_n_system_rows_read_old = 0; UNIV_INTERN ulint srv_truncated_status_writes = 0; UNIV_INTERN ulint srv_available_undo_logs = 0; -UNIV_INTERN ib_uint64_t srv_page_compression_saved = 0; -UNIV_INTERN ib_uint64_t srv_page_compression_trim_sect512 = 0; -UNIV_INTERN ib_uint64_t srv_page_compression_trim_sect4096 = 0; -UNIV_INTERN ib_uint64_t srv_index_pages_written = 0; -UNIV_INTERN ib_uint64_t srv_non_index_pages_written = 0; -UNIV_INTERN ib_uint64_t srv_pages_page_compressed = 0; -UNIV_INTERN ib_uint64_t srv_page_compressed_trim_op = 0; -UNIV_INTERN ib_uint64_t srv_page_compressed_trim_op_saved = 0; -UNIV_INTERN ib_uint64_t srv_index_page_decompressed = 0; - /* Defragmentation */ UNIV_INTERN my_bool srv_defragment = FALSE; UNIV_INTERN uint srv_defragment_n_pages = 7; diff --git a/storage/xtradb/srv/srv0srv.cc b/storage/xtradb/srv/srv0srv.cc index 02314db6b07..f431c47de4d 100644 --- a/storage/xtradb/srv/srv0srv.cc +++ b/storage/xtradb/srv/srv0srv.cc @@ -565,16 +565,6 @@ static ulint srv_n_system_rows_read_old = 0; UNIV_INTERN ulint srv_truncated_status_writes = 0; UNIV_INTERN ulint srv_available_undo_logs = 0; -UNIV_INTERN ib_uint64_t srv_page_compression_saved = 0; -UNIV_INTERN ib_uint64_t srv_page_compression_trim_sect512 = 0; -UNIV_INTERN ib_uint64_t srv_page_compression_trim_sect4096 = 0; -UNIV_INTERN ib_uint64_t srv_index_pages_written = 0; -UNIV_INTERN ib_uint64_t srv_non_index_pages_written = 0; -UNIV_INTERN ib_uint64_t srv_pages_page_compressed = 0; -UNIV_INTERN ib_uint64_t srv_page_compressed_trim_op = 0; -UNIV_INTERN ib_uint64_t srv_page_compressed_trim_op_saved = 0; 
-UNIV_INTERN ib_uint64_t srv_index_page_decompressed = 0; - /* Ensure status variables are on separate cache lines */ #ifdef __powerpc__ From 9005108234ca97ce9e86935fd79ea0b3fb97ec43 Mon Sep 17 00:00:00 2001 From: Andrei Elkin Date: Fri, 26 Jan 2018 23:26:39 +0200 Subject: [PATCH 020/139] MDEV-14721 Big transaction events get lost on semisync master when replicate_events_marked_for_skip=FILTER_ON_MASTER [Note this is a cherry-pick from 10.2 branch.] When events of a big transaction are binlogged offsetting over 2GB from the beginning of the log the semisync master's dump thread lost such events. The events were skipped by the Dump thread that found their skipping status erroneously. The current fixes make sure the skipping status is computed correctly. The test verifies them simulating the 2GB offset. --- .../rpl/r/rpl_semi_sync_skip_repl.result | 23 +++++++ .../suite/rpl/t/rpl_semi_sync_skip_repl.test | 62 +++++++++++++++++++ sql/log_event.cc | 2 + sql/sql_repl.cc | 7 +-- 4 files changed, 89 insertions(+), 5 deletions(-) create mode 100644 mysql-test/suite/rpl/r/rpl_semi_sync_skip_repl.result create mode 100644 mysql-test/suite/rpl/t/rpl_semi_sync_skip_repl.test diff --git a/mysql-test/suite/rpl/r/rpl_semi_sync_skip_repl.result b/mysql-test/suite/rpl/r/rpl_semi_sync_skip_repl.result new file mode 100644 index 00000000000..4762ac8dd07 --- /dev/null +++ b/mysql-test/suite/rpl/r/rpl_semi_sync_skip_repl.result @@ -0,0 +1,23 @@ +include/master-slave.inc +[connection master] +call mtr.add_suppression("Timeout waiting for reply of binlog"); +SET @@GLOBAL.rpl_semi_sync_master_enabled = 1; +SET @@GLOBAL.rpl_semi_sync_master_timeout=100; +include/stop_slave.inc +SET @@GLOBAL.replicate_events_marked_for_skip=FILTER_ON_MASTER; +SET @@GLOBAL. 
rpl_semi_sync_slave_enabled = 1; +include/start_slave.inc +CREATE TABLE t1 (a INT) ENGINE=innodb; +SET @@GLOBAL.debug_dbug= "d,dbug_master_binlog_over_2GB"; +SET @@SESSION.skip_replication=1; +INSERT INTO t1 SET a=1; +SET @@SESSION.skip_replication=0; +INSERT INTO t1 SET a=0; +SET @@GLOBAL.debug_dbug=""; +SET @@GLOBAL. rpl_semi_sync_master_timeout = 10000; +SET @@GLOBAL. rpl_semi_sync_master_enabled = 0; +DROP TABLE t1; +include/stop_slave.inc +SET @@GLOBAL. rpl_semi_sync_slave_enabled = 0; +SET @@GLOBAL.replicate_events_marked_for_skip = REPLICATE; +include/rpl_end.inc diff --git a/mysql-test/suite/rpl/t/rpl_semi_sync_skip_repl.test b/mysql-test/suite/rpl/t/rpl_semi_sync_skip_repl.test new file mode 100644 index 00000000000..2f6da18067c --- /dev/null +++ b/mysql-test/suite/rpl/t/rpl_semi_sync_skip_repl.test @@ -0,0 +1,62 @@ +# MDEV-14721 Big transaction events get lost on semisync master when +# replicate_events_marked_for_skip=FILTER_ON_MASTER +# +# When events of a big transaction are binlogged offsetting over 2GB from +# the beginning of the log the semisync master's dump thread +# lost such events. +# The test verifies the fixes' correctness simulating the 2GB offset. 
+ +source include/have_semisync.inc; +source include/not_embedded.inc; +source include/have_innodb.inc; +source include/have_debug.inc; +source include/master-slave.inc; + +--connection master +# Suppress warnings that might be generated during the test +call mtr.add_suppression("Timeout waiting for reply of binlog"); + +--let $sav_enabled_master=`SELECT @@GLOBAL.rpl_semi_sync_master_enabled ` +--let $sav_timeout_master=`SELECT @@GLOBAL.rpl_semi_sync_master_timeout ` +SET @@GLOBAL.rpl_semi_sync_master_enabled = 1; +SET @@GLOBAL.rpl_semi_sync_master_timeout=100; + +--connection slave +source include/stop_slave.inc; +--let $sav_skip_marked_slave=`SELECT @@GLOBAL.replicate_events_marked_for_skip ` +SET @@GLOBAL.replicate_events_marked_for_skip=FILTER_ON_MASTER; +--let $sav_enabled_slave=`SELECT @@GLOBAL.rpl_semi_sync_slave_enabled ` +SET @@GLOBAL. rpl_semi_sync_slave_enabled = 1; + +source include/start_slave.inc; + +--connection master +CREATE TABLE t1 (a INT) ENGINE=innodb; + +# Make the following events as if they offset over 2GB from the beginning of binlog +SET @@GLOBAL.debug_dbug= "d,dbug_master_binlog_over_2GB"; +SET @@SESSION.skip_replication=1; +INSERT INTO t1 SET a=1; +SET @@SESSION.skip_replication=0; +INSERT INTO t1 SET a=0; + +--sync_slave_with_master + +# +# Clean up +# +--connection master +SET @@GLOBAL.debug_dbug=""; +--eval SET @@GLOBAL. rpl_semi_sync_master_timeout = $sav_timeout_master +--eval SET @@GLOBAL. rpl_semi_sync_master_enabled = $sav_enabled_master + +--connection master +DROP TABLE t1; + +--sync_slave_with_master +source include/stop_slave.inc; +--eval SET @@GLOBAL. 
rpl_semi_sync_slave_enabled = $sav_enabled_slave +--eval SET @@GLOBAL.replicate_events_marked_for_skip = $sav_skip_marked_slave + +--let $rpl_only_running_threads= 1 +--source include/rpl_end.inc diff --git a/sql/log_event.cc b/sql/log_event.cc index 81bc683625f..aadd0938093 100644 --- a/sql/log_event.cc +++ b/sql/log_event.cc @@ -1311,6 +1311,8 @@ bool Log_event::write_header(ulong event_data_length) */ log_pos= writer->pos() + data_written; + + DBUG_EXECUTE_IF("dbug_master_binlog_over_2GB", log_pos += (1ULL <<31);); } now= get_time(); // Query start time diff --git a/sql/sql_repl.cc b/sql/sql_repl.cc index b5cca334891..569c3d2c4ef 100644 --- a/sql/sql_repl.cc +++ b/sql/sql_repl.cc @@ -1904,11 +1904,8 @@ send_event_to_slave(binlog_send_info *info, Log_event_type event_type, */ if (info->thd->variables.option_bits & OPTION_SKIP_REPLICATION) { - /* - The first byte of the packet is a '\0' to distinguish it from an error - packet. So the actual event starts at offset +1. - */ - uint16 event_flags= uint2korr(&((*packet)[FLAGS_OFFSET+1])); + uint16 event_flags= uint2korr(&((*packet)[FLAGS_OFFSET + ev_offset])); + if (event_flags & LOG_EVENT_SKIP_REPLICATION_F) return NULL; } From 71f9cc1221fc9faed52b29f5f79a0d9918d7526c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Tue, 13 Mar 2018 09:35:37 +0200 Subject: [PATCH 021/139] MDEV-15554 InnoDB page_cleaner shutdown sometimes hangs buf_flush_page_cleaner_coordinator(): Signal the worker threads to exit while waiting for them to exit. Apparently, signals are sometimes lost, causing shutdown to occasionally hang when multiple page cleaners (and buffer pool instances) are used, that is, when innodb_buffer_pool_size is at least 1 GiB. buf_flush_page_cleaner_close(): Merge with the only caller. 
--- storage/innobase/buf/buf0flu.cc | 31 ++++++++++--------------------- 1 file changed, 10 insertions(+), 21 deletions(-) diff --git a/storage/innobase/buf/buf0flu.cc b/storage/innobase/buf/buf0flu.cc index ca647368908..24b27d7462c 100644 --- a/storage/innobase/buf/buf0flu.cc +++ b/storage/innobase/buf/buf0flu.cc @@ -2718,25 +2718,6 @@ buf_flush_page_cleaner_init(void) page_cleaner.is_running = true; } -/** -Close page_cleaner. */ -static -void -buf_flush_page_cleaner_close(void) -{ - ut_ad(!page_cleaner.is_running); - - /* waiting for all worker threads exit */ - while (page_cleaner.n_workers) { - os_thread_sleep(10000); - } - - mutex_destroy(&page_cleaner.mutex); - - os_event_destroy(page_cleaner.is_finished); - os_event_destroy(page_cleaner.is_requested); -} - /** Requests for all slots to flush all buffer pool instances. @param min_n wished minimum mumber of blocks flushed @@ -3438,9 +3419,17 @@ thread_exit: and no more access to page_cleaner structure by them. Wakes worker threads up just to make them exit. 
*/ page_cleaner.is_running = false; - os_event_set(page_cleaner.is_requested); - buf_flush_page_cleaner_close(); + /* waiting for all worker threads exit */ + while (page_cleaner.n_workers) { + os_event_set(page_cleaner.is_requested); + os_thread_sleep(10000); + } + + mutex_destroy(&page_cleaner.mutex); + + os_event_destroy(page_cleaner.is_finished); + os_event_destroy(page_cleaner.is_requested); buf_page_cleaner_is_active = false; From 48c11d407b409a1291c84782518c69ee9138ce72 Mon Sep 17 00:00:00 2001 From: Alexander Barkov Date: Tue, 13 Mar 2018 12:42:41 +0400 Subject: [PATCH 022/139] MDEV-13202 Assertion `ltime->neg == 0' failed in date_to_datetime --- mysql-test/r/func_time.result | 24 ++++++++++++++++++++++++ mysql-test/t/func_time.test | 16 ++++++++++++++++ sql/sql_time.cc | 2 ++ 3 files changed, 42 insertions(+) diff --git a/mysql-test/r/func_time.result b/mysql-test/r/func_time.result index ab0576c69a7..7316724a16f 100644 --- a/mysql-test/r/func_time.result +++ b/mysql-test/r/func_time.result @@ -2878,5 +2878,29 @@ NULL Warnings: Warning 1441 Datetime function: datetime field overflow # +# MDEV-13202 Assertion `ltime->neg == 0' failed in date_to_datetime +# +CREATE TABLE t1 (i INT, d DATE); +INSERT INTO t1 VALUES (1, '1970-01-01'); +SELECT MAX(NULLIF(i,1)) FROM t1 ORDER BY DATE_SUB(d,INTERVAL 17300000 HOUR); +MAX(NULLIF(i,1)) +NULL +Warnings: +Warning 1441 Datetime function: datetime field overflow +DROP TABLE t1; +CREATE TABLE t1 (i INT, d DATE); +INSERT INTO t1 VALUES (1, '1970-01-01'); +SELECT CONCAT(DATE_SUB(d, INTERVAL 17300000 HOUR)) FROM t1; +CONCAT(DATE_SUB(d, INTERVAL 17300000 HOUR)) +NULL +Warnings: +Warning 1441 Datetime function: datetime field overflow +DROP TABLE t1; +SELECT CONCAT(DATE_SUB(TIMESTAMP'1970-01-01 00:00:00', INTERVAL 17300000 HOUR)); +CONCAT(DATE_SUB(TIMESTAMP'1970-01-01 00:00:00', INTERVAL 17300000 HOUR)) +NULL +Warnings: +Warning 1441 Datetime function: datetime field overflow +# # End of 10.0 tests # diff --git 
a/mysql-test/t/func_time.test b/mysql-test/t/func_time.test index 1f1f3a29574..1baded9fef7 100644 --- a/mysql-test/t/func_time.test +++ b/mysql-test/t/func_time.test @@ -1758,6 +1758,22 @@ SELECT ADDDATE(DATE'0000-01-01', INTERVAL '3652423:0:0:315569433559' DAY_SECOND) SELECT ADDDATE(DATE'0000-01-01', INTERVAL '0:87658175:0:315569433559' DAY_SECOND); SELECT ADDDATE(DATE'0000-01-01', INTERVAL '0:0:5259490559:315569433599' DAY_SECOND); +--echo # +--echo # MDEV-13202 Assertion `ltime->neg == 0' failed in date_to_datetime +--echo # + +CREATE TABLE t1 (i INT, d DATE); +INSERT INTO t1 VALUES (1, '1970-01-01'); +SELECT MAX(NULLIF(i,1)) FROM t1 ORDER BY DATE_SUB(d,INTERVAL 17300000 HOUR); +DROP TABLE t1; + +CREATE TABLE t1 (i INT, d DATE); +INSERT INTO t1 VALUES (1, '1970-01-01'); +SELECT CONCAT(DATE_SUB(d, INTERVAL 17300000 HOUR)) FROM t1; +DROP TABLE t1; + +SELECT CONCAT(DATE_SUB(TIMESTAMP'1970-01-01 00:00:00', INTERVAL 17300000 HOUR)); + --echo # --echo # End of 10.0 tests diff --git a/sql/sql_time.cc b/sql/sql_time.cc index 1bd68e89ecb..a618f751e65 100644 --- a/sql/sql_time.cc +++ b/sql/sql_time.cc @@ -960,6 +960,8 @@ bool date_add_interval(MYSQL_TIME *ltime, interval_type int_type, ltime->day= 0; return 0; } + else if (ltime->neg) + goto invalid_date; if (int_type != INTERVAL_DAY) ltime->time_type= MYSQL_TIMESTAMP_DATETIME; // Return full date From ff909acfa4e2ebad430db4af2bb86cc4ab6fb6c8 Mon Sep 17 00:00:00 2001 From: Thirunarayanan Balathandayuthapani Date: Tue, 13 Mar 2018 15:19:30 +0530 Subject: [PATCH 023/139] MDEV-14545 Backup fails due to MLOG_INDEX_LOAD record Problem: ======= Mariabackup exits during prepare phase if it encounters MLOG_INDEX_LOAD redo log record. MLOG_INDEX_LOAD record informs Mariabackup that the backup cannot be completed based on the redo log scan, because some information is purposely omitted due to bulk index creation in ALTER TABLE. 
Solution: ======== Detect the MLOG_INDEX_LOAD redo record during backup phase and exit the mariabackup with the proper error message. --- extra/mariabackup/xtrabackup.cc | 79 +++++++++++++--- .../suite/mariabackup/unsupported_redo.result | 46 ++++++++++ .../suite/mariabackup/unsupported_redo.test | 92 +++++++++++++++++++ storage/innobase/include/log0recv.h | 35 +++++++ storage/innobase/log/log0recv.cc | 66 ++++++------- 5 files changed, 268 insertions(+), 50 deletions(-) create mode 100644 mysql-test/suite/mariabackup/unsupported_redo.result create mode 100644 mysql-test/suite/mariabackup/unsupported_redo.test diff --git a/extra/mariabackup/xtrabackup.cc b/extra/mariabackup/xtrabackup.cc index acc66238838..78003cd64af 100644 --- a/extra/mariabackup/xtrabackup.cc +++ b/extra/mariabackup/xtrabackup.cc @@ -449,6 +449,43 @@ void mdl_lock_all() } datafiles_iter_free(it); } + +/** Check if the space id belongs to the table which name should +be skipped based on the --tables, --tables-file and --table-exclude +options. +@param[in] space_id space id to check +@return true if the space id belongs to skip table/database list. */ +static bool backup_includes(space_id_t space_id) +{ + datafiles_iter_t *it = datafiles_iter_new(fil_system); + if (!it) + return true; + + while (fil_node_t *node = datafiles_iter_next(it)){ + if (space_id == 0 + || (node->space->id == space_id + && !check_if_skip_table(node->space->name))) { + + msg("mariabackup: Unsupported redo log detected " + "and it belongs to %s\n", + space_id ? 
node->name: "the InnoDB system tablespace"); + + msg("mariabackup: ALTER TABLE or OPTIMIZE TABLE " + "was being executed during the backup.\n"); + + if (!opt_lock_ddl_per_table) { + msg("mariabackup: Use --lock-ddl-per-table " + "parameter to lock all the table before " + "backup operation.\n"); + } + + return false; + } + } + + return true; +} + /* ======== Date copying thread context ======== */ typedef struct { @@ -2341,8 +2378,8 @@ lsn_t xtrabackup_copy_log(copy_logfile copy, lsn_t start_lsn, lsn_t end_lsn) { lsn_t scanned_lsn = start_lsn; - const byte* log_block = log_sys->buf; + bool more_data = false; for (ulint scanned_checkpoint = 0; scanned_lsn < end_lsn; @@ -2357,8 +2394,15 @@ xtrabackup_copy_log(copy_logfile copy, lsn_t start_lsn, lsn_t end_lsn) } scanned_checkpoint = checkpoint; + ulint data_len = log_block_get_data_len(log_block); + more_data = recv_sys_add_to_parsing_buf( + log_block, + scanned_lsn + data_len); + + recv_sys->scanned_lsn = scanned_lsn + data_len; + if (data_len == OS_FILE_LOG_BLOCK_SIZE) { /* We got a full log block. */ scanned_lsn += data_len; @@ -2374,6 +2418,15 @@ xtrabackup_copy_log(copy_logfile copy, lsn_t start_lsn, lsn_t end_lsn) } } + if (more_data && recv_parse_log_recs(0, STORE_NO, false)) { + + msg("mariabackup: copying the log failed \n"); + + return(0); + } + + recv_sys_justify_left_parsing_buf(); + log_sys->log.scanned_lsn = scanned_lsn; end_lsn = copy == COPY_LAST @@ -2407,9 +2460,12 @@ xtrabackup_copy_logfile(copy_logfile copy) lsn_t start_lsn; lsn_t end_lsn; + recv_sys->parse_start_lsn = log_copy_scanned_lsn; + recv_sys->scanned_lsn = log_copy_scanned_lsn; + recv_sys->recovered_lsn = log_copy_scanned_lsn; + start_lsn = ut_uint64_align_down(log_copy_scanned_lsn, OS_FILE_LOG_BLOCK_SIZE); - /* When copying the first or last part of the log, retry a few times to ensure that all log up to the last checkpoint will be read. 
*/ @@ -3569,8 +3625,6 @@ xtrabackup_backup_func() "or RENAME TABLE during the backup, inconsistent backup will be " "produced.\n"); - - /* initialize components */ if(innodb_init_param()) { fail: @@ -3842,6 +3896,14 @@ reread_log_header: &io_watching_thread_id); } + /* Populate fil_system with tablespaces to copy */ + err = xb_load_tablespaces(); + if (err != DB_SUCCESS) { + msg("mariabackup: error: xb_load_tablespaces() failed with" + " error %s.\n", ut_strerr(err)); + goto fail; + } + /* copy log file by current position */ log_copy_scanned_lsn = checkpoint_lsn_start; if (xtrabackup_copy_logfile(COPY_FIRST)) @@ -3851,14 +3913,6 @@ reread_log_header: log_copying_running = true; os_thread_create(log_copying_thread, NULL, &log_copying_thread_id); - /* Populate fil_system with tablespaces to copy */ - err = xb_load_tablespaces(); - if (err != DB_SUCCESS) { - msg("mariabackup: error: xb_load_tablespaces() failed with" - " error code %u\n", err); - goto fail; - } - /* FLUSH CHANGED_PAGE_BITMAPS call */ if (!flush_changed_page_bitmaps()) { goto fail; @@ -5072,6 +5126,7 @@ handle_options(int argc, char **argv, char ***argv_client, char ***argv_server) srv_operation = SRV_OPERATION_RESTORE; files_charset_info = &my_charset_utf8_general_ci; + check_if_backup_includes = backup_includes; setup_error_messages(); sys_var_init(); diff --git a/mysql-test/suite/mariabackup/unsupported_redo.result b/mysql-test/suite/mariabackup/unsupported_redo.result new file mode 100644 index 00000000000..be563682697 --- /dev/null +++ b/mysql-test/suite/mariabackup/unsupported_redo.result @@ -0,0 +1,46 @@ +call mtr.add_suppression("InnoDB: New log files created"); +call mtr.add_suppression("InnoDB: Operating system error number .* in a file operation"); +call mtr.add_suppression("InnoDB: The error means the system cannot find the path specified"); +call mtr.add_suppression("InnoDB: If you are installing InnoDB, remember that you must create directories yourself, InnoDB does not create them"); 
+call mtr.add_suppression("InnoDB: Ignoring tablespace for `test`\\.`t21` because it could not be opened"); +call mtr.add_suppression("InnoDB: Cannot open datafile for read-only: "); +call mtr.add_suppression("Table .* in the InnoDB data dictionary has tablespace id .*, but tablespace with that id or name does not exist"); +CREATE TABLE t1(i INT PRIMARY KEY auto_increment, a int) ENGINE INNODB; +alter table t1 FORCE, algorithm=inplace; +# Fails during full backup +DROP TABLE t1; +CREATE TABLE t1(i INT PRIMARY KEY auto_increment, a int) ENGINE INNODB; +INSERT INTO t1(a) select 1 union select 2 union select 3; +# Create full backup , modify table, then fails during creation of +# incremental/differential backup +alter table t1 force, algorithm=inplace; +drop table t1; +CREATE TABLE t1(i INT) ENGINE INNODB; +INSERT INTO t1 VALUES(1); +CREATE TABLE t21(i INT) ENGINE INNODB; +INSERT INTO t21 VALUES(1); +CREATE TABLE t2(i int) ENGINE INNODB; +ALTER TABLE t21 FORCE, ALGORITHM=INPLACE; +# Create partial backup (excluding table t21), Ignore the +# unsupported redo log for the table t21. +t1.frm +t1.ibd +t2.frm +t2.ibd +# After partial restore operation, t21 files will be missing but t21 +# table information will be present in dictionary. It will +# restrict creating the table t21 in the future test. To avoid +# that, take the copy of t21 files and drop the table later. 
+# Prepare the full backup +# shutdown server +# remove datadir +# xtrabackup move back +# restart server +SHOW TABLES; +Tables_in_test +t1 +t2 +DROP TABLE t1; +DROP TABLE t2; +# Move the t21 files into data directory +DROP TABLE t21; diff --git a/mysql-test/suite/mariabackup/unsupported_redo.test b/mysql-test/suite/mariabackup/unsupported_redo.test new file mode 100644 index 00000000000..0e3cd9f6703 --- /dev/null +++ b/mysql-test/suite/mariabackup/unsupported_redo.test @@ -0,0 +1,92 @@ +--source include/have_innodb.inc +call mtr.add_suppression("InnoDB: New log files created"); +call mtr.add_suppression("InnoDB: Operating system error number .* in a file operation"); +call mtr.add_suppression("InnoDB: The error means the system cannot find the path specified"); +call mtr.add_suppression("InnoDB: If you are installing InnoDB, remember that you must create directories yourself, InnoDB does not create them"); +call mtr.add_suppression("InnoDB: Ignoring tablespace for `test`\\.`t21` because it could not be opened"); +call mtr.add_suppression("InnoDB: Cannot open datafile for read-only: "); +call mtr.add_suppression("Table .* in the InnoDB data dictionary has tablespace id .*, but tablespace with that id or name does not exist"); + +let $basedir=$MYSQLTEST_VARDIR/tmp/backup; +let $incremental_dir=$MYSQLTEST_VARDIR/tmp/backup_inc1; + +CREATE TABLE t1(i INT PRIMARY KEY auto_increment, a int) ENGINE INNODB; +alter table t1 FORCE, algorithm=inplace; + +# Below mariabackup operation may complete successfully if checkpoint happens +# after the alter table command. 
+ +echo # Fails during full backup; +--disable_result_log +--error 1 +exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --backup --target-dir=$basedir; +--enable_result_log + +DROP TABLE t1; +rmdir $basedir; + +CREATE TABLE t1(i INT PRIMARY KEY auto_increment, a int) ENGINE INNODB; + +INSERT INTO t1(a) select 1 union select 2 union select 3; + +--echo # Create full backup , modify table, then fails during creation of +--echo # incremental/differential backup +--disable_result_log +exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --backup --target-dir=$basedir; +--enable_result_log + +alter table t1 force, algorithm=inplace; + +--disable_result_log +--error 1 +exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --backup --target-dir=$incremental_dir --incremental-basedir=$basedir; +--enable_result_log + +drop table t1; +rmdir $basedir; +rmdir $incremental_dir; + +CREATE TABLE t1(i INT) ENGINE INNODB; +INSERT INTO t1 VALUES(1); +CREATE TABLE t21(i INT) ENGINE INNODB; +INSERT INTO t21 VALUES(1); + +let $MYSQLD_DATADIR= `select @@datadir`; +let $targetdir=$MYSQLTEST_VARDIR/tmp/bk; +let old_datadir=$MYSQLTEST_VARDIR/tmp/old_data; +--mkdir $old_datadir + +CREATE TABLE t2(i int) ENGINE INNODB; +ALTER TABLE t21 FORCE, ALGORITHM=INPLACE; + +--echo # Create partial backup (excluding table t21), Ignore the +--echo # unsupported redo log for the table t21. + +--disable_result_log +exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --backup "--tables-exclude=test.t21" --target-dir=$targetdir; +--enable_result_log +--list_files $targetdir/test + +--echo # After partial restore operation, t21 files will be missing but t21 +--echo # table information will be present in dictionary. It will +--echo # restrict creating the table t21 in the future test. To avoid +--echo # that, take the copy of t21 files and drop the table later. 
+--copy_file $MYSQLD_DATADIR/test/t21.frm $old_datadir/t21.frm + +--echo # Prepare the full backup +--disable_result_log +exec $XTRABACKUP --prepare --target-dir=$targetdir; +--source include/restart_and_restore.inc +--enable_result_log + +SHOW TABLES; + +DROP TABLE t1; +DROP TABLE t2; + +--echo # Move the t21 files into data directory +--copy_file $old_datadir/t21.frm $MYSQLD_DATADIR/test/t21.frm + +DROP TABLE t21; +rmdir $targetdir; +rmdir $old_datadir; diff --git a/storage/innobase/include/log0recv.h b/storage/innobase/include/log0recv.h index 29784a59d95..065326ead88 100644 --- a/storage/innobase/include/log0recv.h +++ b/storage/innobase/include/log0recv.h @@ -122,6 +122,41 @@ recv_sys_var_init(void); void recv_apply_hashed_log_recs(bool last_batch); +/** Whether to store redo log records to the hash table */ +enum store_t { + /** Do not store redo log records. */ + STORE_NO, + /** Store redo log records. */ + STORE_YES, + /** Store redo log records if the tablespace exists. */ + STORE_IF_EXISTS +}; + + +/** Adds data from a new log block to the parsing buffer of recv_sys if +recv_sys->parse_start_lsn is non-zero. +@param[in] log_block log block to add +@param[in] scanned_lsn lsn of how far we were able to find + data in this log block +@return true if more data added */ +bool recv_sys_add_to_parsing_buf(const byte* log_block, lsn_t scanned_lsn); + +/** Parse log records from a buffer and optionally store them to a +hash table to wait merging to file pages. +@param[in] checkpoint_lsn the LSN of the latest checkpoint +@param[in] store whether to store page operations +@param[in] apply whether to apply the records +@return whether MLOG_CHECKPOINT record was seen the first time, +or corruption was noticed */ +bool recv_parse_log_recs(lsn_t checkpoint_lsn, store_t store, bool apply); + +/** Moves the parsing buffer data left to the buffer start. 
*/ +void recv_sys_justify_left_parsing_buf(); + +/** Backup function checks whether the space id belongs to +the skip table list given in the mariabackup option. */ +extern bool(*check_if_backup_includes)(ulint space_id); + /** Block of log record data */ struct recv_data_t{ recv_data_t* next; /*!< pointer to the next block or NULL */ diff --git a/storage/innobase/log/log0recv.cc b/storage/innobase/log/log0recv.cc index e55c01aca3a..b41ea4dabee 100644 --- a/storage/innobase/log/log0recv.cc +++ b/storage/innobase/log/log0recv.cc @@ -158,6 +158,10 @@ typedef std::map< static recv_spaces_t recv_spaces; +/** Backup function checks whether the space id belongs to +the skip table list given in the mariabackup option. */ +bool(*check_if_backup_includes)(ulint space_id); + /** Process a file name from a MLOG_FILE_* record. @param[in,out] name file name @param[in] len length of the file name @@ -173,6 +177,14 @@ fil_name_process( ulint space_id, bool deleted) { + if (srv_operation == SRV_OPERATION_BACKUP) { + return true; + } + + ut_ad(srv_operation == SRV_OPERATION_NORMAL + || srv_operation == SRV_OPERATION_RESTORE + || srv_operation == SRV_OPERATION_RESTORE_EXPORT); + bool processed = true; /* We will also insert space=NULL into the map, so that @@ -557,7 +569,6 @@ recv_sys_init() recv_sys->addr_hash = hash_create(size / 512); recv_sys->progress_time = ut_time(); - recv_max_page_lsn = 0; /* Call the constructor for recv_sys_t::dblwr member */ @@ -2297,30 +2308,14 @@ recv_report_corrupt_log( return(true); } -/** Whether to store redo log records to the hash table */ -enum store_t { - /** Do not store redo log records. */ - STORE_NO, - /** Store redo log records. */ - STORE_YES, - /** Store redo log records if the tablespace exists. */ - STORE_IF_EXISTS -}; - /** Parse log records from a buffer and optionally store them to a hash table to wait merging to file pages. 
@param[in] checkpoint_lsn the LSN of the latest checkpoint @param[in] store whether to store page operations @param[in] apply whether to apply the records -@param[out] err DB_SUCCESS or error code @return whether MLOG_CHECKPOINT record was seen the first time, or corruption was noticed */ -static MY_ATTRIBUTE((warn_unused_result)) -bool -recv_parse_log_recs( - lsn_t checkpoint_lsn, - store_t store, - bool apply) +bool recv_parse_log_recs(lsn_t checkpoint_lsn, store_t store, bool apply) { byte* ptr; byte* end_ptr; @@ -2460,11 +2455,14 @@ loop: } /* fall through */ case MLOG_INDEX_LOAD: - /* Mariabackup FIXME: Report an error - when encountering MLOG_INDEX_LOAD on - --prepare or already on --backup. */ - ut_a(type != MLOG_INDEX_LOAD - || srv_operation == SRV_OPERATION_NORMAL); + if (type == MLOG_INDEX_LOAD) { + if (check_if_backup_includes + && !check_if_backup_includes(space)) { + ut_ad(srv_operation + == SRV_OPERATION_BACKUP); + return true; + } + } /* fall through */ case MLOG_FILE_NAME: case MLOG_FILE_DELETE: @@ -2652,17 +2650,13 @@ loop: goto loop; } -/*******************************************************//** -Adds data from a new log block to the parsing buffer of recv_sys if +/** Adds data from a new log block to the parsing buffer of recv_sys if recv_sys->parse_start_lsn is non-zero. +@param[in] log_block log block to add +@param[in] scanned_lsn lsn of how far we were able to find + data in this log block @return true if more data added */ -static -bool -recv_sys_add_to_parsing_buf( -/*========================*/ - const byte* log_block, /*!< in: log block */ - lsn_t scanned_lsn) /*!< in: lsn of how far we were able - to find data in this log block */ +bool recv_sys_add_to_parsing_buf(const byte* log_block, lsn_t scanned_lsn) { ulint more_len; ulint data_len; @@ -2727,12 +2721,8 @@ recv_sys_add_to_parsing_buf( return(true); } -/*******************************************************//** -Moves the parsing buffer data left to the buffer start. 
*/ -static -void -recv_sys_justify_left_parsing_buf(void) -/*===================================*/ +/** Moves the parsing buffer data left to the buffer start. */ +void recv_sys_justify_left_parsing_buf() { ut_memmove(recv_sys->buf, recv_sys->buf + recv_sys->recovered_offset, recv_sys->len - recv_sys->recovered_offset); From 76ae6e725d647fae8a00e0cac71cdc73f66e41d0 Mon Sep 17 00:00:00 2001 From: Thirunarayanan Balathandayuthapani Date: Tue, 13 Mar 2018 15:20:00 +0530 Subject: [PATCH 024/139] MDEV-15384 buf_flush_LRU_list_batch() always reports n->flushed=0, n->evicted=0 - buf_flush_LRU_list_batch() initializes the count to zero and updates them correctly. --- .../suite/innodb/r/purge_secondary.result | 16 ++++++++++++++ mysql-test/suite/innodb/t/purge_secondary.opt | 2 ++ .../suite/innodb/t/purge_secondary.test | 13 +++++++++++ storage/innobase/buf/buf0flu.cc | 22 +++++++++---------- 4 files changed, 41 insertions(+), 12 deletions(-) diff --git a/mysql-test/suite/innodb/r/purge_secondary.result b/mysql-test/suite/innodb/r/purge_secondary.result index 2312434a2bd..eff5e71c9cc 100644 --- a/mysql-test/suite/innodb/r/purge_secondary.result +++ b/mysql-test/suite/innodb/r/purge_secondary.result @@ -123,6 +123,10 @@ LENGTH(l) 11197 INSERT INTO t1 (a) SELECT NULL FROM t1; INSERT INTO t1 (a) SELECT NULL FROM t1; +INSERT INTO t1 (a) SELECT NULL FROM t1; +INSERT INTO t1 (a) SELECT NULL FROM t1; +INSERT INTO t1 (a) SELECT NULL FROM t1; +INSERT INTO t1 (a) SELECT NULL FROM t1; CHECK TABLE t1; Table Op Msg_type Msg_text test.t1 check status OK @@ -139,6 +143,18 @@ SELECT OTHER_INDEX_SIZE FROM INFORMATION_SCHEMA.INNODB_SYS_TABLESTATS WHERE NAME='test/t1'; OTHER_INDEX_SIZE 1 +SELECT NAME, SUBSYSTEM FROM INFORMATION_SCHEMA.INNODB_METRICS +WHERE NAME="buffer_LRU_batch_evict_total_pages" AND COUNT > 0; +NAME SUBSYSTEM +buffer_LRU_batch_evict_total_pages buffer +SELECT NAME, SUBSYSTEM FROM INFORMATION_SCHEMA.INNODB_METRICS +WHERE NAME="buffer_LRU_batch_flush_total_pages" AND COUNT > 
0; +NAME SUBSYSTEM +buffer_LRU_batch_flush_total_pages buffer +SELECT (variable_value > 0) FROM information_schema.global_status +WHERE LOWER(variable_name) LIKE 'INNODB_BUFFER_POOL_PAGES_FLUSHED'; +(variable_value > 0) +1 # Note: The OTHER_INDEX_SIZE does not cover any SPATIAL INDEX. # To test that all indexes were emptied, replace DROP TABLE # with the following, and examine the root pages in t1.ibd: diff --git a/mysql-test/suite/innodb/t/purge_secondary.opt b/mysql-test/suite/innodb/t/purge_secondary.opt index 99bf0e5a28b..e98e639d5f4 100644 --- a/mysql-test/suite/innodb/t/purge_secondary.opt +++ b/mysql-test/suite/innodb/t/purge_secondary.opt @@ -1 +1,3 @@ --innodb-sys-tablestats +--innodb_buffer_pool_size=5M +--innodb_monitor_enable=module_buffer diff --git a/mysql-test/suite/innodb/t/purge_secondary.test b/mysql-test/suite/innodb/t/purge_secondary.test index 47cfaec41ca..e447e63389e 100644 --- a/mysql-test/suite/innodb/t/purge_secondary.test +++ b/mysql-test/suite/innodb/t/purge_secondary.test @@ -109,6 +109,10 @@ INSERT INTO t1 () VALUES (),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(); SELECT LENGTH(l) FROM t1; INSERT INTO t1 (a) SELECT NULL FROM t1; INSERT INTO t1 (a) SELECT NULL FROM t1; +INSERT INTO t1 (a) SELECT NULL FROM t1; +INSERT INTO t1 (a) SELECT NULL FROM t1; +INSERT INTO t1 (a) SELECT NULL FROM t1; +INSERT INTO t1 (a) SELECT NULL FROM t1; CHECK TABLE t1; UPDATE t1 SET c=true, l=ST_linefromtext('linestring(0 0,1 1,2 2)'); DELETE FROM t1; @@ -120,6 +124,15 @@ ANALYZE TABLE t1; SELECT OTHER_INDEX_SIZE FROM INFORMATION_SCHEMA.INNODB_SYS_TABLESTATS WHERE NAME='test/t1'; +SELECT NAME, SUBSYSTEM FROM INFORMATION_SCHEMA.INNODB_METRICS +WHERE NAME="buffer_LRU_batch_evict_total_pages" AND COUNT > 0; + +SELECT NAME, SUBSYSTEM FROM INFORMATION_SCHEMA.INNODB_METRICS +WHERE NAME="buffer_LRU_batch_flush_total_pages" AND COUNT > 0; + +SELECT (variable_value > 0) FROM information_schema.global_status +WHERE LOWER(variable_name) LIKE 
'INNODB_BUFFER_POOL_PAGES_FLUSHED'; + --echo # Note: The OTHER_INDEX_SIZE does not cover any SPATIAL INDEX. --echo # To test that all indexes were emptied, replace DROP TABLE --echo # with the following, and examine the root pages in t1.ibd: diff --git a/storage/innobase/buf/buf0flu.cc b/storage/innobase/buf/buf0flu.cc index 24b27d7462c..aa982ba0bdc 100644 --- a/storage/innobase/buf/buf0flu.cc +++ b/storage/innobase/buf/buf0flu.cc @@ -1631,8 +1631,6 @@ buf_flush_LRU_list_batch( { buf_page_t* bpage; ulint scanned = 0; - ulint evict_count = 0; - ulint count = 0; ulint free_len = UT_LIST_GET_LEN(buf_pool->free); ulint lru_len = UT_LIST_GET_LEN(buf_pool->LRU); ulint withdraw_depth = 0; @@ -1648,7 +1646,7 @@ buf_flush_LRU_list_batch( } for (bpage = UT_LIST_GET_LAST(buf_pool->LRU); - bpage != NULL && count + evict_count < max + bpage != NULL && n->flushed + n->evicted < max && free_len < srv_LRU_scan_depth + withdraw_depth && lru_len > BUF_LRU_MIN_LEN; ++scanned, @@ -1666,7 +1664,7 @@ buf_flush_LRU_list_batch( clean and is not IO-fixed or buffer fixed. */ mutex_exit(block_mutex); if (buf_LRU_free_page(bpage, true)) { - ++evict_count; + ++n->evicted; } } else if (buf_flush_ready_for_flush(bpage, BUF_FLUSH_LRU)) { /* Block is ready for flush. Dispatch an IO @@ -1674,7 +1672,7 @@ buf_flush_LRU_list_batch( free list in IO completion routine. */ mutex_exit(block_mutex); buf_flush_page_and_try_neighbors( - bpage, BUF_FLUSH_LRU, max, &count); + bpage, BUF_FLUSH_LRU, max, &n->flushed); } else { /* Can't evict or dispatch this block. Go to previous. */ @@ -1698,12 +1696,12 @@ buf_flush_LRU_list_batch( ut_ad(buf_pool_mutex_own(buf_pool)); - if (evict_count) { + if (n->evicted) { MONITOR_INC_VALUE_CUMULATIVE( MONITOR_LRU_BATCH_EVICT_TOTAL_PAGE, MONITOR_LRU_BATCH_EVICT_COUNT, MONITOR_LRU_BATCH_EVICT_PAGES, - evict_count); + n->evicted); } if (scanned) { @@ -2160,16 +2158,16 @@ buf_flush_lists( failure. 
*/ success = false; - continue; } + + n_flushed += n.flushed; } if (n_flushed) { buf_flush_stats(n_flushed, 0); - } - - if (n_processed) { - *n_processed = n_flushed; + if (n_processed) { + *n_processed = n_flushed; + } } return(success); From 9ee39d2b9b915a837fae966a95a565f3a0cc018b Mon Sep 17 00:00:00 2001 From: Daniele Sciascia Date: Fri, 9 Mar 2018 10:20:19 +0100 Subject: [PATCH 025/139] MDEV-13549 Fix for test galera.galera_var_max_ws_rows This patch re-enables test galera.galera_var_max_ws_rows. The test did not work because there were two distinct places where the server was incrementing member THD::wsrep_affected_rows before enforcing wsrep_max_ws_rows. Essentially, the test would fail because every inserted row was counted twice. The patch removes the extra code. --- mysql-test/suite/galera/disabled.def | 1 - sql/handler.cc | 33 ---------------------------- 2 files changed, 34 deletions(-) diff --git a/mysql-test/suite/galera/disabled.def b/mysql-test/suite/galera/disabled.def index d82d5dd2023..5abd32e5f0a 100644 --- a/mysql-test/suite/galera/disabled.def +++ b/mysql-test/suite/galera/disabled.def @@ -47,7 +47,6 @@ lp1376747 : MDEV-13549 Galera test failures galera_toi_ddl_nonconflicting : MDEV-13549 Galera test failures galera_parallel_simple : MDEV-13549 Galera test failures galera_admin : MDEV-13549 Galera test failures -galera_var_max_ws_rows : MDEV-13549 Galera test failures 10.1 MW-286 : MDEV-13549 Galera test failures 10.1 galera_as_master: MDEV-13549 Galera test failures 10.1 galera_pc_ignore_sb : MDEV-13549 Galera test failures 10.1 diff --git a/sql/handler.cc b/sql/handler.cc index b2a00e48d65..a570638fc1a 100644 --- a/sql/handler.cc +++ b/sql/handler.cc @@ -5769,8 +5769,6 @@ static int write_locked_table_maps(THD *thd) typedef bool Log_func(THD*, TABLE*, bool, const uchar*, const uchar*); -static int check_wsrep_max_ws_rows(); - static int binlog_log_row(TABLE* table, const uchar *before_record, const uchar *after_record, @@ -5824,13 +5822,6 @@ 
static int binlog_log_row(TABLE* table, bool const has_trans= thd->lex->sql_command == SQLCOM_CREATE_TABLE || table->file->has_transactions(); error= (*log_func)(thd, table, has_trans, before_record, after_record); - - /* - Now that the record has been logged, increment wsrep_affected_rows and - also check whether its within the allowable limits (wsrep_max_ws_rows). - */ - if (error == 0) - error= check_wsrep_max_ws_rows(); } } return error ? HA_ERR_RBR_LOGGING_FAILED : 0; @@ -5941,30 +5932,6 @@ int handler::ha_reset() } -static int check_wsrep_max_ws_rows() -{ -#ifdef WITH_WSREP - if (wsrep_max_ws_rows) - { - THD *thd= current_thd; - - if (!WSREP(thd)) - return 0; - - thd->wsrep_affected_rows++; - if (thd->wsrep_exec_mode != REPL_RECV && - thd->wsrep_affected_rows > wsrep_max_ws_rows) - { - trans_rollback_stmt(thd) || trans_rollback(thd); - my_message(ER_ERROR_DURING_COMMIT, "wsrep_max_ws_rows exceeded", MYF(0)); - return ER_ERROR_DURING_COMMIT; - } - } -#endif /* WITH_WSREP */ - return 0; -} - - int handler::ha_write_row(uchar *buf) { int error; From 30019a48bfc908d8842b4cd03aad4c80dc5c7134 Mon Sep 17 00:00:00 2001 From: Andrei Elkin Date: Fri, 9 Feb 2018 15:00:23 +0200 Subject: [PATCH 026/139] MDEV-12746 rpl.rpl_parallel_optimistic_nobinlog fails committing out of order at retry The test failures were of two sorts. One is that the number of retries what the slave thought as a temporary error exceeded the default value of the slave retry option. The 2nd issue was an out of order commit by transactions that were supposed to error out instead. Both issues are caused by the same reason that the post-temporary-error retry did not check possibly already existing error status. This is mended with refining conditions to retry. Specifically, a retrying worker checks `rpl_parallel_entry::stop_on_error_sub_id` that a potential failing predecessor could set to its own sub id. Now should the member be set the retrying follower errors out with ER_PRIOR_COMMIT_FAILED. 
--- .../suite/rpl/r/rpl_parallel_retry.result | 57 ++++++++++ .../suite/rpl/t/rpl_parallel_retry.test | 101 +++++++++++++++++- sql/rpl_parallel.cc | 39 ++++++- 3 files changed, 194 insertions(+), 3 deletions(-) diff --git a/mysql-test/suite/rpl/r/rpl_parallel_retry.result b/mysql-test/suite/rpl/r/rpl_parallel_retry.result index c4c56489aa4..66428c94086 100644 --- a/mysql-test/suite/rpl/r/rpl_parallel_retry.result +++ b/mysql-test/suite/rpl/r/rpl_parallel_retry.result @@ -120,6 +120,7 @@ connection server_2; SET sql_log_bin=0; CALL mtr.add_suppression("Slave worker thread retried transaction 10 time\\(s\\) in vain, giving up"); CALL mtr.add_suppression("Slave: Deadlock found when trying to get lock; try restarting transaction"); +CALL mtr.add_suppression("Slave worker thread retried transaction .* in vain, giving up"); SET sql_log_bin=1; SET @old_dbug= @@GLOBAL.debug_dbug; SET GLOBAL debug_dbug="+d,rpl_parallel_simulate_temp_err_gtid_0_x_100,rpl_parallel_simulate_infinite_temp_err_gtid_0_x_100"; @@ -340,4 +341,60 @@ include/start_slave.inc connection server_1; DROP TABLE t1, t2, t3, t4; DROP function foo; +connection server_2; +connection server_1; +CREATE TABLE t1 (a int PRIMARY KEY, b INT) ENGINE=InnoDB; +connection server_2; +include/stop_slave.inc +SET @old_parallel_threads=@@GLOBAL.slave_parallel_threads; +SET @@GLOBAL.slave_parallel_threads=5; +SET @old_parallel_mode=@@GLOBAL.slave_parallel_mode; +SET @@GLOBAL.slave_parallel_mode='aggressive'; +SET @old_lock_wait_timeout=@@GLOBAL.innodb_lock_wait_timeout; +SET @@GLOBAL.innodb_lock_wait_timeout=2; +SET @old_slave_transaction_retries=@@GLOBAL.slave_transaction_retries; +SET @@GLOBAL.slave_transaction_retries=1; +# Spoilers on the slave side causing temporary errors +connect spoiler_21,127.0.0.1,root,,test,$SLAVE_MYPORT; +BEGIN; +INSERT INTO t1 SET a=1,b=2; +connect spoiler_22,127.0.0.1,root,,test,$SLAVE_MYPORT; +BEGIN; +INSERT INTO t1 SET a=2,b=2; +# Master payload +connection server_1; +SET 
@@SESSION.GTID_SEQ_NO=1000; +INSERT INTO t1 SET a=1,b=1; +SET @@SESSION.GTID_SEQ_NO=1001; +INSERT INTO t1 SET a=2,b=1; +# Start slave whose both appliers is destined to being blocked +connection server_2; +SET @old_dbug= @@GLOBAL.debug_dbug; +SET @@GLOBAL.debug_dbug="+d,rpl_parallel_simulate_wait_at_retry"; +include/start_slave.inc +# Make sure the 2nd seqno_1001 worker has gotten to waiting +# Signal to the 1st to proceed after it has reached termination state +SET @@DEBUG_SYNC='now SIGNAL proceed_by_1000'; +connection spoiler_21; +ROLLBACK; +# Release the 2nd worker to proceed +connection spoiler_22; +ROLLBACK; +connection server_2; +SET @@DEBUG_SYNC='now SIGNAL proceed_by_1001'; +# observe how it all ends +# Wait for the workers to go home and check the result of applying +# which is OK +connection server_2; +include/stop_slave.inc +SET @@GLOBAL.slave_parallel_threads=@old_parallel_threads; +SET @@GLOBAL.slave_parallel_mode=@old_parallel_mode; +SET @@GLOBAL.innodb_lock_wait_timeout=@old_lock_wait_timeout; +SET @@GLOBAL.slave_transaction_retries=@old_slave_transaction_retries; +SET @@GLOBAL.debug_dbug=@old_dbug; +SET debug_sync='RESET'; +include/start_slave.inc +connection server_1; +DROP TABLE t1; +connection server_2; include/rpl_end.inc diff --git a/mysql-test/suite/rpl/t/rpl_parallel_retry.test b/mysql-test/suite/rpl/t/rpl_parallel_retry.test index b3a8ea45cf0..96863f9021d 100644 --- a/mysql-test/suite/rpl/t/rpl_parallel_retry.test +++ b/mysql-test/suite/rpl/t/rpl_parallel_retry.test @@ -128,6 +128,7 @@ SELECT * FROM t1 ORDER BY a; SET sql_log_bin=0; CALL mtr.add_suppression("Slave worker thread retried transaction 10 time\\(s\\) in vain, giving up"); CALL mtr.add_suppression("Slave: Deadlock found when trying to get lock; try restarting transaction"); +CALL mtr.add_suppression("Slave worker thread retried transaction .* in vain, giving up"); SET sql_log_bin=1; SET @old_dbug= @@GLOBAL.debug_dbug; @@ -371,7 +372,7 @@ SELECT * FROM t3 ORDER BY a; SET 
binlog_format=@old_format; -# Clean up. +# Clean up of the above part. --connection server_2 --source include/stop_slave.inc SET GLOBAL slave_parallel_threads=@old_parallel_threads; @@ -381,4 +382,102 @@ SET GLOBAL slave_parallel_threads=@old_parallel_threads; DROP TABLE t1, t2, t3, t4; DROP function foo; +--sync_slave_with_master server_2 + +# +# MDEV-12746 rpl.rpl_parallel_optimistic_nobinlog fails committing out of order at retry +# + +--connection server_1 +CREATE TABLE t1 (a int PRIMARY KEY, b INT) ENGINE=InnoDB; + + +# Replicate create-t1 and prepare to re-start slave in optimistic mode +--sync_slave_with_master server_2 +--source include/stop_slave.inc +SET @old_parallel_threads=@@GLOBAL.slave_parallel_threads; +SET @@GLOBAL.slave_parallel_threads=5; +SET @old_parallel_mode=@@GLOBAL.slave_parallel_mode; +SET @@GLOBAL.slave_parallel_mode='aggressive'; +SET @old_lock_wait_timeout=@@GLOBAL.innodb_lock_wait_timeout; +SET @@GLOBAL.innodb_lock_wait_timeout=2; +SET @old_slave_transaction_retries=@@GLOBAL.slave_transaction_retries; +SET @@GLOBAL.slave_transaction_retries=1; + +--echo # Spoilers on the slave side causing temporary errors +--connect (spoiler_21,127.0.0.1,root,,test,$SLAVE_MYPORT) +BEGIN; + INSERT INTO t1 SET a=1,b=2; + +--connect (spoiler_22,127.0.0.1,root,,test,$SLAVE_MYPORT) +BEGIN; + INSERT INTO t1 SET a=2,b=2; + +--echo # Master payload +--connection server_1 +SET @@SESSION.GTID_SEQ_NO=1000; +INSERT INTO t1 SET a=1,b=1; +SET @@SESSION.GTID_SEQ_NO=1001; +INSERT INTO t1 SET a=2,b=1; + +--echo # Start slave whose both appliers is destined to being blocked +--connection server_2 +SET @old_dbug= @@GLOBAL.debug_dbug; +SET @@GLOBAL.debug_dbug="+d,rpl_parallel_simulate_wait_at_retry"; +--source include/start_slave.inc + +--echo # Make sure the 2nd seqno_1001 worker has gotten to waiting +--let $wait_condition= SELECT count(*) FROM information_schema.processlist WHERE state LIKE '%debug sync point: now%'; +--source include/wait_condition.inc + + +--echo # 
Signal to the 1st to proceed after it has reached termination state +SET @@DEBUG_SYNC='now SIGNAL proceed_by_1000'; +--connection spoiler_21 +ROLLBACK; + +--echo # Release the 2nd worker to proceed +--connection spoiler_22 +ROLLBACK; +--connection server_2 +SET @@DEBUG_SYNC='now SIGNAL proceed_by_1001'; + +--echo # observe how it all ends +if (`SELECT count(*) = 1 FROM t1 WHERE a = 1`) +{ + --echo "*** Unexpected commit by the first Worker ***" + SELECT * from t1; + --die +} + +--echo # Wait for the workers to go home and check the result of applying +--let $wait_condition=SELECT count(*) = 0 FROM information_schema.processlist WHERE command = 'Slave_worker' +--source include/wait_condition.inc +if (`SELECT count(*) = 1 FROM t1 WHERE a = 2`) +{ + --echo + --echo "*** Error: congrats, you hit MDEV-12746 issue. ***" + --echo + --die +} +--echo # which is OK + +# +# Clean up +# +--connection server_2 +--source include/stop_slave.inc +SET @@GLOBAL.slave_parallel_threads=@old_parallel_threads; +SET @@GLOBAL.slave_parallel_mode=@old_parallel_mode; +SET @@GLOBAL.innodb_lock_wait_timeout=@old_lock_wait_timeout; +SET @@GLOBAL.slave_transaction_retries=@old_slave_transaction_retries; +SET @@GLOBAL.debug_dbug=@old_dbug; +SET debug_sync='RESET'; +--source include/start_slave.inc + +--connection server_1 +DROP TABLE t1; + +--sync_slave_with_master server_2 + --source include/rpl_end.inc diff --git a/sql/rpl_parallel.cc b/sql/rpl_parallel.cc index 34f9113f7fe..5d1e5418925 100644 --- a/sql/rpl_parallel.cc +++ b/sql/rpl_parallel.cc @@ -229,6 +229,14 @@ finish_event_group(rpl_parallel_thread *rpt, uint64 sub_id, entry->stop_on_error_sub_id= sub_id; mysql_mutex_unlock(&entry->LOCK_parallel_entry); + DBUG_EXECUTE_IF("rpl_parallel_simulate_wait_at_retry", { + if (rgi->current_gtid.seq_no == 1000) { + DBUG_ASSERT(entry->stop_on_error_sub_id == sub_id); + debug_sync_set_action(thd, + STRING_WITH_LEN("now WAIT_FOR proceed_by_1000")); + } + }); + if (rgi->killed_for_retry == 
rpl_group_info::RETRY_KILL_PENDING) wait_for_pending_deadlock_kill(thd, rgi); thd->clear_error(); @@ -716,12 +724,20 @@ do_retry: unregistering (and later re-registering) the wait. */ if(thd->wait_for_commit_ptr) - thd->wait_for_commit_ptr->unregister_wait_for_prior_commit(); + thd->wait_for_commit_ptr->unregister_wait_for_prior_commit(); DBUG_EXECUTE_IF("inject_mdev8031", { /* Simulate that we get deadlock killed at this exact point. */ rgi->killed_for_retry= rpl_group_info::RETRY_KILL_KILLED; thd->set_killed(KILL_CONNECTION); }); + DBUG_EXECUTE_IF("rpl_parallel_simulate_wait_at_retry", { + if (rgi->current_gtid.seq_no == 1001) { + debug_sync_set_action(thd, + STRING_WITH_LEN("rpl_parallel_simulate_wait_at_retry WAIT_FOR proceed_by_1001")); + } + DEBUG_SYNC(thd, "rpl_parallel_simulate_wait_at_retry"); + }); + rgi->cleanup_context(thd, 1); wait_for_pending_deadlock_kill(thd, rgi); thd->reset_killed(); @@ -745,7 +761,26 @@ do_retry: for (;;) { mysql_mutex_lock(&entry->LOCK_parallel_entry); - register_wait_for_prior_event_group_commit(rgi, entry); + if (entry->stop_on_error_sub_id == (uint64) ULONGLONG_MAX || +#ifndef DBUG_OFF + (DBUG_EVALUATE_IF("simulate_mdev_12746", 1, 0)) || +#endif + rgi->gtid_sub_id < entry->stop_on_error_sub_id) + { + register_wait_for_prior_event_group_commit(rgi, entry); + } + else + { + /* + A failure of a preceeding "parent" transaction may not be + seen by the current one through its own worker_error. + Such induced error gets set by ourselves now. + */ + err= rgi->worker_error= 1; + my_error(ER_PRIOR_COMMIT_FAILED, MYF(0)); + mysql_mutex_unlock(&entry->LOCK_parallel_entry); + goto err; + } mysql_mutex_unlock(&entry->LOCK_parallel_entry); /* From 649b7a64efba4620d79cd5a20560b11ffd4ae2a5 Mon Sep 17 00:00:00 2001 From: Sergey Vojtovich Date: Tue, 13 Mar 2018 14:42:15 +0400 Subject: [PATCH 027/139] MDEV-9592 - New 'Normal shutdown' log format can be confusing Elaborate shutdown message. 
--- mysql-test/r/shutdown.result | 2 +- mysql-test/t/shutdown.test | 2 +- sql/share/errmsg-utf8.txt | 4 ++-- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/mysql-test/r/shutdown.result b/mysql-test/r/shutdown.result index be2eb16470c..7a69f58ffd9 100644 --- a/mysql-test/r/shutdown.result +++ b/mysql-test/r/shutdown.result @@ -13,4 +13,4 @@ drop user user1@localhost; # # MDEV-8491 - On shutdown, report the user and the host executed that. # -FOUND 2 /mysqld(\.exe)? \(root\[root\] @ localhost \[(::1)?\]\): Normal shutdown/ in mysqld.1.err +FOUND 2 /mysqld(\.exe)? \(initiated by: root\[root\] @ localhost \[(::1)?\]\): Normal shutdown/ in mysqld.1.err diff --git a/mysql-test/t/shutdown.test b/mysql-test/t/shutdown.test index 775628e441d..e423725177b 100644 --- a/mysql-test/t/shutdown.test +++ b/mysql-test/t/shutdown.test @@ -34,5 +34,5 @@ drop user user1@localhost; --echo # MDEV-8491 - On shutdown, report the user and the host executed that. --echo # --let SEARCH_FILE= $MYSQLTEST_VARDIR/log/mysqld.1.err ---let SEARCH_PATTERN=mysqld(\.exe)? \(root\[root\] @ localhost \[(::1)?\]\): Normal shutdown +--let SEARCH_PATTERN=mysqld(\.exe)? 
\(initiated by: root\[root\] @ localhost \[(::1)?\]\): Normal shutdown --source include/search_pattern_in_file.inc diff --git a/sql/share/errmsg-utf8.txt b/sql/share/errmsg-utf8.txt index ade9d53cdb7..4285f5cffa5 100644 --- a/sql/share/errmsg-utf8.txt +++ b/sql/share/errmsg-utf8.txt @@ -1807,7 +1807,7 @@ ER_NORMAL_SHUTDOWN cze "%s (%s): normální ukončení" dan "%s (%s): Normal nedlukning" nla "%s (%s): Normaal afgesloten " - eng "%s (%s): Normal shutdown" + eng "%s (initiated by: %s): Normal shutdown" est "%s (%s): MariaDB lõpetas" fre "%s (%s): Arrêt normal du serveur" ger "%s (%s): Normal heruntergefahren" @@ -1822,7 +1822,7 @@ ER_NORMAL_SHUTDOWN pol "%s (%s): Standardowe zakończenie działania" por "%s (%s): 'Shutdown' normal" rum "%s (%s): Terminare normala" - rus "%s (%s): Корректная остановка" + rus "%s (инициирована пользователем: %s): Корректная остановка" serbian "%s (%s): Normalno gašenje" slo "%s (%s): normálne ukončenie" spa "%s (%s): Apagado normal" From 9953588ac48bf762dfcef97ca5b92fa3e7e8cb62 Mon Sep 17 00:00:00 2001 From: Daniele Sciascia Date: Tue, 13 Mar 2018 14:27:44 +0100 Subject: [PATCH 028/139] MDEV-13549 Fix and re-enable MTR test galera.MW-366 Test galera.MW-366 is not deterministic and depends on timing assumptions. The test occasionally fails after checking the number of 'system user' processes in processlist after changing the value of variable global wsrep_slave_threads, like this: ``` SET GLOBAL wsrep_slave_threads = x; --sleep 0.5 SELECT COUNT(*) = x FROM INFORMATION_SCHEMA.PROCESSLIST WHERE USER = 'system user'; ``` The problem is that the number of slave threads is internally adjusted 'asynchronously', and it may take some time to spawn/kill new threads, especially in a heavily loaded system. 
This patch removes the '--sleep 0.5' statements from the test and replaces those with appropriate wait conditions, like this: ``` SET GLOBAL wsrep_slave_threads = x; let $wait_condition = SELECT COUNT(*) = x FROM ...; --source include/wait_condition.inc ``` --- mysql-test/suite/galera/disabled.def | 1 - mysql-test/suite/galera/r/MW-336.result | 9 --------- mysql-test/suite/galera/t/MW-336.test | 19 +++++++++++-------- 3 files changed, 11 insertions(+), 18 deletions(-) diff --git a/mysql-test/suite/galera/disabled.def b/mysql-test/suite/galera/disabled.def index d82d5dd2023..ca7d88e4264 100644 --- a/mysql-test/suite/galera/disabled.def +++ b/mysql-test/suite/galera/disabled.def @@ -9,7 +9,6 @@ # Do not use any TAB characters for whitespace. # ############################################################################## -MW-336 : MDEV-13549 Galera test failures galera_gra_log : MDEV-13549 Galera test failures galera_flush_local : MDEV-13549 Galera test failures galera_flush : MDEV-13549 Galera test failures diff --git a/mysql-test/suite/galera/r/MW-336.result b/mysql-test/suite/galera/r/MW-336.result index 9bdb61c1a9c..34874198c6f 100644 --- a/mysql-test/suite/galera/r/MW-336.result +++ b/mysql-test/suite/galera/r/MW-336.result @@ -3,13 +3,7 @@ SET GLOBAL wsrep_slave_threads = 10; SET GLOBAL wsrep_slave_threads = 1; INSERT INTO t1 VALUES (1); SET GLOBAL wsrep_slave_threads = 10; -SELECT COUNT(*) = 11 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE USER = 'system user'; -COUNT(*) = 11 -1 SET GLOBAL wsrep_slave_threads = 20; -SELECT COUNT(*) = 21 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE USER = 'system user'; -COUNT(*) = 21 -1 SET GLOBAL wsrep_slave_threads = 1; INSERT INTO t1 VALUES (1); INSERT INTO t1 VALUES (2); @@ -35,8 +29,5 @@ INSERT INTO t1 VALUES (17); INSERT INTO t1 VALUES (18); INSERT INTO t1 VALUES (19); INSERT INTO t1 VALUES (20); -SELECT COUNT(*) = 2 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE USER = 'system user'; -COUNT(*) = 2 -1 SET GLOBAL wsrep_slave_threads = 
1; DROP TABLE t1; diff --git a/mysql-test/suite/galera/t/MW-336.test b/mysql-test/suite/galera/t/MW-336.test index 79d8951a822..9572489ebe9 100644 --- a/mysql-test/suite/galera/t/MW-336.test +++ b/mysql-test/suite/galera/t/MW-336.test @@ -10,20 +10,20 @@ CREATE TABLE t1 (f1 INTEGER) Engine=InnoDB; --connection node_1 SET GLOBAL wsrep_slave_threads = 10; SET GLOBAL wsrep_slave_threads = 1; +--let $wait_condition = SELECT COUNT(*) = 11 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE USER = 'system user'; +--source include/wait_condition.inc --connection node_2 INSERT INTO t1 VALUES (1); --connection node_1 ---sleep 0.5 SET GLOBAL wsrep_slave_threads = 10; ---sleep 0.5 -SELECT COUNT(*) = 11 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE USER = 'system user'; +--let $wait_condition = SELECT COUNT(*) = 11 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE USER = 'system user'; +--source include/wait_condition.inc SET GLOBAL wsrep_slave_threads = 20; ---sleep 0.5 -SELECT COUNT(*) = 21 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE USER = 'system user'; - +--let $wait_condition = SELECT COUNT(*) = 21 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE USER = 'system user'; +--source include/wait_condition.inc SET GLOBAL wsrep_slave_threads = 1; @@ -40,6 +40,9 @@ INSERT INTO t1 VALUES (9); --connection node_1 +--let $wait_condition = SELECT COUNT(*) = 12 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE USER = 'system user'; +--source include/wait_condition.inc + SET GLOBAL wsrep_slave_threads = 10; SET GLOBAL wsrep_slave_threads = 0; @@ -57,8 +60,8 @@ INSERT INTO t1 VALUES (19); INSERT INTO t1 VALUES (20); --connection node_1 ---sleep 0.5 -SELECT COUNT(*) = 2 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE USER = 'system user'; +--let $wait_condition = SELECT COUNT(*) = 2 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE USER = 'system user'; +--source include/wait_condition.inc SET GLOBAL wsrep_slave_threads = 1; DROP TABLE t1; From c14733f64e0ec372b255587c0a7726e752c84d16 Mon Sep 17 00:00:00 2001 From: Thirunarayanan 
Balathandayuthapani Date: Wed, 14 Mar 2018 11:57:05 +0530 Subject: [PATCH 029/139] MDEV-14545 Backup fails due to MLOG_INDEX_LOAD record - Fixing the windows failure of unsupported_redo test case. mariabackup --tables-exclude option only restricts ibd file. --- .../suite/mariabackup/unsupported_redo.result | 17 ++--------------- .../suite/mariabackup/unsupported_redo.test | 19 ++----------------- 2 files changed, 4 insertions(+), 32 deletions(-) diff --git a/mysql-test/suite/mariabackup/unsupported_redo.result b/mysql-test/suite/mariabackup/unsupported_redo.result index be563682697..325e8233fd8 100644 --- a/mysql-test/suite/mariabackup/unsupported_redo.result +++ b/mysql-test/suite/mariabackup/unsupported_redo.result @@ -23,24 +23,11 @@ CREATE TABLE t2(i int) ENGINE INNODB; ALTER TABLE t21 FORCE, ALGORITHM=INPLACE; # Create partial backup (excluding table t21), Ignore the # unsupported redo log for the table t21. -t1.frm t1.ibd -t2.frm t2.ibd -# After partial restore operation, t21 files will be missing but t21 -# table information will be present in dictionary. It will -# restrict creating the table t21 in the future test. To avoid -# that, take the copy of t21 files and drop the table later. 
# Prepare the full backup -# shutdown server -# remove datadir -# xtrabackup move back -# restart server -SHOW TABLES; -Tables_in_test -t1 -t2 +t1.ibd +t2.ibd DROP TABLE t1; DROP TABLE t2; -# Move the t21 files into data directory DROP TABLE t21; diff --git a/mysql-test/suite/mariabackup/unsupported_redo.test b/mysql-test/suite/mariabackup/unsupported_redo.test index 0e3cd9f6703..a9208a3f8ba 100644 --- a/mysql-test/suite/mariabackup/unsupported_redo.test +++ b/mysql-test/suite/mariabackup/unsupported_redo.test @@ -53,8 +53,6 @@ INSERT INTO t21 VALUES(1); let $MYSQLD_DATADIR= `select @@datadir`; let $targetdir=$MYSQLTEST_VARDIR/tmp/bk; -let old_datadir=$MYSQLTEST_VARDIR/tmp/old_data; ---mkdir $old_datadir CREATE TABLE t2(i int) ENGINE INNODB; ALTER TABLE t21 FORCE, ALGORITHM=INPLACE; @@ -65,28 +63,15 @@ ALTER TABLE t21 FORCE, ALGORITHM=INPLACE; --disable_result_log exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --backup "--tables-exclude=test.t21" --target-dir=$targetdir; --enable_result_log ---list_files $targetdir/test - ---echo # After partial restore operation, t21 files will be missing but t21 ---echo # table information will be present in dictionary. It will ---echo # restrict creating the table t21 in the future test. To avoid ---echo # that, take the copy of t21 files and drop the table later. 
---copy_file $MYSQLD_DATADIR/test/t21.frm $old_datadir/t21.frm +--list_files $targetdir/test *.ibd --echo # Prepare the full backup --disable_result_log exec $XTRABACKUP --prepare --target-dir=$targetdir; ---source include/restart_and_restore.inc --enable_result_log - -SHOW TABLES; +--list_files $targetdir/test *.ibd DROP TABLE t1; DROP TABLE t2; - ---echo # Move the t21 files into data directory ---copy_file $old_datadir/t21.frm $MYSQLD_DATADIR/test/t21.frm - DROP TABLE t21; rmdir $targetdir; -rmdir $old_datadir; From fe66f766bb74c29c88e4ff0f84fb2b1198083d19 Mon Sep 17 00:00:00 2001 From: Vasil Dimov Date: Wed, 20 Sep 2017 18:38:08 +0300 Subject: [PATCH 030/139] Fix and enable galera.galera_bf_abort_for_update The test was not deterministic as it would not wait for the second INSERT to be applied (and thus the victim transaction rolled back). --- .../r/galera_bf_abort_for_update.result | 23 ++++++--- .../galera/t/galera_bf_abort_for_update.test | 51 ++++++++++++++----- 2 files changed, 56 insertions(+), 18 deletions(-) diff --git a/mysql-test/suite/galera/r/galera_bf_abort_for_update.result b/mysql-test/suite/galera/r/galera_bf_abort_for_update.result index 3978a3df193..2367924466c 100644 --- a/mysql-test/suite/galera/r/galera_bf_abort_for_update.result +++ b/mysql-test/suite/galera/r/galera_bf_abort_for_update.result @@ -1,10 +1,21 @@ -CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB; -SET AUTOCOMMIT=OFF; -START TRANSACTION; -INSERT INTO t1 VALUES (1); -INSERT INTO t1 VALUES (1); +CREATE TABLE t1 (f1 INTEGER PRIMARY KEY, f2 INT) ENGINE=InnoDB; +INSERT INTO t1 VALUES (1, 10); +BEGIN; SELECT * FROM t1 FOR UPDATE; +f1 f2 +1 10 +UPDATE t1 SET f1 = 2; +COMMIT; ERROR 40001: Deadlock found when trying to get lock; try restarting transaction -wsrep_local_aborts_increment +wsrep_local_bf_aborts_diff +1 +BEGIN; +SELECT * FROM t1 FOR UPDATE; +f1 f2 +2 10 +UPDATE t1 SET f2 = 20; +COMMIT; +ERROR 40001: Deadlock found when trying to get lock; try restarting transaction 
+wsrep_local_bf_aborts_diff 1 DROP TABLE t1; diff --git a/mysql-test/suite/galera/t/galera_bf_abort_for_update.test b/mysql-test/suite/galera/t/galera_bf_abort_for_update.test index 24c29778e5d..13e48f8f3ce 100644 --- a/mysql-test/suite/galera/t/galera_bf_abort_for_update.test +++ b/mysql-test/suite/galera/t/galera_bf_abort_for_update.test @@ -5,25 +5,52 @@ # Test a local transaction being aborted by a slave one while it is running a SELECT FOR UPDATE # -CREATE TABLE t1 (f1 INTEGER PRIMARY KEY) ENGINE=InnoDB; +CREATE TABLE t1 (f1 INTEGER PRIMARY KEY, f2 INT) ENGINE=InnoDB; ---connection node_2 ---let $wsrep_local_bf_aborts_before = `SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_local_bf_aborts'` -SET AUTOCOMMIT=OFF; -START TRANSACTION; -INSERT INTO t1 VALUES (1); +INSERT INTO t1 VALUES (1, 10); + +# Test updating the PK --connection node_1 -INSERT INTO t1 VALUES (1); - ---connection node_2 ---error ER_LOCK_DEADLOCK +--let $wsrep_local_bf_aborts_before = `SELECT variable_value FROM information_schema.global_status WHERE variable_name = 'wsrep_local_bf_aborts'` +BEGIN; SELECT * FROM t1 FOR UPDATE; ---let $wsrep_local_bf_aborts_after = `SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_local_bf_aborts'` +--connection node_2 +UPDATE t1 SET f1 = 2; + +--connect node_1a, 127.0.0.1, root, , test, $NODE_MYPORT_1 +--let $wait_condition = SELECT COUNT(*) FROM t1 WHERE f1 = 2 +--source include/wait_condition.inc + +--connection node_1 +--error ER_LOCK_DEADLOCK +COMMIT; --disable_query_log ---eval SELECT $wsrep_local_bf_aborts_after - $wsrep_local_bf_aborts_before = 1 AS wsrep_local_aborts_increment; +--eval SELECT variable_value - $wsrep_local_bf_aborts_before AS wsrep_local_bf_aborts_diff FROM information_schema.global_status WHERE variable_name = 'wsrep_local_bf_aborts' +--enable_query_log + +# Test updating non-indexed column + +--connection node_1 +--let $wsrep_local_bf_aborts_before = 
`SELECT variable_value FROM information_schema.global_status WHERE variable_name = 'wsrep_local_bf_aborts'` +BEGIN; +SELECT * FROM t1 FOR UPDATE; + +--connection node_2 +UPDATE t1 SET f2 = 20; + +--connection node_1a +--let $wait_condition = SELECT COUNT(*) FROM t1 WHERE f2 = 20 +--source include/wait_condition.inc + +--connection node_1 +--error ER_LOCK_DEADLOCK +COMMIT; + +--disable_query_log +--eval SELECT variable_value - $wsrep_local_bf_aborts_before AS wsrep_local_bf_aborts_diff FROM information_schema.global_status WHERE variable_name = 'wsrep_local_bf_aborts' --enable_query_log DROP TABLE t1; From 9a21fd34af0506c5b45133c87c23862b84b36717 Mon Sep 17 00:00:00 2001 From: Daniele Sciascia Date: Wed, 14 Mar 2018 10:27:31 +0100 Subject: [PATCH 031/139] MDEV-13549 Re-enable test galera.galera_bf_abort_for_update Test passes reliably after applying missing commit from upstream. --- mysql-test/suite/galera/disabled.def | 1 - 1 file changed, 1 deletion(-) diff --git a/mysql-test/suite/galera/disabled.def b/mysql-test/suite/galera/disabled.def index d82d5dd2023..3e70924aa3c 100644 --- a/mysql-test/suite/galera/disabled.def +++ b/mysql-test/suite/galera/disabled.def @@ -34,7 +34,6 @@ galera_ist_mysqldump : MDEV-13549 Galera test failures mysql-wsrep#31 : MDEV-13549 Galera test failures galera_migrate : MariaDB 10.0 does not support START SLAVE USER galera_concurrent_ctas : MDEV-13549 Galera test failures -galera_bf_abort_for_update : MDEV-13549 Galera test failures galera_wsrep_desync_wsrep_on : MDEV-13549 Galera test failures galera_ssl_upgrade : MDEV-13549 Galera test failures mysql-wsrep#33 : MDEV-13549 Galera test failures From 38579cefa9f3a3f662e42dbe7808f82e94878686 Mon Sep 17 00:00:00 2001 From: Alexander Barkov Date: Wed, 14 Mar 2018 14:46:23 +0400 Subject: [PATCH 032/139] MDEV-14452 Precision in INTERVAL xxx DAY_MICROSECOND parsed wrong? 
--- mysql-test/r/func_date_add.result | 51 +++++++++++++++++++++++++++++++ mysql-test/t/func_date_add.test | 37 ++++++++++++++++++++++ sql/item_timefunc.cc | 14 ++++++--- 3 files changed, 98 insertions(+), 4 deletions(-) diff --git a/mysql-test/r/func_date_add.result b/mysql-test/r/func_date_add.result index e8fbba786a4..0258267b5ec 100644 --- a/mysql-test/r/func_date_add.result +++ b/mysql-test/r/func_date_add.result @@ -102,3 +102,54 @@ select * from t1 where case a when adddate( '2012-12-12', 7 ) then true end; a drop table t1; End of 5.5 tests +# +# Start of 10.1 tests +# +# +# MDEV-14452 Precision in INTERVAL xxx DAY_MICROSECOND parsed wrong? +# +SELECT +DATE_ADD('1000-01-01 00:00:00', INTERVAL '0 00:00:01.5' DAY_MICROSECOND) c1, +DATE_ADD('1000-01-01 00:00:00', INTERVAL '0 00:00:01.50' DAY_MICROSECOND) c2, +DATE_ADD('1000-01-01 00:00:00', INTERVAL '0 00:00:01.500' DAY_MICROSECOND) c3, +DATE_ADD('1000-01-01 00:00:00', INTERVAL '0 00:00:01.5000' DAY_MICROSECOND) c4, +DATE_ADD('1000-01-01 00:00:00', INTERVAL '0 00:00:01.50000' DAY_MICROSECOND) c5, +DATE_ADD('1000-01-01 00:00:00', INTERVAL '0 00:00:01.500000' DAY_MICROSECOND) c6, +DATE_ADD('1000-01-01 00:00:00', INTERVAL '0 00:00:01.5000000' DAY_MICROSECOND) c7, +DATE_ADD('1000-01-01 00:00:00', INTERVAL '0 00:00:01.50000000' DAY_MICROSECOND) c8, +DATE_ADD('1000-01-01 00:00:00', INTERVAL '0 00:00:01.500000000' DAY_MICROSECOND) c9, +DATE_ADD('1000-01-01 00:00:00', INTERVAL '0 00:00:01.5000000000' DAY_MICROSECOND) c10, +DATE_ADD('1000-01-01 00:00:00', INTERVAL '0 00:00:01.50000000000' DAY_MICROSECOND) c11, +DATE_ADD('1000-01-01 00:00:00', INTERVAL '0 00:00:01.500000000000' DAY_MICROSECOND) c12, +DATE_ADD('1000-01-01 00:00:00', INTERVAL '0 00:00:01.5000000000000' DAY_MICROSECOND) c13, +DATE_ADD('1000-01-01 00:00:00', INTERVAL '0 00:00:01.50000000000000' DAY_MICROSECOND) c14, +DATE_ADD('1000-01-01 00:00:00', INTERVAL '0 00:00:01.500000000000000' DAY_MICROSECOND) c15, +DATE_ADD('1000-01-01 00:00:00', INTERVAL '0 
00:00:01.5000000000000000' DAY_MICROSECOND) c16, +DATE_ADD('1000-01-01 00:00:00', INTERVAL '0 00:00:01.50000000000000000' DAY_MICROSECOND) c17, +DATE_ADD('1000-01-01 00:00:00', INTERVAL '0 00:00:01.500000000000000000' DAY_MICROSECOND) c18, +DATE_ADD('1000-01-01 00:00:00', INTERVAL '0 00:00:01.5000000000000000000' DAY_MICROSECOND) c19, +DATE_ADD('1000-01-01 00:00:00', INTERVAL '0 00:00:01.50000000000000000000' DAY_MICROSECOND) c20 +; +c1 1000-01-01 00:00:01.500000 +c2 1000-01-01 00:00:01.500000 +c3 1000-01-01 00:00:01.500000 +c4 1000-01-01 00:00:01.500000 +c5 1000-01-01 00:00:01.500000 +c6 1000-01-01 00:00:01.500000 +c7 1000-01-01 00:00:01.500000 +c8 1000-01-01 00:00:01.500000 +c9 1000-01-01 00:00:01.500000 +c10 1000-01-01 00:00:01.500000 +c11 1000-01-01 00:00:01.500000 +c12 1000-01-01 00:00:01.500000 +c13 1000-01-01 00:00:01.500000 +c14 1000-01-01 00:00:01.500000 +c15 1000-01-01 00:00:01.500000 +c16 1000-01-01 00:00:01.500000 +c17 1000-01-01 00:00:01.500000 +c18 1000-01-01 00:00:01.500000 +c19 1000-01-01 00:00:01.500000 +c20 NULL +# +# End of 10.1 tests +# diff --git a/mysql-test/t/func_date_add.test b/mysql-test/t/func_date_add.test index 5f27978347c..e7e2b96f0eb 100644 --- a/mysql-test/t/func_date_add.test +++ b/mysql-test/t/func_date_add.test @@ -100,3 +100,40 @@ drop table t1; --echo End of 5.5 tests +--echo # +--echo # Start of 10.1 tests +--echo # + +--echo # +--echo # MDEV-14452 Precision in INTERVAL xxx DAY_MICROSECOND parsed wrong? 
+--echo # + +--vertical_results +SELECT + DATE_ADD('1000-01-01 00:00:00', INTERVAL '0 00:00:01.5' DAY_MICROSECOND) c1, + DATE_ADD('1000-01-01 00:00:00', INTERVAL '0 00:00:01.50' DAY_MICROSECOND) c2, + DATE_ADD('1000-01-01 00:00:00', INTERVAL '0 00:00:01.500' DAY_MICROSECOND) c3, + DATE_ADD('1000-01-01 00:00:00', INTERVAL '0 00:00:01.5000' DAY_MICROSECOND) c4, + DATE_ADD('1000-01-01 00:00:00', INTERVAL '0 00:00:01.50000' DAY_MICROSECOND) c5, + DATE_ADD('1000-01-01 00:00:00', INTERVAL '0 00:00:01.500000' DAY_MICROSECOND) c6, + DATE_ADD('1000-01-01 00:00:00', INTERVAL '0 00:00:01.5000000' DAY_MICROSECOND) c7, + DATE_ADD('1000-01-01 00:00:00', INTERVAL '0 00:00:01.50000000' DAY_MICROSECOND) c8, + DATE_ADD('1000-01-01 00:00:00', INTERVAL '0 00:00:01.500000000' DAY_MICROSECOND) c9, + DATE_ADD('1000-01-01 00:00:00', INTERVAL '0 00:00:01.5000000000' DAY_MICROSECOND) c10, + DATE_ADD('1000-01-01 00:00:00', INTERVAL '0 00:00:01.50000000000' DAY_MICROSECOND) c11, + DATE_ADD('1000-01-01 00:00:00', INTERVAL '0 00:00:01.500000000000' DAY_MICROSECOND) c12, + DATE_ADD('1000-01-01 00:00:00', INTERVAL '0 00:00:01.5000000000000' DAY_MICROSECOND) c13, + DATE_ADD('1000-01-01 00:00:00', INTERVAL '0 00:00:01.50000000000000' DAY_MICROSECOND) c14, + DATE_ADD('1000-01-01 00:00:00', INTERVAL '0 00:00:01.500000000000000' DAY_MICROSECOND) c15, + DATE_ADD('1000-01-01 00:00:00', INTERVAL '0 00:00:01.5000000000000000' DAY_MICROSECOND) c16, + DATE_ADD('1000-01-01 00:00:00', INTERVAL '0 00:00:01.50000000000000000' DAY_MICROSECOND) c17, + DATE_ADD('1000-01-01 00:00:00', INTERVAL '0 00:00:01.500000000000000000' DAY_MICROSECOND) c18, + DATE_ADD('1000-01-01 00:00:00', INTERVAL '0 00:00:01.5000000000000000000' DAY_MICROSECOND) c19, + DATE_ADD('1000-01-01 00:00:00', INTERVAL '0 00:00:01.50000000000000000000' DAY_MICROSECOND) c20 +; +--horizontal_results + + +--echo # +--echo # End of 10.1 tests +--echo # diff --git a/sql/item_timefunc.cc b/sql/item_timefunc.cc index 4a94c3a5f89..d7506026c62 100644 --- 
a/sql/item_timefunc.cc +++ b/sql/item_timefunc.cc @@ -702,7 +702,7 @@ static bool get_interval_info(const char *str,uint length,CHARSET_INFO *cs, { const char *end=str+length; uint i; - long msec_length= 0; + long field_length= 0; while (str != end && !my_isdigit(cs,*str)) str++; @@ -713,7 +713,8 @@ static bool get_interval_info(const char *str,uint length,CHARSET_INFO *cs, const char *start= str; for (value= 0; str != end && my_isdigit(cs, *str); str++) value= value*10 + *str - '0'; - msec_length= 6 - (str - start); + if ((field_length= str - start) >= 20) + return true; values[i]= value; while (str != end && !my_isdigit(cs,*str)) str++; @@ -728,8 +729,13 @@ static bool get_interval_info(const char *str,uint length,CHARSET_INFO *cs, } } - if (transform_msec && msec_length > 0) - values[count - 1] *= (long) log_10_int[msec_length]; + if (transform_msec && field_length > 0) + { + if (field_length < 6) + values[count - 1] *= (long) log_10_int[6 - field_length]; + else if (field_length > 6) + values[count - 1] /= (long) log_10_int[field_length - 6]; + } return (str != end); } From 782fb1e016e0342074ca9938e412a14d20d6f793 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20Ter=C3=A4s?= Date: Wed, 14 Mar 2018 18:47:02 +1100 Subject: [PATCH 033/139] fix ucontext configure check musl ships the header for other purposes, but makecontext is not implemented. fix the check to detect if makecontext is implemented before enabling code using it. 
--- configure.cmake | 3 +++ 1 file changed, 3 insertions(+) diff --git a/configure.cmake b/configure.cmake index a1cddbb37fa..df180e72963 100644 --- a/configure.cmake +++ b/configure.cmake @@ -1111,6 +1111,9 @@ CHECK_INCLUDE_FILE(ucontext.h HAVE_UCONTEXT_H) IF(NOT HAVE_UCONTEXT_H) CHECK_INCLUDE_FILE(sys/ucontext.h HAVE_UCONTEXT_H) ENDIF() +IF(HAVE_UCONTEXT_H) + CHECK_FUNCTION_EXISTS(makecontext HAVE_UCONTEXT_H) +ENDIF() CHECK_STRUCT_HAS_MEMBER("struct timespec" tv_sec "time.h" STRUCT_TIMESPEC_HAS_TV_SEC) CHECK_STRUCT_HAS_MEMBER("struct timespec" tv_nsec "time.h" STRUCT_TIMESPEC_HAS_TV_NSEC) From bf1ca14ff3f3faa9f7a018097b25aa0f66d068cd Mon Sep 17 00:00:00 2001 From: Sergei Golubchik Date: Mon, 12 Mar 2018 18:53:59 +0100 Subject: [PATCH 034/139] cleanup: Item_func_case reorder items in args[] array. Instead of when1,then1,when2,then2,...[,case][,else] sort them as [case,]when1,when2,...,then1,then2,...[,else] in this case all items used for comparison take a continuous part of the array and can be aggregated directly. and all items that can be returned take a continuous part of the array and can be aggregated directly. Old code had to copy them to a temporary array before aggregation, and then copy back (thd->change_item_tree) everything that was changed. 
--- sql/item_cmpfunc.cc | 245 +++++++++++++++++--------------------------- sql/item_cmpfunc.h | 4 +- 2 files changed, 95 insertions(+), 154 deletions(-) diff --git a/sql/item_cmpfunc.cc b/sql/item_cmpfunc.cc index edaf6e87621..7d4c646abee 100644 --- a/sql/item_cmpfunc.cc +++ b/sql/item_cmpfunc.cc @@ -3003,11 +3003,12 @@ Item_func_case::Item_func_case(THD *thd, List &list, Item_func_hybrid_field_type(thd), first_expr_num(-1), else_expr_num(-1), left_cmp_type(INT_RESULT), case_item(0), m_found_types(0) { - ncases= list.elements; + DBUG_ASSERT(list.elements % 2 == 0); + nwhens= list.elements / 2; if (first_expr_arg) { - first_expr_num= list.elements; - list.push_back(first_expr_arg, thd->mem_root); + first_expr_num= 0; + list.push_front(first_expr_arg, thd->mem_root); } if (else_expr_arg) { @@ -3015,6 +3016,22 @@ Item_func_case::Item_func_case(THD *thd, List &list, list.push_back(else_expr_arg, thd->mem_root); } set_arguments(thd, list); + + /* + Reorder args, to have at first the optional CASE expression, then all WHEN + expressions, then all THEN expressions. And the optional ELSE expression + at the end. 
+ */ + const size_t size= sizeof(Item*)*nwhens*2; + Item **arg_buffer= (Item **)my_safe_alloca(size); + memcpy(arg_buffer, args + first_expr_num + 1, size); + for (uint i= 0; i < nwhens ; i++) + { + args[first_expr_num + 1 + i]= arg_buffer[i*2]; + args[first_expr_num + 1 + i + nwhens] = arg_buffer[i*2 + 1]; + } + my_safe_afree(arg_buffer, size); + bzero(&cmp_items, sizeof(cmp_items)); } @@ -3045,18 +3062,17 @@ Item *Item_func_case::find_item(String *str) if (first_expr_num == -1) { - for (uint i=0 ; i < ncases ; i+=2) + for (uint i=0 ; i < nwhens ; i++) { // No expression between CASE and the first WHEN if (args[i]->val_bool()) - return args[i+1]; - continue; + return args[i+nwhens]; } } else { /* Compare every WHEN argument with it and return the first match */ - for (uint i=0 ; i < ncases ; i+=2) + for (uint i=1 ; i <= nwhens; i++) { if (args[i]->real_item()->type() == NULL_ITEM) continue; @@ -3065,13 +3081,13 @@ Item *Item_func_case::find_item(String *str) DBUG_ASSERT(cmp_items[(uint)cmp_type]); if (!(value_added_map & (1U << (uint)cmp_type))) { - cmp_items[(uint)cmp_type]->store_value(args[first_expr_num]); - if ((null_value=args[first_expr_num]->null_value)) + cmp_items[(uint)cmp_type]->store_value(args[0]); + if ((null_value= args[0]->null_value)) return else_expr_num != -1 ? 
args[else_expr_num] : 0; value_added_map|= 1U << (uint)cmp_type; } if (cmp_items[(uint)cmp_type]->cmp(args[i]) == FALSE) - return args[i + 1]; + return args[i + nwhens]; } } // No, WHEN clauses all missed, return ELSE expression @@ -3174,9 +3190,6 @@ bool Item_func_case::fix_fields(THD *thd, Item **ref) */ uchar buff[MAX_FIELD_WIDTH*2+sizeof(String)*2+sizeof(String*)*2+sizeof(double)*2+sizeof(longlong)*2]; - if (!(arg_buffer= (Item**) thd->alloc(sizeof(Item*)*(ncases+1)))) - return TRUE; - bool res= Item_func::fix_fields(thd, ref); /* Call check_stack_overrun after fix_fields to be sure that stack variable @@ -3191,31 +3204,17 @@ bool Item_func_case::fix_fields(THD *thd, Item **ref) /** Check if (*place) and new_value points to different Items and call THD::change_item_tree() if needed. - - This function is a workaround for implementation deficiency in - Item_func_case. The problem there is that the 'args' attribute contains - Items from different expressions. - - The function must not be used elsewhere and will be remove eventually. 
*/ -static void change_item_tree_if_needed(THD *thd, - Item **place, - Item *new_value) +static void change_item_tree_if_needed(THD *thd, Item **place, Item *new_value) { - if (*place == new_value) - return; - - thd->change_item_tree(place, new_value); + if (new_value && *place != new_value) + thd->change_item_tree(place, new_value); } void Item_func_case::fix_length_and_dec() { - Item **agg= arg_buffer; - uint nagg; - THD *thd= current_thd; - m_found_types= 0; if (else_expr_num == -1 || args[else_expr_num]->maybe_null) maybe_null= 1; @@ -3224,33 +3223,17 @@ void Item_func_case::fix_length_and_dec() Aggregate all THEN and ELSE expression types and collations when string result */ - - for (nagg= 0 ; nagg < ncases/2 ; nagg++) - agg[nagg]= args[nagg*2+1]; - - if (else_expr_num != -1) - agg[nagg++]= args[else_expr_num]; - - set_handler_by_field_type(agg_field_type(agg, nagg, true)); + Item **rets= args + first_expr_num + 1 + nwhens; + uint nrets= nwhens + (else_expr_num != -1); + set_handler_by_field_type(agg_field_type(rets, nrets, true)); if (Item_func_case::result_type() == STRING_RESULT) { - if (count_string_result_length(Item_func_case::field_type(), agg, nagg)) + if (count_string_result_length(Item_func_case::field_type(), rets, nrets)) return; - /* - Copy all THEN and ELSE items back to args[] array. - Some of the items might have been changed to Item_func_conv_charset. 
- */ - for (nagg= 0 ; nagg < ncases / 2 ; nagg++) - change_item_tree_if_needed(thd, &args[nagg * 2 + 1], agg[nagg]); - - if (else_expr_num != -1) - change_item_tree_if_needed(thd, &args[else_expr_num], agg[nagg++]); } else - { - fix_attributes(agg, nagg); - } + fix_attributes(rets, nrets); /* Aggregate first expression and all WHEN expression types @@ -3258,25 +3241,14 @@ void Item_func_case::fix_length_and_dec() */ if (first_expr_num != -1) { - uint i; - agg[0]= args[first_expr_num]; - left_cmp_type= agg[0]->cmp_type(); + left_cmp_type= args[0]->cmp_type(); - /* - As the first expression and WHEN expressions - are intermixed in args[] array THEN and ELSE items, - extract the first expression and all WHEN expressions into - a temporary array, to process them easier. - */ - for (nagg= 0; nagg < ncases/2 ; nagg++) - agg[nagg+1]= args[nagg*2]; - nagg++; - if (!(m_found_types= collect_cmp_types(agg, nagg))) + if (!(m_found_types= collect_cmp_types(args, nwhens + 1))) return; Item *date_arg= 0; if (m_found_types & (1U << TIME_RESULT)) - date_arg= find_date_time_item(args, arg_count, 0); + date_arg= find_date_time_item(args, nwhens + 1, 0); if (m_found_types & (1U << STRING_RESULT)) { @@ -3304,25 +3276,15 @@ void Item_func_case::fix_length_and_dec() CASE utf16_item WHEN CONVERT(latin1_item USING utf16) THEN ... END */ - if (agg_arg_charsets_for_comparison(cmp_collation, agg, nagg)) + if (agg_arg_charsets_for_comparison(cmp_collation, args, nwhens + 1)) return; - /* - Now copy first expression and all WHEN expressions back to args[] - arrray, because some of the items might have been changed to converters - (e.g. Item_func_conv_charset, or Item_string for constants). 
- */ - change_item_tree_if_needed(thd, &args[first_expr_num], agg[0]); - - for (nagg= 0; nagg < ncases / 2; nagg++) - change_item_tree_if_needed(thd, &args[nagg * 2], agg[nagg + 1]); } - for (i= 0; i <= (uint)TIME_RESULT; i++) + for (uint i= 0; i <= (uint)TIME_RESULT; i++) { if (m_found_types & (1U << i) && !cmp_items[i]) { DBUG_ASSERT((Item_result)i != ROW_RESULT); - if (!(cmp_items[i]= cmp_item::get_comparator((Item_result)i, date_arg, cmp_collation.collation))) @@ -3342,75 +3304,59 @@ Item* Item_func_case::propagate_equal_fields(THD *thd, const Context &ctx, COND_ return this; } - for (uint i= 0; i < arg_count; i++) + /* + First, replace CASE expression. + We cannot replace the CASE (the switch) argument if + there are multiple comparison types were found, or found a single + comparison type that is not equal to args[0]->cmp_type(). + + - Example: multiple comparison types, can't propagate: + WHERE CASE str_column + WHEN 'string' THEN TRUE + WHEN 1 THEN TRUE + ELSE FALSE END; + + - Example: a single incompatible comparison type, can't propagate: + WHERE CASE str_column + WHEN DATE'2001-01-01' THEN TRUE + ELSE FALSE END; + + - Example: a single incompatible comparison type, can't propagate: + WHERE CASE str_column + WHEN 1 THEN TRUE + ELSE FALSE END; + + - Example: a single compatible comparison type, ok to propagate: + WHERE CASE str_column + WHEN 'str1' THEN TRUE + WHEN 'str2' THEN TRUE + ELSE FALSE END; + */ + if (m_found_types == (1UL << left_cmp_type)) + change_item_tree_if_needed(thd, args, + args[0]->propagate_equal_fields(thd, Context(ANY_SUBST, left_cmp_type, + cmp_collation.collation), + cond)); + uint i= 1; + for (; i <= nwhens ; i++) // WHEN expressions { /* - Even "i" values cover items that are in a comparison context: - CASE x0 WHEN x1 .. WHEN x2 .. WHEN x3 .. - Odd "i" values cover items that are not in comparison: - CASE ... THEN y1 ... THEN y2 ... THEN y3 ... ELSE y4 END + These arguments are in comparison. 
+ Allow invariants of the same value during propagation. + Note, as we pass ANY_SUBST, none of the WHEN arguments will be + replaced to zero-filled constants (only IDENTITY_SUBST allows this). + Such a change for WHEN arguments would require rebuilding cmp_items. */ - Item *new_item= 0; - if ((int) i == first_expr_num) // Then CASE (the switch) argument - { - /* - Cannot replace the CASE (the switch) argument if - there are multiple comparison types were found, or found a single - comparison type that is not equal to args[0]->cmp_type(). - - - Example: multiple comparison types, can't propagate: - WHERE CASE str_column - WHEN 'string' THEN TRUE - WHEN 1 THEN TRUE - ELSE FALSE END; - - - Example: a single incompatible comparison type, can't propagate: - WHERE CASE str_column - WHEN DATE'2001-01-01' THEN TRUE - ELSE FALSE END; - - - Example: a single incompatible comparison type, can't propagate: - WHERE CASE str_column - WHEN 1 THEN TRUE - ELSE FALSE END; - - - Example: a single compatible comparison type, ok to propagate: - WHERE CASE str_column - WHEN 'str1' THEN TRUE - WHEN 'str2' THEN TRUE - ELSE FALSE END; - */ - if (m_found_types == (1UL << left_cmp_type)) - new_item= args[i]->propagate_equal_fields(thd, - Context( - ANY_SUBST, - left_cmp_type, - cmp_collation.collation), - cond); - } - else if ((i % 2) == 0) // WHEN arguments - { - /* - These arguments are in comparison. - Allow invariants of the same value during propagation. - Note, as we pass ANY_SUBST, none of the WHEN arguments will be - replaced to zero-filled constants (only IDENTITY_SUBST allows this). - Such a change for WHEN arguments would require rebuilding cmp_items. 
- */ - Item_result tmp_cmp_type= item_cmp_type(args[first_expr_num], args[i]); - new_item= args[i]->propagate_equal_fields(thd, - Context( - ANY_SUBST, - tmp_cmp_type, - cmp_collation.collation), - cond); - } - else // THEN and ELSE arguments (they are not in comparison) - { - new_item= args[i]->propagate_equal_fields(thd, Context_identity(), cond); - } - if (new_item && new_item != args[i]) - thd->change_item_tree(&args[i], new_item); + Item_result tmp_cmp_type= item_cmp_type(args[first_expr_num], args[i]); + change_item_tree_if_needed(thd, args + i, + args[i]->propagate_equal_fields(thd, Context(ANY_SUBST, tmp_cmp_type, + cmp_collation.collation), + cond)); + } + for (; i < arg_count ; i++) // THEN expressions and optional ELSE expression + { + change_item_tree_if_needed(thd, args + i, + args[i]->propagate_equal_fields(thd, Context_identity(), cond)); } return this; } @@ -3419,11 +3365,8 @@ Item* Item_func_case::propagate_equal_fields(THD *thd, const Context &ctx, COND_ uint Item_func_case::decimal_precision() const { int max_int_part=0; - for (uint i=0 ; i < ncases ; i+=2) - set_if_bigger(max_int_part, args[i+1]->decimal_int_part()); - - if (else_expr_num != -1) - set_if_bigger(max_int_part, args[else_expr_num]->decimal_int_part()); + for (uint i=first_expr_num + 1 + nwhens ; i < arg_count; i++) + set_if_bigger(max_int_part, args[i]->decimal_int_part()); return MY_MIN(max_int_part + decimals, DECIMAL_MAX_PRECISION); } @@ -3438,15 +3381,15 @@ void Item_func_case::print(String *str, enum_query_type query_type) str->append(STRING_WITH_LEN("case ")); if (first_expr_num != -1) { - args[first_expr_num]->print_parenthesised(str, query_type, precedence()); + args[0]->print_parenthesised(str, query_type, precedence()); str->append(' '); } - for (uint i=0 ; i < ncases ; i+=2) + for (uint i= first_expr_num + 1 ; i < nwhens + first_expr_num + 1; i++) { str->append(STRING_WITH_LEN("when ")); args[i]->print_parenthesised(str, query_type, precedence()); 
str->append(STRING_WITH_LEN(" then ")); - args[i+1]->print_parenthesised(str, query_type, precedence()); + args[i+nwhens]->print_parenthesised(str, query_type, precedence()); str->append(' '); } if (else_expr_num != -1) diff --git a/sql/item_cmpfunc.h b/sql/item_cmpfunc.h index 1eb07c5040a..20435b3fa4b 100644 --- a/sql/item_cmpfunc.h +++ b/sql/item_cmpfunc.h @@ -1558,12 +1558,11 @@ class Item_func_case :public Item_func_hybrid_field_type int first_expr_num, else_expr_num; enum Item_result left_cmp_type; String tmp_value; - uint ncases; + uint nwhens; Item_result cmp_type; DTCollation cmp_collation; cmp_item *cmp_items[6]; /* For all result types */ cmp_item *case_item; - Item **arg_buffer; uint m_found_types; public: Item_func_case(THD *thd, List &list, Item *first_expr_arg, @@ -1593,7 +1592,6 @@ public: if (clone) { clone->case_item= 0; - clone->arg_buffer= 0; bzero(&clone->cmp_items, sizeof(cmp_items)); } return clone; From 75ac5789b4f6b0b3c95829130c7865247d0e7edc Mon Sep 17 00:00:00 2001 From: Sergei Golubchik Date: Sun, 11 Mar 2018 16:41:56 +0100 Subject: [PATCH 035/139] cleanup: typos, comments, whitespace --- mysql-test/r/type_time.result | 4 ++-- mysql-test/t/type_time.test | 4 ++-- sql/item.h | 6 +++--- sql/item_cmpfunc.cc | 15 +++++++-------- sql/item_cmpfunc.h | 5 ++--- 5 files changed, 16 insertions(+), 18 deletions(-) diff --git a/mysql-test/r/type_time.result b/mysql-test/r/type_time.result index 8b80177104d..0e627bdb915 100644 --- a/mysql-test/r/type_time.result +++ b/mysql-test/r/type_time.result @@ -822,7 +822,7 @@ id select_type table type possible_keys key key_len ref rows filtered Extra 1 SIMPLE t1 ALL NULL NULL NULL NULL 8 100.00 Using where Warnings: Note 1003 select `test`.`t1`.`a` AS `a` from `test`.`t1` where `test`.`t1`.`a` = TIME'10:20:30' and (length(TIME'10:20:30')) = 30 + rand() -# Old mode, TIMESTAMP literal, zon-zero YYYYMMDD, no propagation +# Old mode, TIMESTAMP literal, non-zero YYYYMMDD, no propagation SELECT * FROM t1 WHERE 
a=TIMESTAMP'0000-00-01 10:20:30'; a 34:20:30 @@ -860,7 +860,7 @@ id select_type table type possible_keys key key_len ref rows filtered Extra 1 SIMPLE t1 ALL NULL NULL NULL NULL 8 100.00 Using where Warnings: Note 1003 select `test`.`t1`.`a` AS `a` from `test`.`t1` where `test`.`t1`.`a` = TIME'10:20:30' and (length(TIME'10:20:30')) = 30 + rand() -# Old mode, TIMESTAMP-alike literal, zon-zero YYYYMMDD, no propagation +# Old mode, TIMESTAMP-alike literal, non-zero YYYYMMDD, no propagation SELECT * FROM t1 WHERE a='0000-00-01 10:20:30'; a 34:20:30 diff --git a/mysql-test/t/type_time.test b/mysql-test/t/type_time.test index 6841af41e11..ab496408c32 100644 --- a/mysql-test/t/type_time.test +++ b/mysql-test/t/type_time.test @@ -506,7 +506,7 @@ SELECT * FROM t1 WHERE a=TIMESTAMP'0000-00-00 10:20:30' AND LENGTH(a)=8; EXPLAIN EXTENDED SELECT * FROM t1 WHERE a=TIMESTAMP'0000-00-00 10:20:30' AND LENGTH(a)=30+RAND(); ---echo # Old mode, TIMESTAMP literal, zon-zero YYYYMMDD, no propagation +--echo # Old mode, TIMESTAMP literal, non-zero YYYYMMDD, no propagation SELECT * FROM t1 WHERE a=TIMESTAMP'0000-00-01 10:20:30'; SELECT * FROM t1 WHERE a=TIMESTAMP'0000-00-01 10:20:30' AND LENGTH(a)=8; EXPLAIN EXTENDED @@ -522,7 +522,7 @@ SELECT * FROM t1 WHERE a='0000-00-00 10:20:30' AND LENGTH(a)=8; EXPLAIN EXTENDED SELECT * FROM t1 WHERE a='0000-00-00 10:20:30' AND LENGTH(a)=30+RAND(); ---echo # Old mode, TIMESTAMP-alike literal, zon-zero YYYYMMDD, no propagation +--echo # Old mode, TIMESTAMP-alike literal, non-zero YYYYMMDD, no propagation SELECT * FROM t1 WHERE a='0000-00-01 10:20:30'; SELECT * FROM t1 WHERE a='0000-00-01 10:20:30' AND LENGTH(a)=8; EXPLAIN EXTENDED diff --git a/sql/item.h b/sql/item.h index 325514d19aa..40350ba1a2c 100644 --- a/sql/item.h +++ b/sql/item.h @@ -1365,7 +1365,7 @@ public: /* Get time with automatic DATE/DATETIME to TIME conversion. - Performce a reserve operation to get_date_with_conversion(). + Performes a reverse operation to get_date_with_conversion(). 
Suppose: - we have a set of items (typically with the native MYSQL_TYPE_TIME type) whose item->get_date() return TIME1 value, and @@ -3842,7 +3842,7 @@ class Item_date_literal_for_invalid_dates: public Item_date_literal Item_date_literal_for_invalid_dates::get_date() (unlike the regular Item_date_literal::get_date()) - does not check the result for NO_ZERO_IN_DATE and NO_ZER_DATE, + does not check the result for NO_ZERO_IN_DATE and NO_ZERO_DATE, always returns success (false), and does not produce error/warning messages. We need these _for_invalid_dates classes to be able to rewrite: @@ -5588,7 +5588,7 @@ public: virtual void store(Item *item); virtual bool cache_value()= 0; bool basic_const_item() const - { return MY_TEST(example && example->basic_const_item()); } + { return example && example->basic_const_item(); } virtual void clear() { null_value= TRUE; value_cached= FALSE; } bool is_null() { return !has_value(); } virtual bool is_expensive() diff --git a/sql/item_cmpfunc.cc b/sql/item_cmpfunc.cc index 7d4c646abee..14e4b2c36f6 100644 --- a/sql/item_cmpfunc.cc +++ b/sql/item_cmpfunc.cc @@ -525,8 +525,7 @@ void Item_bool_rowready_func2::fix_length_and_dec() int Arg_comparator::set_compare_func(Item_func_or_sum *item, Item_result type) { owner= item; - func= comparator_matrix[type] - [is_owner_equal_func()]; + func= comparator_matrix[type][is_owner_equal_func()]; switch (type) { case TIME_RESULT: @@ -708,7 +707,7 @@ int Arg_comparator::set_cmp_func(Item_func_or_sum *owner_arg, @return cache item or original value. */ -Item** Arg_comparator::cache_converted_constant(THD *thd_arg, Item **value, +Item** Arg_comparator::cache_converted_constant(THD *thd, Item **value, Item **cache_item, Item_result type) { @@ -717,12 +716,12 @@ Item** Arg_comparator::cache_converted_constant(THD *thd_arg, Item **value, Also, get_datetime_value creates Item_cache internally. Unless fixed, we should not do it here. 
*/ - if (!thd_arg->lex->is_ps_or_view_context_analysis() && + if (!thd->lex->is_ps_or_view_context_analysis() && (*value)->const_item() && type != (*value)->result_type() && type != TIME_RESULT) { - Item_cache *cache= Item_cache::get_cache(thd_arg, *value, type); - cache->setup(thd_arg, *value); + Item_cache *cache= Item_cache::get_cache(thd, *value, type); + cache->setup(thd, *value); *cache_item= cache; return cache_item; } @@ -2172,7 +2171,7 @@ void Item_func_between::fix_length_and_dec() if (m_compare_type == TIME_RESULT) compare_as_dates= find_date_time_item(args, 3, 0); - /* See the comment about the similar block in Item_bool_func2 */ + /* See the comment for Item_func::convert_const_compared_to_int_field */ if (args[0]->real_item()->type() == FIELD_ITEM && !thd->lex->is_ps_or_view_context_analysis()) { @@ -4286,7 +4285,7 @@ void Item_func_in::fix_length_and_dec() values on the right can be compared as integers and adjust the comparison type accordingly. - See the comment about the similar block in Item_bool_func2 + And see the comment for Item_func::convert_const_compared_to_int_field */ if (args[0]->real_item()->type() == FIELD_ITEM && !thd->lex->is_view_context_analysis() && m_compare_type != INT_RESULT) diff --git a/sql/item_cmpfunc.h b/sql/item_cmpfunc.h index 20435b3fa4b..7ce883e7214 100644 --- a/sql/item_cmpfunc.h +++ b/sql/item_cmpfunc.h @@ -529,14 +529,13 @@ public: clone->cmp.comparators= 0; } return clone; - } - + } }; /** XOR inherits from Item_bool_func because it is not optimized yet. Later, when XOR is optimized, it needs to inherit from - Item_cond instead. See WL#5800. + Item_cond instead. See WL#5800. 
*/ class Item_func_xor :public Item_bool_func { From e0d3d4059fb6266cd295791601ecabbfd07dba97 Mon Sep 17 00:00:00 2001 From: Sergei Golubchik Date: Mon, 12 Mar 2018 20:11:33 +0100 Subject: [PATCH 036/139] cleanup: add Item::convert_time_to_datetime() helper will be used in following commits --- sql/item.cc | 10 +++------- sql/item.h | 9 +++++++++ 2 files changed, 12 insertions(+), 7 deletions(-) diff --git a/sql/item.cc b/sql/item.cc index 67cf5b3da21..27469c08078 100644 --- a/sql/item.cc +++ b/sql/item.cc @@ -151,13 +151,9 @@ bool Item::get_date_with_conversion(MYSQL_TIME *ltime, ulonglong fuzzydate) if (get_date(ltime, fuzzydate | time_flag)) return true; if (ltime->time_type == MYSQL_TIMESTAMP_TIME && - !(fuzzydate & TIME_TIME_ONLY)) - { - MYSQL_TIME tmp; - if (time_to_datetime_with_warn(thd, ltime, &tmp, fuzzydate)) - return null_value= true; - *ltime= tmp; - } + !(fuzzydate & TIME_TIME_ONLY) && + convert_time_to_datetime(thd, ltime, fuzzydate)) + return true; return false; } diff --git a/sql/item.h b/sql/item.h index 40350ba1a2c..ed00522fa53 100644 --- a/sql/item.h +++ b/sql/item.h @@ -27,6 +27,7 @@ #include "sql_const.h" /* RAND_TABLE_BIT, MAX_FIELD_NAME */ #include "field.h" /* Derivation */ #include "sql_type.h" +#include "sql_time.h" C_MODE_START #include @@ -1361,6 +1362,14 @@ public: bool get_time(MYSQL_TIME *ltime) { return get_date(ltime, TIME_TIME_ONLY | TIME_INVALID_DATES); } // Get date with automatic TIME->DATETIME conversion + bool convert_time_to_datetime(THD *thd, MYSQL_TIME *ltime, ulonglong fuzzydate) + { + MYSQL_TIME tmp; + if (time_to_datetime_with_warn(thd, ltime, &tmp, fuzzydate)) + return null_value= true; + *ltime= tmp; + return false; + } bool get_date_with_conversion(MYSQL_TIME *ltime, ulonglong fuzzydate); /* Get time with automatic DATE/DATETIME to TIME conversion. 
From c2671e97a3d6c68ee24090abca0ba5ff40d6d376 Mon Sep 17 00:00:00 2001 From: Sergei Golubchik Date: Mon, 12 Mar 2018 20:08:01 +0100 Subject: [PATCH 037/139] cleanup: make find_date_time_item() static --- sql/item_cmpfunc.cc | 2 +- sql/item_func.h | 1 - 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/sql/item_cmpfunc.cc b/sql/item_cmpfunc.cc index 14e4b2c36f6..7238caef6a0 100644 --- a/sql/item_cmpfunc.cc +++ b/sql/item_cmpfunc.cc @@ -42,7 +42,7 @@ this is the type that will be used in warnings like "Incorrect <> value". */ -Item *find_date_time_item(Item **args, uint nargs, uint col) +static Item *find_date_time_item(Item **args, uint nargs, uint col) { Item *date_arg= 0, **arg, **arg_end; for (arg= args, arg_end= args + nargs; arg != arg_end ; arg++) diff --git a/sql/item_func.h b/sql/item_func.h index f9df035db12..c9b5841e293 100644 --- a/sql/item_func.h +++ b/sql/item_func.h @@ -2507,7 +2507,6 @@ Item *get_system_var(THD *thd, enum_var_type var_type, LEX_STRING name, extern bool check_reserved_words(LEX_STRING *name); extern enum_field_types agg_field_type(Item **items, uint nitems, bool treat_bit_as_number); -Item *find_date_time_item(Item **args, uint nargs, uint col); double my_double_round(double value, longlong dec, bool dec_unsigned, bool truncate); bool eval_const_cond(COND *cond); From 24d6cd7d62c10a6a17be9f4fac1a112f16f8b6e8 Mon Sep 17 00:00:00 2001 From: Sergei Golubchik Date: Mon, 12 Mar 2018 20:13:05 +0100 Subject: [PATCH 038/139] cleanup: Arg_comparator::cache_converted_constant() It's a generic function, not using anything from Arg_comparator. 
Make it a static function, not a class method, to be able to use it later without Arg_comparator --- sql/item_cmpfunc.cc | 7 ++++--- sql/item_cmpfunc.h | 4 +--- 2 files changed, 5 insertions(+), 6 deletions(-) diff --git a/sql/item_cmpfunc.cc b/sql/item_cmpfunc.cc index 7238caef6a0..75ed765ea7d 100644 --- a/sql/item_cmpfunc.cc +++ b/sql/item_cmpfunc.cc @@ -34,6 +34,8 @@ #include "sql_time.h" // make_truncated_value_warning #include "sql_base.h" // dynamic_column_error_message +static Item** cache_converted_constant(THD *thd, Item **value, + Item **cache_item, Item_result type,enum_field_types f_type); /** find an temporal type (item) that others will be converted to @@ -707,9 +709,8 @@ int Arg_comparator::set_cmp_func(Item_func_or_sum *owner_arg, @return cache item or original value. */ -Item** Arg_comparator::cache_converted_constant(THD *thd, Item **value, - Item **cache_item, - Item_result type) +static Item** cache_converted_constant(THD *thd, Item **value, + Item **cache_item, Item_result type) { /* Don't need cache if doing context analysis only. 
diff --git a/sql/item_cmpfunc.h b/sql/item_cmpfunc.h index 7ce883e7214..f833232de95 100644 --- a/sql/item_cmpfunc.h +++ b/sql/item_cmpfunc.h @@ -113,8 +113,6 @@ public: int compare_e_json_str(); int compare_e_str_json(); - Item** cache_converted_constant(THD *thd, Item **value, Item **cache, - Item_result type); static arg_cmp_func comparator_matrix [6][2]; inline bool is_owner_equal_func() { @@ -1069,7 +1067,7 @@ class Item_func_nullif :public Item_func_hybrid_field_type The left "a" is in a comparison and can be replaced by: - Item_func::convert_const_compared_to_int_field() - agg_item_set_converter() in set_cmp_func() - - Arg_comparator::cache_converted_constant() in set_cmp_func() + - cache_converted_constant() in set_cmp_func() Both "a"s are subject to equal fields propagation and can be replaced by: - Item_field::propagate_equal_fields(ANY_SUBST) for the left "a" From 622115ee07f8e7878389d22b2d624c13a2eda7da Mon Sep 17 00:00:00 2001 From: Sergei Golubchik Date: Mon, 12 Mar 2018 20:13:36 +0100 Subject: [PATCH 039/139] cleanup: extend Item_cache::get_cache() to accept f_type Do not assume that it's always item->field_type() - this is not the case in temporal comparisons (e.g. when comparing DATETIME column with a TIME literal). --- sql/item.cc | 12 +++--------- sql/item.h | 13 +++++++++++-- sql/item_row.h | 3 +-- 3 files changed, 15 insertions(+), 13 deletions(-) diff --git a/sql/item.cc b/sql/item.cc index 27469c08078..05e2bc43aed 100644 --- a/sql/item.cc +++ b/sql/item.cc @@ -9543,12 +9543,6 @@ int stored_field_cmp_to_item(THD *thd, Field *field, Item *item) return 0; } -Item_cache* Item_cache::get_cache(THD *thd, const Item *item) -{ - return get_cache(thd, item, item->cmp_type()); -} - - /** Get a cache item of given type. 
@@ -9559,12 +9553,12 @@ Item_cache* Item_cache::get_cache(THD *thd, const Item *item) */ Item_cache* Item_cache::get_cache(THD *thd, const Item *item, - const Item_result type) + const Item_result type, const enum_field_types f_type) { MEM_ROOT *mem_root= thd->mem_root; switch (type) { case INT_RESULT: - return new (mem_root) Item_cache_int(thd, item->field_type()); + return new (mem_root) Item_cache_int(thd, f_type); case REAL_RESULT: return new (mem_root) Item_cache_real(thd); case DECIMAL_RESULT: @@ -9574,7 +9568,7 @@ Item_cache* Item_cache::get_cache(THD *thd, const Item *item, case ROW_RESULT: return new (mem_root) Item_cache_row(thd); case TIME_RESULT: - return new (mem_root) Item_cache_temporal(thd, item->field_type()); + return new (mem_root) Item_cache_temporal(thd, f_type); } return 0; // Impossible } diff --git a/sql/item.h b/sql/item.h index ed00522fa53..8921ee76f6a 100644 --- a/sql/item.h +++ b/sql/item.h @@ -5551,8 +5551,17 @@ public: enum Item_result cmp_type () const { return Type_handler_hybrid_field_type::cmp_type(); } - static Item_cache* get_cache(THD *thd, const Item *item); - static Item_cache* get_cache(THD *thd, const Item* item, const Item_result type); + static Item_cache* get_cache(THD *thd, const Item* item, + const Item_result type, const enum_field_types f_type); + static Item_cache* get_cache(THD *thd, const Item* item, + const Item_result type) + { + return get_cache(thd, item, type, item->field_type()); + } + static Item_cache* get_cache(THD *thd, const Item *item) + { + return get_cache(thd, item, item->cmp_type()); + } virtual void keep_array() {} virtual void print(String *str, enum_query_type query_type); bool eq_def(const Field *field) diff --git a/sql/item_row.h b/sql/item_row.h index f85a5e35c18..bc82b31f9e2 100644 --- a/sql/item_row.h +++ b/sql/item_row.h @@ -90,8 +90,7 @@ public: Item_result cmp_type() const { return ROW_RESULT; } enum_field_types field_type() const { - DBUG_ASSERT(0); - return MYSQL_TYPE_DOUBLE; + return 
MYSQL_TYPE_NULL; } void update_used_tables() { From 885edc4fa50546304c1596e4a477ae75aa3973e0 Mon Sep 17 00:00:00 2001 From: Sergei Golubchik Date: Mon, 12 Mar 2018 20:13:55 +0100 Subject: [PATCH 040/139] bugfix: Item_cache_temporal::get_date() didn't set null_value --- sql/item.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sql/item.cc b/sql/item.cc index 05e2bc43aed..09ae50fcc59 100644 --- a/sql/item.cc +++ b/sql/item.cc @@ -9799,7 +9799,7 @@ bool Item_cache_temporal::get_date(MYSQL_TIME *ltime, ulonglong fuzzydate) if (!has_value()) { bzero((char*) ltime,sizeof(*ltime)); - return 1; + return null_value= true; } unpack_time(value, ltime); From 1c6f6dc8924f144770a862d74667d7fa0eba55c1 Mon Sep 17 00:00:00 2001 From: Sergei Golubchik Date: Mon, 12 Mar 2018 20:15:18 +0100 Subject: [PATCH 041/139] bugfix: Item_cache_temporal::convert_to_basic_const_item assumed DATETIME while it should look at the actual field_type() and use get_date() or get_time() as appropriate. test case is in the following commit. 
--- sql/item.cc | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/sql/item.cc b/sql/item.cc index 09ae50fcc59..0e6135d64f8 100644 --- a/sql/item.cc +++ b/sql/item.cc @@ -9853,9 +9853,18 @@ Item *Item_cache_temporal::convert_to_basic_const_item(THD *thd) else { MYSQL_TIME ltime; - unpack_time(val_datetime_packed(), <ime); - new_item= (Item*) new (thd->mem_root) Item_datetime_literal(thd, <ime, - decimals); + if (Item_cache_temporal::field_type() == MYSQL_TYPE_TIME) + { + unpack_time(val_time_packed(), <ime); + new_item= (Item*) new (thd->mem_root) Item_time_literal(thd, <ime, + decimals); + } + else + { + unpack_time(val_datetime_packed(), <ime); + new_item= (Item*) new (thd->mem_root) Item_datetime_literal(thd, <ime, + decimals); + } } return new_item; } From d390e501eb812138db8963098a66d888f8adf490 Mon Sep 17 00:00:00 2001 From: Sergei Golubchik Date: Mon, 12 Mar 2018 20:16:33 +0100 Subject: [PATCH 042/139] MDEV-11839 move value caching from get_datetime_value to fix_fields time Refactor get_datetime_value() not to create Item_cache_temporal(), but do it always in ::fix_fields() or ::fix_length_and_dec(). Creating items at the execution time doesn't work very well with virtual columns and check constraints that are fixed and executed in different THDs. 
--- mysql-test/r/cache_temporal_4265.result | 1 - mysql-test/r/case.result | 34 ++++++ mysql-test/r/range.result | 2 - mysql-test/r/range_mrr_icp.result | 2 - mysql-test/t/case.test | 19 +++ sql/item.cc | 49 +++++--- sql/item_cmpfunc.cc | 112 ++++++++---------- sql/item_cmpfunc.h | 14 +-- ..._time_fractional_seconds_with_index.result | 2 +- .../storage/r/column_time_with_index.result | 2 +- .../mysql-test/rocksdb/r/rocksdb.result | 4 +- 11 files changed, 139 insertions(+), 102 deletions(-) diff --git a/mysql-test/r/cache_temporal_4265.result b/mysql-test/r/cache_temporal_4265.result index 980bb957e19..7f215de43fb 100644 --- a/mysql-test/r/cache_temporal_4265.result +++ b/mysql-test/r/cache_temporal_4265.result @@ -7,7 +7,6 @@ a 2002-03-04 Warnings: Note 1003 2000-01-01 -Note 1003 2000-01-06 set debug_dbug=''; drop table t1; create table t1 (id int not null, ut timestamp(6) not null); diff --git a/mysql-test/r/case.result b/mysql-test/r/case.result index 4d5edbda1ce..5a453ebe815 100644 --- a/mysql-test/r/case.result +++ b/mysql-test/r/case.result @@ -403,3 +403,37 @@ DROP TABLE t1; # # End of 10.1 test # +select case 'foo' when time'10:00:00' then 'never' when '0' then 'bug' else 'ok' end; +case 'foo' when time'10:00:00' then 'never' when '0' then 'bug' else 'ok' end +ok +Warnings: +Warning 1292 Truncated incorrect time value: 'foo' +select 'foo' in (time'10:00:00','0'); +'foo' in (time'10:00:00','0') +0 +Warnings: +Warning 1292 Truncated incorrect time value: 'foo' +create table t1 (a time); +insert t1 values (100000), (102030), (203040); +select case 'foo' when a then 'never' when '0' then 'bug' else 'ok' end from t1; +case 'foo' when a then 'never' when '0' then 'bug' else 'ok' end +ok +ok +ok +Warnings: +Warning 1292 Truncated incorrect time value: 'foo' +Warning 1292 Truncated incorrect time value: 'foo' +Warning 1292 Truncated incorrect time value: 'foo' +select 'foo' in (a,'0') from t1; +'foo' in (a,'0') +0 +0 +0 +Warnings: +Warning 1292 Truncated incorrect 
time value: 'foo' +Warning 1292 Truncated incorrect time value: 'foo' +Warning 1292 Truncated incorrect time value: 'foo' +drop table t1; +select case '20:10:05' when date'2020-10-10' then 'never' when time'20:10:5' then 'ok' else 'bug' end; +case '20:10:05' when date'2020-10-10' then 'never' when time'20:10:5' then 'ok' else 'bug' end +bug diff --git a/mysql-test/r/range.result b/mysql-test/r/range.result index 28f5cf635d0..3a71d08eb38 100644 --- a/mysql-test/r/range.result +++ b/mysql-test/r/range.result @@ -1595,8 +1595,6 @@ NULL Warnings: Warning 1411 Incorrect datetime value: '2007-20-00' for function str_to_date Warning 1411 Incorrect datetime value: '2007-20-00' for function str_to_date -Warning 1411 Incorrect datetime value: '2007-20-00' for function str_to_date -Warning 1411 Incorrect datetime value: '2007-20-00' for function str_to_date SELECT str_to_date('2007-10-00', '%Y-%m-%d') BETWEEN '' AND '2007/10/20'; str_to_date('2007-10-00', '%Y-%m-%d') BETWEEN '' AND '2007/10/20' 1 diff --git a/mysql-test/r/range_mrr_icp.result b/mysql-test/r/range_mrr_icp.result index f2860aaab76..799a299e33f 100644 --- a/mysql-test/r/range_mrr_icp.result +++ b/mysql-test/r/range_mrr_icp.result @@ -1597,8 +1597,6 @@ NULL Warnings: Warning 1411 Incorrect datetime value: '2007-20-00' for function str_to_date Warning 1411 Incorrect datetime value: '2007-20-00' for function str_to_date -Warning 1411 Incorrect datetime value: '2007-20-00' for function str_to_date -Warning 1411 Incorrect datetime value: '2007-20-00' for function str_to_date SELECT str_to_date('2007-10-00', '%Y-%m-%d') BETWEEN '' AND '2007/10/20'; str_to_date('2007-10-00', '%Y-%m-%d') BETWEEN '' AND '2007/10/20' 1 diff --git a/mysql-test/t/case.test b/mysql-test/t/case.test index 702592374b6..550b89aab0d 100644 --- a/mysql-test/t/case.test +++ b/mysql-test/t/case.test @@ -287,3 +287,22 @@ DROP TABLE t1; --echo # --echo # End of 10.1 test --echo # + +# +# caching of first argument in CASE/IN for temporal types +# +# + 
+# should not convert all values to time +select case 'foo' when time'10:00:00' then 'never' when '0' then 'bug' else 'ok' end; +select 'foo' in (time'10:00:00','0'); + +create table t1 (a time); +insert t1 values (100000), (102030), (203040); +# only one warning, TIME('foo') should be cached +select case 'foo' when a then 'never' when '0' then 'bug' else 'ok' end from t1; +select 'foo' in (a,'0') from t1; +drop table t1; + +# first comparison should be as date, second as time +select case '20:10:05' when date'2020-10-10' then 'never' when time'20:10:5' then 'ok' else 'bug' end; diff --git a/sql/item.cc b/sql/item.cc index 0e6135d64f8..8dce7f7c64a 100644 --- a/sql/item.cc +++ b/sql/item.cc @@ -9361,13 +9361,16 @@ void resolve_const_item(THD *thd, Item **ref, Item *comp_item) switch (res_type) { case TIME_RESULT: { - bool is_null; - Item **ref_copy= ref; - /* the following call creates a constant and puts it in new_item */ enum_field_types type= item->field_type_for_temporal_comparison(comp_item); - get_datetime_value(thd, &ref_copy, &new_item, type, &is_null); - if (is_null) + longlong value= item->val_temporal_packed(type); + if (item->null_value) new_item= new (mem_root) Item_null(thd, name); + else + { + Item_cache_temporal *cache= new (mem_root) Item_cache_temporal(thd, type); + cache->store_packed(value, item); + new_item= cache; + } break; } case STRING_RESULT: @@ -9702,8 +9705,7 @@ Item_cache_temporal::Item_cache_temporal(THD *thd, longlong Item_cache_temporal::val_datetime_packed() { DBUG_ASSERT(fixed == 1); - if (Item_cache_temporal::field_type() == MYSQL_TYPE_TIME) - return Item::val_datetime_packed(); // TIME-to-DATETIME conversion needed + DBUG_ASSERT(Item_cache_temporal::field_type() != MYSQL_TYPE_TIME); if ((!value_cached && !cache_value()) || null_value) { null_value= TRUE; @@ -9716,8 +9718,7 @@ longlong Item_cache_temporal::val_datetime_packed() longlong Item_cache_temporal::val_time_packed() { DBUG_ASSERT(fixed == 1); - if 
(Item_cache_temporal::field_type() != MYSQL_TYPE_TIME) - return Item::val_time_packed(); // DATETIME-to-TIME conversion needed + DBUG_ASSERT(Item_cache_temporal::field_type() == MYSQL_TYPE_TIME); if ((!value_cached && !cache_value()) || null_value) { null_value= TRUE; @@ -9775,18 +9776,26 @@ double Item_cache_temporal::val_real() } -bool Item_cache_temporal::cache_value() +bool Item_cache_temporal::cache_value() { if (!example) return false; - value_cached= true; - + MYSQL_TIME ltime; - if (example->get_date_result(<ime, 0)) - value=0; - else + uint fuzzydate= TIME_FUZZY_DATES | TIME_INVALID_DATES; + if (Item_cache_temporal::field_type() == MYSQL_TYPE_TIME) + fuzzydate|= TIME_TIME_ONLY; + + value= 0; + if (!example->get_date_result(<ime, fuzzydate)) + { + if (ltime.time_type == MYSQL_TIMESTAMP_TIME && + !(fuzzydate & TIME_TIME_ONLY) && + convert_time_to_datetime(current_thd, <ime, fuzzydate)) + return true; value= pack_time(<ime); + } null_value= example->null_value; return true; } @@ -9806,11 +9815,15 @@ bool Item_cache_temporal::get_date(MYSQL_TIME *ltime, ulonglong fuzzydate) ltime->time_type= mysql_type_to_time_type(field_type()); if (ltime->time_type == MYSQL_TIMESTAMP_TIME) { - ltime->hour+= (ltime->month*32+ltime->day)*24; - ltime->month= ltime->day= 0; + if (fuzzydate & TIME_TIME_ONLY) + { + ltime->hour+= (ltime->month*32+ltime->day)*24; + ltime->month= ltime->day= 0; + } + else if (convert_time_to_datetime(current_thd, ltime, fuzzydate)) + return true; } return 0; - } diff --git a/sql/item_cmpfunc.cc b/sql/item_cmpfunc.cc index 75ed765ea7d..42b94e4daee 100644 --- a/sql/item_cmpfunc.cc +++ b/sql/item_cmpfunc.cc @@ -41,10 +41,14 @@ static Item** cache_converted_constant(THD *thd, Item **value, find an temporal type (item) that others will be converted to for the purpose of comparison. + for IN/CASE conversion only happens if the first item defines the + comparison context. + this is the type that will be used in warnings like "Incorrect <> value". 
*/ -static Item *find_date_time_item(Item **args, uint nargs, uint col) +static Item *find_date_time_item(THD *thd, Item **args, uint nargs, uint col, + bool in_case) { Item *date_arg= 0, **arg, **arg_end; for (arg= args, arg_end= args + nargs; arg != arg_end ; arg++) @@ -52,10 +56,22 @@ static Item *find_date_time_item(Item **args, uint nargs, uint col) Item *item= arg[0]->element_index(col); if (item->cmp_type() != TIME_RESULT) continue; - if (item->field_type() == MYSQL_TYPE_DATETIME) - return item; if (!date_arg) date_arg= item; + if (item->field_type() == MYSQL_TYPE_DATETIME) + break; + } + if (in_case ? date_arg == args[0]->element_index(col) : date_arg != NULL) + { + enum_field_types f_type= date_arg->field_type(); + for (arg= args, arg_end= args + nargs; arg != arg_end ; arg++) + { + Item *cache, **a= arg[0]->addr(col); + if (!a) + a= arg; + if (cache_converted_constant(thd, a, &cache, TIME_RESULT, f_type) != a) + thd->change_item_tree(a, cache); + } } return date_arg; } @@ -658,6 +674,8 @@ int Arg_comparator::set_cmp_func(Item_func_or_sum *owner_arg, func= is_owner_equal_func() ? &Arg_comparator::compare_e_datetime : &Arg_comparator::compare_datetime; } + a= cache_converted_constant(thd, a, &a_cache, m_compare_type, f_type); + b= cache_converted_constant(thd, b, &b_cache, m_compare_type, f_type); return 0; } @@ -685,9 +703,11 @@ int Arg_comparator::set_cmp_func(Item_func_or_sum *owner_arg, func= is_owner_equal_func() ? 
&Arg_comparator::compare_e_datetime : &Arg_comparator::compare_datetime; } - - a= cache_converted_constant(thd, a, &a_cache, m_compare_type); - b= cache_converted_constant(thd, b, &b_cache, m_compare_type); + else + { + a= cache_converted_constant(thd, a, &a_cache, m_compare_type, (*a)->field_type()); + b= cache_converted_constant(thd, b, &b_cache, m_compare_type, (*b)->field_type()); + } return set_compare_func(owner_arg, m_compare_type); } @@ -710,18 +730,13 @@ int Arg_comparator::set_cmp_func(Item_func_or_sum *owner_arg, */ static Item** cache_converted_constant(THD *thd, Item **value, - Item **cache_item, Item_result type) + Item **cache_item, Item_result type, enum_field_types f_type) { - /* - Don't need cache if doing context analysis only. - Also, get_datetime_value creates Item_cache internally. - Unless fixed, we should not do it here. - */ + /* Don't need cache if doing context analysis only. */ if (!thd->lex->is_ps_or_view_context_analysis() && - (*value)->const_item() && type != (*value)->result_type() && - type != TIME_RESULT) + (*value)->const_item() && type != (*value)->result_type()) { - Item_cache *cache= Item_cache::get_cache(thd, *value, type); + Item_cache *cache= Item_cache::get_cache(thd, *value, type, f_type); cache->setup(thd, *value); *cache_item= cache; return cache_item; @@ -761,26 +776,11 @@ static Item** cache_converted_constant(THD *thd, Item **value, MYSQL_TIME value, packed in a longlong, suitable for comparison. 
*/ -longlong -get_datetime_value(THD *thd, Item ***item_arg, Item **cache_arg, - enum_field_types f_type, bool *is_null) +longlong get_datetime_value(Item *item, enum_field_types f_type, bool *is_null) { - longlong UNINIT_VAR(value); - Item *item= **item_arg; - value= item->val_temporal_packed(f_type); + longlong value= item->val_temporal_packed(f_type); if ((*is_null= item->null_value)) return ~(ulonglong) 0; - if (cache_arg && item->const_item() && - !(item->type() == Item::CACHE_ITEM && item->cmp_type() == TIME_RESULT)) - { - if (!thd) - thd= current_thd; - - Item_cache_temporal *cache= new (thd->mem_root) Item_cache_temporal(thd, f_type); - cache->store_packed(value, item); - *cache_arg= cache; - *item_arg= cache_arg; - } return value; } @@ -811,12 +811,12 @@ int Arg_comparator::compare_temporal(enum_field_types type) owner->null_value= 1; /* Get DATE/DATETIME/TIME value of the 'a' item. */ - a_value= get_datetime_value(0, &a, &a_cache, type, &a_is_null); + a_value= get_datetime_value(*a, type, &a_is_null); if (a_is_null) return -1; /* Get DATE/DATETIME/TIME value of the 'b' item. */ - b_value= get_datetime_value(0, &b, &b_cache, type, &b_is_null); + b_value= get_datetime_value(*b, type, &b_is_null); if (b_is_null) return -1; @@ -834,10 +834,10 @@ int Arg_comparator::compare_e_temporal(enum_field_types type) longlong a_value, b_value; /* Get DATE/DATETIME/TIME value of the 'a' item. */ - a_value= get_datetime_value(0, &a, &a_cache, type, &a_is_null); + a_value= get_datetime_value(*a, type, &a_is_null); /* Get DATE/DATETIME/TIME value of the 'b' item. */ - b_value= get_datetime_value(0, &b, &b_cache, type, &b_is_null); + b_value= get_datetime_value(*b, type, &b_is_null); return a_is_null || b_is_null ? a_is_null == b_is_null : a_value == b_value; } @@ -2170,7 +2170,7 @@ void Item_func_between::fix_length_and_dec() strings as. 
*/ if (m_compare_type == TIME_RESULT) - compare_as_dates= find_date_time_item(args, 3, 0); + compare_as_dates= find_date_time_item(thd, args, 3, 0, false); /* See the comment for Item_func::convert_const_compared_to_int_field */ if (args[0]->real_item()->type() == FIELD_ITEM && @@ -2196,29 +2196,17 @@ longlong Item_func_between::val_int() switch (m_compare_type) { case TIME_RESULT: { - THD *thd= current_thd; longlong value, a, b; - Item *cache, **ptr; bool value_is_null, a_is_null, b_is_null; - ptr= &args[0]; enum_field_types f_type= field_type_for_temporal_comparison(compare_as_dates); - value= get_datetime_value(thd, &ptr, &cache, f_type, &value_is_null); - if (ptr != &args[0]) - thd->change_item_tree(&args[0], *ptr); + value= get_datetime_value(args[0], f_type, &value_is_null); if ((null_value= value_is_null)) return 0; - ptr= &args[1]; - a= get_datetime_value(thd, &ptr, &cache, f_type, &a_is_null); - if (ptr != &args[1]) - thd->change_item_tree(&args[1], *ptr); - - ptr= &args[2]; - b= get_datetime_value(thd, &ptr, &cache, f_type, &b_is_null); - if (ptr != &args[2]) - thd->change_item_tree(&args[2], *ptr); + a= get_datetime_value(args[1], f_type, &a_is_null); + b= get_datetime_value(args[2], f_type, &b_is_null); if (!a_is_null && !b_is_null) return (longlong) ((value >= a && value <= b) != negated); @@ -3248,7 +3236,7 @@ void Item_func_case::fix_length_and_dec() Item *date_arg= 0; if (m_found_types & (1U << TIME_RESULT)) - date_arg= find_date_time_item(args, nwhens + 1, 0); + date_arg= find_date_time_item(current_thd, args, nwhens + 1, 0, true); if (m_found_types & (1U << STRING_RESULT)) { @@ -3790,10 +3778,8 @@ void in_datetime::set(uint pos,Item *item) uchar *in_datetime::get_value(Item *item) { bool is_null; - Item **tmp_item= lval_cache ? 
&lval_cache : &item; - enum_field_types f_type= - tmp_item[0]->field_type_for_temporal_comparison(warn_item); - tmp.val= get_datetime_value(0, &tmp_item, &lval_cache, f_type, &is_null); + enum_field_types f_type= item->field_type_for_temporal_comparison(warn_item); + tmp.val= get_datetime_value(item, f_type, &is_null); if (item->null_value) return 0; tmp.unsigned_flag= 1L; @@ -4055,10 +4041,8 @@ cmp_item* cmp_item_decimal::make_same() void cmp_item_datetime::store_value(Item *item) { bool is_null; - Item **tmp_item= lval_cache ? &lval_cache : &item; - enum_field_types f_type= - tmp_item[0]->field_type_for_temporal_comparison(warn_item); - value= get_datetime_value(0, &tmp_item, &lval_cache, f_type, &is_null); + enum_field_types f_type= item->field_type_for_temporal_comparison(warn_item); + value= get_datetime_value(item, f_type, &is_null); m_null_value= item->null_value; } @@ -4263,7 +4247,7 @@ void Item_func_in::fix_length_and_dec() for (uint col= 0; col < cols; col++) { - date_arg= find_date_time_item(args, arg_count, col); + date_arg= find_date_time_item(thd, args, arg_count, col, true); if (date_arg) { cmp_item **cmp= 0; @@ -4329,7 +4313,7 @@ void Item_func_in::fix_length_and_dec() array= new (thd->mem_root) in_decimal(thd, arg_count - 1); break; case TIME_RESULT: - date_arg= find_date_time_item(args, arg_count, 0); + date_arg= find_date_time_item(thd, args, arg_count, 0, true); array= new (thd->mem_root) in_datetime(thd, date_arg, arg_count - 1); break; } @@ -4356,7 +4340,7 @@ void Item_func_in::fix_length_and_dec() else { if (found_types & (1U << TIME_RESULT)) - date_arg= find_date_time_item(args, arg_count, 0); + date_arg= find_date_time_item(thd, args, arg_count, 0, true); if (found_types & (1U << STRING_RESULT) && agg_arg_charsets_for_comparison(cmp_collation, args, arg_count)) return; diff --git a/sql/item_cmpfunc.h b/sql/item_cmpfunc.h index f833232de95..82bfd11a94d 100644 --- a/sql/item_cmpfunc.h +++ b/sql/item_cmpfunc.h @@ -1267,19 +1267,15 @@ public: 
/* Class to represent a vector of constant DATE/DATETIME values. Values are obtained with help of the get_datetime_value() function. - If the left item is a constant one then its value is cached in the - lval_cache variable. */ class in_datetime :public in_longlong { public: /* An item used to issue warnings. */ Item *warn_item; - /* Cache for the left item. */ - Item *lval_cache; in_datetime(THD *thd, Item *warn_item_arg, uint elements) - :in_longlong(thd, elements), warn_item(warn_item_arg), lval_cache(0) {}; + :in_longlong(thd, elements), warn_item(warn_item_arg) {} void set(uint pos,Item *item); uchar *get_value(Item *item); Item *create_item(THD *thd); @@ -1441,8 +1437,6 @@ public: /* Compare items in the DATETIME context. Values are obtained with help of the get_datetime_value() function. - If the left item is a constant one then its value is cached in the - lval_cache variable. */ class cmp_item_datetime : public cmp_item_scalar { @@ -1450,11 +1444,9 @@ class cmp_item_datetime : public cmp_item_scalar public: /* Item used for issuing warnings. */ Item *warn_item; - /* Cache for the left item. 
*/ - Item *lval_cache; cmp_item_datetime(Item *warn_item_arg) - : warn_item(warn_item_arg), lval_cache(0) {} + : warn_item(warn_item_arg) {} void store_value(Item *item); int cmp(Item *arg); int compare(cmp_item *ci); @@ -2622,7 +2614,7 @@ inline bool is_cond_or(Item *item) Item *and_expressions(Item *a, Item *b, Item **org_item); -longlong get_datetime_value(THD *thd, Item ***item_arg, Item **cache_arg, +longlong get_datetime_value(Item ***item_arg, Item **cache_arg, enum_field_types f_type, bool *is_null); diff --git a/storage/mroonga/mysql-test/mroonga/storage/r/column_time_fractional_seconds_with_index.result b/storage/mroonga/mysql-test/mroonga/storage/r/column_time_fractional_seconds_with_index.result index 35434a00160..111fae77a04 100644 --- a/storage/mroonga/mysql-test/mroonga/storage/r/column_time_fractional_seconds_with_index.result +++ b/storage/mroonga/mysql-test/mroonga/storage/r/column_time_fractional_seconds_with_index.result @@ -25,6 +25,6 @@ id title average max SELECT * FROM running_records WHERE average BETWEEN "-838:59:59.000000" AND "01:00:00.000001"; id title average max -3 record failure -838:59:59.000000 -838:59:59.000000 1 normal condition 01:00:00.000001 01:05:00.000001 +3 record failure -838:59:59.000000 -838:59:59.000000 DROP TABLE running_records; diff --git a/storage/mroonga/mysql-test/mroonga/storage/r/column_time_with_index.result b/storage/mroonga/mysql-test/mroonga/storage/r/column_time_with_index.result index a0b0350a8e3..326c81958cc 100644 --- a/storage/mroonga/mysql-test/mroonga/storage/r/column_time_with_index.result +++ b/storage/mroonga/mysql-test/mroonga/storage/r/column_time_with_index.result @@ -25,6 +25,6 @@ id title average max SELECT * FROM running_records WHERE average BETWEEN "-838:59:59" AND "01:00:00"; id title average max -3 record failure -838:59:59 -838:59:59 1 normal condition 01:00:00 01:05:00 +3 record failure -838:59:59 -838:59:59 DROP TABLE running_records; diff --git 
a/storage/rocksdb/mysql-test/rocksdb/r/rocksdb.result b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb.result index 9b084e63cd5..0b5e512cdc9 100644 --- a/storage/rocksdb/mysql-test/rocksdb/r/rocksdb.result +++ b/storage/rocksdb/mysql-test/rocksdb/r/rocksdb.result @@ -2080,7 +2080,7 @@ explain select kp1,kp2 from t1 force index (kp1) where kp1 between '09:01:00' and '09:05:00'; id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 range kp1 kp1 4 NULL # Using where; Using index +1 SIMPLE t1 index kp1 kp1 9 NULL # Using where; Using index select kp1,kp2 from t1 force index (kp1) where kp1 between '09:01:00' and '09:05:00'; kp1 kp2 @@ -2103,7 +2103,7 @@ explain select kp1,kp2 from t2 force index (kp1) where kp1 between '09:01:00' and '09:05:00'; id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t2 range kp1 kp1 3 NULL # Using where; Using index +1 SIMPLE t2 index kp1 kp1 8 NULL # Using where; Using index select kp1,kp2 from t2 force index (kp1) where kp1 between '09:01:00' and '09:05:00'; kp1 kp2 From 42529c4341ab8d4d5b2b7982baa9431ccdce6860 Mon Sep 17 00:00:00 2001 From: Sergei Golubchik Date: Mon, 12 Mar 2018 20:35:38 +0100 Subject: [PATCH 043/139] MDEV-15141 Check constraint validation on a datetime field crashes the process add the test case (the bug was fixed in d390e501eb8) --- mysql-test/r/check_constraint.result | 4 ++++ mysql-test/t/check_constraint.test | 8 ++++++++ 2 files changed, 12 insertions(+) diff --git a/mysql-test/r/check_constraint.result b/mysql-test/r/check_constraint.result index 525140d96e7..70d64cd6ff7 100644 --- a/mysql-test/r/check_constraint.result +++ b/mysql-test/r/check_constraint.result @@ -152,3 +152,7 @@ a 1 NULL drop table t1; +create table t1 (id int auto_increment primary key, datecol datetime, check (datecol>'0001-01-01 00:00:00')); +insert into t1 (datecol) values (now()); +insert into t1 (datecol) values (now()); +drop table t1; diff --git a/mysql-test/t/check_constraint.test 
b/mysql-test/t/check_constraint.test index f72ce38087e..9a77736acd7 100644 --- a/mysql-test/t/check_constraint.test +++ b/mysql-test/t/check_constraint.test @@ -103,3 +103,11 @@ insert t1 values (2); insert t1 values (NULL); select * from t1; drop table t1; + +# +# MDEV-15141 Check constraint validation on a datetime field crashes the process +# +create table t1 (id int auto_increment primary key, datecol datetime, check (datecol>'0001-01-01 00:00:00')); +insert into t1 (datecol) values (now()); +insert into t1 (datecol) values (now()); +drop table t1; From 27d94b7e03242b688632258b73727ed304a779f7 Mon Sep 17 00:00:00 2001 From: Sergei Golubchik Date: Tue, 13 Mar 2018 11:23:06 +0100 Subject: [PATCH 044/139] cleanup: remove get_datetime_value() --- sql/item_cmpfunc.cc | 88 +++++++++++---------------------------------- sql/item_cmpfunc.h | 6 ---- 2 files changed, 20 insertions(+), 74 deletions(-) diff --git a/sql/item_cmpfunc.cc b/sql/item_cmpfunc.cc index 42b94e4daee..bb71a11c986 100644 --- a/sql/item_cmpfunc.cc +++ b/sql/item_cmpfunc.cc @@ -745,46 +745,6 @@ static Item** cache_converted_constant(THD *thd, Item **value, } -/** - Retrieves correct DATETIME value from given item. - - @param[in] thd thread handle - @param[in,out] item_arg item to retrieve DATETIME value from - @param[in,out] cache_arg pointer to place to store the caching item to - @param[in] warn_item item for issuing the conversion warning - @param[out] is_null TRUE <=> the item_arg is null - - @details - Retrieves the correct DATETIME value from given item for comparison by the - compare_datetime() function. - - If the value should be compared as time (TIME_RESULT), it's retrieved as - MYSQL_TIME. Otherwise it's read as a number/string and converted to time. - Constant items are cached, so the convertion is only done once for them. - - Note the f_type behavior: if the item can be compared as time, then - f_type is this item's field_type(). 
Otherwise it's field_type() of - warn_item (which is the other operand of the comparison operator). - This logic provides correct string/number to date/time conversion - depending on the other operand (when comparing a string with a date, it's - parsed as a date, when comparing a string with a time it's parsed as a time) - - If the item is a constant it is replaced by the Item_cache_int, that - holds the packed datetime value. - - @return - MYSQL_TIME value, packed in a longlong, suitable for comparison. -*/ - -longlong get_datetime_value(Item *item, enum_field_types f_type, bool *is_null) -{ - longlong value= item->val_temporal_packed(f_type); - if ((*is_null= item->null_value)) - return ~(ulonglong) 0; - return value; -} - - /* Compare items values as dates. @@ -793,8 +753,7 @@ longlong get_datetime_value(Item *item, enum_field_types f_type, bool *is_null) DESCRIPTION Compare items values as DATE/DATETIME for both EQUAL_FUNC and from other - comparison functions. The correct DATETIME values are obtained - with help of the get_datetime_value() function. + comparison functions. RETURN -1 a < b or at least one item is null @@ -804,20 +763,19 @@ longlong get_datetime_value(Item *item, enum_field_types f_type, bool *is_null) int Arg_comparator::compare_temporal(enum_field_types type) { - bool a_is_null, b_is_null; longlong a_value, b_value; if (set_null) owner->null_value= 1; /* Get DATE/DATETIME/TIME value of the 'a' item. */ - a_value= get_datetime_value(*a, type, &a_is_null); - if (a_is_null) + a_value= (*a)->val_temporal_packed(type); + if ((*a)->null_value) return -1; /* Get DATE/DATETIME/TIME value of the 'b' item. */ - b_value= get_datetime_value(*b, type, &b_is_null); - if (b_is_null) + b_value= (*b)->val_temporal_packed(type); + if ((*b)->null_value) return -1; /* Here we have two not-NULL values. 
*/ @@ -830,16 +788,15 @@ int Arg_comparator::compare_temporal(enum_field_types type) int Arg_comparator::compare_e_temporal(enum_field_types type) { - bool a_is_null, b_is_null; longlong a_value, b_value; /* Get DATE/DATETIME/TIME value of the 'a' item. */ - a_value= get_datetime_value(*a, type, &a_is_null); + a_value= (*a)->val_temporal_packed(type); /* Get DATE/DATETIME/TIME value of the 'b' item. */ - b_value= get_datetime_value(*b, type, &b_is_null); - return a_is_null || b_is_null ? a_is_null == b_is_null - : a_value == b_value; + b_value= (*b)->val_temporal_packed(type); + return (*a)->null_value || (*b)->null_value ? + (*a)->null_value == (*b)->null_value : a_value == b_value; } int Arg_comparator::compare_string() @@ -2163,9 +2120,7 @@ void Item_func_between::fix_length_and_dec() /* When comparing as date/time, we need to convert non-temporal values - (e.g. strings) to MYSQL_TIME. get_datetime_value() does it - automatically when one of the operands is a date/time. But here we - may need to compare two strings as dates (str1 BETWEEN str2 AND date). + (e.g. strings) to MYSQL_TIME. For this to work, we need to know what date/time type we compare strings as. 
*/ @@ -2197,23 +2152,22 @@ longlong Item_func_between::val_int() case TIME_RESULT: { longlong value, a, b; - bool value_is_null, a_is_null, b_is_null; enum_field_types f_type= field_type_for_temporal_comparison(compare_as_dates); - value= get_datetime_value(args[0], f_type, &value_is_null); + value= args[0]->val_temporal_packed(f_type); - if ((null_value= value_is_null)) + if ((null_value= args[0]->null_value)) return 0; - a= get_datetime_value(args[1], f_type, &a_is_null); - b= get_datetime_value(args[2], f_type, &b_is_null); + a= args[1]->val_temporal_packed(f_type); + b= args[2]->val_temporal_packed(f_type); - if (!a_is_null && !b_is_null) + if (!args[1]->null_value && !args[2]->null_value) return (longlong) ((value >= a && value <= b) != negated); - if (a_is_null && b_is_null) + if (args[1]->null_value && args[2]->null_value) null_value=1; - else if (a_is_null) - null_value= value <= b; // not null if false range. + else if (args[1]->null_value) + null_value= value <= b; // not null if false range. else null_value= value >= a; break; @@ -3777,9 +3731,8 @@ void in_datetime::set(uint pos,Item *item) uchar *in_datetime::get_value(Item *item) { - bool is_null; enum_field_types f_type= item->field_type_for_temporal_comparison(warn_item); - tmp.val= get_datetime_value(item, f_type, &is_null); + tmp.val= item->val_temporal_packed(f_type); if (item->null_value) return 0; tmp.unsigned_flag= 1L; @@ -4040,9 +3993,8 @@ cmp_item* cmp_item_decimal::make_same() void cmp_item_datetime::store_value(Item *item) { - bool is_null; enum_field_types f_type= item->field_type_for_temporal_comparison(warn_item); - value= get_datetime_value(item, f_type, &is_null); + value= item->val_temporal_packed(f_type); m_null_value= item->null_value; } diff --git a/sql/item_cmpfunc.h b/sql/item_cmpfunc.h index 82bfd11a94d..ced3f343707 100644 --- a/sql/item_cmpfunc.h +++ b/sql/item_cmpfunc.h @@ -1266,7 +1266,6 @@ public: /* Class to represent a vector of constant DATE/DATETIME values. 
- Values are obtained with help of the get_datetime_value() function. */ class in_datetime :public in_longlong { @@ -1436,7 +1435,6 @@ public: /* Compare items in the DATETIME context. - Values are obtained with help of the get_datetime_value() function. */ class cmp_item_datetime : public cmp_item_scalar { @@ -2614,10 +2612,6 @@ inline bool is_cond_or(Item *item) Item *and_expressions(Item *a, Item *b, Item **org_item); -longlong get_datetime_value(Item ***item_arg, Item **cache_arg, - enum_field_types f_type, bool *is_null); - - class Comp_creator { public: From 0943b33de3daa0fcbf58803be8e991941de63218 Mon Sep 17 00:00:00 2001 From: Vladislav Vaintroub Date: Wed, 14 Mar 2018 14:35:27 +0000 Subject: [PATCH 045/139] MDEV-12190 YASSL isn't able to negotiate TLS version correctly Backport from 10.2 --- extra/yassl/src/handshake.cpp | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/extra/yassl/src/handshake.cpp b/extra/yassl/src/handshake.cpp index aa2de39333c..bb8e3791552 100644 --- a/extra/yassl/src/handshake.cpp +++ b/extra/yassl/src/handshake.cpp @@ -787,6 +787,16 @@ int DoProcessReply(SSL& ssl) needHdr = true; else { buffer >> hdr; + /* + According to RFC 4346 (see "7.4.1.3. Server Hello"), the Server Hello + packet needs to specify the highest supported TLS version, but not + higher than what client requests. YaSSL highest supported version is + TLSv1.1 (=3.2) - if the client requests a higher version, downgrade it + here to 3.2. + See also Appendix E of RFC 5246 (TLS 1.2) + */ + if (hdr.version_.major_ == 3 && hdr.version_.minor_ > 2) + hdr.version_.minor_ = 2; ssl.verifyState(hdr); } From efb9dec2b9f7dc60b2b45bf9d368c665c9132795 Mon Sep 17 00:00:00 2001 From: Alexander Barkov Date: Thu, 15 Mar 2018 10:23:42 +0400 Subject: [PATCH 046/139] A 32bit cleanup for MDEV-14452 Precision in INTERVAL xxx DAY_MICROSECOND parsed wrong? "mtr func_date_add" failed on 32-bit platforms. Removing a wrong case to "long". 
Both values[] and log_10_int[] are arrays of "ulonglong", no cast is needed. --- sql/item_timefunc.cc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/sql/item_timefunc.cc b/sql/item_timefunc.cc index d7506026c62..44105bd4a12 100644 --- a/sql/item_timefunc.cc +++ b/sql/item_timefunc.cc @@ -732,9 +732,9 @@ static bool get_interval_info(const char *str,uint length,CHARSET_INFO *cs, if (transform_msec && field_length > 0) { if (field_length < 6) - values[count - 1] *= (long) log_10_int[6 - field_length]; + values[count - 1] *= log_10_int[6 - field_length]; else if (field_length > 6) - values[count - 1] /= (long) log_10_int[field_length - 6]; + values[count - 1] /= log_10_int[field_length - 6]; } return (str != end); From 8b54c314863e7e9861470844e09c0453800748ae Mon Sep 17 00:00:00 2001 From: Daniel Black Date: Wed, 14 Mar 2018 13:31:28 +1100 Subject: [PATCH 047/139] MDEV-8743: where O_CLOEXEC is available, use for innodb buf_dump As this is the only moderately critical fopened for writing file, create an alternate path to use open and fdopen for non-glibc platforms that support O_CLOEXEC (BSDs). Tested on Linux (by modifing the GLIBC defination) to take this alternate path: $ cd /proc/23874 $ more fdinfo/71 pos: 0 flags: 02100001 mnt_id: 24 $ ls -la fd/71 l-wx------. 
1 dan dan 64 Mar 14 13:30 fd/71 -> /dev/shm/var_auto_i7rl/mysqld.1/data/ib_buffer_pool.incomplete --- include/my_global.h | 5 +++++ storage/innobase/buf/buf0dump.cc | 15 ++++++++++++++- storage/xtradb/buf/buf0dump.cc | 15 ++++++++++++++- 3 files changed, 33 insertions(+), 2 deletions(-) diff --git a/include/my_global.h b/include/my_global.h index 5ea761d587e..ad1527d7b37 100644 --- a/include/my_global.h +++ b/include/my_global.h @@ -601,6 +601,11 @@ typedef SOCKET_SIZE_TYPE size_socket; #ifndef O_CLOEXEC #define O_CLOEXEC 0 #endif +#ifdef __GLIBC__ +#define STR_O_CLOEXEC "e" +#else +#define STR_O_CLOEXEC "" +#endif #ifndef SOCK_CLOEXEC #define SOCK_CLOEXEC 0 #endif diff --git a/storage/innobase/buf/buf0dump.cc b/storage/innobase/buf/buf0dump.cc index 5d5be0505f6..306dcc2ec18 100644 --- a/storage/innobase/buf/buf0dump.cc +++ b/storage/innobase/buf/buf0dump.cc @@ -220,7 +220,20 @@ buf_dump( buf_dump_status(STATUS_NOTICE, "Dumping buffer pool(s) to %s", full_filename); - f = fopen(tmp_filename, "w"); +#if defined(__GLIBC__) || defined(__WIN__) || O_CLOEXEC == 0 + f = fopen(tmp_filename, "w" STR_O_CLOEXEC); +#else + { + int fd; + fd = open(tmp_filename, O_CREAT | O_TRUNC | O_CLOEXEC | O_WRONLY, 0640); + if (fd >= 0) { + f = fdopen(fd, "w"); + } + else { + f = NULL; + } + } +#endif if (f == NULL) { buf_dump_status(STATUS_ERR, "Cannot open '%s' for writing: %s", diff --git a/storage/xtradb/buf/buf0dump.cc b/storage/xtradb/buf/buf0dump.cc index 09ac460f865..0bc222d2cc2 100644 --- a/storage/xtradb/buf/buf0dump.cc +++ b/storage/xtradb/buf/buf0dump.cc @@ -220,7 +220,20 @@ buf_dump( buf_dump_status(STATUS_NOTICE, "Dumping buffer pool(s) to %s", full_filename); - f = fopen(tmp_filename, "w"); +#if defined(__GLIBC__) || defined(__WIN__) || O_CLOEXEC == 0 + f = fopen(tmp_filename, "w" STR_O_CLOEXEC); +#else + { + int fd; + fd = open(tmp_filename, O_CREAT | O_TRUNC | O_CLOEXEC | O_WRONLY, 0640); + if (fd >= 0) { + f = fdopen(fd, "w"); + } + else { + f = NULL; + } + } +#endif if 
(f == NULL) { buf_dump_status(STATUS_ERR, "Cannot open '%s' for writing: %s", From 0368e75a55abfc5fd7188d8a8e26d539d4cf274d Mon Sep 17 00:00:00 2001 From: Daniele Sciascia Date: Thu, 15 Mar 2018 11:36:36 +0100 Subject: [PATCH 048/139] MDEV-13549 Re-enable tests MW-328A, MW-328B, MW-328C and MW-329 These tests were also subject to error "Wrong usage of mutex 'LOCK_wsrep_thd' and 'LOCK_thd_kill'". --- mysql-test/suite/galera/disabled.def | 5 ----- 1 file changed, 5 deletions(-) diff --git a/mysql-test/suite/galera/disabled.def b/mysql-test/suite/galera/disabled.def index d61da97355d..e70ab112df9 100644 --- a/mysql-test/suite/galera/disabled.def +++ b/mysql-test/suite/galera/disabled.def @@ -13,7 +13,6 @@ MW-336 : MDEV-13549 Galera test failures galera_gra_log : MDEV-13549 Galera test failures galera_flush_local : MDEV-13549 Galera test failures galera_flush : MDEV-13549 Galera test failures -MW-329 : MDEV-13549 Galera test failures galera_account_management : MariaDB 10.0 does not support ALTER USER galera_binlog_row_image : MariaDB 10.0 does not support binlog_row_image galera_binlog_rows_query_log_events: MariaDB does not support binlog_rows_query_log_events @@ -53,10 +52,6 @@ galera_pc_ignore_sb : MDEV-13549 Galera test failures 10.1 galera_lock_table : MDEV-13549 Galera test failures 10.1 MW-284 : MDEV-13549 Galera test failures 10.1 galera_as_slave : MDEV-13549 Galera test failures 10.1 -MW-328C : MDEV-13549 Galera test failures 10.1 -MW-328A : MDEV-13549 Galera test failures 10.1 -MW-328B : MDEV-13549 Galera test failures 10.1 -MW-328 : MDEV-13549 Galera test failures 10.1 galera_suspend_slave : MDEV-13549 Galera test failures 10.1 galera_gtid : MDEV-13549 Galera test failures 10.1 galera_gtid_slave : MDEV-13549 Galera test failures 10.1 From 8c8028ca68fa7150f75707f31b9109fb46f92d80 Mon Sep 17 00:00:00 2001 From: Thirunarayanan Balathandayuthapani Date: Thu, 15 Mar 2018 19:15:09 +0530 Subject: [PATCH 049/139] MDEV-15384 buf_flush_LRU_list_batch() always reports 
n->flushed=0, n->evicted=0 - Work around for MDEV-13942: Drop spatial index to avoid possible hang --- .../suite/innodb/r/purge_secondary.result | 14 +++++++++----- mysql-test/suite/innodb/t/purge_secondary.test | 17 ++++++++++++----- 2 files changed, 21 insertions(+), 10 deletions(-) diff --git a/mysql-test/suite/innodb/r/purge_secondary.result b/mysql-test/suite/innodb/r/purge_secondary.result index eff5e71c9cc..8550fe0cdb1 100644 --- a/mysql-test/suite/innodb/r/purge_secondary.result +++ b/mysql-test/suite/innodb/r/purge_secondary.result @@ -99,7 +99,7 @@ l LINESTRING NOT NULL DEFAULT ST_linefromtext('linestring(448 -689, 9716 9648,9720 9650,9721 9648,9723 9648,9726 4648,12726 4653,12731 4655, 12734 4660,12730 4661,12733 4664,12733 4665,12735 4670,12737 4674,12741 4674, 12738 4675,12740 4675,12737 4675,12742 4678,12743 4681,12746 4677)'), -INDEX(b,c), SPATIAL INDEX(l) +INDEX(b,c), SPATIAL INDEX `sidx`(l) ) ENGINE=InnoDB ROW_FORMAT=REDUNDANT; INSERT INTO t1 () VALUES (),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(); SELECT LENGTH(l) FROM t1; @@ -123,10 +123,6 @@ LENGTH(l) 11197 INSERT INTO t1 (a) SELECT NULL FROM t1; INSERT INTO t1 (a) SELECT NULL FROM t1; -INSERT INTO t1 (a) SELECT NULL FROM t1; -INSERT INTO t1 (a) SELECT NULL FROM t1; -INSERT INTO t1 (a) SELECT NULL FROM t1; -INSERT INTO t1 (a) SELECT NULL FROM t1; CHECK TABLE t1; Table Op Msg_type Msg_text test.t1 check status OK @@ -143,6 +139,14 @@ SELECT OTHER_INDEX_SIZE FROM INFORMATION_SCHEMA.INNODB_SYS_TABLESTATS WHERE NAME='test/t1'; OTHER_INDEX_SIZE 1 +ALTER TABLE t1 DROP INDEX `sidx`; +INSERT INTO t1 () VALUES (),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(); +INSERT INTO t1 (a) SELECT NULL FROM t1; +INSERT INTO t1 (a) SELECT NULL FROM t1; +INSERT INTO t1 (a) SELECT NULL FROM t1; +INSERT INTO t1 (a) SELECT NULL FROM t1; +INSERT INTO t1 (a) SELECT NULL FROM t1; +INSERT INTO t1 (a) SELECT NULL FROM t1; SELECT NAME, SUBSYSTEM FROM INFORMATION_SCHEMA.INNODB_METRICS WHERE 
NAME="buffer_LRU_batch_evict_total_pages" AND COUNT > 0; NAME SUBSYSTEM diff --git a/mysql-test/suite/innodb/t/purge_secondary.test b/mysql-test/suite/innodb/t/purge_secondary.test index e447e63389e..9f77fba117c 100644 --- a/mysql-test/suite/innodb/t/purge_secondary.test +++ b/mysql-test/suite/innodb/t/purge_secondary.test @@ -103,16 +103,12 @@ CREATE TABLE t1 ( 9716 9648,9720 9650,9721 9648,9723 9648,9726 4648,12726 4653,12731 4655, 12734 4660,12730 4661,12733 4664,12733 4665,12735 4670,12737 4674,12741 4674, 12738 4675,12740 4675,12737 4675,12742 4678,12743 4681,12746 4677)'), - INDEX(b,c), SPATIAL INDEX(l) + INDEX(b,c), SPATIAL INDEX `sidx`(l) ) ENGINE=InnoDB ROW_FORMAT=REDUNDANT; INSERT INTO t1 () VALUES (),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(); SELECT LENGTH(l) FROM t1; INSERT INTO t1 (a) SELECT NULL FROM t1; INSERT INTO t1 (a) SELECT NULL FROM t1; -INSERT INTO t1 (a) SELECT NULL FROM t1; -INSERT INTO t1 (a) SELECT NULL FROM t1; -INSERT INTO t1 (a) SELECT NULL FROM t1; -INSERT INTO t1 (a) SELECT NULL FROM t1; CHECK TABLE t1; UPDATE t1 SET c=true, l=ST_linefromtext('linestring(0 0,1 1,2 2)'); DELETE FROM t1; @@ -124,6 +120,17 @@ ANALYZE TABLE t1; SELECT OTHER_INDEX_SIZE FROM INFORMATION_SCHEMA.INNODB_SYS_TABLESTATS WHERE NAME='test/t1'; +# Work around MDEV-13942, Dropping the spatial index to avoid the possible hang +ALTER TABLE t1 DROP INDEX `sidx`; + +INSERT INTO t1 () VALUES (),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(),(); +INSERT INTO t1 (a) SELECT NULL FROM t1; +INSERT INTO t1 (a) SELECT NULL FROM t1; +INSERT INTO t1 (a) SELECT NULL FROM t1; +INSERT INTO t1 (a) SELECT NULL FROM t1; +INSERT INTO t1 (a) SELECT NULL FROM t1; +INSERT INTO t1 (a) SELECT NULL FROM t1; + SELECT NAME, SUBSYSTEM FROM INFORMATION_SCHEMA.INNODB_METRICS WHERE NAME="buffer_LRU_batch_evict_total_pages" AND COUNT > 0; From ba6cf25396641aca657e0af794ef980e71d0bfa9 Mon Sep 17 00:00:00 2001 From: Daniele Sciascia Date: Thu, 15 Mar 2018 16:03:25 +0100 Subject: [PATCH 050/139] 
MDEV-13549 Fix and re-enable test galera.galera_var_slave_threads Two changes were made to the test: 1) Suppress warning "Refusing exit for the last slave thread." This warning was already suppressed, but on the wrong node. 2) The test occasionally fails because it expects that the number of applier threads changes immediately after changing the value of ```variable wsrep_slave_threads```. Which is not true. This patch turns snippets like this: ``` SET GLOBAL wsrep_slave_threads = x; SELECT COUNT(*) = x FROM INFORMATION_SCHEMA.PROCESSLIST WHERE USER = 'system user'; ``` Into proper wait_conditions: ``` SET GLOBAL wsrep_slave_threads = x; let $wait_condition = SELECT COUNT(*) = x FROM ...; --source include/wait_condition.inc ``` --- mysql-test/suite/galera/disabled.def | 1 - .../galera/r/galera_var_slave_threads.result | 14 +++++----- .../galera/t/galera_var_slave_threads.test | 28 ++++++++++++++----- 3 files changed, 28 insertions(+), 15 deletions(-) diff --git a/mysql-test/suite/galera/disabled.def b/mysql-test/suite/galera/disabled.def index d82d5dd2023..27ae4af1ae8 100644 --- a/mysql-test/suite/galera/disabled.def +++ b/mysql-test/suite/galera/disabled.def @@ -20,7 +20,6 @@ galera_binlog_rows_query_log_events: MariaDB does not support binlog_rows_query_ GAL-419 : MDEV-13549 Galera test failures galera_toi_ddl_fk_insert : MDEV-13549 Galera test failures galera_var_notify_cmd : MDEV-13549 Galera test failures -galera_var_slave_threads : MDEV-13549 Galera test failures mysql-wsrep#90 : MDEV-13549 Galera test failures galera_as_master_gtid : Requires MySQL GTID galera_as_master_gtid_change_master : Requires MySQL GTID diff --git a/mysql-test/suite/galera/r/galera_var_slave_threads.result b/mysql-test/suite/galera/r/galera_var_slave_threads.result index 2340d25d160..89026bce907 100644 --- a/mysql-test/suite/galera/r/galera_var_slave_threads.result +++ b/mysql-test/suite/galera/r/galera_var_slave_threads.result @@ -1,6 +1,6 @@ -CALL mtr.add_suppression("WSREP: 
Refusing exit for the last slave thread."); CREATE TABLE t1 (f1 INT PRIMARY KEY) Engine=InnoDB; CREATE TABLE t2 (f1 INT AUTO_INCREMENT PRIMARY KEY) Engine=InnoDB; +CALL mtr.add_suppression("WSREP: Refusing exit for the last slave thread."); SET GLOBAL wsrep_slave_threads = 0; Warnings: Warning 1292 Truncated incorrect wsrep_slave_threads value: '0' @@ -22,9 +22,6 @@ INSERT INTO t1 VALUES (1); SELECT COUNT(*) = 1 FROM t1; COUNT(*) = 1 1 -SELECT COUNT(*) = @@wsrep_slave_threads + 1 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE USER = 'system user'; -COUNT(*) = @@wsrep_slave_threads + 1 -1 SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE USER = 'system user' AND STATE LIKE '%wsrep aborter%'; COUNT(*) = 1 1 @@ -96,9 +93,6 @@ INSERT INTO t2 VALUES (DEFAULT); SELECT COUNT(*) = 64 FROM t2; COUNT(*) = 64 1 -SELECT COUNT(*) = @@wsrep_slave_threads + 1 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE USER = 'system user'; -COUNT(*) = @@wsrep_slave_threads + 1 -1 SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE USER = 'system user' AND STATE LIKE '%wsrep aborter%'; COUNT(*) = 1 1 @@ -111,5 +105,11 @@ DROP TABLE t2; CREATE TABLE t1 (i INT AUTO_INCREMENT PRIMARY KEY) ENGINE=INNODB; SET GLOBAL wsrep_slave_threads = 4; SET GLOBAL wsrep_slave_threads = 1; +INSERT INTO t1 VALUES (DEFAULT); +INSERT INTO t1 VALUES (DEFAULT); +INSERT INTO t1 VALUES (DEFAULT); DROP TABLE t1; +SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE USER = 'system user' AND STATE LIKE '%wsrep aborter%'; +COUNT(*) = 1 +1 # End of tests diff --git a/mysql-test/suite/galera/t/galera_var_slave_threads.test b/mysql-test/suite/galera/t/galera_var_slave_threads.test index 5e56800c5d6..5c775e67450 100644 --- a/mysql-test/suite/galera/t/galera_var_slave_threads.test +++ b/mysql-test/suite/galera/t/galera_var_slave_threads.test @@ -6,7 +6,6 @@ --source include/galera_cluster.inc --source include/have_innodb.inc -CALL mtr.add_suppression("WSREP: Refusing exit for the last slave thread."); --let 
$wsrep_slave_threads_orig = `SELECT @@wsrep_slave_threads` --connection node_1 @@ -14,7 +13,7 @@ CREATE TABLE t1 (f1 INT PRIMARY KEY) Engine=InnoDB; CREATE TABLE t2 (f1 INT AUTO_INCREMENT PRIMARY KEY) Engine=InnoDB; --connection node_2 - +CALL mtr.add_suppression("WSREP: Refusing exit for the last slave thread."); # Setting wsrep_slave_threads to zero triggers a warning SET GLOBAL wsrep_slave_threads = 0; SHOW WARNINGS; @@ -30,15 +29,14 @@ SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE USER = 'system use # SET GLOBAL wsrep_slave_threads = 64; ---sleep 0.5 --connection node_1 INSERT INTO t1 VALUES (1); --connection node_2 SELECT COUNT(*) = 1 FROM t1; - -SELECT COUNT(*) = @@wsrep_slave_threads + 1 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE USER = 'system user'; +--let $wait_condition = SELECT COUNT(*) = @@wsrep_slave_threads + 1 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE USER = 'system user'; +--source include/wait_condition.inc SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE USER = 'system user' AND STATE LIKE '%wsrep aborter%'; # @@ -59,8 +57,8 @@ while ($count) --connection node_2 SELECT COUNT(*) = 64 FROM t2; - -SELECT COUNT(*) = @@wsrep_slave_threads + 1 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE USER = 'system user'; +--let $wait_condition = SELECT COUNT(*) = @@wsrep_slave_threads + 1 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE USER = 'system user'; +--source include/wait_condition.inc SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE USER = 'system user' AND STATE LIKE '%wsrep aborter%'; @@ -78,7 +76,23 @@ CREATE TABLE t1 (i INT AUTO_INCREMENT PRIMARY KEY) ENGINE=INNODB; --connection node_2 SET GLOBAL wsrep_slave_threads = 4; +--let $wait_condition = SELECT COUNT(*) = @@wsrep_slave_threads + 1 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE USER = 'system user' +--source include/wait_condition.inc + SET GLOBAL wsrep_slave_threads = 1; + +--connection node_1 +INSERT INTO t1 VALUES (DEFAULT); +INSERT INTO t1 VALUES (DEFAULT); 
+INSERT INTO t1 VALUES (DEFAULT); DROP TABLE t1; +--connection node_2 +# +# make sure that we are left with exactly one applier thread before we leaving the test +# +--let $wait_condition = SELECT COUNT(*) = 2 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE USER = 'system user' +--source include/wait_condition.inc +SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE USER = 'system user' AND STATE LIKE '%wsrep aborter%'; + --echo # End of tests From a54abf01753a69c2186d60c155212149be59a7a6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Tue, 13 Mar 2018 14:00:45 +0200 Subject: [PATCH 051/139] innobase_kill_query(): Use lock_trx_handle_wait() The caller of THD::awake() should never hold any InnoDB mutexes, so we can always acquire lock_sys->mutex and trx->mutex. --- storage/innobase/handler/ha_innodb.cc | 70 ++++------------------- storage/xtradb/handler/ha_innodb.cc | 82 +++++---------------------- 2 files changed, 24 insertions(+), 128 deletions(-) diff --git a/storage/innobase/handler/ha_innodb.cc b/storage/innobase/handler/ha_innodb.cc index 4f76c568f17..521ac005437 100644 --- a/storage/innobase/handler/ha_innodb.cc +++ b/storage/innobase/handler/ha_innodb.cc @@ -1199,7 +1199,9 @@ innobase_close_connection( THD* thd); /*!< in: MySQL thread handle for which to close the connection */ -static void innobase_kill_query(handlerton *hton, THD* thd, enum thd_kill_levels level); +/** Cancel any pending lock request associated with the current THD. 
+@sa THD::awake() @sa ha_kill_query() */ +static void innobase_kill_query(handlerton*, THD* thd, enum thd_kill_levels); static void innobase_commit_ordered(handlerton *hton, THD* thd, bool all); /*****************************************************************//** @@ -4891,21 +4893,11 @@ innobase_close_thd( UNIV_INTERN void lock_cancel_waiting_and_release(lock_t* lock); -/*****************************************************************//** -Cancel any pending lock request associated with the current THD. */ -static -void -innobase_kill_query( -/*======================*/ - handlerton* hton, /*!< in: innobase handlerton */ - THD* thd, /*!< in: MySQL thread being killed */ - enum thd_kill_levels level) /*!< in: kill level */ +/** Cancel any pending lock request associated with the current THD. +@sa THD::awake() @sa ha_kill_query() */ +static void innobase_kill_query(handlerton*, THD* thd, enum thd_kill_levels) { - trx_t* trx; - DBUG_ENTER("innobase_kill_query"); - DBUG_ASSERT(hton == innodb_hton_ptr); - #ifdef WITH_WSREP wsrep_thd_LOCK(thd); if (wsrep_thd_get_conflict_state(thd) != NO_CONFLICT) { @@ -4920,51 +4912,11 @@ innobase_kill_query( } wsrep_thd_UNLOCK(thd); #endif /* WITH_WSREP */ - trx = thd_to_trx(thd); - if (trx && trx->lock.wait_lock) { - /* In wsrep BF we have already took lock_sys and trx - mutex either on wsrep_abort_transaction() or - before wsrep_kill_victim(). In replication we - could own lock_sys mutex taken in - lock_deadlock_check_and_resolve(). 
*/ - - WSREP_DEBUG("Killing victim trx %p BF %d trx BF %d trx_id " TRX_ID_FMT " ABORT %d thd %p" - " current_thd %p BF %d wait_lock_modes: %s\n", - trx, wsrep_thd_is_BF(trx->mysql_thd, FALSE), - wsrep_thd_is_BF(thd, FALSE), - trx->id, trx->abort_type, - trx->mysql_thd, - current_thd, - wsrep_thd_is_BF(current_thd, FALSE), - lock_get_info(trx->lock.wait_lock).c_str()); - - if (!wsrep_thd_is_BF(trx->mysql_thd, FALSE) - && trx->abort_type == TRX_SERVER_ABORT) { - ut_ad(!lock_mutex_own()); - lock_mutex_enter(); - } - - if (trx->abort_type != TRX_WSREP_ABORT) { - trx_mutex_enter(trx); - } - - ut_ad(lock_mutex_own()); - ut_ad(trx_mutex_own(trx)); - - /* Cancel a pending lock request. */ - if (trx->lock.wait_lock) { - lock_cancel_waiting_and_release(trx->lock.wait_lock); - } - - if (trx->abort_type != TRX_WSREP_ABORT) { - trx_mutex_exit(trx); - } - - if (!wsrep_thd_is_BF(trx->mysql_thd, FALSE) && - trx->abort_type == TRX_SERVER_ABORT) { - lock_mutex_exit(); - } + if (trx_t* trx = thd_to_trx(thd)) { + ut_ad(trx->mysql_thd == thd); + /* Cancel a pending lock request if there are any */ + lock_trx_handle_wait(trx); } DBUG_VOID_RETURN; @@ -18759,7 +18711,7 @@ wsrep_innobase_kill_one_trx( thd_get_thread_id(thd))); WSREP_DEBUG("kill query for: %ld", thd_get_thread_id(thd)); - /* Note that innobase_kill_connection will take lock_mutex + /* Note that innobase_kill_query will take lock_mutex and trx_mutex */ wsrep_thd_UNLOCK(thd); wsrep_thd_awake(thd, signal); diff --git a/storage/xtradb/handler/ha_innodb.cc b/storage/xtradb/handler/ha_innodb.cc index 9749bcecfe9..d0a98eda0de 100644 --- a/storage/xtradb/handler/ha_innodb.cc +++ b/storage/xtradb/handler/ha_innodb.cc @@ -1423,19 +1423,12 @@ innobase_close_connection( THD* thd); /*!< in: MySQL thread handle for which to close the connection */ +/** Cancel any pending lock request associated with the current THD. 
+@sa THD::awake() @sa ha_kill_query() */ +static void innobase_kill_query(handlerton*, THD* thd, enum thd_kill_levels); static void innobase_commit_ordered(handlerton *hton, THD* thd, bool all); static void innobase_checkpoint_request(handlerton *hton, void *cookie); -/*****************************************************************//** -Cancel any pending lock request associated with the current THD. */ -static -void -innobase_kill_connection( -/*======================*/ - handlerton* hton, /*!< in: innobase handlerton */ - THD* thd, /*!< in: handle to the MySQL thread being killed */ - thd_kill_levels); - /*****************************************************************//** Commits a transaction in an InnoDB database or marks an SQL statement ended. @@ -3886,7 +3879,7 @@ innobase_init( innobase_hton->release_temporary_latches = innobase_release_temporary_latches; - innobase_hton->kill_query = innobase_kill_connection; + innobase_hton->kill_query = innobase_kill_query; if (srv_file_per_table) innobase_hton->tablefile_extensions = ha_innobase_exts; @@ -5496,20 +5489,11 @@ ha_innobase::get_row_type() const return(ROW_TYPE_NOT_USED); } -/*****************************************************************//** -Cancel any pending lock request associated with the current THD. */ -static -void -innobase_kill_connection( -/*======================*/ - handlerton* hton, /*!< in: innobase handlerton */ - THD* thd, /*!< in: handle to the MySQL thread being killed */ - thd_kill_levels) +/** Cancel any pending lock request associated with the current THD. 
+@sa THD::awake() @sa ha_kill_query() */ +static void innobase_kill_query(handlerton*, THD* thd, enum thd_kill_levels) { - trx_t* trx; - - DBUG_ENTER("innobase_kill_connection"); - DBUG_ASSERT(hton == innodb_hton_ptr); + DBUG_ENTER("innobase_kill_query"); #ifdef WITH_WSREP wsrep_thd_LOCK(thd); @@ -5525,50 +5509,10 @@ innobase_kill_connection( } wsrep_thd_UNLOCK(thd); #endif /* WITH_WSREP */ - trx = thd_to_trx(thd); - - if (trx && trx->lock.wait_lock) { - /* In wsrep BF we have already took lock_sys and trx - mutex either on wsrep_abort_transaction() or - before wsrep_kill_victim(). In replication we - could own lock_sys mutex taken in - lock_deadlock_check_and_resolve().*/ - - WSREP_DEBUG("Killing victim trx %p BF %d trx BF %d trx_id " TRX_ID_FMT " ABORT %d thd %p" - " current_thd %p BF %d wait_lock_modes: %s\n", - trx, wsrep_thd_is_BF(trx->mysql_thd, FALSE), - wsrep_thd_is_BF(thd, FALSE), - trx->id, trx->abort_type, - trx->mysql_thd, - current_thd, - wsrep_thd_is_BF(current_thd, FALSE), - lock_get_info(trx->lock.wait_lock).c_str()); - - if (!wsrep_thd_is_BF(trx->mysql_thd, FALSE) - && trx->abort_type == TRX_SERVER_ABORT) { - ut_ad(!lock_mutex_own()); - lock_mutex_enter(); - } - - if (trx->abort_type != TRX_WSREP_ABORT) { - trx_mutex_enter(trx); - } - - ut_ad(lock_mutex_own()); - ut_ad(trx_mutex_own(trx)); - - if (trx->lock.wait_lock) { - lock_cancel_waiting_and_release(trx->lock.wait_lock); - } - - if (trx->abort_type != TRX_WSREP_ABORT) { - trx_mutex_exit(trx); - } - - if (!wsrep_thd_is_BF(trx->mysql_thd, FALSE) && - trx->abort_type == TRX_SERVER_ABORT) { - lock_mutex_exit(); - } + if (trx_t* trx = thd_to_trx(thd)) { + ut_ad(trx->mysql_thd == thd); + /* Cancel a pending lock request if there are any */ + lock_trx_handle_wait(trx); } DBUG_VOID_RETURN; @@ -19802,7 +19746,7 @@ wsrep_innobase_kill_one_trx( thd_get_thread_id(thd))); WSREP_DEBUG("kill query for: %ld", thd_get_thread_id(thd)); - /* Note that innobase_kill_connection will take lock_mutex + /* Note that 
innobase_kill_query will take lock_mutex and trx_mutex */ wsrep_thd_UNLOCK(thd); wsrep_thd_awake(thd, signal); From 723f87e9d318aedad30dfb9dde104312d6612662 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Wed, 14 Mar 2018 09:39:47 +0200 Subject: [PATCH 052/139] lock_table_create(), lock_rec_create(): Clean up the WSREP code By definition, c_lock->trx->lock.wait_lock==c_lock cannot hold. That is, the owner transaction of a lock cannot be waiting for that particular lock. It must have been waiting for some other lock. Remove the dead code related to that. Also, test c_lock for NULLness only once. --- storage/innobase/lock/lock0lock.cc | 43 +++++++----------------------- storage/xtradb/lock/lock0lock.cc | 43 +++++++----------------------- 2 files changed, 20 insertions(+), 66 deletions(-) diff --git a/storage/innobase/lock/lock0lock.cc b/storage/innobase/lock/lock0lock.cc index eb3d260e78a..68b19a6d2b5 100644 --- a/storage/innobase/lock/lock0lock.cc +++ b/storage/innobase/lock/lock0lock.cc @@ -898,7 +898,7 @@ UNIV_INLINE void lock_reset_lock_and_trx_wait( /*=========================*/ - lock_t* lock) /*!< in/out: record lock */ + lock_t* lock) /*!< in/out: record lock */ { ut_ad(lock_get_wait(lock)); ut_ad(lock_mutex_own()); @@ -2216,13 +2216,6 @@ lock_rec_create( trx_mutex_enter(trx); } - /* trx might not wait for c_lock, but some other lock - does not matter if wait_lock was released above - */ - if (c_lock->trx->lock.wait_lock == c_lock) { - lock_reset_lock_and_trx_wait(lock); - } - trx_mutex_exit(c_lock->trx); if (wsrep_debug) { @@ -4970,19 +4963,18 @@ lock_table_create( UT_LIST_ADD_LAST(trx_locks, trx->lock.trx_locks, lock); #ifdef WITH_WSREP - if (wsrep_thd_is_wsrep(trx->mysql_thd)) { - if (c_lock && wsrep_thd_is_BF(trx->mysql_thd, FALSE)) { + if (c_lock) { + if (wsrep_thd_is_wsrep(trx->mysql_thd) + && wsrep_thd_is_BF(trx->mysql_thd, FALSE)) { UT_LIST_INSERT_AFTER( un_member.tab_lock.locks, table->locks, c_lock, lock); } else { 
UT_LIST_ADD_LAST(un_member.tab_lock.locks, table->locks, lock); } - if (c_lock) { - trx_mutex_enter(c_lock->trx); - } + trx_mutex_enter(c_lock->trx); - if (c_lock && c_lock->trx->lock.que_state == TRX_QUE_LOCK_WAIT) { + if (c_lock->trx->lock.que_state == TRX_QUE_LOCK_WAIT) { c_lock->trx->lock.was_chosen_as_deadlock_victim = TRUE; @@ -4991,36 +4983,21 @@ lock_table_create( wsrep_print_wait_locks(c_lock->trx->lock.wait_lock); } - /* have to release trx mutex for the duration of - victim lock release. This will eventually call - lock_grant, which wants to grant trx mutex again - */ - /* caller has trx_mutex, have to release for lock cancel */ + /* The lock release will call lock_grant(), + which would acquire trx->mutex again. */ trx_mutex_exit(trx); lock_cancel_waiting_and_release(c_lock->trx->lock.wait_lock); trx_mutex_enter(trx); - /* trx might not wait for c_lock, but some other lock - does not matter if wait_lock was released above - */ - if (c_lock->trx->lock.wait_lock == c_lock) { - lock_reset_lock_and_trx_wait(lock); - } - if (wsrep_debug) { fprintf(stderr, "WSREP: c_lock canceled " TRX_ID_FMT "\n", c_lock->trx->id); } } - if (c_lock) { - trx_mutex_exit(c_lock->trx); - } - } else { + trx_mutex_exit(c_lock->trx); + } else #endif /* WITH_WSREP */ UT_LIST_ADD_LAST(un_member.tab_lock.locks, table->locks, lock); -#ifdef WITH_WSREP - } -#endif /* WITH_WSREP */ if (UNIV_UNLIKELY(type_mode & LOCK_WAIT)) { diff --git a/storage/xtradb/lock/lock0lock.cc b/storage/xtradb/lock/lock0lock.cc index b50452c1d5d..e2b9a1671ca 100644 --- a/storage/xtradb/lock/lock0lock.cc +++ b/storage/xtradb/lock/lock0lock.cc @@ -910,7 +910,7 @@ UNIV_INLINE void lock_reset_lock_and_trx_wait( /*=========================*/ - lock_t* lock) /*!< in/out: record lock */ + lock_t* lock) /*!< in/out: record lock */ { ut_ad(lock_get_wait(lock)); ut_ad(lock_mutex_own()); @@ -2358,13 +2358,6 @@ lock_rec_create( trx_mutex_enter(trx); } - /* trx might not wait for c_lock, but some other lock - does not 
matter if wait_lock was released above - */ - if (c_lock->trx->lock.wait_lock == c_lock) { - lock_reset_lock_and_trx_wait(lock); - } - trx_mutex_exit(c_lock->trx); if (wsrep_debug) { @@ -5010,19 +5003,18 @@ lock_table_create( UT_LIST_ADD_LAST(trx_locks, trx->lock.trx_locks, lock); #ifdef WITH_WSREP - if (wsrep_thd_is_wsrep(trx->mysql_thd)) { - if (c_lock && wsrep_thd_is_BF(trx->mysql_thd, FALSE)) { + if (c_lock) { + if (wsrep_thd_is_wsrep(trx->mysql_thd) + && wsrep_thd_is_BF(trx->mysql_thd, FALSE)) { UT_LIST_INSERT_AFTER( un_member.tab_lock.locks, table->locks, c_lock, lock); } else { UT_LIST_ADD_LAST(un_member.tab_lock.locks, table->locks, lock); } - if (c_lock) { - trx_mutex_enter(c_lock->trx); - } + trx_mutex_enter(c_lock->trx); - if (c_lock && c_lock->trx->lock.que_state == TRX_QUE_LOCK_WAIT) { + if (c_lock->trx->lock.que_state == TRX_QUE_LOCK_WAIT) { c_lock->trx->lock.was_chosen_as_deadlock_victim = TRUE; @@ -5031,36 +5023,21 @@ lock_table_create( wsrep_print_wait_locks(c_lock->trx->lock.wait_lock); } - /* have to release trx mutex for the duration of - victim lock release. This will eventually call - lock_grant, which wants to grant trx mutex again - */ - /* caller has trx_mutex, have to release for lock cancel */ + /* The lock release will call lock_grant(), + which would acquire trx->mutex again. 
*/ trx_mutex_exit(trx); lock_cancel_waiting_and_release(c_lock->trx->lock.wait_lock); trx_mutex_enter(trx); - /* trx might not wait for c_lock, but some other lock - does not matter if wait_lock was released above - */ - if (c_lock->trx->lock.wait_lock == c_lock) { - lock_reset_lock_and_trx_wait(lock); - } - if (wsrep_debug) { fprintf(stderr, "WSREP: c_lock canceled " TRX_ID_FMT "\n", c_lock->trx->id); } } - if (c_lock) { - trx_mutex_exit(c_lock->trx); - } - } else { + trx_mutex_exit(c_lock->trx); + } else #endif /* WITH_WSREP */ UT_LIST_ADD_LAST(un_member.tab_lock.locks, table->locks, lock); -#ifdef WITH_WSREP - } -#endif /* WITH_WSREP */ if (UNIV_UNLIKELY(type_mode & LOCK_WAIT)) { From dbb3960ad8d92f3cc17527440b059bf80bf83120 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Thu, 15 Mar 2018 19:48:29 +0200 Subject: [PATCH 053/139] Follow-up to MDEV-11236/MDEV-14846 debug assertion ha_innobase::unlock_row(): Use a relaxed version of the trx_state_eq() debug assertion, because rr_unlock_row() may be invoked after an error has been already reported and the transaction has been rolled back. --- storage/innobase/handler/ha_innodb.cc | 2 +- storage/xtradb/handler/ha_innodb.cc | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/storage/innobase/handler/ha_innodb.cc b/storage/innobase/handler/ha_innodb.cc index 521ac005437..9c3b04b2a67 100644 --- a/storage/innobase/handler/ha_innodb.cc +++ b/storage/innobase/handler/ha_innodb.cc @@ -9040,7 +9040,7 @@ ha_innobase::unlock_row(void) But there are some calls to this function from the SQL layer when the transaction is in state TRX_STATE_NOT_STARTED. The check on prebuilt->select_lock_type above gets around this issue. 
*/ - ut_ad(trx_state_eq(prebuilt->trx, TRX_STATE_ACTIVE)); + ut_ad(trx_state_eq(prebuilt->trx, TRX_STATE_ACTIVE, true)); switch (prebuilt->row_read_type) { case ROW_READ_WITH_LOCKS: diff --git a/storage/xtradb/handler/ha_innodb.cc b/storage/xtradb/handler/ha_innodb.cc index d0a98eda0de..347b235b049 100644 --- a/storage/xtradb/handler/ha_innodb.cc +++ b/storage/xtradb/handler/ha_innodb.cc @@ -9616,7 +9616,7 @@ ha_innobase::unlock_row(void) But there are some calls to this function from the SQL layer when the transaction is in state TRX_STATE_NOT_STARTED. The check on prebuilt->select_lock_type above gets around this issue. */ - ut_ad(trx_state_eq(prebuilt->trx, TRX_STATE_ACTIVE)); + ut_ad(trx_state_eq(prebuilt->trx, TRX_STATE_ACTIVE, true)); switch (prebuilt->row_read_type) { case ROW_READ_WITH_LOCKS: From d251cedd8d20a3086bb593996bf3af4346e58f12 Mon Sep 17 00:00:00 2001 From: Galina Shalygina Date: Tue, 13 Mar 2018 02:53:48 +0200 Subject: [PATCH 054/139] MDEV-15478: Lost name of a explicitly named CTE column used in the non-recursive CTE defined with UNION The problem appears as the columns of the non-recursive CTE weren't renamed. The renaming procedure was called for recursive CTEs only. To fix it in the procedure st_select_lex_unit::prepare With_element::rename_columns_of_derived_unit is called now for both CTEs: recursive and non-recursive. 
--- mysql-test/r/cte_nonrecursive.result | 37 ++++++++++++++++++++++++++++ mysql-test/t/cte_nonrecursive.test | 23 +++++++++++++++++ sql/sql_union.cc | 2 +- 3 files changed, 61 insertions(+), 1 deletion(-) diff --git a/mysql-test/r/cte_nonrecursive.result b/mysql-test/r/cte_nonrecursive.result index f2bfce9f6ba..001df909bcf 100644 --- a/mysql-test/r/cte_nonrecursive.result +++ b/mysql-test/r/cte_nonrecursive.result @@ -1425,3 +1425,40 @@ a DEALLOCATE PREPARE stmt; DROP TABLE t1; DROP VIEW v1,v2; +# +# MDEV-15478: Lost name of a explicitly named CTE column used in +# the non-recursive CTE defined with UNION +# +CREATE TABLE t1 (x int, y int); +INSERT INTO t1 VALUES (1,2),(2,7),(3,3); +WITH cte(a) AS (SELECT 1 UNION SELECT 2) SELECT * FROM cte; +a +1 +2 +WITH cte(a) AS (SELECT 1 UNION SELECT 2) SELECT a FROM cte; +a +1 +2 +WITH cte(a) AS (SELECT 1 UNION ALL SELECT 1) SELECT a FROM cte; +a +1 +1 +WITH cte(a) AS (SELECT x from t1 UNION SELECT 4) SELECT a FROM cte; +a +1 +2 +3 +4 +WITH cte(a) AS (SELECT 4 UNION SELECT x FROM t1 UNION SELECT 5) +SELECT a FROM cte; +a +4 +1 +2 +3 +5 +WITH cte(a,b) AS (SELECT 4,5 UNION SELECT 4,3) SELECT a,b FROM cte; +a b +4 5 +4 3 +DROP TABLE t1; diff --git a/mysql-test/t/cte_nonrecursive.test b/mysql-test/t/cte_nonrecursive.test index 3d073183877..5e1770496f6 100644 --- a/mysql-test/t/cte_nonrecursive.test +++ b/mysql-test/t/cte_nonrecursive.test @@ -989,3 +989,26 @@ DEALLOCATE PREPARE stmt; DROP TABLE t1; DROP VIEW v1,v2; + +--echo # +--echo # MDEV-15478: Lost name of a explicitly named CTE column used in +--echo # the non-recursive CTE defined with UNION +--echo # + +CREATE TABLE t1 (x int, y int); +INSERT INTO t1 VALUES (1,2),(2,7),(3,3); + +WITH cte(a) AS (SELECT 1 UNION SELECT 2) SELECT * FROM cte; + +WITH cte(a) AS (SELECT 1 UNION SELECT 2) SELECT a FROM cte; + +WITH cte(a) AS (SELECT 1 UNION ALL SELECT 1) SELECT a FROM cte; + +WITH cte(a) AS (SELECT x from t1 UNION SELECT 4) SELECT a FROM cte; + +WITH cte(a) AS (SELECT 4 UNION 
SELECT x FROM t1 UNION SELECT 5) +SELECT a FROM cte; + +WITH cte(a,b) AS (SELECT 4,5 UNION SELECT 4,3) SELECT a,b FROM cte; + +DROP TABLE t1; diff --git a/sql/sql_union.cc b/sql/sql_union.cc index 524f6eb9c8d..19c9330481f 100644 --- a/sql/sql_union.cc +++ b/sql/sql_union.cc @@ -598,7 +598,7 @@ bool st_select_lex_unit::prepare(THD *thd_arg, select_result *sel_result, types= first_sl->item_list; else if (sl == first_sl) { - if (is_recursive) + if (with_element) { if (derived->with->rename_columns_of_derived_unit(thd, this)) goto err; From ca40330d1da6c1b30a33249eab546a5275a1a103 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Fri, 16 Mar 2018 08:23:56 +0200 Subject: [PATCH 055/139] Fix a deadlock in thd_report_wait_for() Unlike commit a54abf01753a69c2186d60c155212149be59a7a6 claimed, the caller of THD::awake() may actually hold the InnoDB lock_sys->mutex. That commit introduced a deadlock of threads in the replication slave when running the test rpl.rpl_parallel_optimistic_nobinlog. lock_trx_handle_wait(): Expect the callers to acquire and release lock_sys->mutex and trx->mutex. innobase_kill_query(): Restore the logic for conditionally acquiring and releasing the mutexes. THD::awake() can be called from inside InnoDB while holding one or both mutexes, via thd_report_wait_for() and via wsrep_innobase_kill_one_trx(). 
--- storage/innobase/handler/ha_innodb.cc | 23 +++++++++++++++++++++++ storage/innobase/lock/lock0lock.cc | 23 ++++++++--------------- storage/innobase/row/row0sel.cc | 4 ++++ storage/xtradb/handler/ha_innodb.cc | 23 +++++++++++++++++++++++ storage/xtradb/lock/lock0lock.cc | 23 ++++++++--------------- storage/xtradb/row/row0sel.cc | 4 ++++ 6 files changed, 70 insertions(+), 30 deletions(-) diff --git a/storage/innobase/handler/ha_innodb.cc b/storage/innobase/handler/ha_innodb.cc index 9c3b04b2a67..31ea969fa21 100644 --- a/storage/innobase/handler/ha_innodb.cc +++ b/storage/innobase/handler/ha_innodb.cc @@ -4915,8 +4915,31 @@ static void innobase_kill_query(handlerton*, THD* thd, enum thd_kill_levels) if (trx_t* trx = thd_to_trx(thd)) { ut_ad(trx->mysql_thd == thd); + + switch (trx->abort_type) { + case TRX_WSREP_ABORT: + break; + case TRX_SERVER_ABORT: + if (!wsrep_thd_is_BF(trx->mysql_thd, FALSE)) { + lock_mutex_enter(); + } + /* fall through */ + case TRX_REPLICATION_ABORT: + trx_mutex_enter(trx); + } /* Cancel a pending lock request if there are any */ lock_trx_handle_wait(trx); + switch (trx->abort_type) { + case TRX_WSREP_ABORT: + break; + case TRX_SERVER_ABORT: + if (!wsrep_thd_is_BF(trx->mysql_thd, FALSE)) { + lock_mutex_exit(); + } + /* fall through */ + case TRX_REPLICATION_ABORT: + trx_mutex_exit(trx); + } } DBUG_VOID_RETURN; diff --git a/storage/innobase/lock/lock0lock.cc b/storage/innobase/lock/lock0lock.cc index 68b19a6d2b5..12c0051d09f 100644 --- a/storage/innobase/lock/lock0lock.cc +++ b/storage/innobase/lock/lock0lock.cc @@ -7977,26 +7977,19 @@ lock_trx_handle_wait( /*=================*/ trx_t* trx) /*!< in/out: trx lock state */ { - dberr_t err; - - lock_mutex_enter(); - - trx_mutex_enter(trx); + ut_ad(lock_mutex_own()); + ut_ad(trx_mutex_own(trx)); if (trx->lock.was_chosen_as_deadlock_victim) { - err = DB_DEADLOCK; - } else if (trx->lock.wait_lock != NULL) { - lock_cancel_waiting_and_release(trx->lock.wait_lock); - err = DB_LOCK_WAIT; - } else { + 
return DB_DEADLOCK; + } + if (!trx->lock.wait_lock) { /* The lock was probably granted before we got here. */ - err = DB_SUCCESS; + return DB_SUCCESS; } - lock_mutex_exit(); - trx_mutex_exit(trx); - - return(err); + lock_cancel_waiting_and_release(trx->lock.wait_lock); + return DB_LOCK_WAIT; } /*********************************************************************//** diff --git a/storage/innobase/row/row0sel.cc b/storage/innobase/row/row0sel.cc index 6cca3e21c79..3cf7bc6ee80 100644 --- a/storage/innobase/row/row0sel.cc +++ b/storage/innobase/row/row0sel.cc @@ -4615,7 +4615,11 @@ no_gap_lock: a deadlock and the transaction had to wait then release the lock it is waiting on. */ + lock_mutex_enter(); + trx_mutex_enter(trx); err = lock_trx_handle_wait(trx); + lock_mutex_exit(); + trx_mutex_exit(trx); switch (err) { case DB_SUCCESS: diff --git a/storage/xtradb/handler/ha_innodb.cc b/storage/xtradb/handler/ha_innodb.cc index 347b235b049..7d550c45356 100644 --- a/storage/xtradb/handler/ha_innodb.cc +++ b/storage/xtradb/handler/ha_innodb.cc @@ -5511,8 +5511,31 @@ static void innobase_kill_query(handlerton*, THD* thd, enum thd_kill_levels) #endif /* WITH_WSREP */ if (trx_t* trx = thd_to_trx(thd)) { ut_ad(trx->mysql_thd == thd); + + switch (trx->abort_type) { + case TRX_WSREP_ABORT: + break; + case TRX_SERVER_ABORT: + if (!wsrep_thd_is_BF(trx->mysql_thd, FALSE)) { + lock_mutex_enter(); + } + /* fall through */ + case TRX_REPLICATION_ABORT: + trx_mutex_enter(trx); + } /* Cancel a pending lock request if there are any */ lock_trx_handle_wait(trx); + switch (trx->abort_type) { + case TRX_WSREP_ABORT: + break; + case TRX_SERVER_ABORT: + if (!wsrep_thd_is_BF(trx->mysql_thd, FALSE)) { + lock_mutex_exit(); + } + /* fall through */ + case TRX_REPLICATION_ABORT: + trx_mutex_exit(trx); + } } DBUG_VOID_RETURN; diff --git a/storage/xtradb/lock/lock0lock.cc b/storage/xtradb/lock/lock0lock.cc index e2b9a1671ca..b8446013bc7 100644 --- a/storage/xtradb/lock/lock0lock.cc +++ 
b/storage/xtradb/lock/lock0lock.cc @@ -8087,26 +8087,19 @@ lock_trx_handle_wait( /*=================*/ trx_t* trx) /*!< in/out: trx lock state */ { - dberr_t err; - - lock_mutex_enter(); - - trx_mutex_enter(trx); + ut_ad(lock_mutex_own()); + ut_ad(trx_mutex_own(trx)); if (trx->lock.was_chosen_as_deadlock_victim) { - err = DB_DEADLOCK; - } else if (trx->lock.wait_lock != NULL) { - lock_cancel_waiting_and_release(trx->lock.wait_lock); - err = DB_LOCK_WAIT; - } else { + return DB_DEADLOCK; + } + if (!trx->lock.wait_lock) { /* The lock was probably granted before we got here. */ - err = DB_SUCCESS; + return DB_SUCCESS; } - lock_mutex_exit(); - trx_mutex_exit(trx); - - return(err); + lock_cancel_waiting_and_release(trx->lock.wait_lock); + return DB_LOCK_WAIT; } /*********************************************************************//** diff --git a/storage/xtradb/row/row0sel.cc b/storage/xtradb/row/row0sel.cc index 03ae6822fb7..b81ea60a413 100644 --- a/storage/xtradb/row/row0sel.cc +++ b/storage/xtradb/row/row0sel.cc @@ -4627,7 +4627,11 @@ no_gap_lock: a deadlock and the transaction had to wait then release the lock it is waiting on. 
*/ + lock_mutex_enter(); + trx_mutex_enter(trx); err = lock_trx_handle_wait(trx); + lock_mutex_exit(); + trx_mutex_exit(trx); switch (err) { case DB_SUCCESS: From 7033af9e81a69d75199faa028f8959667f755551 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Fri, 16 Mar 2018 08:34:12 +0200 Subject: [PATCH 056/139] Conditionally define TRX_WSREP_ABORT --- storage/innobase/handler/ha_innodb.cc | 4 ++++ storage/innobase/include/trx0trx.h | 12 +++++++----- storage/xtradb/handler/ha_innodb.cc | 4 ++++ storage/xtradb/include/trx0trx.h | 12 +++++++----- 4 files changed, 22 insertions(+), 10 deletions(-) diff --git a/storage/innobase/handler/ha_innodb.cc b/storage/innobase/handler/ha_innodb.cc index 31ea969fa21..7c412344920 100644 --- a/storage/innobase/handler/ha_innodb.cc +++ b/storage/innobase/handler/ha_innodb.cc @@ -4917,8 +4917,10 @@ static void innobase_kill_query(handlerton*, THD* thd, enum thd_kill_levels) ut_ad(trx->mysql_thd == thd); switch (trx->abort_type) { +#ifdef WITH_WSREP case TRX_WSREP_ABORT: break; +#endif case TRX_SERVER_ABORT: if (!wsrep_thd_is_BF(trx->mysql_thd, FALSE)) { lock_mutex_enter(); @@ -4930,8 +4932,10 @@ static void innobase_kill_query(handlerton*, THD* thd, enum thd_kill_levels) /* Cancel a pending lock request if there are any */ lock_trx_handle_wait(trx); switch (trx->abort_type) { +#ifdef WITH_WSREP case TRX_WSREP_ABORT: break; +#endif case TRX_SERVER_ABORT: if (!wsrep_thd_is_BF(trx->mysql_thd, FALSE)) { lock_mutex_exit(); diff --git a/storage/innobase/include/trx0trx.h b/storage/innobase/include/trx0trx.h index 68941363620..fe16b8272b8 100644 --- a/storage/innobase/include/trx0trx.h +++ b/storage/innobase/include/trx0trx.h @@ -1,7 +1,7 @@ /***************************************************************************** Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved. -Copyright (c) 2015, 2017, MariaDB Corporation. +Copyright (c) 2015, 2018, MariaDB Corporation. 
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -695,11 +695,13 @@ lock_rec_convert_impl_to_expl()) will access transactions associated to other connections. The locks of transactions are protected by lock_sys->mutex and sometimes by trx->mutex. */ -typedef enum { +enum trx_abort_t { TRX_SERVER_ABORT = 0, - TRX_WSREP_ABORT = 1, - TRX_REPLICATION_ABORT = 2 -} trx_abort_t; +#ifdef WITH_WSREP + TRX_WSREP_ABORT, +#endif + TRX_REPLICATION_ABORT +}; struct trx_t{ ulint magic_n; diff --git a/storage/xtradb/handler/ha_innodb.cc b/storage/xtradb/handler/ha_innodb.cc index 7d550c45356..2b7dc01238c 100644 --- a/storage/xtradb/handler/ha_innodb.cc +++ b/storage/xtradb/handler/ha_innodb.cc @@ -5513,8 +5513,10 @@ static void innobase_kill_query(handlerton*, THD* thd, enum thd_kill_levels) ut_ad(trx->mysql_thd == thd); switch (trx->abort_type) { +#ifdef WITH_WSREP case TRX_WSREP_ABORT: break; +#endif case TRX_SERVER_ABORT: if (!wsrep_thd_is_BF(trx->mysql_thd, FALSE)) { lock_mutex_enter(); @@ -5526,8 +5528,10 @@ static void innobase_kill_query(handlerton*, THD* thd, enum thd_kill_levels) /* Cancel a pending lock request if there are any */ lock_trx_handle_wait(trx); switch (trx->abort_type) { +#ifdef WITH_WSREP case TRX_WSREP_ABORT: break; +#endif case TRX_SERVER_ABORT: if (!wsrep_thd_is_BF(trx->mysql_thd, FALSE)) { lock_mutex_exit(); diff --git a/storage/xtradb/include/trx0trx.h b/storage/xtradb/include/trx0trx.h index 5db19b3959a..77afde4c35c 100644 --- a/storage/xtradb/include/trx0trx.h +++ b/storage/xtradb/include/trx0trx.h @@ -1,7 +1,7 @@ /***************************************************************************** Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved. -Copyright (c) 2015, 2017, MariaDB Corporation +Copyright (c) 2015, 2018, MariaDB Corporation. 
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -744,11 +744,13 @@ lock_rec_convert_impl_to_expl()) will access transactions associated to other connections. The locks of transactions are protected by lock_sys->mutex and sometimes by trx->mutex. */ -typedef enum { +enum trx_abort_t { TRX_SERVER_ABORT = 0, - TRX_WSREP_ABORT = 1, - TRX_REPLICATION_ABORT = 2 -} trx_abort_t; +#ifdef WITH_WSREP + TRX_WSREP_ABORT, +#endif + TRX_REPLICATION_ABORT +}; struct trx_t{ ulint magic_n; From b0c43d0c381dba1ab859de8f643d8d91dd806009 Mon Sep 17 00:00:00 2001 From: Thirunarayanan Balathandayuthapani Date: Fri, 16 Mar 2018 12:30:36 +0530 Subject: [PATCH 057/139] MDEV-15384 buf_flush_LRU_list_batch() always reports n->flushed=0, n->evicted=0 MDEV-14545 Backup fails due to MLOG_INDEX_LOAD record - Changed the unsupported_redo test case to avoid checkpoint - Inserting more rows in purge_secondary test case to display evict monitor. 
--- mysql-test/suite/innodb/r/purge_secondary.result | 1 + mysql-test/suite/innodb/t/purge_secondary.test | 1 + .../suite/mariabackup/unsupported_redo.result | 9 ++++++--- .../suite/mariabackup/unsupported_redo.test | 15 ++++++++++++--- 4 files changed, 20 insertions(+), 6 deletions(-) diff --git a/mysql-test/suite/innodb/r/purge_secondary.result b/mysql-test/suite/innodb/r/purge_secondary.result index 8550fe0cdb1..67f06194119 100644 --- a/mysql-test/suite/innodb/r/purge_secondary.result +++ b/mysql-test/suite/innodb/r/purge_secondary.result @@ -147,6 +147,7 @@ INSERT INTO t1 (a) SELECT NULL FROM t1; INSERT INTO t1 (a) SELECT NULL FROM t1; INSERT INTO t1 (a) SELECT NULL FROM t1; INSERT INTO t1 (a) SELECT NULL FROM t1; +INSERT INTO t1 (a) SELECT NULL FROM t1; SELECT NAME, SUBSYSTEM FROM INFORMATION_SCHEMA.INNODB_METRICS WHERE NAME="buffer_LRU_batch_evict_total_pages" AND COUNT > 0; NAME SUBSYSTEM diff --git a/mysql-test/suite/innodb/t/purge_secondary.test b/mysql-test/suite/innodb/t/purge_secondary.test index 9f77fba117c..ce2f48e0d40 100644 --- a/mysql-test/suite/innodb/t/purge_secondary.test +++ b/mysql-test/suite/innodb/t/purge_secondary.test @@ -130,6 +130,7 @@ INSERT INTO t1 (a) SELECT NULL FROM t1; INSERT INTO t1 (a) SELECT NULL FROM t1; INSERT INTO t1 (a) SELECT NULL FROM t1; INSERT INTO t1 (a) SELECT NULL FROM t1; +INSERT INTO t1 (a) SELECT NULL FROM t1; SELECT NAME, SUBSYSTEM FROM INFORMATION_SCHEMA.INNODB_METRICS WHERE NAME="buffer_LRU_batch_evict_total_pages" AND COUNT > 0; diff --git a/mysql-test/suite/mariabackup/unsupported_redo.result b/mysql-test/suite/mariabackup/unsupported_redo.result index 325e8233fd8..29f043fc643 100644 --- a/mysql-test/suite/mariabackup/unsupported_redo.result +++ b/mysql-test/suite/mariabackup/unsupported_redo.result @@ -6,20 +6,23 @@ call mtr.add_suppression("InnoDB: Ignoring tablespace for `test`\\.`t21` because call mtr.add_suppression("InnoDB: Cannot open datafile for read-only: "); call mtr.add_suppression("Table .* in the 
InnoDB data dictionary has tablespace id .*, but tablespace with that id or name does not exist"); CREATE TABLE t1(i INT PRIMARY KEY auto_increment, a int) ENGINE INNODB; -alter table t1 FORCE, algorithm=inplace; +SET GLOBAL INNODB_FAST_SHUTDOWN = 0; +ALTER TABLE t1 FORCE, ALGORITHM=INPLACE; # Fails during full backup DROP TABLE t1; CREATE TABLE t1(i INT PRIMARY KEY auto_increment, a int) ENGINE INNODB; INSERT INTO t1(a) select 1 union select 2 union select 3; # Create full backup , modify table, then fails during creation of # incremental/differential backup -alter table t1 force, algorithm=inplace; -drop table t1; +SET GLOBAL INNODB_FAST_SHUTDOWN = 0; +ALTER TABLE t1 FORCE, ALGORITHM=INPLACE; +DROP TABLE t1; CREATE TABLE t1(i INT) ENGINE INNODB; INSERT INTO t1 VALUES(1); CREATE TABLE t21(i INT) ENGINE INNODB; INSERT INTO t21 VALUES(1); CREATE TABLE t2(i int) ENGINE INNODB; +SET GLOBAL INNODB_FAST_SHUTDOWN = 0; ALTER TABLE t21 FORCE, ALGORITHM=INPLACE; # Create partial backup (excluding table t21), Ignore the # unsupported redo log for the table t21. diff --git a/mysql-test/suite/mariabackup/unsupported_redo.test b/mysql-test/suite/mariabackup/unsupported_redo.test index a9208a3f8ba..f54f97b6c8b 100644 --- a/mysql-test/suite/mariabackup/unsupported_redo.test +++ b/mysql-test/suite/mariabackup/unsupported_redo.test @@ -11,7 +11,11 @@ let $basedir=$MYSQLTEST_VARDIR/tmp/backup; let $incremental_dir=$MYSQLTEST_VARDIR/tmp/backup_inc1; CREATE TABLE t1(i INT PRIMARY KEY auto_increment, a int) ENGINE INNODB; -alter table t1 FORCE, algorithm=inplace; + +SET GLOBAL INNODB_FAST_SHUTDOWN = 0; +--source include/restart_mysqld.inc + +ALTER TABLE t1 FORCE, ALGORITHM=INPLACE; # Below mariabackup operation may complete successfully if checkpoint happens # after the alter table command. 
@@ -35,14 +39,16 @@ INSERT INTO t1(a) select 1 union select 2 union select 3; exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --backup --target-dir=$basedir; --enable_result_log -alter table t1 force, algorithm=inplace; +SET GLOBAL INNODB_FAST_SHUTDOWN = 0; +--source include/restart_mysqld.inc +ALTER TABLE t1 FORCE, ALGORITHM=INPLACE; --disable_result_log --error 1 exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --backup --target-dir=$incremental_dir --incremental-basedir=$basedir; --enable_result_log -drop table t1; +DROP TABLE t1; rmdir $basedir; rmdir $incremental_dir; @@ -55,6 +61,9 @@ let $MYSQLD_DATADIR= `select @@datadir`; let $targetdir=$MYSQLTEST_VARDIR/tmp/bk; CREATE TABLE t2(i int) ENGINE INNODB; + +SET GLOBAL INNODB_FAST_SHUTDOWN = 0; +--source include/restart_mysqld.inc ALTER TABLE t21 FORCE, ALGORITHM=INPLACE; --echo # Create partial backup (excluding table t21), Ignore the From 0a534348c75cf435d2017959855de2efa798fd0b Mon Sep 17 00:00:00 2001 From: Sergey Vojtovich Date: Thu, 15 Mar 2018 15:34:45 +0400 Subject: [PATCH 058/139] MDEV-14265 - RPMLint warning: shared-lib-calls-exit Eliminated last exit() call from libmysqld. 
--- client/mysql.cc | 6 +----- client/mysql_upgrade.c | 6 ++---- client/mysqladmin.cc | 7 ++----- client/mysqlbinlog.cc | 4 +--- client/mysqlcheck.c | 5 +---- client/mysqldump.c | 3 +-- client/mysqlimport.c | 3 +-- client/mysqlshow.c | 3 +-- client/mysqlslap.c | 6 +----- client/mysqltest.cc | 4 +--- extra/mariabackup/xbcloud.cc | 4 +--- extra/mariabackup/xtrabackup.cc | 12 ++++-------- extra/my_print_defaults.c | 6 ++++-- include/my_default.h | 7 +++++++ mysys/my_default.c | 4 ++-- sql/mysqld.cc | 3 +-- sql/tztime.cc | 4 +--- storage/archive/archive_reader.c | 3 +-- storage/maria/maria_chk.c | 2 +- storage/maria/maria_dump_log.c | 2 +- storage/maria/maria_pack.c | 2 +- storage/maria/maria_read_log.c | 2 +- .../unittest/ma_test_loghandler_multigroup-t.c | 2 +- storage/myisam/myisamchk.c | 4 +--- storage/myisam/myisampack.c | 4 +--- tests/mysql_client_fw.c | 3 +-- tests/mysql_client_test.c | 13 ++----------- tests/thread_test.c | 4 ++-- 28 files changed, 45 insertions(+), 83 deletions(-) diff --git a/client/mysql.cc b/client/mysql.cc index 7973c3c7fc3..0b8be00c542 100644 --- a/client/mysql.cc +++ b/client/mysql.cc @@ -1175,11 +1175,7 @@ int main(int argc,char *argv[]) close(stdout_fileno_copy); /* Clean up dup(). 
*/ } - if (load_defaults("my",load_default_groups,&argc,&argv)) - { - my_end(0); - exit(1); - } + load_defaults_or_exit("my", load_default_groups, &argc, &argv); defaults_argv=argv; if ((status.exit_status= get_options(argc, (char **) argv))) mysql_end(-1); diff --git a/client/mysql_upgrade.c b/client/mysql_upgrade.c index cbdd398c1e1..5eb495774ce 100644 --- a/client/mysql_upgrade.c +++ b/client/mysql_upgrade.c @@ -1133,6 +1133,8 @@ int main(int argc, char **argv) char self_name[FN_REFLEN + 1]; MY_INIT(argv[0]); + load_defaults_or_exit("my", load_default_groups, &argc, &argv); + defaults_argv= argv; /* Must be freed by 'free_defaults' */ #if __WIN__ if (GetModuleFileName(NULL, self_name, FN_REFLEN) == 0) @@ -1145,10 +1147,6 @@ int main(int argc, char **argv) init_dynamic_string(&conn_args, "", 512, 256)) die("Out of memory"); - if (load_defaults("my", load_default_groups, &argc, &argv)) - die(NULL); - defaults_argv= argv; /* Must be freed by 'free_defaults' */ - if (handle_options(&argc, &argv, my_long_options, get_one_option)) die(NULL); if (debug_info_flag) diff --git a/client/mysqladmin.cc b/client/mysqladmin.cc index 897c2eb41c3..d4d40b0a0f2 100644 --- a/client/mysqladmin.cc +++ b/client/mysqladmin.cc @@ -319,8 +319,7 @@ int main(int argc,char *argv[]) MY_INIT(argv[0]); mysql_init(&mysql); sf_leaking_memory=1; /* don't report memory leaks on early exits */ - if ((error= load_defaults("my",load_default_groups,&argc,&argv))) - goto err1; + load_defaults_or_exit("my", load_default_groups, &argc, &argv); save_argv = argv; /* Save for free_defaults */ if ((error=handle_options(&argc, &argv, my_long_options, get_one_option))) @@ -500,10 +499,8 @@ err2: my_free(shared_memory_base_name); #endif free_defaults(save_argv); -err1: my_end(my_end_arg); - exit(error); - return 0; + return error; } diff --git a/client/mysqlbinlog.cc b/client/mysqlbinlog.cc index b871a70ef01..9753125dd67 100644 --- a/client/mysqlbinlog.cc +++ b/client/mysqlbinlog.cc @@ -2666,9 +2666,7 @@ int 
main(int argc, char** argv) tzset(); // set tzname init_alloc_root(&s_mem_root, 16384, 0, MYF(0)); - if (load_defaults("my", load_groups, &argc, &argv)) - exit(1); - + load_defaults_or_exit("my", load_groups, &argc, &argv); defaults_argv= argv; if (!(binlog_filter= new Rpl_filter)) diff --git a/client/mysqlcheck.c b/client/mysqlcheck.c index 47cb38751eb..a4410eba8aa 100644 --- a/client/mysqlcheck.c +++ b/client/mysqlcheck.c @@ -1165,9 +1165,7 @@ int main(int argc, char **argv) /* ** Check out the args */ - if (load_defaults("my", load_default_groups, &argc, &argv)) - goto end2; - + load_defaults_or_exit("my", load_default_groups, &argc, &argv); defaults_argv= argv; if (get_options(&argc, &argv)) goto end1; @@ -1243,7 +1241,6 @@ int main(int argc, char **argv) my_free(shared_memory_base_name); mysql_library_end(); free_defaults(defaults_argv); - end2: my_end(my_end_arg); return ret; } /* main */ diff --git a/client/mysqldump.c b/client/mysqldump.c index 5c0ec2a5510..d2348284e31 100644 --- a/client/mysqldump.c +++ b/client/mysqldump.c @@ -976,8 +976,7 @@ static int get_options(int *argc, char ***argv) opt_net_buffer_length= *mysql_params->p_net_buffer_length; md_result_file= stdout; - if (load_defaults("my",load_default_groups,argc,argv)) - return 1; + load_defaults_or_exit("my", load_default_groups, argc, argv); defaults_argv= *argv; if (my_hash_init(&ignore_table, charset_info, 16, 0, 0, diff --git a/client/mysqlimport.c b/client/mysqlimport.c index 9c84d4a62a6..a9c24e20b0a 100644 --- a/client/mysqlimport.c +++ b/client/mysqlimport.c @@ -641,8 +641,7 @@ int main(int argc, char **argv) MY_INIT(argv[0]); sf_leaking_memory=1; /* don't report memory leaks on early exits */ - if (load_defaults("my",load_default_groups,&argc,&argv)) - return 1; + load_defaults_or_exit("my", load_default_groups, &argc, &argv); /* argv is changed in the program */ argv_to_free= argv; if (get_options(&argc, &argv)) diff --git a/client/mysqlshow.c b/client/mysqlshow.c index 
f851c15106e..95ee8d697f3 100644 --- a/client/mysqlshow.c +++ b/client/mysqlshow.c @@ -71,8 +71,7 @@ int main(int argc, char **argv) static char **defaults_argv; MY_INIT(argv[0]); sf_leaking_memory=1; /* don't report memory leaks on early exits */ - if (load_defaults("my",load_default_groups,&argc,&argv)) - exit(1); + load_defaults_or_exit("my", load_default_groups, &argc, &argv); defaults_argv=argv; get_options(&argc,&argv); diff --git a/client/mysqlslap.c b/client/mysqlslap.c index 6a0b214305c..0b88a969bc5 100644 --- a/client/mysqlslap.c +++ b/client/mysqlslap.c @@ -325,11 +325,7 @@ int main(int argc, char **argv) MY_INIT(argv[0]); sf_leaking_memory=1; /* don't report memory leaks on early exits */ - if (load_defaults("my",load_default_groups,&argc,&argv)) - { - my_end(0); - exit(1); - } + load_defaults_or_exit("my", load_default_groups, &argc, &argv); defaults_argv=argv; if (get_options(&argc,&argv)) { diff --git a/client/mysqltest.cc b/client/mysqltest.cc index 2897555da3f..895c837f68b 100644 --- a/client/mysqltest.cc +++ b/client/mysqltest.cc @@ -7299,9 +7299,7 @@ get_one_option(int optid, const struct my_option *opt, char *argument) int parse_args(int argc, char **argv) { - if (load_defaults("my",load_default_groups,&argc,&argv)) - exit(1); - + load_defaults_or_exit("my", load_default_groups, &argc, &argv); default_argv= argv; if ((handle_options(&argc, &argv, my_long_options, get_one_option))) diff --git a/extra/mariabackup/xbcloud.cc b/extra/mariabackup/xbcloud.cc index 56661b03dd0..878b4c81023 100644 --- a/extra/mariabackup/xbcloud.cc +++ b/extra/mariabackup/xbcloud.cc @@ -443,9 +443,7 @@ int parse_args(int argc, char **argv) exit(EXIT_FAILURE); } - if (load_defaults("my", load_default_groups, &argc, &argv)) { - exit(EXIT_FAILURE); - } + load_defaults_or_exit("my", load_default_groups, &argc, &argv); if (handle_options(&argc, &argv, my_long_options, get_one_option)) { exit(EXIT_FAILURE); diff --git a/extra/mariabackup/xtrabackup.cc 
b/extra/mariabackup/xtrabackup.cc index 2228e542dc7..44214b0c207 100644 --- a/extra/mariabackup/xtrabackup.cc +++ b/extra/mariabackup/xtrabackup.cc @@ -6350,10 +6350,8 @@ handle_options(int argc, char **argv, char ***argv_client, char ***argv_server) *argv_client = argv; *argv_server = argv; - if (load_defaults(conf_file, xb_server_default_groups, - &argc_server, argv_server)) { - exit(EXIT_FAILURE); - } + load_defaults_or_exit(conf_file, xb_server_default_groups, + &argc_server, argv_server); int n; for (n = 0; (*argv_server)[n]; n++) {}; @@ -6403,10 +6401,8 @@ handle_options(int argc, char **argv, char ***argv_client, char ***argv_server) xb_server_options, xb_get_one_option))) exit(ho_error); - if (load_defaults(conf_file, xb_client_default_groups, - &argc_client, argv_client)) { - exit(EXIT_FAILURE); - } + load_defaults_or_exit(conf_file, xb_client_default_groups, + &argc_client, argv_client); for (n = 0; (*argv_client)[n]; n++) {}; argc_client = n; diff --git a/extra/my_print_defaults.c b/extra/my_print_defaults.c index 78940e02ca4..07c95a79ddc 100644 --- a/extra/my_print_defaults.c +++ b/extra/my_print_defaults.c @@ -206,6 +206,9 @@ int main(int argc, char **argv) if ((error= load_defaults(config_file, (const char **) load_default_groups, &count, &arguments))) { + my_end(0); + if (error == 4) + return 0; if (verbose && opt_defaults_file_used) { if (error == 1) @@ -216,8 +219,7 @@ int main(int argc, char **argv) fprintf(stderr, "WARNING: Defaults file '%s' is not a regular file!\n", config_file); } - error= 2; - exit(error); + return 2; } for (argument= arguments+1 ; *argument ; argument++) diff --git a/include/my_default.h b/include/my_default.h index 0ed94b09492..bd3a21f03a8 100644 --- a/include/my_default.h +++ b/include/my_default.h @@ -45,6 +45,13 @@ extern void free_defaults(char **argv); extern void my_print_default_files(const char *conf_file); extern void print_defaults(const char *conf_file, const char **groups); + +/** Simplify load_defaults() 
common use */ +#define load_defaults_or_exit(A, B, C, D) switch (load_defaults(A, B, C, D)) { \ + case 0: break; \ + case 4: my_end(0); exit(0); \ + default: my_end(0); exit(1); } + C_MODE_END #endif /* MY_DEFAULT_INCLUDED */ diff --git a/mysys/my_default.c b/mysys/my_default.c index 2358aed8f3b..9209c6869fc 100644 --- a/mysys/my_default.c +++ b/mysys/my_default.c @@ -631,7 +631,7 @@ int my_load_defaults(const char *conf_file, const char **groups, if (!my_getopt_is_args_separator((*argv)[i])) /* skip arguments separator */ printf("%s ", (*argv)[i]); puts(""); - exit(0); + DBUG_RETURN(4); } if (default_directories) @@ -641,7 +641,7 @@ int my_load_defaults(const char *conf_file, const char **groups, err: fprintf(stderr,"Fatal error in defaults handling. Program aborted\n"); - return 2; + DBUG_RETURN(2); } diff --git a/sql/mysqld.cc b/sql/mysqld.cc index 3cb8e41f94e..4f57d6d1910 100644 --- a/sql/mysqld.cc +++ b/sql/mysqld.cc @@ -5511,8 +5511,7 @@ int mysqld_main(int argc, char **argv) orig_argc= argc; orig_argv= argv; my_getopt_use_args_separator= TRUE; - if (load_defaults(MYSQL_CONFIG_NAME, load_default_groups, &argc, &argv)) - return 1; + load_defaults_or_exit(MYSQL_CONFIG_NAME, load_default_groups, &argc, &argv); my_getopt_use_args_separator= FALSE; defaults_argc= argc; defaults_argv= argv; diff --git a/sql/tztime.cc b/sql/tztime.cc index bed5f416f13..7df2ad7d0e8 100644 --- a/sql/tztime.cc +++ b/sql/tztime.cc @@ -2689,9 +2689,7 @@ main(int argc, char **argv) char **default_argv; MY_INIT(argv[0]); - if (load_defaults("my",load_default_groups,&argc,&argv)) - exit(1); - + load_defaults_or_exit("my", load_default_groups, &argc, &argv); default_argv= argv; if ((handle_options(&argc, &argv, my_long_options, get_one_option))) diff --git a/storage/archive/archive_reader.c b/storage/archive/archive_reader.c index 1b15fa16ce4..e87bc70ade4 100644 --- a/storage/archive/archive_reader.c +++ b/storage/archive/archive_reader.c @@ -408,8 +408,7 @@ static void print_version(void) 
static void get_options(int *argc, char ***argv) { - if (load_defaults("my", load_default_groups, argc, argv)) - exit(1); + load_defaults_or_exit("my", load_default_groups, argc, argv); default_argv= *argv; handle_options(argc, argv, my_long_options, get_one_option); diff --git a/storage/maria/maria_chk.c b/storage/maria/maria_chk.c index aa01f4cd895..b47f1b8c824 100644 --- a/storage/maria/maria_chk.c +++ b/storage/maria/maria_chk.c @@ -895,7 +895,7 @@ static void get_options(register int *argc,register char ***argv) { int ho_error; - load_defaults("my", load_default_groups, argc, argv); + load_defaults_or_exit("my", load_default_groups, argc, argv); default_argv= *argv; check_param.testflag= T_UPDATE_STATE; if (isatty(fileno(stdout))) diff --git a/storage/maria/maria_dump_log.c b/storage/maria/maria_dump_log.c index 42c694bf1bf..3570dede80e 100644 --- a/storage/maria/maria_dump_log.c +++ b/storage/maria/maria_dump_log.c @@ -133,7 +133,7 @@ int main(int argc, char **argv) uchar buffer[TRANSLOG_PAGE_SIZE]; MY_INIT(argv[0]); - load_defaults("my", load_default_groups, &argc, &argv); + load_defaults_or_exit("my", load_default_groups, &argc, &argv); default_argv= argv; get_options(&argc, &argv); diff --git a/storage/maria/maria_pack.c b/storage/maria/maria_pack.c index 814c50e1db8..28b4ff4cfc7 100644 --- a/storage/maria/maria_pack.c +++ b/storage/maria/maria_pack.c @@ -208,7 +208,7 @@ int main(int argc, char **argv) char **default_argv; MY_INIT(argv[0]); - load_defaults("my",load_default_groups,&argc,&argv); + load_defaults_or_exit("my", load_default_groups, &argc, &argv); default_argv= argv; get_options(&argc,&argv); maria_init(); diff --git a/storage/maria/maria_read_log.c b/storage/maria/maria_read_log.c index a0724b2199b..d0cf7521e9b 100644 --- a/storage/maria/maria_read_log.c +++ b/storage/maria/maria_read_log.c @@ -47,7 +47,7 @@ int main(int argc, char **argv) maria_data_root= (char *)"."; sf_leaking_memory=1; /* don't report memory leaks on early exits */ - 
load_defaults("my", load_default_groups, &argc, &argv); + load_defaults_or_exit("my", load_default_groups, &argc, &argv); default_argv= argv; get_options(&argc, &argv); diff --git a/storage/maria/unittest/ma_test_loghandler_multigroup-t.c b/storage/maria/unittest/ma_test_loghandler_multigroup-t.c index 1ea31d228e1..dc7e0c79f87 100644 --- a/storage/maria/unittest/ma_test_loghandler_multigroup-t.c +++ b/storage/maria/unittest/ma_test_loghandler_multigroup-t.c @@ -256,7 +256,7 @@ int main(int argc __attribute__((unused)), char *argv[]) #endif long_buffer= malloc(LONG_BUFFER_SIZE + LSN_STORE_SIZE * 2 + 2); - load_defaults("my", load_default_groups, &argc, &argv); + load_defaults_or_exit("my", load_default_groups, &argc, &argv); default_argv= argv; get_options(&argc, &argv); diff --git a/storage/myisam/myisamchk.c b/storage/myisam/myisamchk.c index edbe235e190..dfff5720847 100644 --- a/storage/myisam/myisamchk.c +++ b/storage/myisam/myisamchk.c @@ -753,9 +753,7 @@ static void get_options(register int *argc,register char ***argv) { int ho_error; - if (load_defaults("my", load_default_groups, argc, argv)) - exit(1); - + load_defaults_or_exit("my", load_default_groups, argc, argv); default_argv= *argv; if (isatty(fileno(stdout))) check_param.testflag|=T_WRITE_LOOP; diff --git a/storage/myisam/myisampack.c b/storage/myisam/myisampack.c index 966edc877cd..bb9f59f86f7 100644 --- a/storage/myisam/myisampack.c +++ b/storage/myisam/myisampack.c @@ -209,9 +209,7 @@ int main(int argc, char **argv) char **default_argv; MY_INIT(argv[0]); - if (load_defaults("my",load_default_groups,&argc,&argv)) - exit(1); - + load_defaults_or_exit("my", load_default_groups, &argc, &argv); default_argv= argv; get_options(&argc,&argv); diff --git a/tests/mysql_client_fw.c b/tests/mysql_client_fw.c index b7211989f1f..71fb3894b3c 100644 --- a/tests/mysql_client_fw.c +++ b/tests/mysql_client_fw.c @@ -1409,8 +1409,7 @@ int main(int argc, char **argv) for (i= 0; i < argc; i++) original_argv[i]= 
strdup(argv[i]); - if (load_defaults("my", client_test_load_default_groups, &argc, &argv)) - exit(1); + load_defaults_or_exit("my", client_test_load_default_groups, &argc, &argv); get_options(&argc, &argv); /* Set main opt_count. */ diff --git a/tests/mysql_client_test.c b/tests/mysql_client_test.c index c367e7ca35d..b63bcbaa037 100644 --- a/tests/mysql_client_test.c +++ b/tests/mysql_client_test.c @@ -7095,11 +7095,7 @@ static void test_embedded_start_stop() MY_INIT(argv[0]); /* Load the client defaults from the .cnf file[s]. */ - if (load_defaults("my", client_test_load_default_groups, &argc, &argv)) - { - myerror("load_defaults failed"); - exit(1); - } + load_defaults_or_exit("my", client_test_load_default_groups, &argc, &argv); /* Parse the options (including the ones given from defaults files). */ get_options(&argc, &argv); @@ -7147,12 +7143,7 @@ static void test_embedded_start_stop() MY_INIT(argv[0]); - if (load_defaults("my", client_test_load_default_groups, &argc, &argv)) - { - myerror("load_defaults failed \n "); - exit(1); - } - + load_defaults_or_exit("my", client_test_load_default_groups, &argc, &argv); get_options(&argc, &argv); /* Must start the main embedded server again after the test. 
*/ diff --git a/tests/thread_test.c b/tests/thread_test.c index bf0fb8ea2c0..38e453e9cb8 100644 --- a/tests/thread_test.c +++ b/tests/thread_test.c @@ -168,8 +168,8 @@ static void get_options(int argc, char **argv) { int ho_error; - if ((ho_error= load_defaults("my",load_default_groups,&argc,&argv)) || - (ho_error= handle_options(&argc, &argv, my_long_options, get_one_option))) + load_defaults_or_exit("my", load_default_groups, &argc, &argv); + if ((ho_error= handle_options(&argc, &argv, my_long_options, get_one_option))) exit(ho_error); free_defaults(argv); From a0c722d853071e605ea025168da1b01bfe21092c Mon Sep 17 00:00:00 2001 From: Varun Gupta Date: Fri, 16 Mar 2018 18:35:41 +0530 Subject: [PATCH 059/139] MDEV-15321:different results when using value of optimizer_use_condition_selectivity=4 and =1 To disallow equality propagation for DATETIME with non-zero YYYYMMDD part we were setting null_value to true. This caused issues when we were calculating selectivity for a condition as this returned IMPOSSIBLE WHERE. The issue is resolved by not setting null_value to true for DATETIME with non-zero YYYYMMDD. 
--- mysql-test/r/type_time.result | 31 +++++++++++++++++++++++++++++++ mysql-test/t/type_time.test | 23 +++++++++++++++++++++++ sql/item.cc | 14 ++++++++------ 3 files changed, 62 insertions(+), 6 deletions(-) diff --git a/mysql-test/r/type_time.result b/mysql-test/r/type_time.result index b5689d31aef..fc287e86fbd 100644 --- a/mysql-test/r/type_time.result +++ b/mysql-test/r/type_time.result @@ -1215,5 +1215,36 @@ MAX(a) MAX(COALESCE(a)) 10:20:30 10:20:30 DROP TABLE t1; # +# MDEV-15321: different results when using value of optimizer_use_condition_selectivity=4 and =1 +# +SET @save_old_mode=@@old_mode; +SET @@old_mode=zero_date_time_cast; +CREATE TABLE t1 (a TIME); +INSERT INTO t1 VALUES ('0000-00-00 10:20:30'),('0000-00-00 10:20:31'); +INSERT INTO t1 VALUES ('0000-00-01 10:20:30'),('0000-00-01 10:20:31'); +INSERT INTO t1 VALUES ('31 10:20:30'),('32 10:20:30'),('33 10:20:30'),('34 10:20:30'); +SET @save_optimizer_use_condition_selectivity= @@optimizer_use_condition_selectivity; +SET @@optimizer_use_condition_selectivity=1; +SELECT * FROM t1 WHERE a='0000-00-01 10:20:30' AND LENGTH(a)=8; +a +34:20:30 +EXPLAIN EXTENDED SELECT * FROM t1 WHERE a='0000-00-01 10:20:30' AND LENGTH(a)=8; +id select_type table type possible_keys key key_len ref rows filtered Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL 8 100.00 Using where +Warnings: +Note 1003 select `test`.`t1`.`a` AS `a` from `test`.`t1` where ((`test`.`t1`.`a` = '0000-00-01 10:20:30') and (length(`test`.`t1`.`a`) = 8)) +SET @@optimizer_use_condition_selectivity=4; +SELECT * FROM t1 WHERE a='0000-00-01 10:20:30' AND LENGTH(a)=8; +a +34:20:30 +EXPLAIN EXTENDED SELECT * FROM t1 WHERE a='0000-00-01 10:20:30' AND LENGTH(a)=8; +id select_type table type possible_keys key key_len ref rows filtered Extra +1 SIMPLE t1 ALL NULL NULL NULL NULL 8 100.00 Using where +Warnings: +Note 1003 select `test`.`t1`.`a` AS `a` from `test`.`t1` where ((`test`.`t1`.`a` = '0000-00-01 10:20:30') and (length(`test`.`t1`.`a`) = 8)) +drop table t1; 
+SET @@optimizer_use_condition_selectivity= @save_optimizer_use_condition_selectivity; +set @@old_mode= @save_old_mode; +# # End of 10.1 tests # diff --git a/mysql-test/t/type_time.test b/mysql-test/t/type_time.test index 27679b9ec5a..10b75f27546 100644 --- a/mysql-test/t/type_time.test +++ b/mysql-test/t/type_time.test @@ -723,6 +723,29 @@ INSERT INTO t1 VALUES ('10:10:10'),('10:20:30'); SELECT MAX(a), MAX(COALESCE(a)) FROM t1; DROP TABLE t1; +--echo # +--echo # MDEV-15321: different results when using value of optimizer_use_condition_selectivity=4 and =1 +--echo # + +SET @save_old_mode=@@old_mode; +SET @@old_mode=zero_date_time_cast; +CREATE TABLE t1 (a TIME); +INSERT INTO t1 VALUES ('0000-00-00 10:20:30'),('0000-00-00 10:20:31'); +INSERT INTO t1 VALUES ('0000-00-01 10:20:30'),('0000-00-01 10:20:31'); +INSERT INTO t1 VALUES ('31 10:20:30'),('32 10:20:30'),('33 10:20:30'),('34 10:20:30'); + +SET @save_optimizer_use_condition_selectivity= @@optimizer_use_condition_selectivity; +SET @@optimizer_use_condition_selectivity=1; +SELECT * FROM t1 WHERE a='0000-00-01 10:20:30' AND LENGTH(a)=8; +EXPLAIN EXTENDED SELECT * FROM t1 WHERE a='0000-00-01 10:20:30' AND LENGTH(a)=8; + +SET @@optimizer_use_condition_selectivity=4; +SELECT * FROM t1 WHERE a='0000-00-01 10:20:30' AND LENGTH(a)=8; +EXPLAIN EXTENDED SELECT * FROM t1 WHERE a='0000-00-01 10:20:30' AND LENGTH(a)=8; +drop table t1; +SET @@optimizer_use_condition_selectivity= @save_optimizer_use_condition_selectivity; +set @@old_mode= @save_old_mode; + --echo # --echo # End of 10.1 tests --echo # diff --git a/sql/item.cc b/sql/item.cc index 2cc45dc6010..1d0ed6a6ea5 100644 --- a/sql/item.cc +++ b/sql/item.cc @@ -166,19 +166,21 @@ bool Item::get_time_with_conversion(THD *thd, MYSQL_TIME *ltime, - truncate the YYYYMMDD part - add (MM*33+DD)*24 to hours - add (MM*31+DD)*24 to hours - Let's return NULL here, to disallow equal field propagation. + Let's return TRUE here, to disallow equal field propagation. 
Note, If we start to use this method in more pieces of the code other - than eqial field propagation, we should probably return - NULL only if some flag in fuzzydate is set. + than equal field propagation, we should probably return + TRUE only if some flag in fuzzydate is set. */ - return (null_value= true); + return true; } if (datetime_to_time_with_warn(thd, ltime, <ime2, TIME_SECOND_PART_DIGITS)) { /* - Time difference between CURRENT_DATE and ltime - did not fit into the supported TIME range + If the time difference between CURRENT_DATE and ltime + did not fit into the supported TIME range, then we set the + difference to the maximum possible value in the supported TIME range */ + DBUG_ASSERT(0); return (null_value= true); } *ltime= ltime2; From 84129fb1b52a0631f05ba193f464665c5b369e77 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Fri, 16 Mar 2018 14:35:42 +0200 Subject: [PATCH 060/139] After-merge fix for commit 98eb9518db1da854048b09d94244a982a1d32f9a The merge only covered 10.1 up to commit 4d248974e00eb915a2fc433cc6b2fb5146281594. Actually merge the changes up to commit 0a534348c75cf435d2017959855de2efa798fd0b. Also, remove the unused InnoDB field trx_t::abort_type. 
--- client/mysql.cc | 6 +- client/mysql_upgrade.c | 6 +- client/mysqladmin.cc | 7 +- client/mysqlbinlog.cc | 4 +- client/mysqlcheck.c | 5 +- client/mysqldump.c | 3 +- client/mysqlimport.c | 3 +- client/mysqlshow.c | 3 +- client/mysqlslap.c | 6 +- client/mysqltest.cc | 4 +- extra/mariabackup/xbcloud.cc | 4 +- extra/mariabackup/xtrabackup.cc | 12 +- extra/my_print_defaults.c | 6 +- include/my_default.h | 7 + mysys/my_default.c | 4 +- sql/mysqld.cc | 3 +- sql/tztime.cc | 4 +- storage/archive/archive_reader.c | 3 +- storage/innobase/handler/ha_innodb.cc | 69 ++---- storage/innobase/include/lock0lock.h | 4 +- storage/innobase/include/trx0trx.h | 7 - storage/innobase/lock/lock0lock.cc | 196 ++++++------------ storage/innobase/row/row0sel.cc | 3 +- storage/maria/maria_chk.c | 2 +- storage/maria/maria_dump_log.c | 2 +- storage/maria/maria_pack.c | 2 +- storage/maria/maria_read_log.c | 2 +- .../ma_test_loghandler_multigroup-t.c | 2 +- storage/myisam/myisamchk.c | 4 +- storage/myisam/myisampack.c | 4 +- storage/xtradb/handler/ha_innodb.cc | 101 ++++----- storage/xtradb/lock/lock0lock.cc | 66 ++---- storage/xtradb/row/row0sel.cc | 4 + tests/mysql_client_fw.c | 3 +- tests/mysql_client_test.c | 13 +- tests/thread_test.c | 4 +- 36 files changed, 178 insertions(+), 400 deletions(-) diff --git a/client/mysql.cc b/client/mysql.cc index 9e50bb86933..a13617cc9cb 100644 --- a/client/mysql.cc +++ b/client/mysql.cc @@ -1176,11 +1176,7 @@ int main(int argc,char *argv[]) close(stdout_fileno_copy); /* Clean up dup(). 
*/ } - if (load_defaults("my",load_default_groups,&argc,&argv)) - { - my_end(0); - exit(1); - } + load_defaults_or_exit("my", load_default_groups, &argc, &argv); defaults_argv=argv; if ((status.exit_status= get_options(argc, (char **) argv))) { diff --git a/client/mysql_upgrade.c b/client/mysql_upgrade.c index 14a47bce352..0f153fde158 100644 --- a/client/mysql_upgrade.c +++ b/client/mysql_upgrade.c @@ -1133,6 +1133,8 @@ int main(int argc, char **argv) char self_name[FN_REFLEN + 1]; MY_INIT(argv[0]); + load_defaults_or_exit("my", load_default_groups, &argc, &argv); + defaults_argv= argv; /* Must be freed by 'free_defaults' */ #if __WIN__ if (GetModuleFileName(NULL, self_name, FN_REFLEN) == 0) @@ -1145,10 +1147,6 @@ int main(int argc, char **argv) init_dynamic_string(&conn_args, "", 512, 256)) die("Out of memory"); - if (load_defaults("my", load_default_groups, &argc, &argv)) - die(NULL); - defaults_argv= argv; /* Must be freed by 'free_defaults' */ - if (handle_options(&argc, &argv, my_long_options, get_one_option)) die(NULL); if (debug_info_flag) diff --git a/client/mysqladmin.cc b/client/mysqladmin.cc index bd80dd01a1e..5e7fb80b2b5 100644 --- a/client/mysqladmin.cc +++ b/client/mysqladmin.cc @@ -320,8 +320,7 @@ int main(int argc,char *argv[]) MY_INIT(argv[0]); mysql_init(&mysql); sf_leaking_memory=1; /* don't report memory leaks on early exits */ - if ((error= load_defaults("my",load_default_groups,&argc,&argv))) - goto err1; + load_defaults_or_exit("my", load_default_groups, &argc, &argv); save_argv = argv; /* Save for free_defaults */ if ((error=handle_options(&argc, &argv, my_long_options, get_one_option))) @@ -501,10 +500,8 @@ err2: my_free(shared_memory_base_name); #endif free_defaults(save_argv); -err1: my_end(my_end_arg); - exit(error); - return 0; + return error; } diff --git a/client/mysqlbinlog.cc b/client/mysqlbinlog.cc index 9d18c45d7a1..9a3a4aaa9eb 100644 --- a/client/mysqlbinlog.cc +++ b/client/mysqlbinlog.cc @@ -2948,9 +2948,7 @@ int main(int argc, 
char** argv) my_init_time(); // for time functions tzset(); // set tzname - if (load_defaults("my", load_groups, &argc, &argv)) - exit(1); - + load_defaults_or_exit("my", load_groups, &argc, &argv); defaults_argv= argv; if (!(binlog_filter= new Rpl_filter)) diff --git a/client/mysqlcheck.c b/client/mysqlcheck.c index d3bb5c48837..45ad2612d44 100644 --- a/client/mysqlcheck.c +++ b/client/mysqlcheck.c @@ -1166,9 +1166,7 @@ int main(int argc, char **argv) /* ** Check out the args */ - if (load_defaults("my", load_default_groups, &argc, &argv)) - goto end2; - + load_defaults_or_exit("my", load_default_groups, &argc, &argv); defaults_argv= argv; if (get_options(&argc, &argv)) goto end1; @@ -1244,7 +1242,6 @@ int main(int argc, char **argv) my_free(shared_memory_base_name); mysql_library_end(); free_defaults(defaults_argv); - end2: my_end(my_end_arg); return ret; } /* main */ diff --git a/client/mysqldump.c b/client/mysqldump.c index 57d087b331b..9a89c805955 100644 --- a/client/mysqldump.c +++ b/client/mysqldump.c @@ -980,8 +980,7 @@ static int get_options(int *argc, char ***argv) opt_net_buffer_length= *mysql_params->p_net_buffer_length; md_result_file= stdout; - if (load_defaults("my",load_default_groups,argc,argv)) - return 1; + load_defaults_or_exit("my", load_default_groups, argc, argv); defaults_argv= *argv; if (my_hash_init(&ignore_table, charset_info, 16, 0, 0, diff --git a/client/mysqlimport.c b/client/mysqlimport.c index 6da14aa1520..02caf2df198 100644 --- a/client/mysqlimport.c +++ b/client/mysqlimport.c @@ -643,8 +643,7 @@ int main(int argc, char **argv) MY_INIT(argv[0]); sf_leaking_memory=1; /* don't report memory leaks on early exits */ - if (load_defaults("my",load_default_groups,&argc,&argv)) - return 1; + load_defaults_or_exit("my", load_default_groups, &argc, &argv); /* argv is changed in the program */ argv_to_free= argv; if (get_options(&argc, &argv)) diff --git a/client/mysqlshow.c b/client/mysqlshow.c index 0a761edff4c..65b915655a6 100644 --- 
a/client/mysqlshow.c +++ b/client/mysqlshow.c @@ -72,8 +72,7 @@ int main(int argc, char **argv) static char **defaults_argv; MY_INIT(argv[0]); sf_leaking_memory=1; /* don't report memory leaks on early exits */ - if (load_defaults("my",load_default_groups,&argc,&argv)) - exit(1); + load_defaults_or_exit("my", load_default_groups, &argc, &argv); defaults_argv=argv; get_options(&argc,&argv); diff --git a/client/mysqlslap.c b/client/mysqlslap.c index f0ad2f40abc..05bc9430541 100644 --- a/client/mysqlslap.c +++ b/client/mysqlslap.c @@ -325,11 +325,7 @@ int main(int argc, char **argv) MY_INIT(argv[0]); sf_leaking_memory=1; /* don't report memory leaks on early exits */ - if (load_defaults("my",load_default_groups,&argc,&argv)) - { - my_end(0); - exit(1); - } + load_defaults_or_exit("my", load_default_groups, &argc, &argv); defaults_argv=argv; if (get_options(&argc,&argv)) { diff --git a/client/mysqltest.cc b/client/mysqltest.cc index 4003e57226d..6f041e92a32 100644 --- a/client/mysqltest.cc +++ b/client/mysqltest.cc @@ -7298,9 +7298,7 @@ get_one_option(int optid, const struct my_option *opt, char *argument) int parse_args(int argc, char **argv) { - if (load_defaults("my",load_default_groups,&argc,&argv)) - exit(1); - + load_defaults_or_exit("my", load_default_groups, &argc, &argv); default_argv= argv; if ((handle_options(&argc, &argv, my_long_options, get_one_option))) diff --git a/extra/mariabackup/xbcloud.cc b/extra/mariabackup/xbcloud.cc index 56661b03dd0..878b4c81023 100644 --- a/extra/mariabackup/xbcloud.cc +++ b/extra/mariabackup/xbcloud.cc @@ -443,9 +443,7 @@ int parse_args(int argc, char **argv) exit(EXIT_FAILURE); } - if (load_defaults("my", load_default_groups, &argc, &argv)) { - exit(EXIT_FAILURE); - } + load_defaults_or_exit("my", load_default_groups, &argc, &argv); if (handle_options(&argc, &argv, my_long_options, get_one_option)) { exit(EXIT_FAILURE); diff --git a/extra/mariabackup/xtrabackup.cc b/extra/mariabackup/xtrabackup.cc index 
78003cd64af..e560ce1e042 100644 --- a/extra/mariabackup/xtrabackup.cc +++ b/extra/mariabackup/xtrabackup.cc @@ -5209,10 +5209,8 @@ handle_options(int argc, char **argv, char ***argv_client, char ***argv_server) *argv_client = argv; *argv_server = argv; - if (load_defaults(conf_file, xb_server_default_groups, - &argc_server, argv_server)) { - exit(EXIT_FAILURE); - } + load_defaults_or_exit(conf_file, xb_server_default_groups, + &argc_server, argv_server); int n; for (n = 0; (*argv_server)[n]; n++) {}; @@ -5262,10 +5260,8 @@ handle_options(int argc, char **argv, char ***argv_client, char ***argv_server) xb_server_options, xb_get_one_option))) exit(ho_error); - if (load_defaults(conf_file, xb_client_default_groups, - &argc_client, argv_client)) { - exit(EXIT_FAILURE); - } + load_defaults_or_exit(conf_file, xb_client_default_groups, + &argc_client, argv_client); for (n = 0; (*argv_client)[n]; n++) {}; argc_client = n; diff --git a/extra/my_print_defaults.c b/extra/my_print_defaults.c index 78940e02ca4..07c95a79ddc 100644 --- a/extra/my_print_defaults.c +++ b/extra/my_print_defaults.c @@ -206,6 +206,9 @@ int main(int argc, char **argv) if ((error= load_defaults(config_file, (const char **) load_default_groups, &count, &arguments))) { + my_end(0); + if (error == 4) + return 0; if (verbose && opt_defaults_file_used) { if (error == 1) @@ -216,8 +219,7 @@ int main(int argc, char **argv) fprintf(stderr, "WARNING: Defaults file '%s' is not a regular file!\n", config_file); } - error= 2; - exit(error); + return 2; } for (argument= arguments+1 ; *argument ; argument++) diff --git a/include/my_default.h b/include/my_default.h index 0ed94b09492..bd3a21f03a8 100644 --- a/include/my_default.h +++ b/include/my_default.h @@ -45,6 +45,13 @@ extern void free_defaults(char **argv); extern void my_print_default_files(const char *conf_file); extern void print_defaults(const char *conf_file, const char **groups); + +/** Simplify load_defaults() common use */ +#define 
load_defaults_or_exit(A, B, C, D) switch (load_defaults(A, B, C, D)) { \ + case 0: break; \ + case 4: my_end(0); exit(0); \ + default: my_end(0); exit(1); } + C_MODE_END #endif /* MY_DEFAULT_INCLUDED */ diff --git a/mysys/my_default.c b/mysys/my_default.c index 544a6392e4d..9efbf054131 100644 --- a/mysys/my_default.c +++ b/mysys/my_default.c @@ -631,7 +631,7 @@ int my_load_defaults(const char *conf_file, const char **groups, if (!my_getopt_is_args_separator((*argv)[i])) /* skip arguments separator */ printf("%s ", (*argv)[i]); puts(""); - exit(0); + DBUG_RETURN(4); } if (default_directories) @@ -641,7 +641,7 @@ int my_load_defaults(const char *conf_file, const char **groups, err: fprintf(stderr,"Fatal error in defaults handling. Program aborted\n"); - return 2; + DBUG_RETURN(2); } diff --git a/sql/mysqld.cc b/sql/mysqld.cc index 6783231022c..01f5febd5a0 100644 --- a/sql/mysqld.cc +++ b/sql/mysqld.cc @@ -5659,8 +5659,7 @@ int mysqld_main(int argc, char **argv) orig_argc= argc; orig_argv= argv; my_getopt_use_args_separator= TRUE; - if (load_defaults(MYSQL_CONFIG_NAME, load_default_groups, &argc, &argv)) - return 1; + load_defaults_or_exit(MYSQL_CONFIG_NAME, load_default_groups, &argc, &argv); my_getopt_use_args_separator= FALSE; defaults_argc= argc; defaults_argv= argv; diff --git a/sql/tztime.cc b/sql/tztime.cc index a9db91668bb..ccd5ed4a86b 100644 --- a/sql/tztime.cc +++ b/sql/tztime.cc @@ -2687,9 +2687,7 @@ main(int argc, char **argv) char **default_argv; MY_INIT(argv[0]); - if (load_defaults("my",load_default_groups,&argc,&argv)) - exit(1); - + load_defaults_or_exit("my", load_default_groups, &argc, &argv); default_argv= argv; if ((handle_options(&argc, &argv, my_long_options, get_one_option))) diff --git a/storage/archive/archive_reader.c b/storage/archive/archive_reader.c index 1b15fa16ce4..e87bc70ade4 100644 --- a/storage/archive/archive_reader.c +++ b/storage/archive/archive_reader.c @@ -408,8 +408,7 @@ static void print_version(void) static void 
get_options(int *argc, char ***argv) { - if (load_defaults("my", load_default_groups, argc, argv)) - exit(1); + load_defaults_or_exit("my", load_default_groups, argc, argv); default_argv= *argv; handle_options(argc, argv, my_long_options, get_one_option); diff --git a/storage/innobase/handler/ha_innodb.cc b/storage/innobase/handler/ha_innodb.cc index 1b230c4b91d..2a860ff8e7c 100644 --- a/storage/innobase/handler/ha_innodb.cc +++ b/storage/innobase/handler/ha_innodb.cc @@ -1254,7 +1254,9 @@ innobase_close_connection( THD* thd); /*!< in: MySQL thread handle for which to close the connection */ -static void innobase_kill_query(handlerton *hton, THD* thd, enum thd_kill_levels level); +/** Cancel any pending lock request associated with the current THD. +@sa THD::awake() @sa ha_kill_query() */ +static void innobase_kill_query(handlerton*, THD* thd, enum thd_kill_levels); static void innobase_commit_ordered(handlerton *hton, THD* thd, bool all); /*****************************************************************//** @@ -5299,21 +5301,11 @@ innobase_close_connection( UNIV_INTERN void lock_cancel_waiting_and_release(lock_t* lock); -/*****************************************************************//** -Cancel any pending lock request associated with the current THD. */ -static -void -innobase_kill_query( -/*================*/ - handlerton* hton, /*!< in: innobase handlerton */ - THD* thd, /*!< in: MySQL thread being killed */ - enum thd_kill_levels level) /*!< in: kill level */ +/** Cancel any pending lock request associated with the current THD. 
+@sa THD::awake() @sa ha_kill_query() */ +static void innobase_kill_query(handlerton*, THD* thd, enum thd_kill_levels) { - trx_t* trx; - DBUG_ENTER("innobase_kill_query"); - DBUG_ASSERT(hton == innodb_hton_ptr); - #ifdef WITH_WSREP wsrep_thd_LOCK(thd); if (wsrep_thd_get_conflict_state(thd) != NO_CONFLICT) { @@ -5329,43 +5321,10 @@ innobase_kill_query( wsrep_thd_UNLOCK(thd); #endif /* WITH_WSREP */ - trx = thd_to_trx(thd); - - if (trx != NULL) { + if (trx_t* trx = thd_to_trx(thd)) { + ut_ad(trx->mysql_thd == thd); /* Cancel a pending lock request if there are any */ - bool lock_mutex_taken = false; - bool trx_mutex_taken = false; - - if (trx->lock.wait_lock) { - WSREP_DEBUG("Killing victim trx %p BF %d trx BF %d trx_id " IB_ID_FMT " ABORT %d thd %p" - " current_thd %p BF %d", - trx, wsrep_thd_is_BF(trx->mysql_thd, FALSE), - wsrep_thd_is_BF(thd, FALSE), - trx->id, trx->abort_type, - trx->mysql_thd, - current_thd, - wsrep_thd_is_BF(current_thd, FALSE)); - } - - if (!wsrep_thd_is_BF(trx->mysql_thd, FALSE) && trx->abort_type != TRX_WSREP_ABORT) { - lock_mutex_enter(); - lock_mutex_taken = true; - } - - if (trx->abort_type != TRX_WSREP_ABORT) { - trx_mutex_enter(trx); - trx_mutex_taken = true; - } - - lock_trx_handle_wait(trx, true, true); - - if (lock_mutex_taken) { - lock_mutex_exit(); - } - - if (trx_mutex_taken) { - trx_mutex_exit(trx); - } + lock_trx_handle_wait(trx); } DBUG_VOID_RETURN; @@ -9392,8 +9351,8 @@ ha_innobase::unlock_row(void) transaction is in state TRX_STATE_NOT_STARTED. The check on m_prebuilt->select_lock_type above gets around this issue. 
*/ - ut_ad(trx_state_eq(m_prebuilt->trx, TRX_STATE_ACTIVE) - || trx_state_eq(m_prebuilt->trx, TRX_STATE_FORCED_ROLLBACK)); + ut_ad(trx_state_eq(m_prebuilt->trx, TRX_STATE_ACTIVE, true) + || trx_state_eq(m_prebuilt->trx, TRX_STATE_FORCED_ROLLBACK, true)); switch (m_prebuilt->row_read_type) { case ROW_READ_WITH_LOCKS: @@ -20063,7 +20022,7 @@ wsrep_innobase_kill_one_trx( thd_get_thread_id(thd))); WSREP_DEBUG("kill query for: %ld", thd_get_thread_id(thd)); - /* Note that innobase_kill_connection will take lock_mutex + /* Note that innobase_kill_query will take lock_mutex and trx_mutex */ wsrep_thd_UNLOCK(thd); wsrep_thd_awake(thd, signal); @@ -20140,12 +20099,10 @@ wsrep_abort_transaction( if (victim_trx) { lock_mutex_enter(); trx_mutex_enter(victim_trx); - victim_trx->abort_type = TRX_WSREP_ABORT; int rcode = wsrep_innobase_kill_one_trx(bf_thd, bf_trx, victim_trx, signal); - trx_mutex_exit(victim_trx); lock_mutex_exit(); - victim_trx->abort_type = TRX_SERVER_ABORT; + trx_mutex_exit(victim_trx); wsrep_srv_conc_cancel_wait(victim_trx); DBUG_RETURN(rcode); } else { diff --git a/storage/innobase/include/lock0lock.h b/storage/innobase/include/lock0lock.h index b1f27e7f456..026aac1b367 100644 --- a/storage/innobase/include/lock0lock.h +++ b/storage/innobase/include/lock0lock.h @@ -781,9 +781,7 @@ the wait lock. dberr_t lock_trx_handle_wait( /*=================*/ - trx_t* trx, /*!< in/out: trx lock state */ - bool lock_mutex_taken, - bool trx_mutex_taken); + trx_t* trx); /*!< in/out: trx lock state */ /*********************************************************************//** Get the number of locks on a table. @return number of locks */ diff --git a/storage/innobase/include/trx0trx.h b/storage/innobase/include/trx0trx.h index 412a35a14f4..50e270fb3c5 100644 --- a/storage/innobase/include/trx0trx.h +++ b/storage/innobase/include/trx0trx.h @@ -827,12 +827,6 @@ lock_rec_convert_impl_to_expl()) will access transactions associated to other connections. 
The locks of transactions are protected by lock_sys->mutex and sometimes by trx->mutex. */ -typedef enum { - TRX_SERVER_ABORT = 0, - TRX_WSREP_ABORT = 1 -} trx_abort_t; - - /** Represents an instance of rollback segment along with its state variables.*/ struct trx_undo_ptr_t { trx_rseg_t* rseg; /*!< rollback segment assigned to the @@ -1088,7 +1082,6 @@ struct trx_t { /*------------------------------*/ THD* mysql_thd; /*!< MySQL thread handle corresponding to this trx, or NULL */ - trx_abort_t abort_type; /*!< Transaction abort type*/ const char* mysql_log_file_name; /*!< if MySQL binlog is used, this field diff --git a/storage/innobase/lock/lock0lock.cc b/storage/innobase/lock/lock0lock.cc index 67a40e7639f..819e53e14cc 100644 --- a/storage/innobase/lock/lock0lock.cc +++ b/storage/innobase/lock/lock0lock.cc @@ -1282,10 +1282,8 @@ wsrep_kill_victim( << wsrep_thd_query(lock->trx->mysql_thd); } - lock->trx->abort_type = TRX_WSREP_ABORT; wsrep_innobase_kill_one_trx(trx->mysql_thd, (const trx_t*) trx, lock->trx, TRUE); - lock->trx->abort_type = TRX_SERVER_ABORT; } } } @@ -2936,21 +2934,8 @@ lock_grant_and_move_on_page( && lock_get_wait(lock) && !lock_rec_has_to_wait_in_queue(lock)) { - bool exit_trx_mutex = false; - - if (lock->trx->abort_type != TRX_SERVER_ABORT) { - ut_ad(trx_mutex_own(lock->trx)); - trx_mutex_exit(lock->trx); - exit_trx_mutex = true; - } - lock_grant(lock, false); - if (exit_trx_mutex) { - ut_ad(!trx_mutex_own(lock->trx)); - trx_mutex_enter(lock->trx); - } - if (previous != NULL) { /* Move the lock to the head of the list. 
*/ HASH_GET_NEXT(hash, previous) = HASH_GET_NEXT(hash, lock); @@ -3030,20 +3015,7 @@ lock_rec_dequeue_from_page( /* Grant the lock */ ut_ad(lock->trx != in_lock->trx); - bool exit_trx_mutex = false; - - if (lock->trx->abort_type != TRX_SERVER_ABORT) { - ut_ad(trx_mutex_own(lock->trx)); - trx_mutex_exit(lock->trx); - exit_trx_mutex = true; - } - lock_grant(lock, false); - - if (exit_trx_mutex) { - ut_ad(!trx_mutex_own(lock->trx)); - trx_mutex_enter(lock->trx); - } } } } else { @@ -4270,58 +4242,50 @@ lock_table_create( UT_LIST_ADD_LAST(trx->lock.trx_locks, lock); #ifdef WITH_WSREP - if (c_lock && wsrep_thd_is_BF(trx->mysql_thd, FALSE)) { - ut_list_insert(table->locks, c_lock, lock, TableLockGetNode()); - if (wsrep_debug) { - ib::info() << "table lock BF conflict for " << - ib::hex(c_lock->trx->id); - ib::info() << " SQL: " - << wsrep_thd_query(c_lock->trx->mysql_thd); - } - } else { - ut_list_append(table->locks, lock, TableLockGetNode()); - } if (c_lock) { - ut_ad(!trx_mutex_own(c_lock->trx)); + if (wsrep_thd_is_BF(trx->mysql_thd, FALSE)) { + ut_list_insert(table->locks, c_lock, lock, + TableLockGetNode()); + if (wsrep_debug) { + ib::info() << "table lock BF conflict for " + << ib::hex(c_lock->trx->id) + << " SQL: " + << wsrep_thd_query( + c_lock->trx->mysql_thd); + } + } else { + ut_list_append(table->locks, lock, TableLockGetNode()); + } + trx_mutex_enter(c_lock->trx); - } - if (c_lock && c_lock->trx->lock.que_state == TRX_QUE_LOCK_WAIT) { - c_lock->trx->lock.was_chosen_as_deadlock_victim = TRUE; + if (c_lock->trx->lock.que_state == TRX_QUE_LOCK_WAIT) { + c_lock->trx->lock.was_chosen_as_deadlock_victim = TRUE; - if (wsrep_debug) { - wsrep_print_wait_locks(c_lock); + if (wsrep_debug) { + wsrep_print_wait_locks(c_lock); + } + + /* The lock release will call lock_grant(), + which would acquire trx->mutex again. 
*/ + trx_mutex_exit(trx); + lock_cancel_waiting_and_release( + c_lock->trx->lock.wait_lock); + trx_mutex_enter(trx); + + if (wsrep_debug) { + ib::info() << "WSREP: c_lock canceled " + << ib::hex(c_lock->trx->id) + << " SQL: " + << wsrep_thd_query( + c_lock->trx->mysql_thd); + } } - /* have to release trx mutex for the duration of - victim lock release. This will eventually call - lock_grant, which wants to grant trx mutex again - */ - /* caller has trx_mutex, have to release for lock cancel */ - trx_mutex_exit(trx); - lock_cancel_waiting_and_release(c_lock->trx->lock.wait_lock); - trx_mutex_enter(trx); - - /* trx might not wait for c_lock, but some other lock - does not matter if wait_lock was released above - */ - if (c_lock->trx->lock.wait_lock == c_lock) { - lock_reset_lock_and_trx_wait(lock); - } - - if (wsrep_debug) { - ib::info() << "WSREP: c_lock canceled " << ib::hex(c_lock->trx->id); - ib::info() << " SQL: " - << wsrep_thd_query(c_lock->trx->mysql_thd); - } - } - - if (c_lock) { trx_mutex_exit(c_lock->trx); - } -#else - ut_list_append(table->locks, lock, TableLockGetNode()); + } else #endif /* WITH_WSREP */ + ut_list_append(table->locks, lock, TableLockGetNode()); if (type_mode & LOCK_WAIT) { @@ -7586,6 +7550,23 @@ lock_trx_release_locks( mem_heap_empty(trx->lock.lock_heap); } +static inline dberr_t lock_trx_handle_wait_low(trx_t* trx) +{ + ut_ad(lock_mutex_own()); + ut_ad(trx_mutex_own(trx)); + + if (trx->lock.was_chosen_as_deadlock_victim) { + return DB_DEADLOCK; + } + if (!trx->lock.wait_lock) { + /* The lock was probably granted before we got here. */ + return DB_SUCCESS; + } + + lock_cancel_waiting_and_release(trx->lock.wait_lock); + return DB_LOCK_WAIT; +} + /*********************************************************************//** Check whether the transaction has already been rolled back because it was selected as a deadlock victim, or if it has to wait then cancel @@ -7594,71 +7575,14 @@ the wait lock. 
dberr_t lock_trx_handle_wait( /*=================*/ - trx_t* trx, /*!< in/out: trx lock state */ - bool lock_mutex_taken, - bool trx_mutex_taken) + trx_t* trx) /*!< in/out: trx lock state */ { - dberr_t err=DB_SUCCESS; - bool take_lock_mutex = false; - bool take_trx_mutex = false; - - if (!lock_mutex_taken) { - ut_ad(!lock_mutex_own()); - lock_mutex_enter(); - take_lock_mutex = true; - } - - if (!trx_mutex_taken) { - ut_ad(!trx_mutex_own(trx)); - trx_mutex_enter(trx); - take_trx_mutex = true; - } - - if (trx->lock.was_chosen_as_deadlock_victim) { - err = DB_DEADLOCK; - } else if (trx->lock.wait_lock != NULL) { - bool take_wait_trx_mutex = false; - trx_t* wait_trx = trx->lock.wait_lock->trx; - - /* We take trx mutex for waiting trx if we have not yet - already taken it or we know that waiting trx and parameter - trx are not same and we are not already holding trx mutex. */ - if ((wait_trx && wait_trx == trx && !take_trx_mutex && !trx_mutex_taken) || - (wait_trx && wait_trx != trx && wait_trx->abort_type == TRX_SERVER_ABORT)) { - ut_ad(!trx_mutex_own(wait_trx)); - trx_mutex_enter(wait_trx); - take_wait_trx_mutex = true; - } - - ut_ad(trx_mutex_own(wait_trx)); - - lock_cancel_waiting_and_release(trx->lock.wait_lock); - - if (wait_trx && take_wait_trx_mutex) { - ut_ad(trx_mutex_own(wait_trx)); - trx_mutex_exit(wait_trx); - } - - err = DB_LOCK_WAIT; - } else { - /* The lock was probably granted before we got here. 
*/ - err = DB_SUCCESS; - } - - if (take_lock_mutex) { - ut_ad(lock_mutex_own()); - lock_mutex_exit(); - } - - if (take_trx_mutex) { - ut_ad(trx_mutex_own(trx)); - trx_mutex_exit(trx); - } - - ut_ad(err == DB_SUCCESS || err == DB_LOCK_WAIT - || err == DB_DEADLOCK); - - return(err); + lock_mutex_enter(); + trx_mutex_enter(trx); + dberr_t err = lock_trx_handle_wait_low(trx); + lock_mutex_exit(); + trx_mutex_exit(trx); + return err; } /*********************************************************************//** diff --git a/storage/innobase/row/row0sel.cc b/storage/innobase/row/row0sel.cc index 46cd1e00021..623961945b2 100644 --- a/storage/innobase/row/row0sel.cc +++ b/storage/innobase/row/row0sel.cc @@ -5008,8 +5008,7 @@ no_gap_lock: a deadlock and the transaction had to wait then release the lock it is waiting on. */ - trx->abort_type = TRX_SERVER_ABORT; - err = lock_trx_handle_wait(trx, false, false); + err = lock_trx_handle_wait(trx); switch (err) { case DB_SUCCESS: diff --git a/storage/maria/maria_chk.c b/storage/maria/maria_chk.c index cb8b374691e..bff6a87dc61 100644 --- a/storage/maria/maria_chk.c +++ b/storage/maria/maria_chk.c @@ -895,7 +895,7 @@ static void get_options(register int *argc,register char ***argv) { int ho_error; - load_defaults("my", load_default_groups, argc, argv); + load_defaults_or_exit("my", load_default_groups, argc, argv); default_argv= *argv; check_param.testflag= T_UPDATE_STATE; if (isatty(fileno(stdout))) diff --git a/storage/maria/maria_dump_log.c b/storage/maria/maria_dump_log.c index 42c694bf1bf..3570dede80e 100644 --- a/storage/maria/maria_dump_log.c +++ b/storage/maria/maria_dump_log.c @@ -133,7 +133,7 @@ int main(int argc, char **argv) uchar buffer[TRANSLOG_PAGE_SIZE]; MY_INIT(argv[0]); - load_defaults("my", load_default_groups, &argc, &argv); + load_defaults_or_exit("my", load_default_groups, &argc, &argv); default_argv= argv; get_options(&argc, &argv); diff --git a/storage/maria/maria_pack.c b/storage/maria/maria_pack.c index 
814c50e1db8..28b4ff4cfc7 100644 --- a/storage/maria/maria_pack.c +++ b/storage/maria/maria_pack.c @@ -208,7 +208,7 @@ int main(int argc, char **argv) char **default_argv; MY_INIT(argv[0]); - load_defaults("my",load_default_groups,&argc,&argv); + load_defaults_or_exit("my", load_default_groups, &argc, &argv); default_argv= argv; get_options(&argc,&argv); maria_init(); diff --git a/storage/maria/maria_read_log.c b/storage/maria/maria_read_log.c index 2c24c125f36..551732d8ba3 100644 --- a/storage/maria/maria_read_log.c +++ b/storage/maria/maria_read_log.c @@ -47,7 +47,7 @@ int main(int argc, char **argv) maria_data_root= (char *)"."; sf_leaking_memory=1; /* don't report memory leaks on early exits */ - load_defaults("my", load_default_groups, &argc, &argv); + load_defaults_or_exit("my", load_default_groups, &argc, &argv); default_argv= argv; get_options(&argc, &argv); diff --git a/storage/maria/unittest/ma_test_loghandler_multigroup-t.c b/storage/maria/unittest/ma_test_loghandler_multigroup-t.c index cffe188e855..cbf914f5d45 100644 --- a/storage/maria/unittest/ma_test_loghandler_multigroup-t.c +++ b/storage/maria/unittest/ma_test_loghandler_multigroup-t.c @@ -256,7 +256,7 @@ int main(int argc __attribute__((unused)), char *argv[]) #endif long_buffer= malloc(LONG_BUFFER_SIZE + LSN_STORE_SIZE * 2 + 2); - load_defaults("my", load_default_groups, &argc, &argv); + load_defaults_or_exit("my", load_default_groups, &argc, &argv); default_argv= argv; get_options(&argc, &argv); diff --git a/storage/myisam/myisamchk.c b/storage/myisam/myisamchk.c index 2e36c364453..dcf1d5ccaaf 100644 --- a/storage/myisam/myisamchk.c +++ b/storage/myisam/myisamchk.c @@ -752,9 +752,7 @@ static void get_options(register int *argc,register char ***argv) { int ho_error; - if (load_defaults("my", load_default_groups, argc, argv)) - exit(1); - + load_defaults_or_exit("my", load_default_groups, argc, argv); default_argv= *argv; if (isatty(fileno(stdout))) check_param.testflag|=T_WRITE_LOOP; diff --git 
a/storage/myisam/myisampack.c b/storage/myisam/myisampack.c index 966edc877cd..bb9f59f86f7 100644 --- a/storage/myisam/myisampack.c +++ b/storage/myisam/myisampack.c @@ -209,9 +209,7 @@ int main(int argc, char **argv) char **default_argv; MY_INIT(argv[0]); - if (load_defaults("my",load_default_groups,&argc,&argv)) - exit(1); - + load_defaults_or_exit("my", load_default_groups, &argc, &argv); default_argv= argv; get_options(&argc,&argv); diff --git a/storage/xtradb/handler/ha_innodb.cc b/storage/xtradb/handler/ha_innodb.cc index 464f56721af..0f5ef12bfb3 100644 --- a/storage/xtradb/handler/ha_innodb.cc +++ b/storage/xtradb/handler/ha_innodb.cc @@ -1428,19 +1428,12 @@ innobase_close_connection( THD* thd); /*!< in: MySQL thread handle for which to close the connection */ +/** Cancel any pending lock request associated with the current THD. +@sa THD::awake() @sa ha_kill_query() */ +static void innobase_kill_query(handlerton*, THD* thd, enum thd_kill_levels); static void innobase_commit_ordered(handlerton *hton, THD* thd, bool all); static void innobase_checkpoint_request(handlerton *hton, void *cookie); -/*****************************************************************//** -Cancel any pending lock request associated with the current THD. */ -static -void -innobase_kill_connection( -/*======================*/ - handlerton* hton, /*!< in: innobase handlerton */ - THD* thd, /*!< in: handle to the MySQL thread being killed */ - thd_kill_levels); - /*****************************************************************//** Commits a transaction in an InnoDB database or marks an SQL statement ended. 
@@ -3855,7 +3848,7 @@ innobase_init( innobase_hton->flags = HTON_SUPPORTS_EXTENDED_KEYS | HTON_SUPPORTS_FOREIGN_KEYS; - innobase_hton->kill_query = innobase_kill_connection; + innobase_hton->kill_query = innobase_kill_query; if (srv_file_per_table) innobase_hton->tablefile_extensions = ha_innobase_exts; @@ -5465,20 +5458,11 @@ ha_innobase::get_row_type() const return(ROW_TYPE_NOT_USED); } -/*****************************************************************//** -Cancel any pending lock request associated with the current THD. */ -static -void -innobase_kill_connection( -/*======================*/ - handlerton* hton, /*!< in: innobase handlerton */ - THD* thd, /*!< in: handle to the MySQL thread being killed */ - thd_kill_levels) +/** Cancel any pending lock request associated with the current THD. +@sa THD::awake() @sa ha_kill_query() */ +static void innobase_kill_query(handlerton*, THD* thd, enum thd_kill_levels) { - trx_t* trx; - - DBUG_ENTER("innobase_kill_connection"); - DBUG_ASSERT(hton == innodb_hton_ptr); + DBUG_ENTER("innobase_kill_query"); #ifdef WITH_WSREP wsrep_thd_LOCK(thd); @@ -5494,50 +5478,37 @@ innobase_kill_connection( } wsrep_thd_UNLOCK(thd); #endif /* WITH_WSREP */ - trx = thd_to_trx(thd); + if (trx_t* trx = thd_to_trx(thd)) { + ut_ad(trx->mysql_thd == thd); - if (trx && trx->lock.wait_lock) { - /* In wsrep BF we have already took lock_sys and trx - mutex either on wsrep_abort_transaction() or - before wsrep_kill_victim(). 
In replication we - could own lock_sys mutex taken in - lock_deadlock_check_and_resolve().*/ - - WSREP_DEBUG("Killing victim trx %p BF %d trx BF %d trx_id " TRX_ID_FMT " ABORT %d thd %p" - " current_thd %p BF %d wait_lock_modes: %s\n", - trx, wsrep_thd_is_BF(trx->mysql_thd, FALSE), - wsrep_thd_is_BF(thd, FALSE), - trx->id, trx->abort_type, - trx->mysql_thd, - current_thd, - wsrep_thd_is_BF(current_thd, FALSE), - lock_get_info(trx->lock.wait_lock).c_str()); - - if (!wsrep_thd_is_BF(trx->mysql_thd, FALSE) - && trx->abort_type == TRX_SERVER_ABORT) { - ut_ad(!lock_mutex_own()); - lock_mutex_enter(); - } - - if (trx->abort_type != TRX_WSREP_ABORT) { + switch (trx->abort_type) { +#ifdef WITH_WSREP + case TRX_WSREP_ABORT: + break; +#endif + case TRX_SERVER_ABORT: + if (!wsrep_thd_is_BF(trx->mysql_thd, FALSE)) { + lock_mutex_enter(); + } + /* fall through */ + case TRX_REPLICATION_ABORT: trx_mutex_enter(trx); } - - ut_ad(lock_mutex_own()); - ut_ad(trx_mutex_own(trx)); - - if (trx->lock.wait_lock) { - lock_cancel_waiting_and_release(trx->lock.wait_lock); - } - - if (trx->abort_type != TRX_WSREP_ABORT) { + /* Cancel a pending lock request if there are any */ + lock_trx_handle_wait(trx); + switch (trx->abort_type) { +#ifdef WITH_WSREP + case TRX_WSREP_ABORT: + break; +#endif + case TRX_SERVER_ABORT: + if (!wsrep_thd_is_BF(trx->mysql_thd, FALSE)) { + lock_mutex_exit(); + } + /* fall through */ + case TRX_REPLICATION_ABORT: trx_mutex_exit(trx); } - - if (!wsrep_thd_is_BF(trx->mysql_thd, FALSE) && - trx->abort_type == TRX_SERVER_ABORT) { - lock_mutex_exit(); - } } DBUG_VOID_RETURN; @@ -9635,7 +9606,7 @@ ha_innobase::unlock_row(void) But there are some calls to this function from the SQL layer when the transaction is in state TRX_STATE_NOT_STARTED. The check on prebuilt->select_lock_type above gets around this issue. 
*/ - ut_ad(trx_state_eq(prebuilt->trx, TRX_STATE_ACTIVE)); + ut_ad(trx_state_eq(prebuilt->trx, TRX_STATE_ACTIVE, true)); switch (prebuilt->row_read_type) { case ROW_READ_WITH_LOCKS: @@ -19765,7 +19736,7 @@ wsrep_innobase_kill_one_trx( thd_get_thread_id(thd))); WSREP_DEBUG("kill query for: %ld", thd_get_thread_id(thd)); - /* Note that innobase_kill_connection will take lock_mutex + /* Note that innobase_kill_query will take lock_mutex and trx_mutex */ wsrep_thd_UNLOCK(thd); wsrep_thd_awake(thd, signal); diff --git a/storage/xtradb/lock/lock0lock.cc b/storage/xtradb/lock/lock0lock.cc index b50452c1d5d..b8446013bc7 100644 --- a/storage/xtradb/lock/lock0lock.cc +++ b/storage/xtradb/lock/lock0lock.cc @@ -910,7 +910,7 @@ UNIV_INLINE void lock_reset_lock_and_trx_wait( /*=========================*/ - lock_t* lock) /*!< in/out: record lock */ + lock_t* lock) /*!< in/out: record lock */ { ut_ad(lock_get_wait(lock)); ut_ad(lock_mutex_own()); @@ -2358,13 +2358,6 @@ lock_rec_create( trx_mutex_enter(trx); } - /* trx might not wait for c_lock, but some other lock - does not matter if wait_lock was released above - */ - if (c_lock->trx->lock.wait_lock == c_lock) { - lock_reset_lock_and_trx_wait(lock); - } - trx_mutex_exit(c_lock->trx); if (wsrep_debug) { @@ -5010,19 +5003,18 @@ lock_table_create( UT_LIST_ADD_LAST(trx_locks, trx->lock.trx_locks, lock); #ifdef WITH_WSREP - if (wsrep_thd_is_wsrep(trx->mysql_thd)) { - if (c_lock && wsrep_thd_is_BF(trx->mysql_thd, FALSE)) { + if (c_lock) { + if (wsrep_thd_is_wsrep(trx->mysql_thd) + && wsrep_thd_is_BF(trx->mysql_thd, FALSE)) { UT_LIST_INSERT_AFTER( un_member.tab_lock.locks, table->locks, c_lock, lock); } else { UT_LIST_ADD_LAST(un_member.tab_lock.locks, table->locks, lock); } - if (c_lock) { - trx_mutex_enter(c_lock->trx); - } + trx_mutex_enter(c_lock->trx); - if (c_lock && c_lock->trx->lock.que_state == TRX_QUE_LOCK_WAIT) { + if (c_lock->trx->lock.que_state == TRX_QUE_LOCK_WAIT) { c_lock->trx->lock.was_chosen_as_deadlock_victim = TRUE; 
@@ -5031,36 +5023,21 @@ lock_table_create( wsrep_print_wait_locks(c_lock->trx->lock.wait_lock); } - /* have to release trx mutex for the duration of - victim lock release. This will eventually call - lock_grant, which wants to grant trx mutex again - */ - /* caller has trx_mutex, have to release for lock cancel */ + /* The lock release will call lock_grant(), + which would acquire trx->mutex again. */ trx_mutex_exit(trx); lock_cancel_waiting_and_release(c_lock->trx->lock.wait_lock); trx_mutex_enter(trx); - /* trx might not wait for c_lock, but some other lock - does not matter if wait_lock was released above - */ - if (c_lock->trx->lock.wait_lock == c_lock) { - lock_reset_lock_and_trx_wait(lock); - } - if (wsrep_debug) { fprintf(stderr, "WSREP: c_lock canceled " TRX_ID_FMT "\n", c_lock->trx->id); } } - if (c_lock) { - trx_mutex_exit(c_lock->trx); - } - } else { + trx_mutex_exit(c_lock->trx); + } else #endif /* WITH_WSREP */ UT_LIST_ADD_LAST(un_member.tab_lock.locks, table->locks, lock); -#ifdef WITH_WSREP - } -#endif /* WITH_WSREP */ if (UNIV_UNLIKELY(type_mode & LOCK_WAIT)) { @@ -8110,26 +8087,19 @@ lock_trx_handle_wait( /*=================*/ trx_t* trx) /*!< in/out: trx lock state */ { - dberr_t err; - - lock_mutex_enter(); - - trx_mutex_enter(trx); + ut_ad(lock_mutex_own()); + ut_ad(trx_mutex_own(trx)); if (trx->lock.was_chosen_as_deadlock_victim) { - err = DB_DEADLOCK; - } else if (trx->lock.wait_lock != NULL) { - lock_cancel_waiting_and_release(trx->lock.wait_lock); - err = DB_LOCK_WAIT; - } else { + return DB_DEADLOCK; + } + if (!trx->lock.wait_lock) { /* The lock was probably granted before we got here. 
*/ - err = DB_SUCCESS; + return DB_SUCCESS; } - lock_mutex_exit(); - trx_mutex_exit(trx); - - return(err); + lock_cancel_waiting_and_release(trx->lock.wait_lock); + return DB_LOCK_WAIT; } /*********************************************************************//** diff --git a/storage/xtradb/row/row0sel.cc b/storage/xtradb/row/row0sel.cc index 03ae6822fb7..b81ea60a413 100644 --- a/storage/xtradb/row/row0sel.cc +++ b/storage/xtradb/row/row0sel.cc @@ -4627,7 +4627,11 @@ no_gap_lock: a deadlock and the transaction had to wait then release the lock it is waiting on. */ + lock_mutex_enter(); + trx_mutex_enter(trx); err = lock_trx_handle_wait(trx); + lock_mutex_exit(); + trx_mutex_exit(trx); switch (err) { case DB_SUCCESS: diff --git a/tests/mysql_client_fw.c b/tests/mysql_client_fw.c index bf06e2b502b..f69eb28a287 100644 --- a/tests/mysql_client_fw.c +++ b/tests/mysql_client_fw.c @@ -1410,8 +1410,7 @@ int main(int argc, char **argv) for (i= 0; i < argc; i++) original_argv[i]= strdup(argv[i]); - if (load_defaults("my", client_test_load_default_groups, &argc, &argv)) - exit(1); + load_defaults_or_exit("my", client_test_load_default_groups, &argc, &argv); get_options(&argc, &argv); /* Set main opt_count. */ diff --git a/tests/mysql_client_test.c b/tests/mysql_client_test.c index 741e1e53532..99cee8f7ed4 100644 --- a/tests/mysql_client_test.c +++ b/tests/mysql_client_test.c @@ -7109,11 +7109,7 @@ static void test_embedded_start_stop() MY_INIT(argv[0]); /* Load the client defaults from the .cnf file[s]. */ - if (load_defaults("my", client_test_load_default_groups, &argc, &argv)) - { - myerror("load_defaults failed"); - exit(1); - } + load_defaults_or_exit("my", client_test_load_default_groups, &argc, &argv); /* Parse the options (including the ones given from defaults files). 
*/ get_options(&argc, &argv); @@ -7161,12 +7157,7 @@ static void test_embedded_start_stop() MY_INIT(argv[0]); - if (load_defaults("my", client_test_load_default_groups, &argc, &argv)) - { - myerror("load_defaults failed \n "); - exit(1); - } - + load_defaults_or_exit("my", client_test_load_default_groups, &argc, &argv); get_options(&argc, &argv); /* Must start the main embedded server again after the test. */ diff --git a/tests/thread_test.c b/tests/thread_test.c index bf0fb8ea2c0..38e453e9cb8 100644 --- a/tests/thread_test.c +++ b/tests/thread_test.c @@ -168,8 +168,8 @@ static void get_options(int argc, char **argv) { int ho_error; - if ((ho_error= load_defaults("my",load_default_groups,&argc,&argv)) || - (ho_error= handle_options(&argc, &argv, my_long_options, get_one_option))) + load_defaults_or_exit("my", load_default_groups, &argc, &argv); + if ((ho_error= handle_options(&argc, &argv, my_long_options, get_one_option))) exit(ho_error); free_defaults(argv); From e15e879fae949a05de549a6676ae66d4f7f8c566 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Tue, 13 Mar 2018 11:07:34 +0200 Subject: [PATCH 061/139] Remove the unreachable error DB_QUE_THR_SUSPENDED --- storage/innobase/include/db0err.h | 3 +- storage/innobase/include/lock0lock.h | 18 ++++---- storage/innobase/include/lock0prdt.h | 7 +-- storage/innobase/include/lock0priv.h | 6 +-- storage/innobase/lock/lock0lock.cc | 65 ++++++---------------------- storage/innobase/lock/lock0prdt.cc | 8 ++-- storage/innobase/row/row0mysql.cc | 25 +---------- storage/innobase/ut/ut0ut.cc | 2 - 8 files changed, 35 insertions(+), 99 deletions(-) diff --git a/storage/innobase/include/db0err.h b/storage/innobase/include/db0err.h index ef6f8b39abb..ec8e29d458c 100644 --- a/storage/innobase/include/db0err.h +++ b/storage/innobase/include/db0err.h @@ -1,7 +1,7 @@ /***************************************************************************** Copyright (c) 1996, 2016, Oracle and/or its affiliates. 
All Rights Reserved. -Copyright (c) 2015, 2017, MariaDB Corporation. +Copyright (c) 2015, 2018, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -43,7 +43,6 @@ enum dberr_t { DB_DEADLOCK, DB_ROLLBACK, DB_DUPLICATE_KEY, - DB_QUE_THR_SUSPENDED, DB_MISSING_HISTORY, /*!< required history data has been deleted due to lack of space in rollback segment */ diff --git a/storage/innobase/include/lock0lock.h b/storage/innobase/include/lock0lock.h index 026aac1b367..03535b6aaed 100644 --- a/storage/innobase/include/lock0lock.h +++ b/storage/innobase/include/lock0lock.h @@ -1,7 +1,7 @@ /***************************************************************************** Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved. -Copyright (c) 2017, MariaDB Corporation. +Copyright (c) 2017, 2018, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -285,7 +285,7 @@ a record. If they do, first tests if the query thread should anyway be suspended for some reason; if not, then puts the transaction and the query thread to the lock wait state and inserts a waiting request for a gap x-lock to the lock queue. -@return DB_SUCCESS, DB_LOCK_WAIT, DB_DEADLOCK, or DB_QUE_THR_SUSPENDED */ +@return DB_SUCCESS, DB_LOCK_WAIT, or DB_DEADLOCK */ dberr_t lock_rec_insert_check_and_lock( /*===========================*/ @@ -309,7 +309,7 @@ first tests if the query thread should anyway be suspended for some reason; if not, then puts the transaction and the query thread to the lock wait state and inserts a waiting request for a record x-lock to the lock queue. 
-@return DB_SUCCESS, DB_LOCK_WAIT, DB_DEADLOCK, or DB_QUE_THR_SUSPENDED */ +@return DB_SUCCESS, DB_LOCK_WAIT, or DB_DEADLOCK */ dberr_t lock_clust_rec_modify_check_and_lock( /*=================================*/ @@ -325,7 +325,7 @@ lock_clust_rec_modify_check_and_lock( /*********************************************************************//** Checks if locks of other transactions prevent an immediate modify (delete mark or delete unmark) of a secondary index record. -@return DB_SUCCESS, DB_LOCK_WAIT, DB_DEADLOCK, or DB_QUE_THR_SUSPENDED */ +@return DB_SUCCESS, DB_LOCK_WAIT, or DB_DEADLOCK */ dberr_t lock_sec_rec_modify_check_and_lock( /*===============================*/ @@ -345,8 +345,7 @@ lock_sec_rec_modify_check_and_lock( /*********************************************************************//** Like lock_clust_rec_read_check_and_lock(), but reads a secondary index record. -@return DB_SUCCESS, DB_SUCCESS_LOCKED_REC, DB_LOCK_WAIT, DB_DEADLOCK, -or DB_QUE_THR_SUSPENDED */ +@return DB_SUCCESS, DB_SUCCESS_LOCKED_REC, DB_LOCK_WAIT, or DB_DEADLOCK */ dberr_t lock_sec_rec_read_check_and_lock( /*=============================*/ @@ -374,8 +373,7 @@ if the query thread should anyway be suspended for some reason; if not, then puts the transaction and the query thread to the lock wait state and inserts a waiting request for a record lock to the lock queue. Sets the requested mode lock on the record. -@return DB_SUCCESS, DB_SUCCESS_LOCKED_REC, DB_LOCK_WAIT, DB_DEADLOCK, -or DB_QUE_THR_SUSPENDED */ +@return DB_SUCCESS, DB_SUCCESS_LOCKED_REC, DB_LOCK_WAIT, or DB_DEADLOCK */ dberr_t lock_clust_rec_read_check_and_lock( /*===============================*/ @@ -405,7 +403,7 @@ waiting request for a record lock to the lock queue. Sets the requested mode lock on the record. This is an alternative version of lock_clust_rec_read_check_and_lock() that does not require the parameter "offsets". 
-@return DB_SUCCESS, DB_LOCK_WAIT, DB_DEADLOCK, or DB_QUE_THR_SUSPENDED */ +@return DB_SUCCESS, DB_LOCK_WAIT, or DB_DEADLOCK */ dberr_t lock_clust_rec_read_check_and_lock_alt( /*===================================*/ @@ -460,7 +458,7 @@ lock_sec_rec_cons_read_sees( /*********************************************************************//** Locks the specified database table in the mode given. If the lock cannot be granted immediately, the query thread is put to wait. -@return DB_SUCCESS, DB_LOCK_WAIT, DB_DEADLOCK, or DB_QUE_THR_SUSPENDED */ +@return DB_SUCCESS, DB_LOCK_WAIT, or DB_DEADLOCK */ dberr_t lock_table( /*=======*/ diff --git a/storage/innobase/include/lock0prdt.h b/storage/innobase/include/lock0prdt.h index 878d575ddc7..e4e37776d22 100644 --- a/storage/innobase/include/lock0prdt.h +++ b/storage/innobase/include/lock0prdt.h @@ -1,6 +1,7 @@ /***************************************************************************** Copyright (c) 2014, 2016, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2018, MariaDB Corporation. 
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -36,7 +37,7 @@ typedef struct lock_prdt { /*********************************************************************//** Acquire a predicate lock on a block -@return DB_SUCCESS, DB_LOCK_WAIT, DB_DEADLOCK, or DB_QUE_THR_SUSPENDED */ +@return DB_SUCCESS, DB_LOCK_WAIT, or DB_DEADLOCK */ dberr_t lock_prdt_lock( /*===========*/ @@ -56,7 +57,7 @@ lock_prdt_lock( /*********************************************************************//** Acquire a "Page" lock on a block -@return DB_SUCCESS, DB_LOCK_WAIT, DB_DEADLOCK, or DB_QUE_THR_SUSPENDED */ +@return DB_SUCCESS, DB_LOCK_WAIT, or DB_DEADLOCK */ dberr_t lock_place_prdt_page_lock( /*======================*/ @@ -129,7 +130,7 @@ lock_prdt_update_parent( /*********************************************************************//** Checks if locks of other transactions prevent an immediate insert of a predicate record. -@return DB_SUCCESS, DB_LOCK_WAIT, DB_DEADLOCK, or DB_QUE_THR_SUSPENDED */ +@return DB_SUCCESS, DB_LOCK_WAIT, or DB_DEADLOCK */ dberr_t lock_prdt_insert_check_and_lock( /*============================*/ diff --git a/storage/innobase/include/lock0priv.h b/storage/innobase/include/lock0priv.h index 185779e476f..43f59151991 100644 --- a/storage/innobase/include/lock0priv.h +++ b/storage/innobase/include/lock0priv.h @@ -1,7 +1,7 @@ /***************************************************************************** Copyright (c) 2007, 2016, Oracle and/or its affiliates. All Rights Reserved. -Copyright (c) 2015, 2016, MariaDB Corporation +Copyright (c) 2015, 2018, MariaDB Corporation. 
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -715,7 +715,7 @@ public: @param[in, out] wait_for The lock that the the joining transaction is waiting for @param[in] prdt Predicate [optional] - @return DB_LOCK_WAIT, DB_DEADLOCK, or DB_QUE_THR_SUSPENDED, or + @return DB_LOCK_WAIT, DB_DEADLOCK, or DB_SUCCESS_LOCKED_REC; DB_SUCCESS_LOCKED_REC means that there was a deadlock, but another transaction was chosen as a victim, and we got the lock immediately: no need to @@ -843,7 +843,7 @@ private: /** Check and resolve any deadlocks @param[in, out] lock The lock being acquired - @return DB_LOCK_WAIT, DB_DEADLOCK, or DB_QUE_THR_SUSPENDED, or + @return DB_LOCK_WAIT, DB_DEADLOCK, or DB_SUCCESS_LOCKED_REC; DB_SUCCESS_LOCKED_REC means that there was a deadlock, but another transaction was chosen as a victim, and we got the lock immediately: no need to diff --git a/storage/innobase/lock/lock0lock.cc b/storage/innobase/lock/lock0lock.cc index 819e53e14cc..db079abf10f 100644 --- a/storage/innobase/lock/lock0lock.cc +++ b/storage/innobase/lock/lock0lock.cc @@ -1999,7 +1999,7 @@ RecLock::check_deadlock_result(const trx_t* victim_trx, lock_t* lock) /** Check and resolve any deadlocks @param[in, out] lock The lock being acquired -@return DB_LOCK_WAIT, DB_DEADLOCK, or DB_QUE_THR_SUSPENDED, or +@return DB_LOCK_WAIT, DB_DEADLOCK, or DB_SUCCESS_LOCKED_REC; DB_SUCCESS_LOCKED_REC means that there was a deadlock, but another transaction was chosen as a victim, and we got the lock immediately: no need to @@ -2097,7 +2097,7 @@ queue is itself waiting roll it back, also do a deadlock check and resolve. 
@param[in, out] wait_for The lock that the joining transaction is waiting for @param[in] prdt Predicate [optional] -@return DB_LOCK_WAIT, DB_DEADLOCK, or DB_QUE_THR_SUSPENDED, or +@return DB_LOCK_WAIT, DB_DEADLOCK, or DB_SUCCESS_LOCKED_REC; DB_SUCCESS_LOCKED_REC means that there was a deadlock, but another transaction was chosen as a victim, and we got the lock immediately: no need to @@ -2380,8 +2380,7 @@ This is the general, and slower, routine for locking a record. This is a low-level function which does NOT look at implicit locks! Checks lock compatibility within explicit locks. This function sets a normal next-key lock, or in the case of a page supremum record, a gap type lock. -@return DB_SUCCESS, DB_SUCCESS_LOCKED_REC, DB_LOCK_WAIT, DB_DEADLOCK, -or DB_QUE_THR_SUSPENDED */ +@return DB_SUCCESS, DB_SUCCESS_LOCKED_REC, DB_LOCK_WAIT, or DB_DEADLOCK */ static dberr_t lock_rec_lock_slow( @@ -2467,8 +2466,7 @@ possible, enqueues a waiting lock request. This is a low-level function which does NOT look at implicit locks! Checks lock compatibility within explicit locks. This function sets a normal next-key lock, or in the case of a page supremum record, a gap type lock. -@return DB_SUCCESS, DB_SUCCESS_LOCKED_REC, DB_LOCK_WAIT, DB_DEADLOCK, -or DB_QUE_THR_SUSPENDED */ +@return DB_SUCCESS, DB_SUCCESS_LOCKED_REC, DB_LOCK_WAIT, or DB_DEADLOCK */ static dberr_t lock_rec_lock( @@ -4446,7 +4444,7 @@ lock_table_remove_low( /*********************************************************************//** Enqueues a waiting request for a table lock which cannot be granted immediately. Checks for deadlocks. 
-@return DB_LOCK_WAIT, DB_DEADLOCK, or DB_QUE_THR_SUSPENDED, or +@return DB_LOCK_WAIT, DB_DEADLOCK, or DB_SUCCESS; DB_SUCCESS means that there was a deadlock, but another transaction was chosen as a victim, and we got the lock immediately: no need to wait then */ @@ -4468,16 +4466,7 @@ lock_table_enqueue_waiting( trx = thr_get_trx(thr); ut_ad(trx_mutex_own(trx)); - - /* Test if there already is some other reason to suspend thread: - we do not enqueue a lock request if the query thread should be - stopped anyway */ - - if (que_thr_stop(thr)) { - ut_error; - - return(DB_QUE_THR_SUSPENDED); - } + ut_a(!que_thr_stop(thr)); switch (trx_get_dict_operation(trx)) { case TRX_DICT_OP_NONE: @@ -4583,7 +4572,7 @@ lock_table_other_has_incompatible( /*********************************************************************//** Locks the specified database table in the mode given. If the lock cannot be granted immediately, the query thread is put to wait. -@return DB_SUCCESS, DB_LOCK_WAIT, DB_DEADLOCK, or DB_QUE_THR_SUSPENDED */ +@return DB_SUCCESS, DB_LOCK_WAIT, or DB_DEADLOCK */ dberr_t lock_table( /*=======*/ @@ -4805,32 +4794,8 @@ run_again: } else { que_thr_stop_for_mysql(thr); - if (err != DB_QUE_THR_SUSPENDED) { - bool was_lock_wait; - - was_lock_wait = row_mysql_handle_errors( - &err, trx, thr, NULL); - - if (was_lock_wait) { - goto run_again; - } - } else { - que_thr_t* run_thr; - que_node_t* parent; - - parent = que_node_get_parent(thr); - - run_thr = que_fork_start_command( - static_cast(parent)); - - ut_a(run_thr == thr); - - /* There was a lock wait but the thread was not - in a ready to run or running state. */ - trx->error_state = DB_LOCK_WAIT; - + if (row_mysql_handle_errors(&err, trx, thr, NULL)) { goto run_again; - } } @@ -6498,7 +6463,7 @@ a record. 
If they do, first tests if the query thread should anyway be suspended for some reason; if not, then puts the transaction and the query thread to the lock wait state and inserts a waiting request for a gap x-lock to the lock queue. -@return DB_SUCCESS, DB_LOCK_WAIT, DB_DEADLOCK, or DB_QUE_THR_SUSPENDED */ +@return DB_SUCCESS, DB_LOCK_WAIT, or DB_DEADLOCK */ dberr_t lock_rec_insert_check_and_lock( /*===========================*/ @@ -6739,7 +6704,7 @@ first tests if the query thread should anyway be suspended for some reason; if not, then puts the transaction and the query thread to the lock wait state and inserts a waiting request for a record x-lock to the lock queue. -@return DB_SUCCESS, DB_LOCK_WAIT, DB_DEADLOCK, or DB_QUE_THR_SUSPENDED */ +@return DB_SUCCESS, DB_LOCK_WAIT, or DB_DEADLOCK */ dberr_t lock_clust_rec_modify_check_and_lock( /*=================================*/ @@ -6797,7 +6762,7 @@ lock_clust_rec_modify_check_and_lock( /*********************************************************************//** Checks if locks of other transactions prevent an immediate modify (delete mark or delete unmark) of a secondary index record. -@return DB_SUCCESS, DB_LOCK_WAIT, DB_DEADLOCK, or DB_QUE_THR_SUSPENDED */ +@return DB_SUCCESS, DB_LOCK_WAIT, or DB_DEADLOCK */ dberr_t lock_sec_rec_modify_check_and_lock( /*===============================*/ @@ -6882,8 +6847,7 @@ lock_sec_rec_modify_check_and_lock( /*********************************************************************//** Like lock_clust_rec_read_check_and_lock(), but reads a secondary index record. 
-@return DB_SUCCESS, DB_SUCCESS_LOCKED_REC, DB_LOCK_WAIT, DB_DEADLOCK, -or DB_QUE_THR_SUSPENDED */ +@return DB_SUCCESS, DB_SUCCESS_LOCKED_REC, DB_LOCK_WAIT, or DB_DEADLOCK */ dberr_t lock_sec_rec_read_check_and_lock( /*=============================*/ @@ -6961,8 +6925,7 @@ if the query thread should anyway be suspended for some reason; if not, then puts the transaction and the query thread to the lock wait state and inserts a waiting request for a record lock to the lock queue. Sets the requested mode lock on the record. -@return DB_SUCCESS, DB_SUCCESS_LOCKED_REC, DB_LOCK_WAIT, DB_DEADLOCK, -or DB_QUE_THR_SUSPENDED */ +@return DB_SUCCESS, DB_SUCCESS_LOCKED_REC, DB_LOCK_WAIT, or DB_DEADLOCK */ dberr_t lock_clust_rec_read_check_and_lock( /*===============================*/ @@ -7036,7 +6999,7 @@ waiting request for a record lock to the lock queue. Sets the requested mode lock on the record. This is an alternative version of lock_clust_rec_read_check_and_lock() that does not require the parameter "offsets". -@return DB_SUCCESS, DB_LOCK_WAIT, DB_DEADLOCK, or DB_QUE_THR_SUSPENDED */ +@return DB_SUCCESS, DB_LOCK_WAIT, or DB_DEADLOCK */ dberr_t lock_clust_rec_read_check_and_lock_alt( /*===================================*/ diff --git a/storage/innobase/lock/lock0prdt.cc b/storage/innobase/lock/lock0prdt.cc index 0e79dd5b460..f6859b70297 100644 --- a/storage/innobase/lock/lock0prdt.cc +++ b/storage/innobase/lock/lock0prdt.cc @@ -1,6 +1,7 @@ /***************************************************************************** Copyright (c) 2014, 2016, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2018, MariaDB Corporation. 
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -502,7 +503,7 @@ lock_prdt_add_to_queue( /*********************************************************************//** Checks if locks of other transactions prevent an immediate insert of a predicate record. -@return DB_SUCCESS, DB_LOCK_WAIT, DB_DEADLOCK, or DB_QUE_THR_SUSPENDED */ +@return DB_SUCCESS, DB_LOCK_WAIT, or DB_DEADLOCK */ dberr_t lock_prdt_insert_check_and_lock( /*============================*/ @@ -785,7 +786,7 @@ lock_init_prdt_from_mbr( /*********************************************************************//** Acquire a predicate lock on a block -@return DB_SUCCESS, DB_LOCK_WAIT, DB_DEADLOCK, or DB_QUE_THR_SUSPENDED */ +@return DB_SUCCESS, DB_LOCK_WAIT, or DB_DEADLOCK */ dberr_t lock_prdt_lock( /*===========*/ @@ -900,7 +901,7 @@ lock_prdt_lock( /*********************************************************************//** Acquire a "Page" lock on a block -@return DB_SUCCESS, DB_LOCK_WAIT, DB_DEADLOCK, or DB_QUE_THR_SUSPENDED */ +@return DB_SUCCESS, DB_LOCK_WAIT, or DB_DEADLOCK */ dberr_t lock_place_prdt_page_lock( /*======================*/ @@ -1049,4 +1050,3 @@ lock_prdt_page_free_from_discard( lock = next_lock; } } - diff --git a/storage/innobase/row/row0mysql.cc b/storage/innobase/row/row0mysql.cc index c7414d9c766..49a042acc20 100644 --- a/storage/innobase/row/row0mysql.cc +++ b/storage/innobase/row/row0mysql.cc @@ -3155,30 +3155,7 @@ run_again: } else { que_thr_stop_for_mysql(thr); - if (err != DB_QUE_THR_SUSPENDED) { - ibool was_lock_wait; - - was_lock_wait = row_mysql_handle_errors( - &err, trx, thr, NULL); - - if (was_lock_wait) { - goto run_again; - } - } else { - que_thr_t* run_thr; - que_node_t* parent; - - parent = que_node_get_parent(thr); - - run_thr = que_fork_start_command( - static_cast(parent)); - - ut_a(run_thr == thr); - - /* There was a lock wait but the thread was not - in a 
ready to run or running state. */ - trx->error_state = DB_LOCK_WAIT; - + if (row_mysql_handle_errors(&err, trx, thr, NULL)) { goto run_again; } } diff --git a/storage/innobase/ut/ut0ut.cc b/storage/innobase/ut/ut0ut.cc index 2a64d77da6f..f655c800901 100644 --- a/storage/innobase/ut/ut0ut.cc +++ b/storage/innobase/ut/ut0ut.cc @@ -582,8 +582,6 @@ ut_strerr( return("Rollback"); case DB_DUPLICATE_KEY: return("Duplicate key"); - case DB_QUE_THR_SUSPENDED: - return("The queue thread has been suspended"); case DB_MISSING_HISTORY: return("Required history data has been deleted"); case DB_CLUSTER_NOT_FOUND: From bd7ed1b923e8ddd896103d73461c4313a175cca6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Sun, 11 Mar 2018 23:34:23 +0200 Subject: [PATCH 062/139] MDEV-13935 INSERT stuck at state Unlocking tables Revert the dead code for MySQL 5.7 multi-master replication (GCS), also known as WL#6835: InnoDB: GCS Replication: Deterministic Deadlock Handling (High Prio Transactions in InnoDB). Also, make innodb_lock_schedule_algorithm=vats skip SPATIAL INDEX, because the code does not seem to be compatible with them. Add FIXME comments to some SPATIAL INDEX locking code. It looks like Galera write-set replication might not work with SPATIAL INDEX. 
--- extra/mariabackup/xtrabackup.cc | 1 + storage/innobase/gis/gis0rtree.cc | 2 +- storage/innobase/gis/gis0sea.cc | 1 + storage/innobase/handler/ha_innodb.cc | 331 +------ storage/innobase/handler/handler0alter.cc | 3 +- storage/innobase/include/ha_prototypes.h | 23 +- storage/innobase/include/hash0hash.h | 3 +- storage/innobase/include/lock0lock.h | 79 ++ storage/innobase/include/lock0lock.ic | 46 +- storage/innobase/include/lock0priv.h | 401 -------- storage/innobase/include/lock0priv.ic | 2 + storage/innobase/include/trx0trx.h | 330 +------ storage/innobase/include/trx0trx.ic | 69 +- storage/innobase/include/trx0types.h | 22 +- storage/innobase/include/trx0undo.h | 5 - storage/innobase/lock/lock0lock.cc | 1095 ++++++--------------- storage/innobase/lock/lock0prdt.cc | 62 +- storage/innobase/lock/lock0wait.cc | 6 +- storage/innobase/page/page0page.cc | 3 +- storage/innobase/page/page0zip.cc | 1 + storage/innobase/row/row0ins.cc | 2 - storage/innobase/row/row0merge.cc | 4 +- storage/innobase/row/row0mysql.cc | 3 - storage/innobase/row/row0sel.cc | 18 +- storage/innobase/row/row0trunc.cc | 5 +- storage/innobase/trx/trx0roll.cc | 33 +- storage/innobase/trx/trx0trx.cc | 284 +----- 27 files changed, 536 insertions(+), 2298 deletions(-) diff --git a/extra/mariabackup/xtrabackup.cc b/extra/mariabackup/xtrabackup.cc index e560ce1e042..c5bdec35f90 100644 --- a/extra/mariabackup/xtrabackup.cc +++ b/extra/mariabackup/xtrabackup.cc @@ -69,6 +69,7 @@ Place, Suite 330, Boston, MA 02111-1307 USA #include #include #include +#include "trx0sys.h" #include #include diff --git a/storage/innobase/gis/gis0rtree.cc b/storage/innobase/gis/gis0rtree.cc index b8220d73ec0..d45e40c8151 100644 --- a/storage/innobase/gis/gis0rtree.cc +++ b/storage/innobase/gis/gis0rtree.cc @@ -34,7 +34,7 @@ Created 2013/03/27 Allen Lai and Jimmy Yang #include "rem0cmp.h" #include "lock0lock.h" #include "ibuf0ibuf.h" -#include "trx0trx.h" +#include "trx0undo.h" #include "srv0mon.h" #include "gis0geo.h" diff 
--git a/storage/innobase/gis/gis0sea.cc b/storage/innobase/gis/gis0sea.cc index dcf8cc6f781..173fc76ddfc 100644 --- a/storage/innobase/gis/gis0sea.cc +++ b/storage/innobase/gis/gis0sea.cc @@ -37,6 +37,7 @@ Created 2014/01/16 Jimmy Yang #include "ibuf0ibuf.h" #include "trx0trx.h" #include "srv0mon.h" +#include "que0que.h" #include "gis0geo.h" /** Restore the stored position of a persistent cursor bufferfixing the page */ diff --git a/storage/innobase/handler/ha_innodb.cc b/storage/innobase/handler/ha_innodb.cc index 2a860ff8e7c..b8e72f398f1 100644 --- a/storage/innobase/handler/ha_innodb.cc +++ b/storage/innobase/handler/ha_innodb.cc @@ -1721,41 +1721,6 @@ innobase_reset_background_thd(MYSQL_THD thd) } -#if 0 -/** -Check if the transaction can be rolled back -@param[in] requestor Session requesting the lock -@param[in] holder Session that holds the lock -@return the session that will be rolled back, null don't care */ - -THD* -thd_trx_arbitrate(THD* requestor, THD* holder) -{ - /* Non-user (thd==0) transactions by default can't rollback, in - practice DDL transactions should never rollback and that's because - they should never wait on table/record locks either */ - - ut_a(holder != NULL); - ut_a(holder != requestor); - - THD* victim = thd_tx_arbitrate(requestor, holder); - - ut_a(victim == NULL || victim == requestor || victim == holder); - - return(victim); -} - -/** -@param[in] thd Session to check -@return the priority */ - -int -thd_trx_priority(THD* thd) -{ - return(thd == NULL ? 0 : thd_tx_priority(thd)); -} -#endif - /******************************************************************//** Check if the transaction is an auto-commit transaction. TRUE also implies that it is a SELECT (read-only) transaction. 
@@ -2057,7 +2022,6 @@ convert_error_code_to_mysql( case DB_RECORD_NOT_FOUND: return(HA_ERR_NO_ACTIVE_RECORD); - case DB_FORCED_ABORT: case DB_DEADLOCK: /* Since we rolled back the whole transaction, we must tell it also to MySQL so that MySQL knows to empty the @@ -2879,10 +2843,6 @@ check_trx_exists( return trx; } else { trx = innobase_trx_allocate(thd); - /* User trx can be forced to rollback, - so we unset the disable flag. */ - ut_ad(trx->in_innodb & TRX_FORCE_ROLLBACK_DISABLE); - trx->in_innodb &= TRX_FORCE_ROLLBACK_MASK; thd_set_ha_data(thd, innodb_hton_ptr, trx); return trx; } @@ -3094,11 +3054,8 @@ ha_innobase::update_thd( trx_t* trx = check_trx_exists(thd); - TrxInInnoDB trx_in_innodb(trx); - - ut_ad(trx_in_innodb.is_aborted() - || (trx->dict_operation_lock_mode == 0 - && trx->dict_operation == TRX_DICT_OP_NONE)); + ut_ad(trx->dict_operation_lock_mode == 0); + ut_ad(trx->dict_operation == TRX_DICT_OP_NONE); if (m_prebuilt->trx != trx) { @@ -3564,8 +3521,6 @@ ha_innobase::init_table_handle_for_HANDLER(void) trx_start_if_not_started_xa(m_prebuilt->trx, false); - TrxInInnoDB trx_in_innodb(m_prebuilt->trx); - /* Assign a read view if the transaction does not have it yet */ trx_assign_read_view(m_prebuilt->trx); @@ -4549,8 +4504,6 @@ innobase_start_trx_and_assign_read_view( trx_t* trx = check_trx_exists(thd); - TrxInInnoDB trx_in_innodb(trx); - innobase_srv_conc_force_exit_innodb(trx); /* The transaction should not be active yet, start it */ @@ -4687,7 +4640,6 @@ innobase_commit_ordered( DBUG_ASSERT(hton == innodb_hton_ptr); trx = check_trx_exists(thd); - TrxInInnoDB trx_in_innodb(trx); if (!trx_is_registered_for_2pc(trx) && trx_is_started(trx)) { /* We cannot throw error here; instead we will catch this error @@ -4731,16 +4683,6 @@ innobase_commit( trx_t* trx = check_trx_exists(thd); - TrxInInnoDB trx_in_innodb(trx); - - if (trx_in_innodb.is_aborted()) { - - innobase_rollback(hton, thd, commit_trx); - - DBUG_RETURN(convert_error_code_to_mysql( - 
DB_FORCED_ABORT, 0, thd)); - } - ut_ad(trx->dict_operation_lock_mode == 0); ut_ad(trx->dict_operation == TRX_DICT_OP_NONE); @@ -4832,11 +4774,8 @@ innobase_rollback( trx_t* trx = check_trx_exists(thd); - TrxInInnoDB trx_in_innodb(trx); - - ut_ad(trx_in_innodb.is_aborted() - || (trx->dict_operation_lock_mode == 0 - && trx->dict_operation == TRX_DICT_OP_NONE)); + ut_ad(trx->dict_operation_lock_mode == 0); + ut_ad(trx->dict_operation == TRX_DICT_OP_NONE); innobase_srv_conc_force_exit_innodb(trx); @@ -4847,10 +4786,7 @@ innobase_rollback( /* If we had reserved the auto-inc lock for some table (if we come here to roll back the latest SQL statement) we release it now before a possibly lengthy rollback */ - - if (!trx_in_innodb.is_aborted()) { - lock_unlock_table_autoinc(trx); - } + lock_unlock_table_autoinc(trx); /* This is a statement level variable. */ @@ -4863,18 +4799,6 @@ innobase_rollback( error = trx_rollback_for_mysql(trx); - if (trx->state == TRX_STATE_FORCED_ROLLBACK) { -#ifndef DBUG_OFF - char buffer[1024]; - - DBUG_LOG("trx", "Forced rollback: " - << thd_get_error_context_description( - thd, buffer, sizeof buffer, 512)); -#endif /* !DBUG_OFF */ - - trx->state = TRX_STATE_NOT_STARTED; - } - trx_deregister_from_2pc(trx); } else { @@ -4901,9 +4825,7 @@ innobase_rollback_trx( /* If we had reserved the auto-inc lock for some table (if we come here to roll back the latest SQL statement) we release it now before a possibly lengthy rollback */ - if (!TrxInInnoDB::is_aborted(trx)) { - lock_unlock_table_autoinc(trx); - } + lock_unlock_table_autoinc(trx); if (!trx->has_logged()) { trx->will_lock = 0; @@ -5075,8 +4997,6 @@ innobase_rollback_to_savepoint( trx_t* trx = check_trx_exists(thd); - TrxInInnoDB trx_in_innodb(trx); - innobase_srv_conc_force_exit_innodb(trx); /* TODO: use provided savepoint data area to store savepoint data */ @@ -5117,8 +5037,6 @@ innobase_rollback_to_savepoint_can_release_mdl( trx_t* trx = check_trx_exists(thd); - TrxInInnoDB trx_in_innodb(trx); 
- /* If transaction has not acquired any locks then it is safe to release MDL after rollback to savepoint */ if (UT_LIST_GET_LEN(trx->lock.trx_locks) == 0) { @@ -5152,8 +5070,6 @@ innobase_release_savepoint( trx = check_trx_exists(thd); - TrxInInnoDB trx_in_innodb(trx); - /* TODO: use provided savepoint data area to store savepoint data */ longlong2str((ulint) savepoint, name, 36); @@ -5187,8 +5103,6 @@ innobase_savepoint( trx_t* trx = check_trx_exists(thd); - TrxInInnoDB trx_in_innodb(trx); - innobase_srv_conc_force_exit_innodb(trx); /* Cannot happen outside of transaction */ @@ -5224,7 +5138,6 @@ innobase_close_connection( DBUG_ASSERT(hton == innodb_hton_ptr); trx_t* trx = thd_to_trx(thd); - bool free_trx = false; /* During server initialization MySQL layer will try to open some of the master-slave tables those residing in InnoDB. @@ -5241,16 +5154,6 @@ innobase_close_connection( if (trx) { - TrxInInnoDB trx_in_innodb(trx); - - if (trx_in_innodb.is_aborted()) { - - while (trx_is_started(trx)) { - - os_thread_sleep(20); - } - } - if (!trx_is_registered_for_2pc(trx) && trx_is_started(trx)) { sql_print_error("Transaction not registered for MariaDB 2PC, " @@ -5268,9 +5171,8 @@ innobase_close_connection( if (trx->has_logged_persistent()) { trx_disconnect_prepared(trx); } else { - trx_rollback_for_mysql(trx); trx_deregister_from_2pc(trx); - free_trx = true; + goto rollback_and_free; } } else { sql_print_warning( @@ -5278,24 +5180,15 @@ innobase_close_connection( "InnoDB transaction. " TRX_ID_FMT " row modifications " "will roll back.", trx->undo_no); - ut_d(ib::warn() - << "trx: " << trx << " started on: " - << innobase_basename(trx->start_file) - << ":" << trx->start_line); - innobase_rollback_trx(trx); - free_trx = true; + goto rollback_and_free; } } else { +rollback_and_free: innobase_rollback_trx(trx); - free_trx = true; + trx_free_for_mysql(trx); } } - /* Free trx only after TrxInInnoDB is deleted. 
*/ - if (free_trx) { - trx_free_for_mysql(trx); - } - DBUG_RETURN(0); } @@ -8320,15 +8213,6 @@ ha_innobase::write_row( DBUG_ENTER("ha_innobase::write_row"); trx_t* trx = thd_to_trx(m_user_thd); - TrxInInnoDB trx_in_innodb(trx); - - if (trx_in_innodb.is_aborted()) { - - innobase_rollback(ht, m_user_thd, false); - - DBUG_RETURN(convert_error_code_to_mysql( - DB_FORCED_ABORT, 0, m_user_thd)); - } /* Validation checks before we commence write_row operation. */ if (high_level_read_only) { @@ -9161,14 +9045,6 @@ ha_innobase::update_row( goto func_exit; } - if (TrxInInnoDB::is_aborted(trx)) { - - innobase_rollback(ht, m_user_thd, false); - - DBUG_RETURN(convert_error_code_to_mysql( - DB_FORCED_ABORT, 0, m_user_thd)); - } - /* This is not a delete */ m_prebuilt->upd_node->is_delete = FALSE; @@ -9258,18 +9134,9 @@ ha_innobase::delete_row( { dberr_t error; trx_t* trx = thd_to_trx(m_user_thd); - TrxInInnoDB trx_in_innodb(trx); DBUG_ENTER("ha_innobase::delete_row"); - if (trx_in_innodb.is_aborted()) { - - innobase_rollback(ht, m_user_thd, false); - - DBUG_RETURN(convert_error_code_to_mysql( - DB_FORCED_ABORT, 0, m_user_thd)); - } - ut_a(m_prebuilt->trx == trx); if (high_level_read_only) { @@ -9340,19 +9207,7 @@ ha_innobase::unlock_row(void) DBUG_VOID_RETURN; } - TrxInInnoDB trx_in_innodb(m_prebuilt->trx); - - if (trx_in_innodb.is_aborted()) { - DBUG_VOID_RETURN; - } - - /* Ideally, this assert must be in the beginning of the function. - But there are some calls to this function from the SQL layer when the - transaction is in state TRX_STATE_NOT_STARTED. The check on - m_prebuilt->select_lock_type above gets around this issue. 
*/ - - ut_ad(trx_state_eq(m_prebuilt->trx, TRX_STATE_ACTIVE, true) - || trx_state_eq(m_prebuilt->trx, TRX_STATE_FORCED_ROLLBACK, true)); + ut_ad(trx_state_eq(m_prebuilt->trx, TRX_STATE_ACTIVE, true)); switch (m_prebuilt->row_read_type) { case ROW_READ_WITH_LOCKS: @@ -9634,14 +9489,6 @@ ha_innobase::index_read( innobase_srv_conc_enter_innodb(m_prebuilt); - if (TrxInInnoDB::is_aborted(m_prebuilt->trx)) { - - innobase_rollback(ht, m_user_thd, false); - - DBUG_RETURN(convert_error_code_to_mysql( - DB_FORCED_ABORT, 0, m_user_thd)); - } - ret = row_search_mvcc( buf, mode, m_prebuilt, match_mode, 0); @@ -9822,16 +9669,6 @@ ha_innobase::change_active_index( ut_ad(m_user_thd == ha_thd()); ut_a(m_prebuilt->trx == thd_to_trx(m_user_thd)); - TrxInInnoDB trx_in_innodb(m_prebuilt->trx); - - if (trx_in_innodb.is_aborted()) { - - innobase_rollback(ht, m_user_thd, false); - - DBUG_RETURN(convert_error_code_to_mysql( - DB_FORCED_ABORT, 0, m_user_thd)); - } - active_index = keynr; m_prebuilt->index = innobase_get_index(keynr); @@ -9950,14 +9787,6 @@ ha_innobase::general_fetch( ut_ad(trx == thd_to_trx(m_user_thd)); - if (TrxInInnoDB::is_aborted(trx)) { - - innobase_rollback(ht, m_user_thd, false); - - DBUG_RETURN(convert_error_code_to_mysql( - DB_FORCED_ABORT, 0, m_user_thd)); - } - if (m_prebuilt->table->is_readable()) { } else if (m_prebuilt->table->corrupted) { DBUG_RETURN(HA_ERR_CRASHED); @@ -10124,7 +9953,6 @@ ha_innobase::rnd_init( /*==================*/ bool scan) /*!< in: true if table/index scan FALSE otherwise */ { - TrxInInnoDB trx_in_innodb(m_prebuilt->trx); int err; /* Store the active index value so that we can restore the original @@ -10174,8 +10002,6 @@ ha_innobase::rnd_next( DBUG_ENTER("rnd_next"); - TrxInInnoDB trx_in_innodb(m_prebuilt->trx); - if (m_start_of_scan) { error = index_first(buf); @@ -10299,21 +10125,6 @@ ha_innobase::ft_init_ext( trx_t* trx = m_prebuilt->trx; - TrxInInnoDB trx_in_innodb(trx); - - if (trx_in_innodb.is_aborted()) { - - 
innobase_rollback(ht, m_user_thd, false); - - int err; - err = convert_error_code_to_mysql( - DB_FORCED_ABORT, 0, m_user_thd); - - my_error(err, MYF(0)); - - return(NULL); - } - /* FTS queries are not treated as autocommit non-locking selects. This is because the FTS implementation can acquire locks behind the scenes. This has not been verified but it is safer to treat @@ -10455,16 +10266,6 @@ ha_innobase::ft_read( /*=================*/ uchar* buf) /*!< in/out: buf contain result row */ { - TrxInInnoDB trx_in_innodb(m_prebuilt->trx); - - if (trx_in_innodb.is_aborted()) { - - innobase_rollback(ht, m_user_thd, false); - - return(convert_error_code_to_mysql( - DB_FORCED_ABORT, 0, m_user_thd)); - } - row_prebuilt_t* ft_prebuilt; ft_prebuilt = reinterpret_cast(ft_handler)->ft_prebuilt; @@ -13268,15 +13069,6 @@ ha_innobase::discard_or_import_tablespace( DBUG_RETURN(HA_ERR_TABLE_NEEDS_UPGRADE); } - TrxInInnoDB trx_in_innodb(m_prebuilt->trx); - - if (trx_in_innodb.is_aborted()) { - innobase_rollback(ht, m_user_thd, false); - - DBUG_RETURN(convert_error_code_to_mysql( - DB_FORCED_ABORT, 0, m_user_thd)); - } - trx_start_if_not_started(m_prebuilt->trx, true); /* Obtain an exclusive lock on the table. */ @@ -13376,8 +13168,6 @@ ha_innobase::truncate() update_thd(ha_thd()); - TrxInInnoDB trx_in_innodb(m_prebuilt->trx); - if (!trx_is_started(m_prebuilt->trx)) { ++m_prebuilt->trx->will_lock; } @@ -13452,8 +13242,6 @@ ha_innobase::delete_table( trx_t* parent_trx = check_trx_exists(thd); - TrxInInnoDB trx_in_innodb(parent_trx); - /* Remove the to-be-dropped table from the list of modified tables by parent_trx. Otherwise we may end up with an orphaned pointer to the table object from parent_trx::mod_tables. 
This could happen in: @@ -13697,8 +13485,6 @@ innobase_rename_table( DEBUG_SYNC_C("innodb_rename_table_ready"); - TrxInInnoDB trx_in_innodb(trx); - trx_start_if_not_started(trx, true); /* Serialize data dictionary operations with dictionary mutex: @@ -13790,13 +13576,6 @@ ha_innobase::rename_table( DBUG_RETURN(HA_ERR_TABLE_READONLY); } - /* Get the transaction associated with the current thd, or create one - if not yet created */ - - trx_t* parent_trx = check_trx_exists(thd); - - TrxInInnoDB trx_in_innodb(parent_trx); - trx_t* trx = innobase_trx_allocate(thd); /* We are doing a DDL operation. */ @@ -13880,8 +13659,6 @@ ha_innobase::records_in_range( m_prebuilt->trx->op_info = "estimating records in index range"; - TrxInInnoDB trx_in_innodb(m_prebuilt->trx); - active_index = keynr; key = table->key_info + active_index; @@ -14013,8 +13790,6 @@ ha_innobase::estimate_rows_upper_bound() update_thd(ha_thd()); - TrxInInnoDB trx_in_innodb(m_prebuilt->trx); - m_prebuilt->trx->op_info = "calculating upper bound for table rows"; index = dict_table_get_first_index(m_prebuilt->table); @@ -14856,8 +14631,6 @@ ha_innobase::optimize( HA_CHECK_OPT* check_opt) /*!< in: currently ignored */ { - TrxInInnoDB trx_in_innodb(m_prebuilt->trx); - /* FTS-FIXME: Since MySQL doesn't support engine-specific commands, we have to hijack some existing command in order to be able to test the new admin commands added in InnoDB's FTS support. 
For now, we @@ -14926,8 +14699,6 @@ ha_innobase::check( ut_a(m_prebuilt->trx->magic_n == TRX_MAGIC_N); ut_a(m_prebuilt->trx == thd_to_trx(thd)); - TrxInInnoDB trx_in_innodb(m_prebuilt->trx); - if (m_prebuilt->mysql_template == NULL) { /* Build the template; we will use a dummy template in index scans done in checking */ @@ -15418,8 +15189,6 @@ ha_innobase::get_foreign_key_list( { update_thd(ha_thd()); - TrxInInnoDB trx_in_innodb(m_prebuilt->trx); - m_prebuilt->trx->op_info = "getting list of foreign keys"; mutex_enter(&dict_sys->mutex); @@ -15458,8 +15227,6 @@ ha_innobase::get_parent_foreign_key_list( { update_thd(ha_thd()); - TrxInInnoDB trx_in_innodb(m_prebuilt->trx); - m_prebuilt->trx->op_info = "getting list of referencing foreign keys"; mutex_enter(&dict_sys->mutex); @@ -15556,8 +15323,6 @@ ha_innobase::get_cascade_foreign_key_table_list( THD* thd, List* fk_table_list) { - TrxInInnoDB trx_in_innodb(m_prebuilt->trx); - m_prebuilt->trx->op_info = "getting cascading foreign keys"; std::list > table_list; @@ -15807,15 +15572,6 @@ ha_innobase::end_stmt() /* This is a statement level counter. */ m_prebuilt->autoinc_last_value = 0; - /* This transaction had called ha_innobase::start_stmt() */ - trx_t* trx = m_prebuilt->trx; - - if (trx->lock.start_stmt) { - TrxInInnoDB::end_stmt(trx); - - trx->lock.start_stmt = false; - } - return(0); } @@ -15855,8 +15611,6 @@ ha_innobase::start_stmt( ut_ad(m_prebuilt->table != NULL); - TrxInInnoDB trx_in_innodb(trx); - trx = m_prebuilt->trx; innobase_srv_conc_force_exit_innodb(trx); @@ -15929,14 +15683,6 @@ ha_innobase::start_stmt( ++trx->will_lock; } - /* Only do it once per transaction. 
*/ - if (!trx->lock.start_stmt && lock_type != TL_UNLOCK) { - - TrxInInnoDB::begin_stmt(trx); - - trx->lock.start_stmt = true; - } - DBUG_RETURN(0); } @@ -16167,13 +15913,8 @@ ha_innobase::external_lock( ++trx->will_lock; } - TrxInInnoDB::begin_stmt(trx); - DBUG_RETURN(0); } else { - - TrxInInnoDB::end_stmt(trx); - DEBUG_SYNC_C("ha_innobase_end_statement"); } @@ -16268,8 +16009,6 @@ innodb_show_status( innobase_srv_conc_force_exit_innodb(trx); - TrxInInnoDB trx_in_innodb(trx); - /* We let the InnoDB Monitor to output at most MAX_STATUS_SIZE bytes of text. */ @@ -16854,8 +16593,6 @@ ha_innobase::store_lock( trx_t* trx = check_trx_exists(thd); - TrxInInnoDB trx_in_innodb(trx); - /* NOTE: MySQL can call this function with lock 'type' TL_IGNORE! Be careful to ignore TL_IGNORE if we are going to do something with only 'real' locks! */ @@ -17176,8 +16913,6 @@ ha_innobase::get_auto_increment( trx = m_prebuilt->trx; - TrxInInnoDB trx_in_innodb(trx); - /* Note: We can't rely on *first_value since some MySQL engines, in particular the partition engine, don't initialize it to 0 when invoking this method. 
So we are not sure if it's guaranteed to @@ -17578,16 +17313,6 @@ innobase_xa_prepare( innobase_srv_conc_force_exit_innodb(trx); - TrxInInnoDB trx_in_innodb(trx); - - if (trx_in_innodb.is_aborted()) { - - innobase_rollback(hton, thd, prepare_trx); - - return(convert_error_code_to_mysql( - DB_FORCED_ABORT, 0, thd)); - } - if (!trx_is_registered_for_2pc(trx) && trx_is_started(trx)) { sql_print_error("Transaction not registered for MariaDB 2PC," @@ -17602,18 +17327,7 @@ innobase_xa_prepare( ut_ad(trx_is_registered_for_2pc(trx)); - dberr_t err = trx_prepare_for_mysql(trx); - - ut_ad(err == DB_SUCCESS || err == DB_FORCED_ABORT); - - if (err == DB_FORCED_ABORT) { - - innobase_rollback(hton, thd, prepare_trx); - - return(convert_error_code_to_mysql( - DB_FORCED_ABORT, 0, thd)); - } - + trx_prepare_for_mysql(trx); } else { /* We just mark the SQL statement ended and do not do a transaction prepare */ @@ -17690,14 +17404,10 @@ innobase_commit_by_xid( } if (trx_t* trx = trx_get_trx_by_xid(xid)) { - ut_ad(trx->in_innodb & TRX_FORCE_ROLLBACK_DISABLE); /* use cases are: disconnected xa, slave xa, recovery */ - { - TrxInInnoDB trx_in_innodb(trx); - innobase_commit_low(trx); - ut_ad(trx->mysql_thd == NULL); - trx_deregister_from_2pc(trx); - } + innobase_commit_low(trx); + ut_ad(trx->mysql_thd == NULL); + trx_deregister_from_2pc(trx); ut_ad(!trx->will_lock); /* trx cache requirement */ trx_free_for_background(trx); @@ -17726,14 +17436,9 @@ innobase_rollback_by_xid( } if (trx_t* trx = trx_get_trx_by_xid(xid)) { - int ret; - ut_ad(trx->in_innodb & TRX_FORCE_ROLLBACK_DISABLE); - { - TrxInInnoDB trx_in_innodb(trx); - ret = innobase_rollback_trx(trx); - trx_deregister_from_2pc(trx); - ut_ad(!trx->will_lock); - } + int ret = innobase_rollback_trx(trx); + trx_deregister_from_2pc(trx); + ut_ad(!trx->will_lock); trx_free_for_background(trx); return(ret); diff --git a/storage/innobase/handler/handler0alter.cc b/storage/innobase/handler/handler0alter.cc index c7368a43192..074401e37d9 100644 
--- a/storage/innobase/handler/handler0alter.cc +++ b/storage/innobase/handler/handler0alter.cc @@ -2804,8 +2804,7 @@ online_retry_drop_indexes_with_trx( dict_table_t* table, /*!< in/out: table */ trx_t* trx) /*!< in/out: transaction */ { - ut_ad(trx_state_eq(trx, TRX_STATE_NOT_STARTED) - || trx_state_eq(trx, TRX_STATE_FORCED_ROLLBACK)); + ut_ad(trx_state_eq(trx, TRX_STATE_NOT_STARTED)); ut_ad(trx->dict_operation_lock_mode == RW_X_LATCH); diff --git a/storage/innobase/include/ha_prototypes.h b/storage/innobase/include/ha_prototypes.h index 5b97b4b3a88..86defe9b166 100644 --- a/storage/innobase/include/ha_prototypes.h +++ b/storage/innobase/include/ha_prototypes.h @@ -1,7 +1,7 @@ /***************************************************************************** Copyright (c) 2006, 2016, Oracle and/or its affiliates. All Rights Reserved. -Copyright (c) 2017, MariaDB Corporation. All Rights Reserved. +Copyright (c) 2017, 2018, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -361,27 +361,6 @@ thd_trx_is_read_only( /*=================*/ THD* thd); /*!< in/out: thread handle */ -#if 0 -/** -Check if the transaction can be rolled back -@param[in] requestor Session requesting the lock -@param[in] holder Session that holds the lock -@return the session that will be rolled back, null don't care */ - -THD* -thd_trx_arbitrate(THD* requestor, THD* holder); - -/** -@param[in] thd Session to check -@return the priority */ - -int -thd_trx_priority(THD* thd); - -#else -static inline THD* thd_trx_arbitrate(THD*, THD*) { return NULL; } -static inline int thd_trx_priority(THD*) { return 0; } -#endif /******************************************************************//** Check if the transaction is an auto-commit transaction. TRUE also implies that it is a SELECT (read-only) transaction. 
diff --git a/storage/innobase/include/hash0hash.h b/storage/innobase/include/hash0hash.h index 3d099cd2f3a..cbb6da488b5 100644 --- a/storage/innobase/include/hash0hash.h +++ b/storage/innobase/include/hash0hash.h @@ -1,6 +1,7 @@ /***************************************************************************** Copyright (c) 1997, 2016, Oracle and/or its affiliates. All Rights Reserved. +Copyright (c) 2018, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -122,7 +123,6 @@ do {\ }\ } while (0) -#ifdef WITH_WSREP /*******************************************************************//** Inserts a struct to the head of hash table. */ @@ -148,7 +148,6 @@ do { \ cell3333->node = DATA; \ } \ } while (0) -#endif /*WITH_WSREP */ #ifdef UNIV_HASH_DEBUG # define HASH_ASSERT_VALID(DATA) ut_a((void*) (DATA) != (void*) -1) # define HASH_INVALIDATE(DATA, NAME) *(void**) (&DATA->NAME) = (void*) -1 diff --git a/storage/innobase/include/lock0lock.h b/storage/innobase/include/lock0lock.h index 03535b6aaed..ddbbdeebfee 100644 --- a/storage/innobase/include/lock0lock.h +++ b/storage/innobase/include/lock0lock.h @@ -954,6 +954,30 @@ struct lock_sys_t{ is running */ }; +/*********************************************************************//** +Creates a new record lock and inserts it to the lock queue. Does NOT check +for deadlocks or lock compatibility! 
+@return created lock */ +UNIV_INLINE +lock_t* +lock_rec_create( +/*============*/ +#ifdef WITH_WSREP + lock_t* c_lock, /*!< conflicting lock */ + que_thr_t* thr, /*!< thread owning trx */ +#endif + ulint type_mode,/*!< in: lock mode and wait + flag, type is ignored and + replaced by LOCK_REC */ + const buf_block_t* block, /*!< in: buffer block containing + the record */ + ulint heap_no,/*!< in: heap number of the record */ + dict_index_t* index, /*!< in: index of record */ + trx_t* trx, /*!< in,out: transaction */ + bool caller_owns_trx_mutex); + /*!< in: true if caller owns + trx mutex */ + /*************************************************************//** Removes a record lock request, waiting or granted, from the queue. */ void @@ -963,6 +987,61 @@ lock_rec_discard( record locks which are contained in this lock object are removed */ +/** Create a new record lock and inserts it to the lock queue, +without checking for deadlocks or conflicts. +@param[in] type_mode lock mode and wait flag; type will be replaced + with LOCK_REC +@param[in] space tablespace id +@param[in] page_no index page number +@param[in] page R-tree index page, or NULL +@param[in] heap_no record heap number in the index page +@param[in] index the index tree +@param[in,out] trx transaction +@param[in] holds_trx_mutex whether the caller holds trx->mutex +@return created lock */ +lock_t* +lock_rec_create_low( +#ifdef WITH_WSREP + lock_t* c_lock, /*!< conflicting lock */ + que_thr_t* thr, /*!< thread owning trx */ +#endif + ulint type_mode, + ulint space, + ulint page_no, + const page_t* page, + ulint heap_no, + dict_index_t* index, + trx_t* trx, + bool holds_trx_mutex); +/** Enqueue a waiting request for a lock which cannot be granted immediately. +Check for deadlocks. 
+@param[in] type_mode the requested lock mode (LOCK_S or LOCK_X) + possibly ORed with LOCK_GAP or + LOCK_REC_NOT_GAP, ORed with + LOCK_INSERT_INTENTION if this + waiting lock request is set + when performing an insert of + an index record +@param[in] block leaf page in the index +@param[in] heap_no record heap number in the block +@param[in] index index tree +@param[in,out] thr query thread +@param[in] prdt minimum bounding box (spatial index) +@retval DB_LOCK_WAIT if the waiting lock was enqueued +@retval DB_DEADLOCK if this transaction was chosen as the victim +@retval DB_SUCCESS_LOCKED_REC if the other transaction was chosen as a victim + (or it happened to commit) */ +dberr_t +lock_rec_enqueue_waiting( +#ifdef WITH_WSREP + lock_t* c_lock, /*!< conflicting lock */ +#endif + ulint type_mode, + const buf_block_t* block, + ulint heap_no, + dict_index_t* index, + que_thr_t* thr, + lock_prdt_t* prdt); /*************************************************************//** Moves the explicit locks on user records to another page if a record list start is moved to another page. */ diff --git a/storage/innobase/include/lock0lock.ic b/storage/innobase/include/lock0lock.ic index b73843e7a1f..475f2ccedf1 100644 --- a/storage/innobase/include/lock0lock.ic +++ b/storage/innobase/include/lock0lock.ic @@ -1,7 +1,7 @@ /***************************************************************************** Copyright (c) 1996, 2015, Oracle and/or its affiliates. All Rights Reserved. -Copyright (c) 2017, MariaDB Corporation. +Copyright (c) 2017, 2018, MariaDB Corporation. 
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -24,19 +24,9 @@ The transaction lock system Created 5/7/1996 Heikki Tuuri *******************************************************/ -#include "srv0srv.h" #include "dict0dict.h" -#include "row0row.h" -#include "trx0sys.h" -#include "trx0trx.h" #include "buf0buf.h" #include "page0page.h" -#include "page0cur.h" -#include "row0vers.h" -#include "que0que.h" -#include "btr0cur.h" -#include "read0read.h" -#include "log0recv.h" /*********************************************************************//** Calculates the fold value of a page file address: used in inserting or @@ -108,3 +98,37 @@ lock_hash_get( } } +/*********************************************************************//** +Creates a new record lock and inserts it to the lock queue. Does NOT check +for deadlocks or lock compatibility! +@return created lock */ +UNIV_INLINE +lock_t* +lock_rec_create( +/*============*/ +#ifdef WITH_WSREP + lock_t* c_lock, /*!< conflicting lock */ + que_thr_t* thr, /*!< thread owning trx */ +#endif + ulint type_mode,/*!< in: lock mode and wait + flag, type is ignored and + replaced by LOCK_REC */ + const buf_block_t* block, /*!< in: buffer block containing + the record */ + ulint heap_no,/*!< in: heap number of the record */ + dict_index_t* index, /*!< in: index of record */ + trx_t* trx, /*!< in,out: transaction */ + bool caller_owns_trx_mutex) + /*!< in: TRUE if caller owns + trx mutex */ +{ + btr_assert_not_corrupted(block, index); + return lock_rec_create_low( +#ifdef WITH_WSREP + c_lock, thr, +#endif + type_mode, + block->page.id.space(), block->page.id.page_no(), + block->frame, heap_no, + index, trx, caller_owns_trx_mutex); +} diff --git a/storage/innobase/include/lock0priv.h b/storage/innobase/include/lock0priv.h index 43f59151991..0f35e0ca6d0 100644 --- a/storage/innobase/include/lock0priv.h +++ 
b/storage/innobase/include/lock0priv.h @@ -562,407 +562,6 @@ enum lock_rec_req_status { LOCK_REC_SUCCESS_CREATED }; -/** -Record lock ID */ -struct RecID { - - RecID(ulint space_id, ulint page_no, ulint heap_no) - : - m_space_id(static_cast(space_id)), - m_page_no(static_cast(page_no)), - m_heap_no(static_cast(heap_no)), - m_fold(lock_rec_fold(m_space_id, m_page_no)) - { - ut_ad(space_id < UINT32_MAX); - ut_ad(page_no < UINT32_MAX); - ut_ad(heap_no < UINT32_MAX); - } - - RecID(const buf_block_t* block, ulint heap_no) - : - m_space_id(block->page.id.space()), - m_page_no(block->page.id.page_no()), - m_heap_no(static_cast(heap_no)), - m_fold(lock_rec_fold(m_space_id, m_page_no)) - { - ut_ad(heap_no < UINT32_MAX); - } - - /** - @return the "folded" value of {space, page_no} */ - ulint fold() const - { - return(m_fold); - } - - /** - Tablespace ID */ - uint32_t m_space_id; - - /** - Page number within the space ID */ - uint32_t m_page_no; - - /** - Heap number within the page */ - uint32_t m_heap_no; - - /** - Hashed key value */ - ulint m_fold; -}; - -/** -Create record locks */ -class RecLock { -public: - - /** - @param[in,out] thr Transaction query thread requesting the record - lock - @param[in] index Index on which record lock requested - @param[in] rec_id Record lock tuple {space, page_no, heap_no} - @param[in] mode The lock mode */ - RecLock(que_thr_t* thr, - dict_index_t* index, - const RecID& rec_id, - ulint mode) - : - m_thr(thr), - m_trx(thr_get_trx(thr)), - m_mode(mode), - m_index(index), - m_rec_id(rec_id) - { - ut_ad(is_predicate_lock(m_mode)); - - init(NULL); - } - - /** - @param[in,out] thr Transaction query thread requesting the record - lock - @param[in] index Index on which record lock requested - @param[in] block Buffer page containing record - @param[in] heap_no Heap number within the block - @param[in] mode The lock mode - @param[in] prdt The predicate for the rtree lock */ - RecLock(que_thr_t* thr, - dict_index_t* index, - const buf_block_t* - 
block, - ulint heap_no, - ulint mode, - lock_prdt_t* prdt = NULL) - : - m_thr(thr), - m_trx(thr_get_trx(thr)), - m_mode(mode), - m_index(index), - m_rec_id(block, heap_no) - { - btr_assert_not_corrupted(block, index); - - init(block->frame); - } - - /** - @param[in] index Index on which record lock requested - @param[in] rec_id Record lock tuple {space, page_no, heap_no} - @param[in] mode The lock mode */ - RecLock(dict_index_t* index, - const RecID& rec_id, - ulint mode) - : - m_thr(), - m_trx(), - m_mode(mode), - m_index(index), - m_rec_id(rec_id) - { - ut_ad(is_predicate_lock(m_mode)); - - init(NULL); - } - - /** - @param[in] index Index on which record lock requested - @param[in] block Buffer page containing record - @param[in] heap_no Heap number withing block - @param[in] mode The lock mode */ - RecLock(dict_index_t* index, - const buf_block_t* - block, - ulint heap_no, - ulint mode) - : - m_thr(), - m_trx(), - m_mode(mode), - m_index(index), - m_rec_id(block, heap_no) - { - btr_assert_not_corrupted(block, index); - - init(block->frame); - } - - /** - Enqueue a lock wait for a transaction. If it is a high priority - transaction (cannot rollback) then jump ahead in the record lock wait - queue and if the transaction at the head of the queue is itself waiting - roll it back. - @param[in, out] wait_for The lock that the the joining - transaction is waiting for - @param[in] prdt Predicate [optional] - @return DB_LOCK_WAIT, DB_DEADLOCK, or - DB_SUCCESS_LOCKED_REC; DB_SUCCESS_LOCKED_REC means that - there was a deadlock, but another transaction was chosen - as a victim, and we got the lock immediately: no need to - wait then */ - dberr_t add_to_waitq( - lock_t* wait_for, - const lock_prdt_t* - prdt = NULL); - - /** - Create a lock for a transaction and initialise it. 
- @param[in, out] trx Transaction requesting the new lock - @param[in] owns_trx_mutex true if caller owns the trx_t::mutex - @param[in] add_to_hash add the lock to hash table - @param[in] prdt Predicate lock (optional) - @param[in,out] c_lock Conflicting lock request or NULL - in Galera conflicting lock is selected - as deadlock victim if requester - is BF transaction. - @return new lock instance */ - lock_t* create( - trx_t* trx, - bool owns_trx_mutex, - bool add_to_hash, - const lock_prdt_t* - prdt = NULL -#ifdef WITH_WSREP - ,lock_t* c_lock = NULL -#endif /* WITH_WSREP */ - ); - - /** - Check of the lock is on m_rec_id. - @param[in] lock Lock to compare with - @return true if the record lock is on m_rec_id*/ - bool is_on_row(const lock_t* lock) const; - - /** - Create the lock instance - @param[in, out] trx The transaction requesting the lock - @param[in, out] index Index on which record lock is required - @param[in] mode The lock mode desired - @param[in] rec_id The record id - @param[in] size Size of the lock + bitmap requested - @return a record lock instance */ - static lock_t* lock_alloc( - trx_t* trx, - dict_index_t* index, - ulint mode, - const RecID& rec_id, - ulint size); - -private: - /* - @return the record lock size in bytes */ - size_t lock_size() const - { - return(m_size); - } - - /** - Do some checks and prepare for creating a new record lock */ - void prepare() const; - - /** - Collect the transactions that will need to be rolled back asynchronously - @param[in, out] trx Transaction to be rolled back */ - void mark_trx_for_rollback(trx_t* trx); - - /** - Jump the queue for the record over all low priority transactions and - add the lock. If all current granted locks are compatible, grant the - lock. Otherwise, mark all granted transaction for asynchronous - rollback and add to hit list. 
- @param[in, out] lock Lock being requested - @param[in] conflict_lock First conflicting lock from the head - @return true if the lock is granted */ - bool jump_queue(lock_t* lock, const lock_t* conflict_lock); - - /** Find position in lock queue and add the high priority transaction - lock. Intention and GAP only locks can be granted even if there are - waiting locks in front of the queue. To add the High priority - transaction in a safe position we keep the following rule. - - 1. If the lock can be granted, add it before the first waiting lock - in the queue so that all currently waiting locks need to do conflict - check before getting granted. - - 2. If the lock has to wait, add it after the last granted lock or the - last waiting high priority transaction in the queue whichever is later. - This ensures that the transaction is granted only after doing conflict - check with all granted transactions. - @param[in] lock Lock being requested - @param[in] conflict_lock First conflicting lock from the head - @param[out] high_priority high priority transaction ahead in queue - @return true if the lock can be granted */ - bool - lock_add_priority( - lock_t* lock, - const lock_t* conflict_lock, - bool* high_priority); - - /** Iterate over the granted locks and prepare the hit list for ASYNC Rollback. - If the transaction is waiting for some other lock then wake up with deadlock error. - Currently we don't mark following transactions for ASYNC Rollback. - 1. Read only transactions - 2. Background transactions - 3. 
Other High priority transactions - @param[in] lock Lock being requested - @param[in] conflict_lock First conflicting lock from the head */ - void make_trx_hit_list(lock_t* lock, const lock_t* conflict_lock); - - /** - Setup the requesting transaction state for lock grant - @param[in,out] lock Lock for which to change state */ - void set_wait_state(lock_t* lock); - - /** - Add the lock to the record lock hash and the transaction's lock list - @param[in,out] lock Newly created record lock to add to the - rec hash and the transaction lock list - @param[in] add_to_hash If the lock should be added to the hash table */ - void lock_add(lock_t* lock, bool add_to_hash); - - /** - Check and resolve any deadlocks - @param[in, out] lock The lock being acquired - @return DB_LOCK_WAIT, DB_DEADLOCK, or - DB_SUCCESS_LOCKED_REC; DB_SUCCESS_LOCKED_REC means that - there was a deadlock, but another transaction was chosen - as a victim, and we got the lock immediately: no need to - wait then */ - dberr_t deadlock_check(lock_t* lock); - - /** - Check the outcome of the deadlock check - @param[in,out] victim_trx Transaction selected for rollback - @param[in,out] lock Lock being requested - @return DB_LOCK_WAIT, DB_DEADLOCK or DB_SUCCESS_LOCKED_REC */ - dberr_t check_deadlock_result(const trx_t* victim_trx, lock_t* lock); - - /** - Setup the context from the requirements */ - void init(const page_t* page) - { - ut_ad(lock_mutex_own()); - ut_ad(!srv_read_only_mode); - ut_ad(dict_index_is_clust(m_index) - || !dict_index_is_online_ddl(m_index)); - ut_ad(m_thr == NULL || m_trx == thr_get_trx(m_thr)); - - m_size = is_predicate_lock(m_mode) - ? 
lock_size(m_mode) : lock_size(page); - - /** If rec is the supremum record, then we reset the - gap and LOCK_REC_NOT_GAP bits, as all locks on the - supremum are automatically of the gap type */ - - if (m_rec_id.m_heap_no == PAGE_HEAP_NO_SUPREMUM) { - ut_ad(!(m_mode & LOCK_REC_NOT_GAP)); - - m_mode &= ~(LOCK_GAP | LOCK_REC_NOT_GAP); - } - } - - /** - Calculate the record lock physical size required for a predicate lock. - @param[in] mode For predicate locks the lock mode - @return the size of the lock data structure required in bytes */ - static size_t lock_size(ulint mode) - { - ut_ad(is_predicate_lock(mode)); - - /* The lock is always on PAGE_HEAP_NO_INFIMUM(0), - so we only need 1 bit (which is rounded up to 1 - byte) for lock bit setting */ - - size_t n_bytes; - - if (mode & LOCK_PREDICATE) { - const ulint align = UNIV_WORD_SIZE - 1; - - /* We will attach the predicate structure - after lock. Make sure the memory is - aligned on 8 bytes, the mem_heap_alloc - will align it with MEM_SPACE_NEEDED - anyway. */ - - n_bytes = (1 + sizeof(lock_prdt_t) + align) & ~align; - - /* This should hold now */ - - ut_ad(n_bytes == sizeof(lock_prdt_t) + UNIV_WORD_SIZE); - - } else { - n_bytes = 1; - } - - return(n_bytes); - } - - /** - Calculate the record lock physical size required, non-predicate lock. 
- @param[in] page For non-predicate locks the buffer page - @return the size of the lock data structure required in bytes */ - static size_t lock_size(const page_t* page) - { - ulint n_recs = page_dir_get_n_heap(page); - - /* Make lock bitmap bigger by a safety margin */ - - return(1 + ((n_recs + LOCK_PAGE_BITMAP_MARGIN) / 8)); - } - - /** - @return true if the requested lock mode is for a predicate - or page lock */ - static bool is_predicate_lock(ulint mode) - { - return(mode & (LOCK_PREDICATE | LOCK_PRDT_PAGE)); - } - -private: - /** The query thread of the transaction */ - que_thr_t* m_thr; - - /** - Transaction requesting the record lock */ - trx_t* m_trx; - - /** - Lock mode requested */ - ulint m_mode; - - /** - Size of the record lock in bytes */ - size_t m_size; - - /** - Index on which the record lock is required */ - dict_index_t* m_index; - - /** - The record lock tuple {space, page_no, heap_no} */ - RecID m_rec_id; -}; - #ifdef UNIV_DEBUG /** The count of the types of locks. */ static const ulint lock_types = UT_ARR_SIZE(lock_compatibility_matrix); diff --git a/storage/innobase/include/lock0priv.ic b/storage/innobase/include/lock0priv.ic index f6e5f7acb8f..150a80b7be4 100644 --- a/storage/innobase/include/lock0priv.ic +++ b/storage/innobase/include/lock0priv.ic @@ -32,6 +32,8 @@ methods but they are used only in that file. */ #error Do not include lock0priv.ic outside of the lock/ module #endif +#include "row0row.h" + /*********************************************************************//** Gets the type of a lock. 
@return LOCK_TABLE or LOCK_REC */ diff --git a/storage/innobase/include/trx0trx.h b/storage/innobase/include/trx0trx.h index 50e270fb3c5..6ed69e6c5c4 100644 --- a/storage/innobase/include/trx0trx.h +++ b/storage/innobase/include/trx0trx.h @@ -28,13 +28,11 @@ Created 3/26/1996 Heikki Tuuri #define trx0trx_h #include -#include #include "ha_prototypes.h" #include "dict0types.h" #include "trx0types.h" -#include "ut0new.h" #include "lock0types.h" #include "log0log.h" @@ -43,7 +41,6 @@ Created 3/26/1996 Heikki Tuuri #include "trx0xa.h" #include "ut0vec.h" #include "fts0fts.h" -#include "srv0srv.h" // Forward declaration struct mtr_t; @@ -255,14 +252,9 @@ dberr_t trx_commit_for_mysql( /*=================*/ trx_t* trx); /*!< in/out: transaction */ - -/** -Does the transaction prepare for MySQL. -@param[in, out] trx Transaction instance to prepare */ - -dberr_t -trx_prepare_for_mysql(trx_t* trx); - +/** XA PREPARE a transaction. +@param[in,out] trx transaction to prepare */ +void trx_prepare_for_mysql(trx_t* trx); /**********************************************************************//** This function is used to find number of prepared transactions and their transaction objects for a recovery. @@ -551,29 +543,6 @@ trx_release_reference( Check if the transaction is being referenced. */ #define trx_is_referenced(t) ((t)->n_ref > 0) -/** -@param[in] requestor Transaction requesting the lock -@param[in] holder Transaction holding the lock -@return the transaction that will be rolled back, null don't care */ - -UNIV_INLINE -const trx_t* -trx_arbitrate(const trx_t* requestor, const trx_t* holder); - -/** -@param[in] trx Transaction to check -@return true if the transaction is a high priority transaction.*/ -UNIV_INLINE -bool -trx_is_high_priority(const trx_t* trx); - -/** -Kill all transactions that are blocking this transaction from acquiring locks. 
-@param[in,out] trx High priority transaction */ - -void -trx_kill_blocking(trx_t* trx); - /** Transactions that aren't started by the MySQL server don't set the trx_t::mysql_thd field. For such transactions we set the lock @@ -622,7 +591,6 @@ Check transaction state */ case TRX_STATE_COMMITTED_IN_MEMORY: \ continue; \ case TRX_STATE_NOT_STARTED: \ - case TRX_STATE_FORCED_ROLLBACK: \ break; \ } \ ut_error; \ @@ -631,8 +599,7 @@ Check transaction state */ /** Check if transaction is free so that it can be re-initialized. @param t transaction handle */ #define assert_trx_is_free(t) do { \ - ut_ad(trx_state_eq((t), TRX_STATE_NOT_STARTED) \ - || trx_state_eq((t), TRX_STATE_FORCED_ROLLBACK)); \ + ut_ad(trx_state_eq((t), TRX_STATE_NOT_STARTED)); \ ut_ad(!trx->has_logged()); \ ut_ad(!MVCC::is_view_active((t)->read_view)); \ ut_ad((t)->lock.wait_thr == NULL); \ @@ -662,7 +629,6 @@ The tranasction must be in the mysql_trx_list. */ ut_ad(!(t)->in_rw_trx_list); \ ut_ad((t)->in_mysql_trx_list); \ ut_ad(t_state == TRX_STATE_NOT_STARTED \ - || t_state == TRX_STATE_FORCED_ROLLBACK \ || t_state == TRX_STATE_ACTIVE); \ } else { \ check_trx_state(t); \ @@ -769,10 +735,6 @@ struct trx_lock_t { Protected by both the lock sys mutex and the trx_t::mutex. */ ulint n_rec_locks; /*!< number of rec locks in this trx */ - - /** The transaction called ha_innobase::start_stmt() to - lock a table. Most likely a temporary table. 
*/ - bool start_stmt; }; /** Type used to store the list of tables that are modified by a given @@ -858,47 +820,12 @@ struct trx_rsegs_t { trx_temp_undo_t m_noredo; }; -struct TrxVersion { - TrxVersion(trx_t* trx); - - /** - @return true if the trx_t instance is the same */ - bool operator==(const TrxVersion& rhs) const - { - return(rhs.m_trx == m_trx); - } - - trx_t* m_trx; - ulint m_version; -}; - -typedef std::list > hit_list_t; - struct trx_t { TrxMutex mutex; /*!< Mutex protecting the fields state and lock (except some fields of lock, which are protected by lock_sys->mutex) */ - /* Note: in_depth was split from in_innodb for fixing a RO - performance issue. Acquiring the trx_t::mutex for each row - costs ~3% in performance. It is not required for correctness. - Therefore we increment/decrement in_depth without holding any - mutex. The assumption is that the Server will only ever call - the handler from one thread. This is not true for kill_connection. - Therefore in innobase_kill_connection. We don't increment this - counter via TrxInInnoDB. */ - - ib_uint32_t in_depth; /*!< Track nested TrxInInnoDB - count */ - - ib_uint32_t in_innodb; /*!< if the thread is executing - in the InnoDB context count > 0. */ - - bool abort; /*!< if this flag is set then - this transaction must abort when - it can */ - trx_id_t id; /*!< transaction id */ trx_id_t no; /*!< transaction serialization number: @@ -915,7 +842,6 @@ struct trx_t { Possible states: TRX_STATE_NOT_STARTED - TRX_STATE_FORCED_ROLLBACK TRX_STATE_ACTIVE TRX_STATE_PREPARED TRX_STATE_COMMITTED_IN_MEMORY (alias below COMMITTED) @@ -995,22 +921,6 @@ struct trx_t { protected by trx_sys->mutex when trx->in_rw_trx_list holds */ - hit_list_t hit_list; /*!< List of transactions to kill, - when a high priority transaction - is blocked on a lock wait. */ - - os_thread_id_t killed_by; /*!< The thread ID that wants to - kill this transaction asynchronously. 
- This is required because we recursively - enter the handlerton methods and need - to distinguish between the kill thread - and the transaction thread. - - Note: We need to be careful w.r.t the - Thread Pool. The thread doing the kill - should not leave InnoDB between the - mark and the actual async kill because - the running thread can change. */ /* These fields are not protected by any mutex. */ const char* op_info; /*!< English text describing the @@ -1223,12 +1133,6 @@ struct trx_t { signify that it is no longer "active". */ - /** Version of this instance. It is incremented each time the - instance is re-used in trx_start_low(). It is used to track - whether a transaction has been restarted since it was tagged - for asynchronous rollback. */ - ulint version; - XID* xid; /*!< X/Open XA transaction identification to identify a transaction branch */ @@ -1292,13 +1196,9 @@ private: Check if transaction is started. @param[in] trx Transaction whose state we need to check @reutrn true if transaction is in state started */ -inline -bool -trx_is_started( - const trx_t* trx) +inline bool trx_is_started(const trx_t* trx) { - return(trx->state != TRX_STATE_NOT_STARTED - && trx->state != TRX_STATE_FORCED_ROLLBACK); + return trx->state != TRX_STATE_NOT_STARTED; } /* Transaction isolation levels (trx->isolation_level) */ @@ -1371,224 +1271,6 @@ struct commit_node_t{ mutex_exit(&t->mutex); \ } while (0) -/** Track if a transaction is executing inside InnoDB code. It acts -like a gate between the Server and InnoDB. 
*/ -class TrxInInnoDB { -public: - /** - @param[in,out] trx Transaction entering InnoDB via the handler - @param[in] disable true if called from COMMIT/ROLLBACK method */ - TrxInInnoDB(trx_t* trx, bool disable = false) - : - m_trx(trx) - { - enter(trx, disable); - } - - /** - Destructor */ - ~TrxInInnoDB() - { - exit(m_trx); - } - - /** - @return true if the transaction has been marked for asynchronous - rollback */ - bool is_aborted() const - { - return(is_aborted(m_trx)); - } - - /** - @return true if the transaction can't be rolled back asynchronously */ - bool is_rollback_disabled() const - { - return((m_trx->in_innodb & TRX_FORCE_ROLLBACK_DISABLE) > 0); - } - - /** - @return true if the transaction has been marked for asynchronous - rollback */ - static bool is_aborted(const trx_t* trx) - { - if (trx->state == TRX_STATE_NOT_STARTED) { - return(false); - } - - ut_ad(srv_read_only_mode || trx->in_depth > 0); - ut_ad(srv_read_only_mode || trx->in_innodb > 0); - - return(trx->abort - || trx->state == TRX_STATE_FORCED_ROLLBACK); - } - - /** - Start statement requested for transaction. - @param[in, out] trx Transaction at the start of a SQL statement */ - static void begin_stmt(trx_t* trx) - { - enter(trx, false); - } - - /** - Note an end statement for transaction - @param[in, out] trx Transaction at end of a SQL statement */ - static void end_stmt(trx_t* trx) - { - exit(trx); - } - - /** - @return true if the rollback is being initiated by the thread that - marked the transaction for asynchronous rollback */ - static bool is_async_rollback(const trx_t* trx) - { - return(trx->killed_by == os_thread_get_curr_id()); - } - -private: - /** - Note that we have crossed into InnoDB code. - @param[in] disable true if called from COMMIT/ROLLBACK method */ - static void enter(trx_t* trx, bool disable) - { - if (srv_read_only_mode) { - - return; - } - - ut_ad(!is_async_rollback(trx)); - - /* If it hasn't already been marked for async rollback. 
- and it will be committed/rolled back. */ - if (disable) { - - trx_mutex_enter(trx); - if (!is_forced_rollback(trx) - && is_started(trx) - && !trx_is_autocommit_non_locking(trx)) { - - ut_ad(trx->killed_by == 0); - - /* This transaction has crossed the point of - no return and cannot be rolled back - asynchronously now. It must commit or rollback - synhronously. */ - - trx->in_innodb |= TRX_FORCE_ROLLBACK_DISABLE; - } - trx_mutex_exit(trx); - } - - /* Avoid excessive mutex acquire/release */ - ++trx->in_depth; - - /* If trx->in_depth is greater than 1 then - transaction is already in InnoDB. */ - if (trx->in_depth > 1) { - - return; - } - - trx_mutex_enter(trx); - - wait(trx); - - ut_ad((trx->in_innodb & TRX_FORCE_ROLLBACK_MASK) == 0); - - ++trx->in_innodb; - - trx_mutex_exit(trx); - } - - /** - Note that we are exiting InnoDB code */ - static void exit(trx_t* trx) - { - if (srv_read_only_mode) { - - return; - } - - /* Avoid excessive mutex acquire/release */ - - ut_ad(trx->in_depth > 0); - - --trx->in_depth; - - if (trx->in_depth > 0) { - - return; - } - - trx_mutex_enter(trx); - - ut_ad((trx->in_innodb & TRX_FORCE_ROLLBACK_MASK) > 0); - - --trx->in_innodb; - - trx_mutex_exit(trx); - } - - /* - @return true if it is a forced rollback, asynchronously */ - static bool is_forced_rollback(const trx_t* trx) - { - ut_ad(trx_mutex_own(trx)); - - return((trx->in_innodb & TRX_FORCE_ROLLBACK)) > 0; - } - - /** - Wait for the asynchronous rollback to complete, if it is in progress */ - static void wait(trx_t* trx) - { - ut_ad(trx_mutex_own(trx)); - - ulint loop_count = 0; - /* start with optimistic sleep time - 20 micro seconds. */ - ulint sleep_time = 20; - - while (is_forced_rollback(trx)) { - - /* Wait for the async rollback to complete */ - - trx_mutex_exit(trx); - - loop_count++; - /* If the wait is long, don't hog the cpu. 
*/ - if (loop_count < 100) { - /* 20 microseconds */ - sleep_time = 20; - } else if (loop_count < 1000) { - /* 1 millisecond */ - sleep_time = 1000; - } else { - /* 100 milliseconds */ - sleep_time = 100000; - } - - os_thread_sleep(sleep_time); - - trx_mutex_enter(trx); - } - } - - /** - @return true if transaction is started */ - static bool is_started(const trx_t* trx) - { - ut_ad(trx_mutex_own(trx)); - - return(trx_is_started(trx)); - } -private: - /** - Transaction instance crossing the handler boundary from the Server. */ - trx_t* m_trx; -}; - #include "trx0trx.ic" #endif diff --git a/storage/innobase/include/trx0trx.ic b/storage/innobase/include/trx0trx.ic index 6fa00c5333f..7721e28bfb6 100644 --- a/storage/innobase/include/trx0trx.ic +++ b/storage/innobase/include/trx0trx.ic @@ -1,7 +1,7 @@ /***************************************************************************** Copyright (c) 1996, 2015, Oracle and/or its affiliates. All Rights Reserved. -Copyright (c) 2016, MariaDB Corporation. All Rights Reserved. +Copyright (c) 2016, 2018, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -64,11 +64,8 @@ trx_state_eq( return(state == trx->state); case TRX_STATE_NOT_STARTED: - case TRX_STATE_FORCED_ROLLBACK: - /* These states are not allowed for running transactions. */ ut_a(state == TRX_STATE_NOT_STARTED - || state == TRX_STATE_FORCED_ROLLBACK || (relaxed && thd_get_error_number(trx->mysql_thd))); @@ -280,67 +277,3 @@ trx_get_read_view( { return(!MVCC::is_view_active(trx->read_view) ? 
NULL : trx->read_view); } - -/** -@param[in] trx Transaction to check -@return true if the transaction is a high priority transaction.*/ -UNIV_INLINE -bool -trx_is_high_priority(const trx_t* trx) -{ - if (trx->mysql_thd == NULL) { - return(false); - } - - return(thd_trx_priority(trx->mysql_thd) > 0); -} - -/** -@param[in] requestor Transaction requesting the lock -@param[in] holder Transaction holding the lock -@return the transaction that will be rolled back, null don't care */ -UNIV_INLINE -const trx_t* -trx_arbitrate(const trx_t* requestor, const trx_t* holder) -{ - ut_ad(!trx_is_autocommit_non_locking(holder)); - ut_ad(!trx_is_autocommit_non_locking(requestor)); - - /* Note: Background stats collection transactions also acquire - locks on user tables. They don't have an associated MySQL session - instance. */ - - if (requestor->mysql_thd == NULL) { - - ut_ad(!trx_is_high_priority(requestor)); - - if (trx_is_high_priority(holder)) { - return(requestor); - } else { - return(NULL); - } - - } else if (holder->mysql_thd == NULL) { - - ut_ad(!trx_is_high_priority(holder)); - - if (trx_is_high_priority(requestor)) { - return(holder); - } - - return(NULL); - } - - const THD* victim = thd_trx_arbitrate( - requestor->mysql_thd, holder->mysql_thd); - - ut_ad(victim == NULL - || victim == requestor->mysql_thd - || victim == holder->mysql_thd); - - if (victim != NULL) { - return(victim == requestor->mysql_thd ? requestor : holder); - } - - return(NULL); -} diff --git a/storage/innobase/include/trx0types.h b/storage/innobase/include/trx0types.h index 8092246c7fa..b42871bef31 100644 --- a/storage/innobase/include/trx0types.h +++ b/storage/innobase/include/trx0types.h @@ -1,7 +1,7 @@ /***************************************************************************** Copyright (c) 1996, 2014, Oracle and/or its affiliates. All Rights Reserved. -Copyright (c) 2017, MariaDB Corporation. +Copyright (c) 2017, 2018, MariaDB Corporation. 
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -53,21 +53,6 @@ static const ulint TRX_SYS_SPACE = 0; /** Random value to check for corruption of trx_t */ static const ulint TRX_MAGIC_N = 91118598; -/** If this flag is set then the transaction cannot be rolled back -asynchronously. */ -static const ib_uint32_t TRX_FORCE_ROLLBACK_DISABLE = 1 << 29; - -/** Was the transaction rolled back asynchronously or by the -owning thread. This flag is relevant only if TRX_FORCE_ROLLBACK -is set. */ -static const ib_uint32_t TRX_FORCE_ROLLBACK_ASYNC = 1 << 30; - -/** Mark the transaction for forced rollback */ -static const ib_uint32_t TRX_FORCE_ROLLBACK = 1U << 31; - -/** For masking out the above four flags */ -static const ib_uint32_t TRX_FORCE_ROLLBACK_MASK = 0x1FFFFFFF; - /** Transaction execution states when trx->state == TRX_STATE_ACTIVE */ enum trx_que_t { TRX_QUE_RUNNING, /*!< transaction is running */ @@ -79,13 +64,8 @@ enum trx_que_t { /** Transaction states (trx_t::state) */ enum trx_state_t { - TRX_STATE_NOT_STARTED, - /** Same as not started but with additional semantics that it - was rolled back asynchronously the last time it was active. 
*/ - TRX_STATE_FORCED_ROLLBACK, - TRX_STATE_ACTIVE, /** Support for 2PC/XA */ diff --git a/storage/innobase/include/trx0undo.h b/storage/innobase/include/trx0undo.h index 20e7aaa4913..f744364d966 100644 --- a/storage/innobase/include/trx0undo.h +++ b/storage/innobase/include/trx0undo.h @@ -28,12 +28,7 @@ Created 3/26/1996 Heikki Tuuri #define trx0undo_h #ifndef UNIV_INNOCHECKSUM -#include "univ.i" -#include "trx0types.h" -#include "mtr0mtr.h" #include "trx0sys.h" -#include "page0types.h" -#include "trx0xa.h" /** The LSB of the "is insert" flag in DB_ROLL_PTR */ #define ROLL_PTR_INSERT_FLAG_POS 55 diff --git a/storage/innobase/lock/lock0lock.cc b/storage/innobase/lock/lock0lock.cc index db079abf10f..213b370ee36 100644 --- a/storage/innobase/lock/lock0lock.cc +++ b/storage/innobase/lock/lock0lock.cc @@ -44,6 +44,7 @@ Created 5/7/1996 Heikki Tuuri #include "ut0new.h" #include "row0sel.h" #include "row0mysql.h" +#include "row0vers.h" #include "pars0pars.h" #include @@ -1506,128 +1507,210 @@ wsrep_print_wait_locks( } #endif /* WITH_WSREP */ -/** -Check of the lock is on m_rec_id. 
-@param[in] lock Lock to compare with -@return true if the record lock is on m_rec_id*/ -/** -@param[in] rhs Lock to compare with -@return true if the record lock equals rhs */ -bool -RecLock::is_on_row(const lock_t* lock) const -{ - ut_ad(lock_get_type_low(lock) == LOCK_REC); - - const lock_rec_t& other = lock->un_member.rec_lock; - - return(other.space == m_rec_id.m_space_id - && other.page_no == m_rec_id.m_page_no - && lock_rec_get_nth_bit(lock, m_rec_id.m_heap_no)); -} - -/** -Do some checks and prepare for creating a new record lock */ -void -RecLock::prepare() const -{ - ut_ad(lock_mutex_own()); - ut_ad(m_trx == thr_get_trx(m_thr)); - - /* Test if there already is some other reason to suspend thread: - we do not enqueue a lock request if the query thread should be - stopped anyway */ - - if (que_thr_stop(m_thr)) { - ut_error; - } - - switch (trx_get_dict_operation(m_trx)) { - case TRX_DICT_OP_NONE: - break; - case TRX_DICT_OP_TABLE: - case TRX_DICT_OP_INDEX: - ib::error() << "A record lock wait happens in a dictionary" - " operation. index " << m_index->name - << " of table " << m_index->table->name - << ". " << BUG_REPORT_MSG; - ut_ad(0); - } - - ut_ad(m_index->table->n_ref_count > 0 - || !m_index->table->can_be_evicted); -} - -/** -Create the lock instance -@param[in, out] trx The transaction requesting the lock -@param[in, out] index Index on which record lock is required -@param[in] mode The lock mode desired -@param[in] rec_id The record id -@param[in] size Size of the lock + bitmap requested -@return a record lock instance */ +/** Create a new record lock and inserts it to the lock queue, +without checking for deadlocks or conflicts. 
+@param[in] type_mode lock mode and wait flag; type will be replaced + with LOCK_REC +@param[in] space tablespace id +@param[in] page_no index page number +@param[in] page R-tree index page, or NULL +@param[in] heap_no record heap number in the index page +@param[in] index the index tree +@param[in,out] trx transaction +@param[in] holds_trx_mutex whether the caller holds trx->mutex +@return created lock */ lock_t* -RecLock::lock_alloc( - trx_t* trx, +lock_rec_create_low( +#ifdef WITH_WSREP + lock_t* c_lock, /*!< conflicting lock */ + que_thr_t* thr, /*!< thread owning trx */ +#endif + ulint type_mode, + ulint space, + ulint page_no, + const page_t* page, + ulint heap_no, dict_index_t* index, - ulint mode, - const RecID& rec_id, - ulint size) + trx_t* trx, + bool holds_trx_mutex) { + lock_t* lock; + ulint n_bits; + ulint n_bytes; + ut_ad(lock_mutex_own()); + ut_ad(holds_trx_mutex == trx_mutex_own(trx)); + ut_ad(dict_index_is_clust(index) || !dict_index_is_online_ddl(index)); - lock_t* lock; +#ifdef UNIV_DEBUG + /* Non-locking autocommit read-only transactions should not set + any locks. See comment in trx_set_rw_mode explaining why this + conditional check is required in debug code. 
*/ + if (holds_trx_mutex) { + check_trx_state(trx); + } +#endif /* UNIV_DEBUG */ - if (trx->lock.rec_cached >= trx->lock.rec_pool.size() - || sizeof(*lock) + size > REC_LOCK_SIZE) { + /* If rec is the supremum record, then we reset the gap and + LOCK_REC_NOT_GAP bits, as all locks on the supremum are + automatically of the gap type */ - ulint n_bytes = size + sizeof(*lock); - mem_heap_t* heap = trx->lock.lock_heap; + if (UNIV_UNLIKELY(heap_no == PAGE_HEAP_NO_SUPREMUM)) { + ut_ad(!(type_mode & LOCK_REC_NOT_GAP)); + type_mode = type_mode & ~(LOCK_GAP | LOCK_REC_NOT_GAP); + } - lock = reinterpret_cast(mem_heap_alloc(heap, n_bytes)); + if (UNIV_LIKELY(!(type_mode & (LOCK_PREDICATE | LOCK_PRDT_PAGE)))) { + /* Make lock bitmap bigger by a safety margin */ + n_bits = page_dir_get_n_heap(page) + LOCK_PAGE_BITMAP_MARGIN; + n_bytes = 1 + n_bits / 8; } else { + ut_ad(heap_no == PRDT_HEAPNO); - lock = trx->lock.rec_pool[trx->lock.rec_cached]; - ++trx->lock.rec_cached; + /* The lock is always on PAGE_HEAP_NO_INFIMUM (0), so + we only need 1 bit (which round up to 1 byte) for + lock bit setting */ + n_bytes = 1; + + if (type_mode & LOCK_PREDICATE) { + ulint tmp = UNIV_WORD_SIZE - 1; + + /* We will attach predicate structure after lock. + Make sure the memory is aligned on 8 bytes, + the mem_heap_alloc will align it with + MEM_SPACE_NEEDED anyway. 
*/ + n_bytes = (n_bytes + sizeof(lock_prdt_t) + tmp) & ~tmp; + ut_ad(n_bytes == sizeof(lock_prdt_t) + UNIV_WORD_SIZE); + } + } + + if (trx->lock.rec_cached >= trx->lock.rec_pool.size() + || sizeof *lock + n_bytes > REC_LOCK_SIZE) { + lock = static_cast( + mem_heap_alloc(trx->lock.lock_heap, + sizeof *lock + n_bytes)); + } else { + lock = trx->lock.rec_pool[trx->lock.rec_cached++]; } lock->trx = trx; - + lock->type_mode = (type_mode & ~LOCK_TYPE_MASK) | LOCK_REC; lock->index = index; + lock->un_member.rec_lock.space = uint32_t(space); + lock->un_member.rec_lock.page_no = uint32_t(page_no); - /* Setup the lock attributes */ - - lock->type_mode = uint32_t(LOCK_REC | (mode & ~LOCK_TYPE_MASK)); - - lock_rec_t& rec_lock = lock->un_member.rec_lock; - - /* Predicate lock always on INFIMUM (0) */ - - if (is_predicate_lock(mode)) { - - rec_lock.n_bits = 8; - - memset(&lock[1], 0x0, 1); - + if (UNIV_LIKELY(!(type_mode & (LOCK_PREDICATE | LOCK_PRDT_PAGE)))) { + lock->un_member.rec_lock.n_bits = uint32_t(n_bytes * 8); } else { - ut_ad(8 * size < UINT32_MAX); - rec_lock.n_bits = static_cast(8 * size); + /* Predicate lock always on INFIMUM (0) */ + lock->un_member.rec_lock.n_bits = 8; + } + lock_rec_bitmap_reset(lock); + lock_rec_set_nth_bit(lock, heap_no); + index->table->n_rec_locks++; + ut_ad(index->table->n_ref_count > 0 || !index->table->can_be_evicted); - memset(&lock[1], 0x0, size); +#ifdef WITH_WSREP + if (c_lock && wsrep_on_trx(trx) + && wsrep_thd_is_BF(trx->mysql_thd, FALSE)) { + lock_t *hash = (lock_t *)c_lock->hash; + lock_t *prev = NULL; + + while (hash && wsrep_thd_is_BF(hash->trx->mysql_thd, TRUE) + && wsrep_trx_order_before(hash->trx->mysql_thd, + trx->mysql_thd)) { + prev = hash; + hash = (lock_t *)hash->hash; + } + lock->hash = hash; + if (prev) { + prev->hash = lock; + } else { + c_lock->hash = lock; + } + /* + * delayed conflict resolution '...kill_one_trx' was not called, + * if victim was waiting for some other lock + */ + trx_mutex_enter(c_lock->trx); + if 
(c_lock->trx->lock.que_state == TRX_QUE_LOCK_WAIT) { + + c_lock->trx->lock.was_chosen_as_deadlock_victim = TRUE; + + if (wsrep_debug) { + wsrep_print_wait_locks(c_lock); + } + + trx->lock.que_state = TRX_QUE_LOCK_WAIT; + lock_set_lock_and_trx_wait(lock, trx); + UT_LIST_ADD_LAST(trx->lock.trx_locks, lock); + + trx->lock.wait_thr = thr; + thr->state = QUE_THR_LOCK_WAIT; + + /* have to release trx mutex for the duration of + victim lock release. This will eventually call + lock_grant, which wants to grant trx mutex again + */ + if (holds_trx_mutex) { + trx_mutex_exit(trx); + } + lock_cancel_waiting_and_release( + c_lock->trx->lock.wait_lock); + + if (holds_trx_mutex) { + trx_mutex_enter(trx); + } + + /* trx might not wait for c_lock, but some other lock + does not matter if wait_lock was released above + */ + if (c_lock->trx->lock.wait_lock == c_lock) { + lock_reset_lock_and_trx_wait(lock); + } + + trx_mutex_exit(c_lock->trx); + + if (wsrep_debug) { + ib::info() << "WSREP: c_lock canceled " + << ib::hex(c_lock->trx->id) + << " SQL: " + << wsrep_thd_query( + c_lock->trx->mysql_thd); + } + + /* have to bail out here to avoid lock_set_lock... 
*/ + return(lock); + } + trx_mutex_exit(c_lock->trx); + } else +#endif /* WITH_WSREP */ + if (!(type_mode & (LOCK_WAIT | LOCK_PREDICATE | LOCK_PRDT_PAGE)) + && innodb_lock_schedule_algorithm + == INNODB_LOCK_SCHEDULE_ALGORITHM_VATS + && !thd_is_replication_slave_thread(trx->mysql_thd)) { + HASH_PREPEND(lock_t, hash, lock_sys->rec_hash, + lock_rec_fold(space, page_no), lock); + } else { + HASH_INSERT(lock_t, hash, lock_hash_get(type_mode), + lock_rec_fold(space, page_no), lock); } - rec_lock.space = rec_id.m_space_id; - - rec_lock.page_no = rec_id.m_page_no; - - /* Set the bit corresponding to rec */ - - lock_rec_set_nth_bit(lock, rec_id.m_heap_no); - + if (!holds_trx_mutex) { + trx_mutex_enter(trx); + } + ut_ad(trx_mutex_own(trx)); + if (type_mode & LOCK_WAIT) { + lock_set_lock_and_trx_wait(lock, trx); + } + UT_LIST_ADD_LAST(trx->lock.trx_locks, lock); + if (!holds_trx_mutex) { + trx_mutex_exit(trx); + } + MONITOR_INC(MONITOR_RECLOCK_CREATED); MONITOR_INC(MONITOR_NUM_RECLOCK); - MONITOR_INC(MONITOR_RECLOCK_CREATED); - - return(lock); + return lock; } /*********************************************************************//** @@ -1638,6 +1721,7 @@ If only one of them is a wait lock, it has lower priority. If either is a high priority transaction, the lock has higher priority. Otherwise, the one with an older transaction has higher priority. @returns true if lock1 has higher priority, false otherwise. 
*/ +static bool has_higher_priority( lock_t *lock1, @@ -1654,9 +1738,6 @@ has_higher_priority( } else if (!lock_get_wait(lock2)) { return false; } - if (trx_is_high_priority(lock1->trx)) { - return false; - } return lock1->trx->start_time_micro <= lock2->trx->start_time_micro; } @@ -1781,389 +1862,118 @@ lock_rec_insert_to_head( } } -/** -Add the lock to the record lock hash and the transaction's lock list -@param[in,out] lock Newly created record lock to add to the rec hash -@param[in] add_to_hash If the lock should be added to the hash table */ -void -RecLock::lock_add(lock_t* lock, bool add_to_hash) -{ - ut_ad(lock_mutex_own()); - ut_ad(trx_mutex_own(lock->trx)); - - bool wait_lock = m_mode & LOCK_WAIT; - - if (add_to_hash) { - ulint key = m_rec_id.fold(); - hash_table_t *lock_hash = lock_hash_get(m_mode); - - ++lock->index->table->n_rec_locks; - - if (innodb_lock_schedule_algorithm == INNODB_LOCK_SCHEDULE_ALGORITHM_VATS - && !thd_is_replication_slave_thread(lock->trx->mysql_thd)) { - if (wait_lock) { - HASH_INSERT(lock_t, hash, lock_hash, key, lock); - } else { - lock_rec_insert_to_head(lock, m_rec_id.fold()); - } - } else { - HASH_INSERT(lock_t, hash, lock_hash, key, lock); - } - } - - if (wait_lock) { - lock_set_lock_and_trx_wait(lock, lock->trx); - } - - UT_LIST_ADD_LAST(lock->trx->lock.trx_locks, lock); -} - -/** -Create a new lock. -@param[in,out] trx Transaction requesting the lock -@param[in] owns_trx_mutex true if caller owns the trx_t::mutex -@param[in] add_to_hash add the lock to hash table -@param[in] prdt Predicate lock (optional) -@param[in,out] c_lock Conflicting lock request or NULL - in Galera conflicting lock is selected - as deadlock victim if requester - is BF transaction. -@return a new lock instance */ -lock_t* -RecLock::create( - trx_t* trx, - bool owns_trx_mutex, - bool add_to_hash, - const lock_prdt_t* prdt +/** Enqueue a waiting request for a lock which cannot be granted immediately. +Check for deadlocks. 
+@param[in] type_mode the requested lock mode (LOCK_S or LOCK_X) + possibly ORed with LOCK_GAP or + LOCK_REC_NOT_GAP, ORed with + LOCK_INSERT_INTENTION if this + waiting lock request is set + when performing an insert of + an index record +@param[in] block leaf page in the index +@param[in] heap_no record heap number in the block +@param[in] index index tree +@param[in,out] thr query thread +@param[in] prdt minimum bounding box (spatial index) +@retval DB_LOCK_WAIT if the waiting lock was enqueued +@retval DB_DEADLOCK if this transaction was chosen as the victim +@retval DB_SUCCESS_LOCKED_REC if the other transaction was chosen as a victim + (or it happened to commit) */ +dberr_t +lock_rec_enqueue_waiting( #ifdef WITH_WSREP - ,lock_t* c_lock -#endif /* WITH_WSREP */ -) + lock_t* c_lock, /*!< conflicting lock */ +#endif + ulint type_mode, + const buf_block_t* block, + ulint heap_no, + dict_index_t* index, + que_thr_t* thr, + lock_prdt_t* prdt) { ut_ad(lock_mutex_own()); - ut_ad(owns_trx_mutex == trx_mutex_own(trx)); + ut_ad(!srv_read_only_mode); + ut_ad(dict_index_is_clust(index) || !dict_index_is_online_ddl(index)); - /* Create the explicit lock instance and initialise it. */ + trx_t* trx = thr_get_trx(thr); - lock_t* lock = lock_alloc(trx, m_index, m_mode, m_rec_id, m_size); + ut_ad(trx_mutex_own(trx)); + ut_a(!que_thr_stop(thr)); - if (prdt != NULL && (m_mode & LOCK_PREDICATE)) { + switch (trx_get_dict_operation(trx)) { + case TRX_DICT_OP_NONE: + break; + case TRX_DICT_OP_TABLE: + case TRX_DICT_OP_INDEX: + ib::error() << "A record lock wait happens in a dictionary" + " operation. index " + << index->name + << " of table " + << index->table->name + << ". " << BUG_REPORT_MSG; + ut_ad(0); + } + /* Enqueue the lock request that will wait to be granted, note that + we already own the trx mutex. 
*/ + lock_t* lock = lock_rec_create( +#ifdef WITH_WSREP + c_lock, thr, +#endif + type_mode | LOCK_WAIT, block, heap_no, index, trx, TRUE); + + if (prdt && type_mode & LOCK_PREDICATE) { lock_prdt_set_prdt(lock, prdt); } -#ifdef WITH_WSREP - if (c_lock && wsrep_on_trx(trx) && - wsrep_thd_is_BF(trx->mysql_thd, FALSE)) { - lock_t *hash = (lock_t *)c_lock->hash; - lock_t *prev = NULL; - - while (hash && - wsrep_thd_is_BF(((lock_t *)hash)->trx->mysql_thd, TRUE) && - wsrep_trx_order_before( - ((lock_t *)hash)->trx->mysql_thd, - trx->mysql_thd)) { - prev = hash; - hash = (lock_t *)hash->hash; - } - - lock->hash = hash; - - if (prev) { - prev->hash = lock; - } else { - c_lock->hash = lock; - } - /* - * delayed conflict resolution '...kill_one_trx' was not called, - * if victim was waiting for some other lock - */ - trx_mutex_enter(c_lock->trx); - if (c_lock->trx->lock.que_state == TRX_QUE_LOCK_WAIT) { - - c_lock->trx->lock.was_chosen_as_deadlock_victim = TRUE; - - if (wsrep_debug) { - wsrep_print_wait_locks(c_lock); - } - - trx->lock.que_state = TRX_QUE_LOCK_WAIT; - lock_set_lock_and_trx_wait(lock, trx); - UT_LIST_ADD_LAST(trx->lock.trx_locks, lock); - - ut_ad(m_thr != NULL); - trx->lock.wait_thr = m_thr; - m_thr->state = QUE_THR_LOCK_WAIT; - - /* have to release trx mutex for the duration of - victim lock release. 
This will eventually call - lock_grant, which wants to grant trx mutex again - */ - if (owns_trx_mutex) { - trx_mutex_exit(trx); - } - - lock_cancel_waiting_and_release( - c_lock->trx->lock.wait_lock); - - if (owns_trx_mutex) { - trx_mutex_enter(trx); - } - - /* trx might not wait for c_lock, but some other lock - does not matter if wait_lock was released above - */ - if (c_lock->trx->lock.wait_lock == c_lock) { - if (wsrep_debug) { - ib::info() << - "victim trx waits for some other lock than c_lock"; - } - lock_reset_lock_and_trx_wait(lock); - } - - trx_mutex_exit(c_lock->trx); - - if (wsrep_debug) { - ib::info() << "WSREP: c_lock canceled " << ib::hex(c_lock->trx->id); - ib::info() << " SQL1: " - << wsrep_thd_query(c_lock->trx->mysql_thd); - ib::info() << " SQL2: " - << wsrep_thd_query(trx->mysql_thd); - } - - ++lock->index->table->n_rec_locks; - /* have to bail out here to avoid lock_set_lock... */ - return(lock); - } - trx_mutex_exit(c_lock->trx); - /* we don't want to add to hash anymore, but need other updates from lock_add */ - ++lock->index->table->n_rec_locks; - lock_add(lock, false); - } else { -#endif /* WITH_WSREP */ - - /* Ensure that another transaction doesn't access the trx - lock state and lock data structures while we are adding the - lock and changing the transaction state to LOCK_WAIT */ - - if (!owns_trx_mutex) { - trx_mutex_enter(trx); - } - - lock_add(lock, add_to_hash); - - if (!owns_trx_mutex) { - trx_mutex_exit(trx); - } -#ifdef WITH_WSREP - } -#endif /* WITH_WSREP */ - - return(lock); -} - -/** -Check the outcome of the deadlock check -@param[in,out] victim_trx Transaction selected for rollback -@param[in,out] lock Lock being requested -@return DB_LOCK_WAIT, DB_DEADLOCK or DB_SUCCESS_LOCKED_REC */ -dberr_t -RecLock::check_deadlock_result(const trx_t* victim_trx, lock_t* lock) -{ - ut_ad(lock_mutex_own()); - ut_ad(m_trx == lock->trx); - ut_ad(trx_mutex_own(m_trx)); - - if (victim_trx != NULL) { - - ut_ad(victim_trx == m_trx); - + if (const 
trx_t* victim = + DeadlockChecker::check_and_resolve(lock, trx)) { + ut_ad(victim == trx); lock_reset_lock_and_trx_wait(lock); + lock_rec_reset_nth_bit(lock, heap_no); + return DB_DEADLOCK; + } - lock_rec_reset_nth_bit(lock, m_rec_id.m_heap_no); - - return(DB_DEADLOCK); - - } else if (m_trx->lock.wait_lock == NULL) { - + if (!trx->lock.wait_lock) { /* If there was a deadlock but we chose another transaction as a victim, it is possible that we already have the lock now granted! */ - - return(DB_SUCCESS_LOCKED_REC); - } - - return(DB_LOCK_WAIT); -} - -/** -Check and resolve any deadlocks -@param[in, out] lock The lock being acquired -@return DB_LOCK_WAIT, DB_DEADLOCK, or - DB_SUCCESS_LOCKED_REC; DB_SUCCESS_LOCKED_REC means that - there was a deadlock, but another transaction was chosen - as a victim, and we got the lock immediately: no need to - wait then */ -dberr_t -RecLock::deadlock_check(lock_t* lock) -{ - ut_ad(lock_mutex_own()); - ut_ad(lock->trx == m_trx); - ut_ad(trx_mutex_own(m_trx)); - - const trx_t* victim_trx = - DeadlockChecker::check_and_resolve(lock, m_trx); - - /* Check the outcome of the deadlock test. It is possible that - the transaction that blocked our lock was rolled back and we - were granted our lock. */ - - dberr_t err = check_deadlock_result(victim_trx, lock); - - if (err == DB_LOCK_WAIT) { - - set_wait_state(lock); - - MONITOR_INC(MONITOR_LOCKREC_WAIT); - } - - return(err); -} - -/** -Collect the transactions that will need to be rolled back asynchronously -@param[in, out] trx Transaction to be rolled back */ -void -RecLock::mark_trx_for_rollback(trx_t* trx) -{ - trx->abort = true; - - ut_ad(!trx->read_only); - ut_ad(trx_mutex_own(m_trx)); - ut_ad(!(trx->in_innodb & TRX_FORCE_ROLLBACK)); - ut_ad(!(trx->in_innodb & TRX_FORCE_ROLLBACK_ASYNC)); - ut_ad(!(trx->in_innodb & TRX_FORCE_ROLLBACK_DISABLE)); - - /* Note that we will attempt an async rollback. 
The _ASYNC - flag will be cleared if the transaction is rolled back - synchronously before we get a chance to do it. */ - - trx->in_innodb |= TRX_FORCE_ROLLBACK | TRX_FORCE_ROLLBACK_ASYNC; - - ut_a(!trx->killed_by); - my_atomic_storelong(&trx->killed_by, (long) os_thread_get_curr_id()); - - m_trx->hit_list.push_back(hit_list_t::value_type(trx)); - -#ifdef UNIV_DEBUG - THD* thd = trx->mysql_thd; - - if (thd != NULL) { - - char buffer[1024]; - ib::info() << "Blocking transaction: ID: " << ib::hex(trx->id) << " - " - << " Blocked transaction ID: "<< ib::hex(m_trx->id) << " - " - << thd_get_error_context_description(thd, buffer, sizeof(buffer), - 512); - } -#endif /* UNIV_DEBUG */ -} - -/** -Setup the requesting transaction state for lock grant -@param[in,out] lock Lock for which to change state */ -void -RecLock::set_wait_state(lock_t* lock) -{ - ut_ad(lock_mutex_own()); - ut_ad(m_trx == lock->trx); - ut_ad(trx_mutex_own(m_trx)); - ut_ad(lock_get_wait(lock)); - - m_trx->lock.wait_started = ut_time(); - - m_trx->lock.que_state = TRX_QUE_LOCK_WAIT; - - m_trx->lock.was_chosen_as_deadlock_victim = false; - - bool stopped = que_thr_stop(m_thr); - ut_a(stopped); -} - -/** -Enqueue a lock wait for normal transaction. If it is a high priority transaction -then jump the record lock wait queue and if the transaction at the head of the -queue is itself waiting roll it back, also do a deadlock check and resolve. 
-@param[in, out] wait_for The lock that the joining transaction is - waiting for -@param[in] prdt Predicate [optional] -@return DB_LOCK_WAIT, DB_DEADLOCK, or - DB_SUCCESS_LOCKED_REC; DB_SUCCESS_LOCKED_REC means that - there was a deadlock, but another transaction was chosen - as a victim, and we got the lock immediately: no need to - wait then */ -dberr_t -RecLock::add_to_waitq(lock_t* wait_for, const lock_prdt_t* prdt) -{ - ut_ad(lock_mutex_own()); - ut_ad(m_trx == thr_get_trx(m_thr)); - ut_ad(trx_mutex_own(m_trx)); - - DEBUG_SYNC_C("rec_lock_add_to_waitq"); - - m_mode |= LOCK_WAIT; - - /* Do the preliminary checks, and set query thread state */ - - prepare(); - - bool high_priority = trx_is_high_priority(m_trx); - - /* Don't queue the lock to hash table, if high priority transaction. */ - lock_t* lock = create( - m_trx, true, !high_priority, prdt #ifdef WITH_WSREP - ,wait_for -#endif /* WITH_WSREP */ - ); - - /* Attempt to jump over the low priority waiting locks. */ - if (high_priority && jump_queue(lock, wait_for)) { - - /* Lock is granted */ - return(DB_SUCCESS); - } - -#ifdef WITH_WSREP - if (!lock_get_wait(lock) && wsrep_thd_is_BF(m_trx->mysql_thd, FALSE)) { if (wsrep_debug) { - ib::info() << "WSREP: BF thread got lock granted early, ID " << ib::hex(lock->trx->id) - << " query: " << wsrep_thd_query(m_trx->mysql_thd); + ib::info() << "WSREP: BF thread got lock granted early, ID " << ib::hex(trx->id) + << " query: " << wsrep_thd_query(trx->mysql_thd); } - return(DB_SUCCESS); - } -#endif /* WITH_WSREP */ - ut_ad(lock_get_wait(lock)); +#endif + return DB_SUCCESS_LOCKED_REC; + } - dberr_t err = deadlock_check(lock); - ut_ad(trx_mutex_own(m_trx)); + trx->lock.que_state = TRX_QUE_LOCK_WAIT; - // Move it only when it does not cause a deadlock. 
- if (err != DB_DEADLOCK - && innodb_lock_schedule_algorithm - == INNODB_LOCK_SCHEDULE_ALGORITHM_VATS - && !thd_is_replication_slave_thread(lock->trx->mysql_thd) - && !trx_is_high_priority(lock->trx)) { + trx->lock.was_chosen_as_deadlock_victim = false; + trx->lock.wait_started = ut_time(); - HASH_DELETE(lock_t, hash, lock_hash_get(lock->type_mode), - m_rec_id.fold(), lock); + ut_a(que_thr_stop(thr)); + + DBUG_LOG("ib_lock", "trx " << ib::hex(trx->id) + << " waits for lock in index " << index->name + << " of table " << index->table->name); + + MONITOR_INC(MONITOR_LOCKREC_WAIT); + + if (innodb_lock_schedule_algorithm + == INNODB_LOCK_SCHEDULE_ALGORITHM_VATS + && !prdt + && !thd_is_replication_slave_thread(lock->trx->mysql_thd)) { + HASH_DELETE(lock_t, hash, lock_sys->rec_hash, + lock_rec_lock_fold(lock), lock); dberr_t res = lock_rec_insert_by_trx_age(lock); if (res != DB_SUCCESS) { return res; } } - return(err); + return DB_LOCK_WAIT; } /*********************************************************************//** @@ -2288,9 +2098,11 @@ lock_rec_add_to_queue( } } - RecLock rec_lock(index, block, heap_no, type_mode); - - rec_lock.create(trx, caller_owns_trx_mutex, true); + lock_rec_create( +#ifdef WITH_WSREP + NULL, NULL, +#endif + type_mode, block, heap_no, index, trx, caller_owns_trx_mutex); } /*********************************************************************//** @@ -2341,12 +2153,13 @@ lock_rec_lock_fast( lock_rec_req_status status = LOCK_REC_SUCCESS; if (lock == NULL) { - if (!impl) { - RecLock rec_lock(index, block, heap_no, mode); - /* Note that we don't own the trx mutex. 
*/ - rec_lock.create(trx, false, true); + lock = lock_rec_create( +#ifdef WITH_WSREP + NULL, NULL, +#endif + mode, block, heap_no, index, trx, false); } status = LOCK_REC_SUCCESS_CREATED; @@ -2419,40 +2232,32 @@ lock_rec_lock_slow( trx_mutex_enter(trx); if (lock_rec_has_expl(mode, block, heap_no, trx)) { - - /* The trx already has a strong enough lock on rec: do - nothing */ - + /* The trx already has a strong enough lock: do nothing */ err = DB_SUCCESS; - + } else if ( +#ifdef WITH_WSREP + lock_t* c_lock = +#endif /* WITH_WSREP */ + lock_rec_other_has_conflicting( + static_cast(mode), + block, heap_no, trx)) { + /* If another transaction has a non-gap conflicting + request in the queue, as this transaction does not + have a lock strong enough already granted on the + record, we have to wait. */ + err = lock_rec_enqueue_waiting( +#ifdef WITH_WSREP + c_lock, +#endif /* WITH_WSREP */ + mode, block, heap_no, index, thr, NULL); + } else if (!impl) { + /* Set the requested lock on the record, note that + we already own the transaction mutex. */ + lock_rec_add_to_queue( + LOCK_REC | mode, block, heap_no, index, trx, TRUE); + err = DB_SUCCESS_LOCKED_REC; } else { - lock_t* wait_for = lock_rec_other_has_conflicting( - mode, block, heap_no, trx); - - if (wait_for != NULL) { - - /* If another transaction has a non-gap conflicting - request in the queue, as this transaction does not - have a lock strong enough already granted on the - record, we may have to wait. */ - - RecLock rec_lock(thr, index, block, heap_no, mode); - - err = rec_lock.add_to_waitq(wait_for); - - } else if (!impl) { - - /* Set the requested lock on the record, note that - we already own the transaction mutex. 
*/ - - lock_rec_add_to_queue( - LOCK_REC | mode, block, heap_no, index, trx, - true); - - err = DB_SUCCESS_LOCKED_REC; - } else { - err = DB_SUCCESS; - } + err = DB_SUCCESS; } trx_mutex_exit(trx); @@ -2630,234 +2435,6 @@ lock_grant( } } -/** -Jump the queue for the record over all low priority transactions and -add the lock. If all current granted locks are compatible, grant the -lock. Otherwise, mark all granted transaction for asynchronous -rollback and add to hit list. -@param[in, out] lock Lock being requested -@param[in] conflict_lock First conflicting lock from the head -@return true if the lock is granted */ -bool -RecLock::jump_queue( - lock_t* lock, - const lock_t* conflict_lock) -{ - ut_ad(m_trx == lock->trx); - ut_ad(trx_mutex_own(m_trx)); - ut_ad(conflict_lock->trx != m_trx); - ut_ad(trx_is_high_priority(m_trx)); - ut_ad(m_rec_id.m_heap_no != ULINT32_UNDEFINED); - - bool high_priority = false; - - /* Find out the position to add the lock. If there are other high - priority transactions in waiting state then we should add it after - the last high priority transaction. Otherwise, we can add it after - the last granted lock jumping over the wait queue. */ - bool grant_lock = lock_add_priority(lock, conflict_lock, - &high_priority); - - if (grant_lock) { - - ut_ad(conflict_lock->trx->lock.que_state == TRX_QUE_LOCK_WAIT); - ut_ad(conflict_lock->trx->lock.wait_lock == conflict_lock); - - DBUG_LOG("trx", - "Granting High Priority Transaction " - << ib::hex(lock->trx->id) << " a lock jumping over" - << " waiting Transaction " << ib::hex(conflict_lock->trx->id)); - - lock_reset_lock_and_trx_wait(lock); - return(true); - } - - /* If another high priority transaction is found waiting - victim transactions are already marked for rollback. */ - if (high_priority) { - - return(false); - } - - /* The lock is placed after the last granted lock in the queue. Check and add - low priority transactinos to hit list for ASYNC rollback. 
*/ - make_trx_hit_list(lock, conflict_lock); - - return(false); -} - -/** Find position in lock queue and add the high priority transaction -lock. Intention and GAP only locks can be granted even if there are -waiting locks in front of the queue. To add the High priority -transaction in a safe position we keep the following rule. - -1. If the lock can be granted, add it before the first waiting lock -in the queue so that all currently waiting locks need to do conflict -check before getting granted. - -2. If the lock has to wait, add it after the last granted lock or the -last waiting high priority transaction in the queue whichever is later. -This ensures that the transaction is granted only after doing conflict -check with all granted transactions. -@param[in] lock Lock being requested -@param[in] conflict_lock First conflicting lock from the head -@param[out] high_priority high priority transaction ahead in queue -@return true if the lock can be granted */ -bool -RecLock::lock_add_priority( - lock_t* lock, - const lock_t* conflict_lock, - bool* high_priority) -{ - ut_ad(high_priority); - - *high_priority = false; - - /* If the first conflicting lock is waiting for the current row, - then all other granted locks are compatible and the lock can be - directly granted if no other high priority transactions are - waiting. We need to recheck with all granted transaction as there - could be granted GAP or Intention locks down the queue. 
*/ - bool grant_lock = (conflict_lock->is_waiting()); - lock_t* lock_head = NULL; - lock_t* grant_position = NULL; - lock_t* add_position = NULL; - - /* Different lock (such as predicate lock) are on different hash */ - hash_table_t* lock_hash = lock_hash_get(m_mode); - - HASH_SEARCH(hash, lock_hash, m_rec_id.fold(), lock_t*, - lock_head, ut_ad(lock_head->is_record_lock()), true); - - ut_ad(lock_head); - - for (lock_t* next = lock_head; next != NULL; next = next->hash) { - - /* check only for locks on the current row */ - if (!is_on_row(next)) { - continue; - } - - if (next->is_waiting()) { - /* grant lock position is the granted lock just before - the first wait lock in the queue. */ - if (grant_position == NULL) { - grant_position = add_position; - } - - if (trx_is_high_priority(next->trx)) { - - *high_priority = true; - grant_lock = false; - add_position = next; - } - } else { - - add_position = next; - /* Cannot grant lock if there is any conflicting - granted lock. */ - if (grant_lock && lock_has_to_wait(lock, next)) { - grant_lock = false; - } - } - } - - /* If the lock is to be granted it is safe to add before the first - waiting lock in the queue. */ - if (grant_lock) { - - ut_ad(!lock_has_to_wait(lock, grant_position)); - add_position = grant_position; - } - - ut_ad(add_position != NULL); - - /* Add the lock to lock hash table. */ - lock->hash = add_position->hash; - add_position->hash = lock; - ++lock->index->table->n_rec_locks; - - return(grant_lock); -} - -/** Iterate over the granted locks and prepare the hit list for ASYNC Rollback. -If the transaction is waiting for some other lock then wake up with deadlock error. -Currently we don't mark following transactions for ASYNC Rollback. -1. Read only transactions -2. Background transactions -3. 
Other High priority transactions -@param[in] lock Lock being requested -@param[in] conflict_lock First conflicting lock from the head */ -void -RecLock::make_trx_hit_list( - lock_t* lock, - const lock_t* conflict_lock) -{ - const lock_t* next; - - for (next = conflict_lock; next != NULL; next = next->hash) { - - /* All locks ahead in the queue are checked. */ - if (next == lock) { - - ut_ad(next->is_waiting()); - break; - } - - trx_t* trx = next->trx; - /* Check only for conflicting, granted locks on the current row. - Currently, we don't rollback read only transactions, transactions - owned by background threads. */ - if (trx == lock->trx - || !is_on_row(next) - || next->is_waiting() - || trx->read_only - || trx->mysql_thd == NULL - || !lock_has_to_wait(lock, next)) { - - continue; - } - - trx_mutex_enter(trx); - - /* Skip high priority transactions, if already marked for abort - by some other transaction or if ASYNC rollback is disabled. A - transaction must complete kill/abort of a victim transaction once - marked and added to hit list. */ - if (trx_is_high_priority(trx) - || (trx->in_innodb & TRX_FORCE_ROLLBACK_DISABLE) != 0 - || trx->abort) { - - trx_mutex_exit(trx); - continue; - } - - /* If the transaction is waiting on some other resource then - wake it up with DEAD_LOCK error so that it can rollback. */ - if (trx->lock.que_state == TRX_QUE_LOCK_WAIT) { - - /* Assert that it is not waiting for current record. */ - ut_ad(trx->lock.wait_lock != next); - - DBUG_LOG("trx", "High Priority Transaction " - << ib::hex(lock->trx->id) - << " waking up blocking transaction " - << ib::hex(trx->id)); - - trx->lock.was_chosen_as_deadlock_victim = true; - lock_cancel_waiting_and_release(trx->lock.wait_lock); - trx_mutex_exit(trx); - continue; - } - - /* Mark for ASYNC Rollback and add to hit list. 
*/ - mark_trx_for_rollback(trx); - trx_mutex_exit(trx); - } - - ut_ad(next == lock); -} - /*************************************************************//** Cancels a waiting record lock request and releases the waiting transaction that requested it. NOTE: does NOT check if waiting lock requests behind this @@ -2981,7 +2558,6 @@ lock_rec_dequeue_from_page( space = in_lock->un_member.rec_lock.space; page_no = in_lock->un_member.rec_lock.page_no; - ut_ad(in_lock->index->table->n_rec_locks > 0); in_lock->index->table->n_rec_locks--; lock_hash = lock_hash_get(in_lock->type_mode); @@ -3042,7 +2618,6 @@ lock_rec_discard( space = in_lock->un_member.rec_lock.space; page_no = in_lock->un_member.rec_lock.page_no; - ut_ad(in_lock->index->table->n_rec_locks > 0); in_lock->index->table->n_rec_locks--; HASH_DELETE(lock_t, hash, lock_hash_get(in_lock->type_mode), @@ -3826,10 +3401,10 @@ lock_update_merge_right( #ifdef UNIV_DEBUG /* there should exist no page lock on the left page, otherwise, it will be blocked from merge */ - ulint space = left_block->page.id.space(); - ulint page_no = left_block->page.id.page_no(); + ulint space = left_block->page.id.space(); + ulint page_no = left_block->page.id.page_no(); ut_ad(lock_rec_get_first_on_page_addr( - lock_sys->prdt_page_hash, space, page_no) == NULL); + lock_sys->prdt_page_hash, space, page_no) == NULL); #endif /* UNIV_DEBUG */ lock_rec_free_all_from_discard_page(left_block); @@ -3954,7 +3529,7 @@ lock_update_merge_left( ulint space = right_block->page.id.space(); ulint page_no = right_block->page.id.page_no(); lock_t* lock_test = lock_rec_get_first_on_page_addr( - lock_sys->prdt_page_hash, space, page_no); + lock_sys->prdt_page_hash, space, page_no); ut_ad(!lock_test); #endif /* UNIV_DEBUG */ @@ -4001,9 +3576,9 @@ lock_update_discard( const buf_block_t* block) /*!< in: index page which will be discarded */ { + const page_t* page = block->frame; const rec_t* rec; ulint heap_no; - const page_t* page = block->frame; 
lock_mutex_enter(); @@ -4444,10 +4019,9 @@ lock_table_remove_low( /*********************************************************************//** Enqueues a waiting request for a table lock which cannot be granted immediately. Checks for deadlocks. -@return DB_LOCK_WAIT, DB_DEADLOCK, or -DB_SUCCESS; DB_SUCCESS means that there was a deadlock, but another -transaction was chosen as a victim, and we got the lock immediately: -no need to wait then */ +@retval DB_LOCK_WAIT if the waiting lock was enqueued +@retval DB_DEADLOCK if this transaction was chosen as the victim +@retval DB_SUCCESS if the other transaction committed or aborted */ static dberr_t lock_table_enqueue_waiting( @@ -5036,7 +4610,7 @@ lock_release( } if (count == LOCK_RELEASE_INTERVAL) { - /* Release the mutex for a while, so that we + /* Release the mutex for a while, so that we do not monopolize it */ lock_mutex_exit(); @@ -6548,19 +6122,21 @@ lock_rec_insert_check_and_lock( const ulint type_mode = LOCK_X | LOCK_GAP | LOCK_INSERT_INTENTION; - lock_t* wait_for = lock_rec_other_has_conflicting( - type_mode, block, heap_no, trx); - - if (wait_for != NULL) { - - RecLock rec_lock(thr, index, block, heap_no, type_mode); - + if ( +#ifdef WITH_WSREP + lock_t* c_lock = +#endif /* WITH_WSREP */ + lock_rec_other_has_conflicting(type_mode, block, heap_no, trx)) { + /* Note that we may get DB_SUCCESS also here! */ trx_mutex_enter(trx); - err = rec_lock.add_to_waitq(wait_for); + err = lock_rec_enqueue_waiting( +#ifdef WITH_WSREP + c_lock, +#endif /* WITH_WSREP */ + type_mode, block, heap_no, index, thr, NULL); trx_mutex_exit(trx); - } else { err = DB_SUCCESS; } @@ -7369,7 +6945,6 @@ lock_unlock_table_autoinc( but not COMMITTED transactions. 
*/ ut_ad(trx_state_eq(trx, TRX_STATE_NOT_STARTED) - || trx_state_eq(trx, TRX_STATE_FORCED_ROLLBACK) || !trx_state_eq(trx, TRX_STATE_COMMITTED_IN_MEMORY)); /* This function is invoked for a running transaction by the @@ -7992,24 +7567,9 @@ DeadlockChecker::select_victim() const ut_ad(m_start->lock.wait_lock != 0); ut_ad(m_wait_lock->trx != m_start); - if (thd_trx_priority(m_start->mysql_thd) > 0 - || thd_trx_priority(m_wait_lock->trx->mysql_thd) > 0) { - - const trx_t* victim; - - victim = trx_arbitrate(m_start, m_wait_lock->trx); - - if (victim != NULL) { - - return(victim); - } - } - if (trx_weight_ge(m_wait_lock->trx, m_start)) { - /* The joining transaction is 'smaller', choose it as the victim and roll it back. */ - #ifdef WITH_WSREP if (wsrep_thd_is_BF(m_start->mysql_thd, TRUE)) { return(m_wait_lock->trx); @@ -8208,12 +7768,7 @@ DeadlockChecker::check_and_resolve(const lock_t* lock, trx_t* trx) check_trx_state(trx); ut_ad(!srv_read_only_mode); - /* If transaction is marked for ASYNC rollback then we should - not allow it to wait for another lock causing possible deadlock. - We return current transaction as deadlock victim here. 
*/ - if (trx->in_innodb & TRX_FORCE_ROLLBACK_ASYNC) { - return(trx); - } else if (!innobase_deadlock_detect) { + if (!innobase_deadlock_detect) { return(NULL); } diff --git a/storage/innobase/lock/lock0prdt.cc b/storage/innobase/lock/lock0prdt.cc index f6859b70297..23a46a002be 100644 --- a/storage/innobase/lock/lock0prdt.cc +++ b/storage/innobase/lock/lock0prdt.cc @@ -38,6 +38,7 @@ Created 9/7/2013 Jimmy Yang #include "ut0vec.h" #include "btr0btr.h" #include "dict0boot.h" +#include "que0que.h" #include /*********************************************************************//** @@ -495,9 +496,18 @@ lock_prdt_add_to_queue( } } - RecLock rec_lock(index, block, PRDT_HEAPNO, type_mode); + lock = lock_rec_create( +#ifdef WITH_WSREP + NULL, NULL, /* FIXME: replicate SPATIAL INDEX locks */ +#endif + type_mode, block, PRDT_HEAPNO, index, trx, + caller_owns_trx_mutex); - return(rec_lock.create(trx, caller_owns_trx_mutex, true, prdt)); + if (lock->type_mode & LOCK_PREDICATE) { + lock_prdt_set_prdt(lock, prdt); + } + + return lock; } /*********************************************************************//** @@ -565,7 +575,7 @@ lock_prdt_insert_check_and_lock( const ulint mode = LOCK_X | LOCK_PREDICATE | LOCK_INSERT_INTENTION; - lock_t* wait_for = lock_prdt_other_has_conflicting( + const lock_t* wait_for = lock_prdt_other_has_conflicting( mode, block, prdt, trx); if (wait_for != NULL) { @@ -574,16 +584,17 @@ lock_prdt_insert_check_and_lock( /* Allocate MBR on the lock heap */ lock_init_prdt_from_mbr(prdt, mbr, 0, trx->lock.lock_heap); - RecLock rec_lock(thr, index, block, PRDT_HEAPNO, mode); - /* Note that we may get DB_SUCCESS also here! 
*/ - trx_mutex_enter(trx); - err = rec_lock.add_to_waitq(wait_for, prdt); + err = lock_rec_enqueue_waiting( +#ifdef WITH_WSREP + NULL, /* FIXME: replicate SPATIAL INDEX locks */ +#endif + LOCK_X | LOCK_PREDICATE | LOCK_INSERT_INTENTION, + block, PRDT_HEAPNO, index, thr, prdt); trx_mutex_exit(trx); - } else { err = DB_SUCCESS; } @@ -831,13 +842,14 @@ lock_prdt_lock( lock_t* lock = lock_rec_get_first_on_page(hash, block); if (lock == NULL) { - - RecLock rec_lock(index, block, PRDT_HEAPNO, prdt_mode); - - lock = rec_lock.create(trx, false, true); + lock = lock_rec_create( +#ifdef WITH_WSREP + NULL, NULL, /* FIXME: replicate SPATIAL INDEX locks */ +#endif + mode | type_mode, block, PRDT_HEAPNO, + index, trx, FALSE); status = LOCK_REC_SUCCESS_CREATED; - } else { trx_mutex_enter(trx); @@ -861,12 +873,14 @@ lock_prdt_lock( if (wait_for != NULL) { - RecLock rec_lock( - thr, index, block, PRDT_HEAPNO, - prdt_mode, prdt); - - err = rec_lock.add_to_waitq(wait_for); - + err = lock_rec_enqueue_waiting( +#ifdef WITH_WSREP + NULL, /* FIXME: replicate + SPATIAL INDEX locks */ +#endif + mode | type_mode, + block, PRDT_HEAPNO, + index, thr, prdt); } else { lock_prdt_add_to_queue( @@ -947,10 +961,12 @@ lock_place_prdt_page_lock( } if (lock == NULL) { - RecID rec_id(space, page_no, PRDT_HEAPNO); - RecLock rec_lock(index, rec_id, mode); - - rec_lock.create(trx, false, true); + lock = lock_rec_create_low( +#ifdef WITH_WSREP + NULL, NULL, /* FIXME: replicate SPATIAL INDEX locks */ +#endif + mode, space, page_no, NULL, PRDT_HEAPNO, + index, trx, FALSE); #ifdef PRDT_DIAG printf("GIS_DIAGNOSTIC: page lock %d\n", (int) page_no); diff --git a/storage/innobase/lock/lock0wait.cc b/storage/innobase/lock/lock0wait.cc index c41821412af..d6c812e1af0 100644 --- a/storage/innobase/lock/lock0wait.cc +++ b/storage/innobase/lock/lock0wait.cc @@ -1,7 +1,7 @@ /***************************************************************************** Copyright (c) 1996, 2016, Oracle and/or its affiliates. 
All Rights Reserved. -Copyright (c) 2014, 2017, MariaDB Corporation. +Copyright (c) 2014, 2018, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -419,7 +419,7 @@ lock_wait_suspend_thread( && (!wsrep_on_trx(trx) || (!wsrep_is_BF_lock_timeout(trx, false) && trx->error_state != DB_DEADLOCK)) #endif /* WITH_WSREP */ - && !trx_is_high_priority(trx)) { + ) { trx->error_state = DB_LOCK_WAIT_TIMEOUT; @@ -502,7 +502,7 @@ lock_wait_check_and_cancel( trx_mutex_enter(trx); - if (trx->lock.wait_lock != NULL && !trx_is_high_priority(trx)) { + if (trx->lock.wait_lock != NULL) { ut_a(trx->lock.que_state == TRX_QUE_LOCK_WAIT); diff --git a/storage/innobase/page/page0page.cc b/storage/innobase/page/page0page.cc index fb528843da6..c33623e9398 100644 --- a/storage/innobase/page/page0page.cc +++ b/storage/innobase/page/page0page.cc @@ -2,7 +2,7 @@ Copyright (c) 1994, 2016, Oracle and/or its affiliates. All Rights Reserved. Copyright (c) 2012, Facebook Inc. -Copyright (c) 2017, MariaDB Corporation. +Copyright (c) 2017, 2018, MariaDB Corporation. 
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -35,6 +35,7 @@ Created 2/2/1994 Heikki Tuuri #include "lock0lock.h" #include "fut0lst.h" #include "btr0sea.h" +#include "trx0sys.h" /* THE INDEX PAGE ============== diff --git a/storage/innobase/page/page0zip.cc b/storage/innobase/page/page0zip.cc index 271ef4a787e..6b68ee973af 100644 --- a/storage/innobase/page/page0zip.cc +++ b/storage/innobase/page/page0zip.cc @@ -45,6 +45,7 @@ const byte field_ref_zero[FIELD_REF_SIZE] = { #include "btr0cur.h" #include "page0types.h" #include "log0recv.h" +#include "row0row.h" #include "row0trunc.h" #include "zlib.h" #include "buf0buf.h" diff --git a/storage/innobase/row/row0ins.cc b/storage/innobase/row/row0ins.cc index 98e94e06464..76809222f2c 100644 --- a/storage/innobase/row/row0ins.cc +++ b/storage/innobase/row/row0ins.cc @@ -1852,8 +1852,6 @@ do_possible_lock_wait: my_atomic_addlint( &check_table->n_foreign_key_checks_running, 1); - trx_kill_blocking(trx); - lock_wait_suspend_thread(thr); thr->lock_state = QUE_THR_LOCK_NOLOCK; diff --git a/storage/innobase/row/row0merge.cc b/storage/innobase/row/row0merge.cc index c50e8fbd6ae..cbd5aa2f316 100644 --- a/storage/innobase/row/row0merge.cc +++ b/storage/innobase/row/row0merge.cc @@ -36,6 +36,7 @@ Completed by Sunny Bains and Marko Makela #include "row0ext.h" #include "row0log.h" #include "row0ins.h" +#include "row0row.h" #include "row0sel.h" #include "log0crypt.h" #include "dict0crea.h" @@ -45,6 +46,7 @@ Completed by Sunny Bains and Marko Makela #include "ut0sort.h" #include "row0ftsort.h" #include "row0import.h" +#include "row0vers.h" #include "handler0alter.h" #include "btr0bulk.h" #include "fsp0sysspace.h" @@ -3586,8 +3588,6 @@ row_merge_lock_table( trx->op_info = "setting table lock for creating or dropping index"; trx->ddl = true; - /* Trx for DDL should not be forced to rollback for now */ - trx->in_innodb |= 
TRX_FORCE_ROLLBACK_DISABLE; return(lock_table_for_trx(table, trx, mode)); } diff --git a/storage/innobase/row/row0mysql.cc b/storage/innobase/row/row0mysql.cc index 49a042acc20..cd3e7253298 100644 --- a/storage/innobase/row/row0mysql.cc +++ b/storage/innobase/row/row0mysql.cc @@ -729,9 +729,6 @@ handle_new_error: /* MySQL will roll back the latest SQL statement */ break; case DB_LOCK_WAIT: - - trx_kill_blocking(trx); - lock_wait_suspend_thread(thr); if (trx->error_state != DB_SUCCESS) { diff --git a/storage/innobase/row/row0sel.cc b/storage/innobase/row/row0sel.cc index 623961945b2..c42b08d820b 100644 --- a/storage/innobase/row/row0sel.cc +++ b/storage/innobase/row/row0sel.cc @@ -4437,17 +4437,12 @@ row_search_mvcc( naturally moves upward (in fetch next) in alphabetical order, otherwise downward */ - if (direction == 0) { - - if (mode == PAGE_CUR_GE - || mode == PAGE_CUR_G + if (UNIV_UNLIKELY(direction == 0)) { + if (mode == PAGE_CUR_GE || mode == PAGE_CUR_G || mode >= PAGE_CUR_CONTAIN) { - moves_up = TRUE; } - } else if (direction == ROW_SEL_NEXT) { - moves_up = TRUE; } @@ -5682,15 +5677,6 @@ normal_return: mtr.commit(); - /* Rollback blocking transactions from hit list for high priority - transaction, if any. We should not be holding latches here as - we are going to rollback the blocking transactions. */ - if (!trx->hit_list.empty()) { - - ut_ad(trx_is_high_priority(trx)); - trx_kill_blocking(trx); - } - DEBUG_SYNC_C("row_search_for_mysql_before_return"); if (prebuilt->idx_cond != 0) { diff --git a/storage/innobase/row/row0trunc.cc b/storage/innobase/row/row0trunc.cc index 068b4d96ed2..94be5152596 100644 --- a/storage/innobase/row/row0trunc.cc +++ b/storage/innobase/row/row0trunc.cc @@ -1,7 +1,7 @@ /***************************************************************************** Copyright (c) 2013, 2017, Oracle and/or its affiliates. All Rights Reserved. -Copyright (c) 2017, MariaDB Corporation. +Copyright (c) 2017, 2018, MariaDB Corporation. 
This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -36,7 +36,8 @@ Created 2013-04-12 Sunny Bains #include "srv0start.h" #include "row0trunc.h" #include "os0file.h" -#include +#include "que0que.h" +#include "trx0undo.h" /* FIXME: For temporary tables, use a simple approach of btr_free() and btr_create() of each index tree. */ diff --git a/storage/innobase/trx/trx0roll.cc b/storage/innobase/trx/trx0roll.cc index 5c2258f25be..a5be54b7b79 100644 --- a/storage/innobase/trx/trx0roll.cc +++ b/storage/innobase/trx/trx0roll.cc @@ -183,10 +183,7 @@ trx_rollback_for_mysql_low( /** Rollback a transaction used in MySQL @param[in, out] trx transaction @return error code or DB_SUCCESS */ -static -dberr_t -trx_rollback_low( - trx_t* trx) +dberr_t trx_rollback_for_mysql(trx_t* trx) { /* We are reading trx->state without holding trx_sys->mutex here, because the rollback should be invoked for a running @@ -194,7 +191,6 @@ trx_rollback_low( that is associated with the current thread. */ switch (trx->state) { - case TRX_STATE_FORCED_ROLLBACK: case TRX_STATE_NOT_STARTED: trx->will_lock = 0; ut_ad(trx->in_mysql_trx_list); @@ -261,28 +257,6 @@ trx_rollback_low( return(DB_CORRUPTION); } -/*******************************************************************//** -Rollback a transaction used in MySQL. -@return error code or DB_SUCCESS */ -dberr_t -trx_rollback_for_mysql( -/*===================*/ - trx_t* trx) /*!< in/out: transaction */ -{ - /* Avoid the tracking of async rollback killer - thread to enter into InnoDB. */ - if (TrxInInnoDB::is_async_rollback(trx)) { - - return(trx_rollback_low(trx)); - - } else { - - TrxInInnoDB trx_in_innodb(trx, true); - - return(trx_rollback_low(trx)); - } -} - /*******************************************************************//** Rollback the latest SQL statement for MySQL. 
@return error code or DB_SUCCESS */ @@ -300,7 +274,6 @@ trx_rollback_last_sql_stat_for_mysql( ut_ad(trx->in_mysql_trx_list); switch (trx->state) { - case TRX_STATE_FORCED_ROLLBACK: case TRX_STATE_NOT_STARTED: return(DB_SUCCESS); @@ -487,12 +460,9 @@ trx_rollback_to_savepoint_for_mysql( switch (trx->state) { case TRX_STATE_NOT_STARTED: - case TRX_STATE_FORCED_ROLLBACK: - ib::error() << "Transaction has a savepoint " << savep->name << " though it is not started"; - return(DB_ERROR); case TRX_STATE_ACTIVE: @@ -780,7 +750,6 @@ fake_prepared: case TRX_STATE_PREPARED: goto func_exit; case TRX_STATE_NOT_STARTED: - case TRX_STATE_FORCED_ROLLBACK: break; } diff --git a/storage/innobase/trx/trx0trx.cc b/storage/innobase/trx/trx0trx.cc index c6e2bd88d62..1c058a3ede5 100644 --- a/storage/innobase/trx/trx0trx.cc +++ b/storage/innobase/trx/trx0trx.cc @@ -43,7 +43,6 @@ Created 3/26/1996 Heikki Tuuri #include "srv0mon.h" #include "srv0srv.h" #include "fsp0sysspace.h" -#include "row0mysql.h" #include "srv0start.h" #include "trx0purge.h" #include "trx0rec.h" @@ -69,15 +68,6 @@ typedef std::set< std::less, ut_allocator > table_id_set; -/** Constructor */ -TrxVersion::TrxVersion(trx_t* trx) - : - m_trx(trx), - m_version(trx->version) -{ - /* No op */ -} - /** Set flush observer for the transaction @param[in/out] trx transaction struct @param[in] observer flush observer */ @@ -121,14 +111,12 @@ trx_init( /*=====*/ trx_t* trx) { - /* This is called at the end of commit, do not reset the - trx_t::state here to NOT_STARTED. The FORCED_ROLLBACK - status is required for asynchronous handling. */ - trx->id = 0; trx->no = TRX_ID_MAX; + trx->state = TRX_STATE_NOT_STARTED; + trx->is_recovered = false; trx->op_info = ""; @@ -183,29 +171,7 @@ trx_init( trx->lock.table_cached = 0; - /* During asynchronous rollback, we should reset forced rollback flag - only after rollback is complete to avoid race with the thread owning - the transaction. 
*/ - - if (!TrxInInnoDB::is_async_rollback(trx)) { - - my_atomic_storelong(&trx->killed_by, 0); - - /* Note: Do not set to 0, the ref count is decremented inside - the TrxInInnoDB() destructor. We only need to clear the flags. */ - - trx->in_innodb &= TRX_FORCE_ROLLBACK_MASK; - } - - /* Note: It's possible that this list is not empty if a transaction - was interrupted after it collected the victim transactions and before - it got a chance to roll them back asynchronously. */ - - trx->hit_list.clear(); - trx->flush_observer = NULL; - - ++trx->version; } /** For managing the life-cycle of the trx_t instance that we get @@ -220,7 +186,7 @@ struct TrxFactory { { /* Explicitly call the constructor of the already allocated object. trx_t objects are allocated by - ut_zalloc() in Pool::Pool() which would not call + ut_zalloc_nokey() in Pool::Pool() which would not call the constructors of the trx_t members. */ new(&trx->mod_tables) trx_mod_tables_t(); @@ -230,13 +196,8 @@ struct TrxFactory { new(&trx->lock.table_locks) lock_pool_t(); - new(&trx->hit_list) hit_list_t(); - trx_init(trx); - DBUG_LOG("trx", "Init: " << trx); - trx->state = TRX_STATE_NOT_STARTED; - trx->dict_operation_lock_mode = 0; trx->xid = UT_NEW_NOKEY(xid_t()); @@ -309,8 +270,6 @@ struct TrxFactory { trx->lock.table_pool.~lock_pool_t(); trx->lock.table_locks.~lock_pool_t(); - - trx->hit_list.~hit_list_t(); } /** Enforce any invariants here, this is called before the transaction @@ -324,8 +283,7 @@ struct TrxFactory { ut_ad(!trx->read_only); - ut_ad(trx->state == TRX_STATE_NOT_STARTED - || trx->state == TRX_STATE_FORCED_ROLLBACK); + ut_ad(trx->state == TRX_STATE_NOT_STARTED); ut_ad(trx->dict_operation == TRX_DICT_OP_NONE); @@ -344,12 +302,6 @@ struct TrxFactory { ut_ad(trx->lock.table_locks.empty()); - ut_ad(!trx->abort); - - ut_ad(trx->hit_list.empty()); - - ut_ad(trx->killed_by == 0); - return(true); } }; @@ -442,15 +394,9 @@ trx_create_low() /* We just got trx from pool, it should be non locking */ 
ut_ad(trx->will_lock == 0); + ut_ad(trx->state == TRX_STATE_NOT_STARTED); - /* Background trx should not be forced to rollback, - we will unset the flag for user trx. */ - trx->in_innodb |= TRX_FORCE_ROLLBACK_DISABLE; - - /* Trx state can be TRX_STATE_FORCED_ROLLBACK if - the trx was forced to rollback before it's reused.*/ DBUG_LOG("trx", "Create: " << trx); - trx->state = TRX_STATE_NOT_STARTED; heap = mem_heap_create(sizeof(ib_vector_t) + sizeof(void*) * 8); @@ -1204,7 +1150,6 @@ trx_start_low( { ut_ad(!trx->in_rollback); ut_ad(!trx->is_recovered); - ut_ad(trx->hit_list.empty()); ut_ad(trx->start_line != 0); ut_ad(trx->start_file != 0); ut_ad(trx->roll_limit == 0); @@ -1213,10 +1158,6 @@ trx_start_low( ut_ad(trx->rsegs.m_noredo.rseg == NULL); ut_ad(trx_state_eq(trx, TRX_STATE_NOT_STARTED)); ut_ad(UT_LIST_GET_LEN(trx->lock.trx_locks) == 0); - ut_ad(!(trx->in_innodb & TRX_FORCE_ROLLBACK)); - ut_ad(!(trx->in_innodb & TRX_FORCE_ROLLBACK_ASYNC)); - - ++trx->version; /* Check whether it is an AUTOCOMMIT SELECT */ trx->auto_commit = thd_trx_is_auto_commit(trx->mysql_thd); @@ -1731,16 +1672,9 @@ trx_commit_in_memory( MONITOR_INC(MONITOR_TRX_NL_RO_COMMIT); - /* AC-NL-RO transactions can't be rolled back asynchronously. */ - ut_ad(!trx->abort); - ut_ad(!(trx->in_innodb - & (TRX_FORCE_ROLLBACK | TRX_FORCE_ROLLBACK_ASYNC))); - DBUG_LOG("trx", "Autocommit in memory: " << trx); trx->state = TRX_STATE_NOT_STARTED; - } else { - if (trx->id > 0) { /* For consistent snapshot, we need to remove current transaction from running transaction id list for mvcc @@ -1864,20 +1798,8 @@ trx_commit_in_memory( } #endif - /* Because we can rollback transactions asynchronously, we change - the state at the last step. trx_t::abort cannot change once commit - or rollback has started because we will have released the locks by - the time we get here. 
*/ - - if (trx->abort) { - - trx->abort = false; - DBUG_LOG("trx", "Abort: " << trx); - trx->state = TRX_STATE_FORCED_ROLLBACK; - } else { - DBUG_LOG("trx", "Commit in memory: " << trx); - trx->state = TRX_STATE_NOT_STARTED; - } + DBUG_LOG("trx", "Commit in memory: " << trx); + trx->state = TRX_STATE_NOT_STARTED; /* trx->in_mysql_trx_list would hold between trx_allocate_for_mysql() and trx_free_for_mysql(). It does not @@ -2089,8 +2011,6 @@ trx_commit_or_rollback_prepare( switch (trx->state) { case TRX_STATE_NOT_STARTED: - case TRX_STATE_FORCED_ROLLBACK: - trx_start_low(trx, true); /* fall through */ @@ -2194,22 +2114,12 @@ trx_commit_for_mysql( /*=================*/ trx_t* trx) /*!< in/out: transaction */ { - TrxInInnoDB trx_in_innodb(trx, true); - - if (trx_in_innodb.is_aborted() - && trx->killed_by != os_thread_get_curr_id()) { - - return(DB_FORCED_ABORT); - } - /* Because we do not do the commit by sending an Innobase sig to the transaction, we must here make sure that trx has been started. 
*/ switch (trx->state) { case TRX_STATE_NOT_STARTED: - case TRX_STATE_FORCED_ROLLBACK: - ut_d(trx->start_file = __FILE__); ut_d(trx->start_line = __LINE__); @@ -2270,7 +2180,6 @@ trx_mark_sql_stat_end( case TRX_STATE_COMMITTED_IN_MEMORY: break; case TRX_STATE_NOT_STARTED: - case TRX_STATE_FORCED_ROLLBACK: trx->undo_no = 0; trx->undo_rseg_space = 0; /* fall through */ @@ -2321,9 +2230,6 @@ trx_print_low( case TRX_STATE_NOT_STARTED: fputs(", not started", f); goto state_ok; - case TRX_STATE_FORCED_ROLLBACK: - fputs(", forced rollback", f); - goto state_ok; case TRX_STATE_ACTIVE: fprintf(f, ", ACTIVE %lu sec", (ulong) difftime(time(NULL), trx->start_time)); @@ -2465,10 +2371,6 @@ wsrep_trx_print_locking( fprintf(f, ", ACTIVE %lu sec", (ulong) difftime(time(NULL), trx->start_time)); goto state_ok; - case TRX_STATE_FORCED_ROLLBACK: - fprintf(f, ", FORCED ROLLBACK, %lu sec", - (ulong) difftime(time(NULL), trx->start_time)); - goto state_ok; case TRX_STATE_PREPARED: fprintf(f, ", ACTIVE (PREPARED) %lu sec", (ulong) difftime(time(NULL), trx->start_time)); @@ -2599,7 +2501,6 @@ trx_assert_started( return(TRUE); case TRX_STATE_NOT_STARTED: - case TRX_STATE_FORCED_ROLLBACK: break; } @@ -2714,10 +2615,6 @@ trx_prepare( /*========*/ trx_t* trx) /*!< in/out: transaction */ { - /* This transaction has crossed the point of no return and cannot - be rolled back asynchronously now. It must commit or rollback - synhronously. */ - /* Only fresh user transactions can be prepared. Recovered transactions cannot. */ ut_a(!trx->is_recovered); @@ -2755,29 +2652,17 @@ trx_prepare( } } -/** -Does the transaction prepare for MySQL. -@param[in, out] trx Transaction instance to prepare */ -dberr_t -trx_prepare_for_mysql(trx_t* trx) +/** XA PREPARE a transaction. 
+@param[in,out] trx transaction to prepare */ +void trx_prepare_for_mysql(trx_t* trx) { trx_start_if_not_started_xa(trx, false); - TrxInInnoDB trx_in_innodb(trx, true); - - if (trx_in_innodb.is_aborted() - && trx->killed_by != os_thread_get_curr_id()) { - - return(DB_FORCED_ABORT); - } - trx->op_info = "preparing"; trx_prepare(trx); trx->op_info = ""; - - return(DB_SUCCESS); } /**********************************************************************//** @@ -2925,7 +2810,6 @@ trx_start_if_not_started_xa_low( { switch (trx->state) { case TRX_STATE_NOT_STARTED: - case TRX_STATE_FORCED_ROLLBACK: trx_start_low(trx, read_write); return; @@ -2958,13 +2842,10 @@ trx_start_if_not_started_low( { switch (trx->state) { case TRX_STATE_NOT_STARTED: - case TRX_STATE_FORCED_ROLLBACK: - trx_start_low(trx, read_write); return; case TRX_STATE_ACTIVE: - if (read_write && trx->id == 0 && !trx->read_only) { trx_set_rw_mode(trx); } @@ -3021,8 +2902,6 @@ trx_start_for_ddl_low( { switch (trx->state) { case TRX_STATE_NOT_STARTED: - case TRX_STATE_FORCED_ROLLBACK: - /* Flag this transaction as a dictionary operation, so that the data dictionary will be locked in crash recovery. */ @@ -3114,146 +2993,3 @@ trx_set_rw_mode( mutex_exit(&trx_sys->mutex); } - -/** -Kill all transactions that are blocking this transaction from acquiring locks. -@param[in,out] trx High priority transaction */ - -void -trx_kill_blocking(trx_t* trx) -{ - if (trx->hit_list.empty()) { - return; - } - - DEBUG_SYNC_C("trx_kill_blocking_enter"); - - ulint had_dict_lock = trx->dict_operation_lock_mode; - - switch (had_dict_lock) { - case 0: - break; - - case RW_S_LATCH: - /* Release foreign key check latch */ - row_mysql_unfreeze_data_dictionary(trx); - break; - - default: - /* There should never be a lock wait when the - dictionary latch is reserved in X mode. Dictionary - transactions should only acquire locks on dictionary - tables, not other tables. 
All access to dictionary - tables should be covered by dictionary - transactions. */ - ut_error; - } - - ut_a(trx->dict_operation_lock_mode == 0); - - /** Kill the transactions in the lock acquisition order old -> new. */ - hit_list_t::reverse_iterator end = trx->hit_list.rend(); - - for (hit_list_t::reverse_iterator it = trx->hit_list.rbegin(); - it != end; - ++it) { - - trx_t* victim_trx = it->m_trx; - ulint version = it->m_version; - - /* Shouldn't commit suicide. */ - ut_ad(victim_trx != trx); - ut_ad(victim_trx->mysql_thd != trx->mysql_thd); - - /* Check that the transaction isn't active inside - InnoDB code. We have to wait while it is executing - in the InnoDB context. This can potentially take a - long time */ - - trx_mutex_enter(victim_trx); - ut_ad(version <= victim_trx->version); - - ulint loop_count = 0; - /* start with optimistic sleep time of 20 micro seconds. */ - ulint sleep_time = 20; - - while ((victim_trx->in_innodb & TRX_FORCE_ROLLBACK_MASK) > 0 - && victim_trx->version == version) { - - trx_mutex_exit(victim_trx); - - loop_count++; - /* If the wait is long, don't hog the cpu. */ - if (loop_count < 100) { - /* 20 microseconds */ - sleep_time = 20; - } else if (loop_count < 1000) { - /* 1 millisecond */ - sleep_time = 1000; - } else { - /* 100 milliseconds */ - sleep_time = 100000; - } - - os_thread_sleep(sleep_time); - - trx_mutex_enter(victim_trx); - } - - /* Compare the version to check if the transaction has - already finished */ - if (victim_trx->version != version) { - trx_mutex_exit(victim_trx); - continue; - } - - /* We should never kill background transactions. 
*/ - ut_ad(victim_trx->mysql_thd != NULL); - - ut_ad(!(trx->in_innodb & TRX_FORCE_ROLLBACK_DISABLE)); - ut_ad(victim_trx->in_innodb & TRX_FORCE_ROLLBACK); - ut_ad(victim_trx->in_innodb & TRX_FORCE_ROLLBACK_ASYNC); - ut_ad(victim_trx->killed_by == os_thread_get_curr_id()); - ut_ad(victim_trx->version == it->m_version); - - /* We don't kill Read Only, Background or high priority - transactions. */ - ut_a(!victim_trx->read_only); - ut_a(victim_trx->mysql_thd != NULL); - - trx_mutex_exit(victim_trx); - -#ifndef DBUG_OFF - char buffer[1024]; -#endif /* !DBUG_OFF */ - - DBUG_LOG("trx", - "High Priority Transaction " - << trx->id << " killed transaction " - << victim_trx->id << " in hit list" - << " - " - << thd_get_error_context_description( - victim_trx->mysql_thd, - buffer, sizeof(buffer), 512)); - - trx_rollback_for_mysql(victim_trx); - trx_mutex_enter(victim_trx); - - version++; - ut_ad(victim_trx->version == version); - - my_atomic_storelong(&victim_trx->killed_by, 0); - - victim_trx->in_innodb &= TRX_FORCE_ROLLBACK_MASK; - - trx_mutex_exit(victim_trx); - } - - trx->hit_list.clear(); - - if (had_dict_lock) { - - row_mysql_freeze_data_dictionary(trx); - } - -} From 788b3ee86d7959d4c10756a78d6bc5c8ba34396a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Tue, 13 Mar 2018 14:06:30 +0200 Subject: [PATCH 063/139] Reduce the diff from 5.7 in DeadlockChecker::search() This is a non-functional change. --- storage/innobase/lock/lock0lock.cc | 97 ++++++++++++++---------------- 1 file changed, 45 insertions(+), 52 deletions(-) diff --git a/storage/innobase/lock/lock0lock.cc b/storage/innobase/lock/lock0lock.cc index 213b370ee36..64676d827ad 100644 --- a/storage/innobase/lock/lock0lock.cc +++ b/storage/innobase/lock/lock0lock.cc @@ -305,8 +305,8 @@ private: /** This is to avoid malloc/free calls. */ static state_t s_states[MAX_STACK_SIZE]; - /** Set if thd_rpl_deadlock_check() should be called for waits. 
*/ - bool m_report_waiters; + /** Set if thd_rpl_deadlock_check() should be called for waits. */ + const bool m_report_waiters; }; /** Counter to mark visited nodes during deadlock search. */ @@ -7611,7 +7611,6 @@ DeadlockChecker::search() const lock_t* lock = get_first_lock(&heap_no); for (;;) { - /* We should never visit the same sub-tree more than once. */ ut_ad(lock == NULL || !is_visited(lock)); @@ -7626,7 +7625,9 @@ DeadlockChecker::search() if (lock == NULL) { break; - } else if (lock == m_wait_lock) { + } + + if (lock == m_wait_lock) { /* We can mark this subtree as searched */ ut_ad(lock->trx->lock.deadlock_mark <= m_mark_start); @@ -7641,62 +7642,58 @@ DeadlockChecker::search() /* Backtrack */ lock = NULL; + continue; + } - } else if (!lock_has_to_wait(m_wait_lock, lock)) { - + if (!lock_has_to_wait(m_wait_lock, lock)) { /* No conflict, next lock */ lock = get_next_lock(lock, heap_no); + continue; + } - } else if (lock->trx == m_start) { - + if (lock->trx == m_start) { /* Found a cycle. */ - notify(lock); + return select_victim(); + } - return(select_victim()); - - } else if (is_too_deep()) { - + if (is_too_deep()) { /* Search too deep to continue. */ m_too_deep = true; - return(m_start); + return m_start; + } - } else { - /* We do not need to report autoinc locks to the upper - layer. These locks are released before commit, so they - can not cause deadlocks with binlog-fixed commit - order. */ - if (m_report_waiters && - (lock_get_type_low(lock) != LOCK_TABLE || - lock_get_mode(lock) != LOCK_AUTO_INC)) { - thd_rpl_deadlock_check(m_start->mysql_thd, - lock->trx->mysql_thd); + /* We do not need to report autoinc locks to the upper + layer. These locks are released before commit, so they + can not cause deadlocks with binlog-fixed commit + order. 
*/ + if (m_report_waiters + && (lock_get_type_low(lock) != LOCK_TABLE + || lock_get_mode(lock) != LOCK_AUTO_INC)) { + thd_rpl_deadlock_check(m_start->mysql_thd, + lock->trx->mysql_thd); + } + + if (lock->trx->lock.que_state == TRX_QUE_LOCK_WAIT) { + /* Another trx ahead has requested a lock in an + incompatible mode, and is itself waiting for a lock. */ + + ++m_cost; + + if (!push(lock, heap_no)) { + m_too_deep = true; + return m_start; } - if (lock->trx->lock.que_state == TRX_QUE_LOCK_WAIT) { + m_wait_lock = lock->trx->lock.wait_lock; - /* Another trx ahead has requested a lock in an - incompatible mode, and is itself waiting for a lock. */ + lock = get_first_lock(&heap_no); - ++m_cost; - - if (!push(lock, heap_no)) { - m_too_deep = true; - return(m_start); - } - - - m_wait_lock = lock->trx->lock.wait_lock; - - lock = get_first_lock(&heap_no); - - if (is_visited(lock)) { - lock = get_next_lock(lock, heap_no); - } - - } else { + if (is_visited(lock)) { lock = get_next_lock(lock, heap_no); } + } else { + lock = get_next_lock(lock, heap_no); } } @@ -7783,17 +7780,13 @@ DeadlockChecker::check_and_resolve(const lock_t* lock, trx_t* trx) trx_mutex_exit(trx); const trx_t* victim_trx; - THD* start_mysql_thd; - bool report_waits = false; - - start_mysql_thd = trx->mysql_thd; - - if (start_mysql_thd && thd_need_wait_reports(start_mysql_thd)) - report_waits = true; + const bool report_waiters = trx->mysql_thd + && thd_need_wait_reports(trx->mysql_thd); /* Try and resolve as many deadlocks as possible. 
*/ do { - DeadlockChecker checker(trx, lock, s_lock_mark_counter, report_waits); + DeadlockChecker checker(trx, lock, s_lock_mark_counter, + report_waiters); victim_trx = checker.search(); From cac373f5333ad8dcfbc2b9d512ccc589f241008e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Tue, 13 Mar 2018 14:15:46 +0200 Subject: [PATCH 064/139] Add missing #ifdef WITH_WSREP lock_table_create(): Move the WSREP parameter c_lock last, and make it NULL by default, to avoid the need for a wrapper function. lock_table_enqueue_waiting(): Move the WSREP parameter c_lock last. --- storage/innobase/lock/lock0lock.cc | 40 ++++++++++++++++-------------- 1 file changed, 21 insertions(+), 19 deletions(-) diff --git a/storage/innobase/lock/lock0lock.cc b/storage/innobase/lock/lock0lock.cc index 64676d827ad..c15e5547d8a 100644 --- a/storage/innobase/lock/lock0lock.cc +++ b/storage/innobase/lock/lock0lock.cc @@ -3766,12 +3766,15 @@ UNIV_INLINE lock_t* lock_table_create( /*==============*/ - lock_t* c_lock, /*!< in: conflicting lock or NULL */ dict_table_t* table, /*!< in/out: database table in dictionary cache */ ulint type_mode,/*!< in: lock mode possibly ORed with LOCK_WAIT */ - trx_t* trx) /*!< in: trx */ + trx_t* trx /*!< in: trx */ +#ifdef WITH_WSREP + , lock_t* c_lock = NULL /*!< in: conflicting lock */ +#endif + ) { lock_t* lock; @@ -3872,18 +3875,6 @@ lock_table_create( return(lock); } -UNIV_INLINE -lock_t* -lock_table_create( -/*==============*/ - dict_table_t* table, /*!< in/out: database table - in dictionary cache */ - ulint type_mode,/*!< in: lock mode possibly ORed with - LOCK_WAIT */ - trx_t* trx) /*!< in: trx */ -{ - return (lock_table_create(NULL, table, type_mode, trx)); -} /*************************************************************//** Pops autoinc lock requests from the transaction's autoinc_locks. 
We @@ -4026,11 +4017,14 @@ static dberr_t lock_table_enqueue_waiting( /*=======================*/ - lock_t* c_lock, /*!< in: conflicting lock or NULL */ ulint mode, /*!< in: lock mode this transaction is requesting */ dict_table_t* table, /*!< in/out: table */ - que_thr_t* thr) /*!< in: query thread */ + que_thr_t* thr /*!< in: query thread */ +#ifdef WITH_WSREP + , lock_t* c_lock /*!< in: conflicting lock or NULL */ +#endif +) { trx_t* trx; lock_t* lock; @@ -4060,7 +4054,11 @@ lock_table_enqueue_waiting( #endif /* WITH_WSREP */ /* Enqueue the lock request that will wait to be granted */ - lock = lock_table_create(c_lock, table, mode | LOCK_WAIT, trx); + lock = lock_table_create(table, mode | LOCK_WAIT, trx +#ifdef WITH_WSREP + , c_lock +#endif + ); const trx_t* victim_trx = DeadlockChecker::check_and_resolve(lock, trx); @@ -4215,9 +4213,13 @@ lock_table( mode: this trx may have to wait */ if (wait_for != NULL) { - err = lock_table_enqueue_waiting(wait_for, mode | flags, table, thr); + err = lock_table_enqueue_waiting(mode | flags, table, thr +#ifdef WITH_WSREP + , wait_for +#endif + ); } else { - lock_table_create(wait_for, table, mode | flags, trx); + lock_table_create(table, mode | flags, trx); ut_a(!flags || mode == LOCK_S || mode == LOCK_X); From f93a219c72b6b6c2ba023a002434738ec913ddad Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Tue, 13 Mar 2018 14:19:03 +0200 Subject: [PATCH 065/139] MDEV-13935 INSERT stuck at state Unlocking tables lock_rec_queue_validate(): Restore some assertions. DeadlockChecker::select_victim(): Reduce the WSREP-related diff. 
--- storage/innobase/lock/lock0lock.cc | 49 ++++++++++++------------------ 1 file changed, 20 insertions(+), 29 deletions(-) diff --git a/storage/innobase/lock/lock0lock.cc b/storage/innobase/lock/lock0lock.cc index c15e5547d8a..99b771a9887 100644 --- a/storage/innobase/lock/lock0lock.cc +++ b/storage/innobase/lock/lock0lock.cc @@ -1284,7 +1284,7 @@ wsrep_kill_victim( } wsrep_innobase_kill_one_trx(trx->mysql_thd, - (const trx_t*) trx, lock->trx, TRUE); + trx, lock->trx, TRUE); } } } @@ -5669,22 +5669,20 @@ lock_rec_queue_validate( /* impl_trx cannot be committed until lock_mutex_exit() because lock_trx_release_locks() acquires lock_sys->mutex */ - if (impl_trx != NULL) { - const lock_t* other_lock - = lock_rec_other_has_expl_req( - LOCK_S, block, true, heap_no, - impl_trx); - + if (!impl_trx) { + } else if (const lock_t* other_lock + = lock_rec_other_has_expl_req( + LOCK_S, block, true, heap_no, + impl_trx)) { /* The impl_trx is holding an implicit lock on the given record 'rec'. So there cannot be another explicit granted lock. Also, there can be another explicit waiting lock only if the impl_trx has an explicit granted lock. */ - if (other_lock != NULL) { #ifdef WITH_WSREP - if (wsrep_on(other_lock->trx->mysql_thd) && !lock_get_wait(other_lock) ) { - + if (wsrep_on(other_lock->trx->mysql_thd)) { + if (!lock_get_wait(other_lock) ) { ib::info() << "WSREP impl BF lock conflict for my impl lock:\n BF:" << ((wsrep_thd_is_BF(impl_trx->mysql_thd, FALSE)) ? 
"BF" : "normal") << " exec: " << wsrep_thd_exec_mode(impl_trx->mysql_thd) << " conflict: " << @@ -5702,18 +5700,16 @@ lock_rec_queue_validate( wsrep_thd_query(otrx->mysql_thd); } - if (wsrep_on(other_lock->trx->mysql_thd) && !lock_rec_has_expl( - LOCK_X | LOCK_REC_NOT_GAP, - block, heap_no, impl_trx)) { + if (!lock_rec_has_expl(LOCK_X | LOCK_REC_NOT_GAP, + block, heap_no, + impl_trx)) { ib::info() << "WSREP impl BF lock conflict"; } -#else /* !WITH_WSREP */ - ut_a(lock_get_wait(other_lock)); - ut_a(lock_rec_has_expl( - LOCK_X | LOCK_REC_NOT_GAP, - block, heap_no, impl_trx)); + } else #endif /* WITH_WSREP */ - } + ut_ad(lock_get_wait(other_lock)); + ut_ad(lock_rec_has_expl(LOCK_X | LOCK_REC_NOT_GAP, + block, heap_no, impl_trx)); } } @@ -7575,23 +7571,18 @@ DeadlockChecker::select_victim() const #ifdef WITH_WSREP if (wsrep_thd_is_BF(m_start->mysql_thd, TRUE)) { return(m_wait_lock->trx); - } else { -#endif /* WITH_WSREP */ - return(m_start); -#ifdef WITH_WSREP } -#endif +#endif /* WITH_WSREP */ + return(m_start); } #ifdef WITH_WSREP if (wsrep_thd_is_BF(m_wait_lock->trx->mysql_thd, TRUE)) { return(m_start); - } else { -#endif /* WITH_WSREP */ - return(m_wait_lock->trx); -#ifdef WITH_WSREP } -#endif +#endif /* WITH_WSREP */ + + return(m_wait_lock->trx); } /** Looks iteratively for a deadlock. Note: the joining transaction may From 27d4333cb9f7ab7c6738bdfe0aea79a63dc62a51 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Tue, 13 Mar 2018 17:37:03 +0200 Subject: [PATCH 066/139] MDEV-13935 INSERT stuck at state Unlocking tables Refactor lock_grant(). With innodb_lock_schedule_algorithm=VATS some callers were passing an incorrect parameter owns_trx_mutex to lock_grant(). lock_grant_after_reset(): Refactored from lock_grant(), without the call to lock_reset_lock_and_trx_wait(). lock_grant_have_trx_mutex(): A variant of lock_grant() where the caller already holds the lock->trx->mutex. The normal lock_grant() will acquire and release lock->trx->mutex. 
lock_grant(): Define as a wrapper that will acquire lock->trx->mutex. --- storage/innobase/lock/lock0lock.cc | 137 ++++++++++++----------------- 1 file changed, 55 insertions(+), 82 deletions(-) diff --git a/storage/innobase/lock/lock0lock.cc b/storage/innobase/lock/lock0lock.cc index 99b771a9887..731b6ef5ed0 100644 --- a/storage/innobase/lock/lock0lock.cc +++ b/storage/innobase/lock/lock0lock.cc @@ -80,15 +80,9 @@ lock_rec_has_to_wait_in_queue( /*==========================*/ const lock_t* wait_lock); /*!< in: waiting record lock */ -/*************************************************************//** -Grants a lock to a waiting lock request and releases the waiting transaction. -The caller must hold lock_sys->mutex. */ -static -void -lock_grant( -/*=======*/ - lock_t* lock, /*!< in/out: waiting lock request */ - bool owns_trx_mutex); /*!< in: whether lock->trx->mutex is owned */ +/** Grant a lock to a waiting lock request and release the waiting transaction +after lock_reset_lock_and_trx_wait() has been called. */ +static void lock_grant_after_reset(lock_t* lock); extern "C" void thd_rpl_deadlock_check(MYSQL_THD thd, MYSQL_THD other_thd); extern "C" int thd_need_wait_reports(const MYSQL_THD thd); @@ -691,6 +685,12 @@ lock_reset_lock_and_trx_wait( lock->type_mode &= ~LOCK_WAIT; } +static inline void lock_grant_have_trx_mutex(lock_t* lock) +{ + lock_reset_lock_and_trx_wait(lock); + lock_grant_after_reset(lock); +} + /*********************************************************************//** Gets the gap flag of a record lock. 
@return LOCK_GAP or 0 */ @@ -1772,7 +1772,7 @@ lock_rec_insert_by_trx_age( cell->node = in_lock; in_lock->hash = node; if (lock_get_wait(in_lock)) { - lock_grant(in_lock, true); + lock_grant_have_trx_mutex(in_lock); return DB_SUCCESS_LOCKED_REC; } return DB_SUCCESS; @@ -1786,7 +1786,7 @@ lock_rec_insert_by_trx_age( in_lock->hash = next; if (lock_get_wait(in_lock) && !lock_rec_has_to_wait_in_queue(in_lock)) { - lock_grant(in_lock, true); + lock_grant_have_trx_mutex(in_lock); if (cell->node != in_lock) { // Move it to the front of the queue node->hash = in_lock->hash; @@ -2380,24 +2380,12 @@ lock_rec_has_to_wait_in_queue( return(NULL); } -/*************************************************************//** -Grants a lock to a waiting lock request and releases the waiting transaction. -The caller must hold lock_sys->mutex but not lock->trx->mutex. */ -static -void -lock_grant( -/*=======*/ - lock_t* lock, /*!< in/out: waiting lock request */ - bool owns_trx_mutex) /*!< in: whether lock->trx->mutex is owned */ +/** Grant a lock to a waiting lock request and release the waiting transaction +after lock_reset_lock_and_trx_wait() has been called. */ +static void lock_grant_after_reset(lock_t* lock) { ut_ad(lock_mutex_own()); - ut_ad(trx_mutex_own(lock->trx) == owns_trx_mutex); - - lock_reset_lock_and_trx_wait(lock); - - if (!owns_trx_mutex) { - trx_mutex_enter(lock->trx); - } + ut_ad(trx_mutex_own(lock->trx)); if (lock_get_mode(lock) == LOCK_AUTO_INC) { dict_table_t* table = lock->un_member.tab_lock.table; @@ -2429,10 +2417,15 @@ lock_grant( lock_wait_release_thread_if_suspended(thr); } } +} - if (!owns_trx_mutex) { - trx_mutex_exit(lock->trx); - } +/** Grant a lock to a waiting lock request and release the waiting transaction. 
*/ +static void lock_grant(lock_t* lock) +{ + lock_reset_lock_and_trx_wait(lock); + trx_mutex_enter(lock->trx); + lock_grant_after_reset(lock); + trx_mutex_exit(lock->trx); } /*************************************************************//** @@ -2472,17 +2465,13 @@ lock_rec_cancel( static void -lock_grant_and_move_on_page( - hash_table_t* lock_hash, - ulint space, - ulint page_no) +lock_grant_and_move_on_page(ulint rec_fold, ulint space, ulint page_no) { lock_t* lock; - lock_t* previous; - ulint rec_fold = lock_rec_fold(space, page_no); - - previous = (lock_t *) hash_get_nth_cell(lock_hash, - hash_calc_hash(rec_fold, lock_hash))->node; + lock_t* previous = static_cast( + hash_get_nth_cell(lock_sys->rec_hash, + hash_calc_hash(rec_fold, lock_sys->rec_hash)) + ->node); if (previous == NULL) { return; } @@ -2502,14 +2491,13 @@ lock_grant_and_move_on_page( ut_ad(previous->hash == lock || previous == lock); /* Grant locks if there are no conflicting locks ahead. Move granted locks to the head of the list. */ - for (;lock != NULL;) { + while (lock) { /* If the lock is a wait lock on this page, and it does not need to wait. */ - if ((lock->un_member.rec_lock.space == space) - && (lock->un_member.rec_lock.page_no == page_no) - && lock_get_wait(lock) - && !lock_rec_has_to_wait_in_queue(lock)) { - - lock_grant(lock, false); + if (lock_get_wait(lock) + && lock->un_member.rec_lock.space == space + && lock->un_member.rec_lock.page_no == page_no + && !lock_rec_has_to_wait_in_queue(lock)) { + lock_grant(lock); if (previous != NULL) { /* Move the lock to the head of the list. */ @@ -2528,33 +2516,20 @@ lock_grant_and_move_on_page( } } -/*************************************************************//** -Removes a record lock request, waiting or granted, from the queue and -grants locks to other transactions in the queue if they now are entitled -to a lock. NOTE: all record locks contained in in_lock are removed. 
*/ -static -void -lock_rec_dequeue_from_page( -/*=======================*/ - lock_t* in_lock) /*!< in: record lock object: all - record locks which are contained in - this lock object are removed; - transactions waiting behind will - get their lock requests granted, - if they are now qualified to it */ +/** Remove a record lock request, waiting or granted, from the queue and +grant locks to other transactions in the queue if they now are entitled +to a lock. NOTE: all record locks contained in in_lock are removed. +@param[in,out] in_lock record lock */ +static void lock_rec_dequeue_from_page(lock_t* in_lock) { ulint space; ulint page_no; - lock_t* lock; - trx_lock_t* trx_lock; hash_table_t* lock_hash; ut_ad(lock_mutex_own()); ut_ad(lock_get_type_low(in_lock) == LOCK_REC); /* We may or may not be holding in_lock->trx->mutex here. */ - trx_lock = &in_lock->trx->lock; - space = in_lock->un_member.rec_lock.space; page_no = in_lock->un_member.rec_lock.page_no; @@ -2562,38 +2537,36 @@ lock_rec_dequeue_from_page( lock_hash = lock_hash_get(in_lock->type_mode); - HASH_DELETE(lock_t, hash, lock_hash, - lock_rec_fold(space, page_no), in_lock); + ulint rec_fold = lock_rec_fold(space, page_no); - UT_LIST_REMOVE(trx_lock->trx_locks, in_lock); + HASH_DELETE(lock_t, hash, lock_hash, rec_fold, in_lock); + UT_LIST_REMOVE(in_lock->trx->lock.trx_locks, in_lock); MONITOR_INC(MONITOR_RECLOCK_REMOVED); MONITOR_DEC(MONITOR_NUM_RECLOCK); if (innodb_lock_schedule_algorithm - == INNODB_LOCK_SCHEDULE_ALGORITHM_FCFS || - thd_is_replication_slave_thread(in_lock->trx->mysql_thd)) { - + == INNODB_LOCK_SCHEDULE_ALGORITHM_FCFS + || lock_hash != lock_sys->rec_hash + || thd_is_replication_slave_thread(in_lock->trx->mysql_thd)) { /* Check if waiting locks in the queue can now be granted: grant locks if there are no conflicting locks ahead. Stop at the first X lock that is waiting or has been granted. 
*/ - for (lock = lock_rec_get_first_on_page_addr(lock_hash, space, - page_no); - lock != NULL; - lock = lock_rec_get_next_on_page(lock)) { + for (lock_t* lock = lock_rec_get_first_on_page_addr( + lock_hash, space, page_no); + lock != NULL; + lock = lock_rec_get_next_on_page(lock)) { if (lock_get_wait(lock) - && !lock_rec_has_to_wait_in_queue(lock)) { - + && !lock_rec_has_to_wait_in_queue(lock)) { /* Grant the lock */ ut_ad(lock->trx != in_lock->trx); - - lock_grant(lock, false); + lock_grant(lock); } } } else { - lock_grant_and_move_on_page(lock_hash, space, page_no); + lock_grant_and_move_on_page(rec_fold, space, page_no); } } @@ -4322,7 +4295,7 @@ lock_table_dequeue( /* Grant the lock */ ut_ad(in_lock->trx != lock->trx); - lock_grant(lock, false); + lock_grant(lock); } } } @@ -4424,7 +4397,7 @@ lock_grant_and_move_on_rec( && lock_get_wait(lock) && !lock_rec_has_to_wait_in_queue(lock)) { - lock_grant(lock, false); + lock_grant(lock); if (previous != NULL) { /* Move the lock to the head of the list. 
*/ @@ -4516,7 +4489,7 @@ released: /* Grant the lock */ ut_ad(trx != lock->trx); - lock_grant(lock, false); + lock_grant(lock); } } } else { From 61e192fa40f85e219f9ab42d2f47bd5633ca8ff1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Wed, 14 Mar 2018 09:33:38 +0200 Subject: [PATCH 067/139] lock_reset_lock_and_trx_wait(): Remove diagnostics --- storage/innobase/lock/lock0lock.cc | 40 ++---------------------------- 1 file changed, 2 insertions(+), 38 deletions(-) diff --git a/storage/innobase/lock/lock0lock.cc b/storage/innobase/lock/lock0lock.cc index 731b6ef5ed0..900ea4f91ed 100644 --- a/storage/innobase/lock/lock0lock.cc +++ b/storage/innobase/lock/lock0lock.cc @@ -643,44 +643,8 @@ lock_reset_lock_and_trx_wait( { ut_ad(lock_get_wait(lock)); ut_ad(lock_mutex_own()); - - if (lock->trx->lock.wait_lock && - lock->trx->lock.wait_lock != lock) { - const char* stmt=NULL; - const char* stmt2=NULL; - size_t stmt_len; - trx_id_t trx_id = 0; - stmt = lock->trx->mysql_thd - ? innobase_get_stmt_unsafe( - lock->trx->mysql_thd, &stmt_len) - : NULL; - - if (lock->trx->lock.wait_lock && - lock->trx->lock.wait_lock->trx) { - trx_id = lock->trx->lock.wait_lock->trx->id; - stmt2 = lock->trx->lock.wait_lock->trx->mysql_thd - ? 
innobase_get_stmt_unsafe( - lock->trx->lock.wait_lock - ->trx->mysql_thd, &stmt_len) - : NULL; - } - - ib::error() << - "Trx id " << ib::hex(lock->trx->id) - << " is waiting a lock " - << " for this trx id " << ib::hex(trx_id) - << " wait_lock " << lock->trx->lock.wait_lock; - if (stmt) { - ib::info() << " SQL1: " << stmt; - } - - if (stmt2) { - ib::info() << " SQL2: " << stmt2; - } - - ut_ad(0); - } - + ut_ad(lock->trx->lock.wait_lock == NULL + || lock->trx->lock.wait_lock == lock); lock->trx->lock.wait_lock = NULL; lock->type_mode &= ~LOCK_WAIT; } From d2a15092c194f47f54f8f79098747f6f1878fde4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Wed, 14 Mar 2018 09:39:47 +0200 Subject: [PATCH 068/139] lock_table_create(), lock_rec_create(): Clean up the WSREP code By definition, c_lock->trx->lock.wait_lock==c_lock cannot hold. That is, the owner transaction of a lock cannot be waiting for that particular lock. It must have been waiting for some other lock. Remove the dead code related to that. Also, test c_lock for NULLness only once. 
--- storage/innobase/lock/lock0lock.cc | 9 +-------- 1 file changed, 1 insertion(+), 8 deletions(-) diff --git a/storage/innobase/lock/lock0lock.cc b/storage/innobase/lock/lock0lock.cc index 900ea4f91ed..ae04ed505f7 100644 --- a/storage/innobase/lock/lock0lock.cc +++ b/storage/innobase/lock/lock0lock.cc @@ -639,7 +639,7 @@ UNIV_INLINE void lock_reset_lock_and_trx_wait( /*=========================*/ - lock_t* lock) /*!< in/out: record lock */ + lock_t* lock) /*!< in/out: record lock */ { ut_ad(lock_get_wait(lock)); ut_ad(lock_mutex_own()); @@ -1626,13 +1626,6 @@ lock_rec_create_low( trx_mutex_enter(trx); } - /* trx might not wait for c_lock, but some other lock - does not matter if wait_lock was released above - */ - if (c_lock->trx->lock.wait_lock == c_lock) { - lock_reset_lock_and_trx_wait(lock); - } - trx_mutex_exit(c_lock->trx); if (wsrep_debug) { From 27c54b77c1aad3a8a2d4df1dfd9194e128374e2d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Wed, 14 Mar 2018 10:00:19 +0200 Subject: [PATCH 069/139] Make some locking primitives inline lock_rec_trx_wait(): Merge to the only caller lock_prdt_rec_move(). lock_rec_reset_nth_bit(), lock_set_lock_and_trx_wait(), lock_reset_lock_and_trx_wait(): Define in lock0priv.h. --- storage/innobase/include/lock0lock.h | 10 ---- storage/innobase/include/lock0priv.h | 52 +++++++++++++++++- storage/innobase/lock/lock0lock.cc | 79 ---------------------------- storage/innobase/lock/lock0prdt.cc | 3 +- 4 files changed, 52 insertions(+), 92 deletions(-) diff --git a/storage/innobase/include/lock0lock.h b/storage/innobase/include/lock0lock.h index ddbbdeebfee..c9500f7fd49 100644 --- a/storage/innobase/include/lock0lock.h +++ b/storage/innobase/include/lock0lock.h @@ -1064,16 +1064,6 @@ lock_rec_free_all_from_discard_page( /*================================*/ const buf_block_t* block); /*!< in: page to be discarded */ -/** Reset the nth bit of a record lock. 
-@param[in,out] lock record lock -@param[in] i index of the bit that will be reset -@param[in] type whether the lock is in wait mode */ -void -lock_rec_trx_wait( - lock_t* lock, - ulint i, - ulint type); - /** The lock system */ extern lock_sys_t* lock_sys; diff --git a/storage/innobase/include/lock0priv.h b/storage/innobase/include/lock0priv.h index 0f35e0ca6d0..1c297a5b945 100644 --- a/storage/innobase/include/lock0priv.h +++ b/storage/innobase/include/lock0priv.h @@ -35,9 +35,8 @@ those functions in lock/ */ #endif #include "univ.i" -#include "dict0types.h" #include "hash0hash.h" -#include "trx0types.h" +#include "trx0trx.h" #ifndef UINT32_MAX #define UINT32_MAX (4294967295U) @@ -643,6 +642,28 @@ lock_rec_set_nth_bit( lock_t* lock, /*!< in: record lock */ ulint i); /*!< in: index of the bit */ +/** Reset the nth bit of a record lock. +@param[in,out] lock record lock +@param[in] i index of the bit that will be reset +@return previous value of the bit */ +inline byte lock_rec_reset_nth_bit(lock_t* lock, ulint i) +{ + ut_ad(lock_get_type_low(lock) == LOCK_REC); + ut_ad(i < lock->un_member.rec_lock.n_bits); + + byte* b = reinterpret_cast(&lock[1]) + (i >> 3); + byte mask = byte(1U << (i & 7)); + byte bit = *b & mask; + *b &= ~mask; + + if (bit != 0) { + ut_ad(lock->trx->lock.n_rec_locks > 0); + --lock->trx->lock.n_rec_locks; + } + + return(bit); +} + /*********************************************************************//** Gets the first or next record lock on a page. @return next lock, NULL if none exists */ @@ -770,6 +791,33 @@ lock_table_has( const dict_table_t* table, /*!< in: table */ enum lock_mode mode); /*!< in: lock mode */ +/** Set the wait status of a lock. 
+@param[in,out] lock lock that will be waited for +@param[in,out] trx transaction that will wait for the lock */ +inline void lock_set_lock_and_trx_wait(lock_t* lock, trx_t* trx) +{ + ut_ad(lock); + ut_ad(lock->trx == trx); + ut_ad(trx->lock.wait_lock == NULL); + ut_ad(lock_mutex_own()); + ut_ad(trx_mutex_own(trx)); + + trx->lock.wait_lock = lock; + lock->type_mode |= LOCK_WAIT; +} + +/** Reset the wait status of a lock. +@param[in,out] lock lock that was possibly being waited for */ +inline void lock_reset_lock_and_trx_wait(lock_t* lock) +{ + ut_ad(lock_get_wait(lock)); + ut_ad(lock_mutex_own()); + ut_ad(lock->trx->lock.wait_lock == NULL + || lock->trx->lock.wait_lock == lock); + lock->trx->lock.wait_lock = NULL; + lock->type_mode &= ~LOCK_WAIT; +} + #include "lock0priv.ic" #endif /* lock0priv_h */ diff --git a/storage/innobase/lock/lock0lock.cc b/storage/innobase/lock/lock0lock.cc index ae04ed505f7..240510ffa59 100644 --- a/storage/innobase/lock/lock0lock.cc +++ b/storage/innobase/lock/lock0lock.cc @@ -613,42 +613,6 @@ lock_get_size(void) return((ulint) sizeof(lock_t)); } -/*********************************************************************//** -Sets the wait flag of a lock and the back pointer in trx to lock. */ -UNIV_INLINE -void -lock_set_lock_and_trx_wait( -/*=======================*/ - lock_t* lock, /*!< in: lock */ - trx_t* trx) /*!< in/out: trx */ -{ - ut_ad(lock); - ut_ad(lock->trx == trx); - ut_ad(trx->lock.wait_lock == NULL); - ut_ad(lock_mutex_own()); - ut_ad(trx_mutex_own(trx)); - - trx->lock.wait_lock = lock; - lock->type_mode |= LOCK_WAIT; -} - -/**********************************************************************//** -The back pointer to a waiting lock request in the transaction is set to NULL -and the wait bit in lock type_mode is reset. 
*/ -UNIV_INLINE -void -lock_reset_lock_and_trx_wait( -/*=========================*/ - lock_t* lock) /*!< in/out: record lock */ -{ - ut_ad(lock_get_wait(lock)); - ut_ad(lock_mutex_own()); - ut_ad(lock->trx->lock.wait_lock == NULL - || lock->trx->lock.wait_lock == lock); - lock->trx->lock.wait_lock = NULL; - lock->type_mode &= ~LOCK_WAIT; -} - static inline void lock_grant_have_trx_mutex(lock_t* lock) { lock_reset_lock_and_trx_wait(lock); @@ -942,49 +906,6 @@ lock_rec_find_set_bit( return(ULINT_UNDEFINED); } -/** Reset the nth bit of a record lock. -@param[in,out] lock record lock -@param[in] i index of the bit that will be reset -@return previous value of the bit */ -UNIV_INLINE -byte -lock_rec_reset_nth_bit( - lock_t* lock, - ulint i) -{ - ut_ad(lock_get_type_low(lock) == LOCK_REC); - ut_ad(i < lock->un_member.rec_lock.n_bits); - - byte* b = reinterpret_cast(&lock[1]) + (i >> 3); - byte mask = static_cast(1U << (i & 7)); - byte bit = *b & mask; - *b &= ~mask; - - if (bit != 0) { - ut_ad(lock->trx->lock.n_rec_locks > 0); - --lock->trx->lock.n_rec_locks; - } - - return(bit); -} - -/** Reset the nth bit of a record lock. -@param[in,out] lock record lock -@param[in] i index of the bit that will be reset -@param[in] type whether the lock is in wait mode */ -void -lock_rec_trx_wait( - lock_t* lock, - ulint i, - ulint type) -{ - lock_rec_reset_nth_bit(lock, i); - - if (type & LOCK_WAIT) { - lock_reset_lock_and_trx_wait(lock); - } -} - /*********************************************************************//** Determines if there are explicit record locks on a page. 
@return an explicit record lock on the page, or NULL if there are none */ diff --git a/storage/innobase/lock/lock0prdt.cc b/storage/innobase/lock/lock0prdt.cc index 23a46a002be..5843508741d 100644 --- a/storage/innobase/lock/lock0prdt.cc +++ b/storage/innobase/lock/lock0prdt.cc @@ -1028,7 +1028,8 @@ lock_prdt_rec_move( const ulint type_mode = lock->type_mode; lock_prdt_t* lock_prdt = lock_get_prdt_from_lock(lock); - lock_rec_trx_wait(lock, PRDT_HEAPNO, type_mode); + lock_rec_reset_nth_bit(lock, PRDT_HEAPNO); + lock_reset_lock_and_trx_wait(lock); lock_prdt_add_to_queue( type_mode, receiver, lock->index, lock->trx, From 6d1d5c3aeb7100429036107d95787ea529f25e91 Mon Sep 17 00:00:00 2001 From: Thirunarayanan Balathandayuthapani Date: Fri, 16 Mar 2018 20:55:55 +0530 Subject: [PATCH 070/139] MDEV-14545 Backup fails due to MLOG_INDEX_LOAD record - Fixed the asan failure of the unsupported_redo test case --- extra/mariabackup/xtrabackup.cc | 2 ++ 1 file changed, 2 insertions(+) diff --git a/extra/mariabackup/xtrabackup.cc b/extra/mariabackup/xtrabackup.cc index c5bdec35f90..78c0ae7d944 100644 --- a/extra/mariabackup/xtrabackup.cc +++ b/extra/mariabackup/xtrabackup.cc @@ -480,10 +480,12 @@ static bool backup_includes(space_id_t space_id) "backup operation.\n"); } + datafiles_iter_free(it); return false; } } + datafiles_iter_free(it); return true; } From e3dd9a95e50ef2019435b01bd9e161d552673a28 Mon Sep 17 00:00:00 2001 From: Varun Gupta Date: Fri, 16 Mar 2018 18:57:21 +0530 Subject: [PATCH 071/139] MDEV-6736: Valgrind warnings 'Invalid read' in subselect_engine::calc_const_tables with SQ in WHERE and HAVING, ORDER BY, materialization+semijoin During cleanup a pointer to the materialised table that was freed was not set to NULL --- mysql-test/r/having.result | 14 ++++++++++++++ mysql-test/t/having.test | 18 ++++++++++++++++++ sql/sql_select.cc | 4 +++- 3 files changed, 35 insertions(+), 1 deletion(-) diff --git a/mysql-test/r/having.result b/mysql-test/r/having.result index 
0b933427303..c509458d830 100644 --- a/mysql-test/r/having.result +++ b/mysql-test/r/having.result @@ -723,4 +723,18 @@ SELECT * FROM t1 JOIN t2 ON c1 = c2 HAVING c2 > 'a' ORDER BY c2 LIMIT 1; c1 c2 x x DROP TABLE t1,t2; +# +# MDEV-6736: Valgrind warnings 'Invalid read' in subselect_engine::calc_const_tables with SQ +# in WHERE and HAVING, ORDER BY, materialization+semijoin +# +CREATE TABLE t1 (a INT) ENGINE=MyISAM; +INSERT INTO t1 VALUES (3),(8); +CREATE TABLE t2 (b INT) ENGINE=MyISAM; +INSERT INTO t2 VALUES (2),(1); +SELECT a FROM t1 +WHERE 9 IN ( SELECT MIN( a ) FROM t1 ) +HAVING a <> ( SELECT COUNT(*) FROM t2 ) +ORDER BY a; +a +DROP TABLE t1,t2; End of 10.0 tests diff --git a/mysql-test/t/having.test b/mysql-test/t/having.test index 1682fe5b874..51cf3fb426d 100644 --- a/mysql-test/t/having.test +++ b/mysql-test/t/having.test @@ -759,4 +759,22 @@ SELECT * FROM t1 JOIN t2 ON c1 = c2 HAVING c2 > 'a' ORDER BY c2 LIMIT 1; DROP TABLE t1,t2; +--echo # +--echo # MDEV-6736: Valgrind warnings 'Invalid read' in subselect_engine::calc_const_tables with SQ +--echo # in WHERE and HAVING, ORDER BY, materialization+semijoin +--echo # + +CREATE TABLE t1 (a INT) ENGINE=MyISAM; +INSERT INTO t1 VALUES (3),(8); + +CREATE TABLE t2 (b INT) ENGINE=MyISAM; +INSERT INTO t2 VALUES (2),(1); + +SELECT a FROM t1 +WHERE 9 IN ( SELECT MIN( a ) FROM t1 ) +HAVING a <> ( SELECT COUNT(*) FROM t2 ) +ORDER BY a; + +DROP TABLE t1,t2; + --echo End of 10.0 tests diff --git a/sql/sql_select.cc b/sql/sql_select.cc index fd8ff6eb016..37d68c730dd 100644 --- a/sql/sql_select.cc +++ b/sql/sql_select.cc @@ -11476,13 +11476,15 @@ void JOIN_TAB::cleanup() } else { + TABLE_LIST *tmp= table->pos_in_table_list; end_read_record(&read_record); - table->pos_in_table_list->jtbm_subselect->cleanup(); + tmp->jtbm_subselect->cleanup(); /* The above call freed the materializedd temptable. 
Set it to NULL so that we don't attempt to touch it if JOIN_TAB::cleanup() is invoked multiple times (it may be) */ + tmp->table= NULL; table=NULL; } DBUG_VOID_RETURN; From 34b03da2114892d96af651a953f6c2527437ad15 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Vicen=C8=9Biu=20Ciorbaru?= Date: Sun, 18 Mar 2018 15:11:48 +0200 Subject: [PATCH 072/139] Update Connector/C --- libmariadb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libmariadb b/libmariadb index 67cc3438a84..668757aaa9a 160000 --- a/libmariadb +++ b/libmariadb @@ -1 +1 @@ -Subproject commit 67cc3438a84df9fa3cc0cfbf9ed81242502702da +Subproject commit 668757aaa9a55d2bcd806908cb5a8e806cd6dc31 From e5e83249c15927576d0d6066bfce32d5a5e103c4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jan=20Lindstr=C3=B6m?= Date: Thu, 15 Feb 2018 10:34:01 +0200 Subject: [PATCH 073/139] MDEV-14875: galera_new_cluster crashes mysqld when existing server contains databases Fortify wsrep_hton so that wsrep calls are not done to NULL-pointers. 
--- sql/wsrep_hton.cc | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/sql/wsrep_hton.cc b/sql/wsrep_hton.cc index 75c1526cb15..944c8d83b76 100644 --- a/sql/wsrep_hton.cc +++ b/sql/wsrep_hton.cc @@ -120,7 +120,7 @@ void wsrep_post_commit(THD* thd, bool all) case LOCAL_COMMIT: { DBUG_ASSERT(thd->wsrep_trx_meta.gtid.seqno != WSREP_SEQNO_UNDEFINED); - if (wsrep->post_commit(wsrep, &thd->wsrep_ws_handle)) + if (wsrep && wsrep->post_commit(wsrep, &thd->wsrep_ws_handle)) { DBUG_PRINT("wsrep", ("set committed fail")); WSREP_WARN("set committed fail: %llu %d", @@ -252,7 +252,7 @@ static int wsrep_rollback(handlerton *hton, THD *thd, bool all) if ((all || !thd_test_options(thd, OPTION_NOT_AUTOCOMMIT | OPTION_BEGIN)) && (thd->variables.wsrep_on && thd->wsrep_conflict_state != MUST_REPLAY)) { - if (wsrep->post_rollback(wsrep, &thd->wsrep_ws_handle)) + if (wsrep && wsrep->post_rollback(wsrep, &thd->wsrep_ws_handle)) { DBUG_PRINT("wsrep", ("setting rollback fail")); WSREP_ERROR("settting rollback fail: thd: %llu, schema: %s, SQL: %s", @@ -294,7 +294,7 @@ int wsrep_commit(handlerton *hton, THD *thd, bool all) possible changes to clean state. 
*/ if (WSREP_PROVIDER_EXISTS) { - if (wsrep->post_rollback(wsrep, &thd->wsrep_ws_handle)) + if (wsrep && wsrep->post_rollback(wsrep, &thd->wsrep_ws_handle)) { DBUG_PRINT("wsrep", ("setting rollback fail")); WSREP_ERROR("settting rollback fail: thd: %llu, schema: %s, SQL: %s", @@ -471,7 +471,7 @@ wsrep_run_wsrep_commit(THD *thd, bool all) } else if (!rcode) { - if (WSREP_OK == rcode) + if (WSREP_OK == rcode && wsrep) rcode = wsrep->pre_commit(wsrep, (wsrep_conn_id_t)thd->thread_id, &thd->wsrep_ws_handle, From f46155a31b97b3458fb6748e945c223c901f642d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jan=20Lindstr=C3=B6m?= Date: Wed, 14 Feb 2018 13:52:37 +0200 Subject: [PATCH 074/139] MDEV-13549: Galera test failures Fix test failure on galera_concurrent_ctas --- mysql-test/suite/galera/disabled.def | 1 - mysql-test/suite/galera/t/galera_concurrent_ctas.test | 6 +++--- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/mysql-test/suite/galera/disabled.def b/mysql-test/suite/galera/disabled.def index 6d4574c0403..19105e88881 100644 --- a/mysql-test/suite/galera/disabled.def +++ b/mysql-test/suite/galera/disabled.def @@ -30,7 +30,6 @@ galera_gcache_recover_manytrx : MDEV-13549 Galera test failures galera_ist_mysqldump : MDEV-13549 Galera test failures mysql-wsrep#31 : MDEV-13549 Galera test failures galera_migrate : MariaDB 10.0 does not support START SLAVE USER -galera_concurrent_ctas : MDEV-13549 Galera test failures galera_wsrep_desync_wsrep_on : MDEV-13549 Galera test failures galera_ssl_upgrade : MDEV-13549 Galera test failures mysql-wsrep#33 : MDEV-13549 Galera test failures diff --git a/mysql-test/suite/galera/t/galera_concurrent_ctas.test b/mysql-test/suite/galera/t/galera_concurrent_ctas.test index f0dcf8e4900..6c4e8be68a7 100644 --- a/mysql-test/suite/galera/t/galera_concurrent_ctas.test +++ b/mysql-test/suite/galera/t/galera_concurrent_ctas.test @@ -43,9 +43,9 @@ let $run=10; while($run) { --error 0,1 - exec $MYSQL --user=root --host=127.0.0.1 
--port=$NODE_MYPORT_1 test - < $MYSQLTEST_VARDIR/tmp/galera_concurrent.sql & - $MYSQL --user=root --host=127.0.0.1 --port=$NODE_MYPORT_2 test + exec $MYSQL --user=root --host=127.0.0.1 --port=$NODE_MYPORT_1 test \ + < $MYSQLTEST_VARDIR/tmp/galera_concurrent.sql & \ + $MYSQL --user=root --host=127.0.0.1 --port=$NODE_MYPORT_2 test \ < $MYSQLTEST_VARDIR/tmp/galera_concurrent.sql; dec $run; } From 31e2ab513d1d0d6caad96f613cbce3ad25c19497 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jan=20Lindstr=C3=B6m?= Date: Wed, 14 Feb 2018 09:48:29 +0200 Subject: [PATCH 075/139] MDEV-13549: Galera test failures Fix test failure on galera_flush_local. --- mysql-test/suite/galera/disabled.def | 1 - .../suite/galera/include/have_wsrep_replicate_myisam.inc | 4 ++++ mysql-test/suite/galera/r/have_wsrep_replicate_myisam.require | 2 ++ mysql-test/suite/galera/t/galera_flush_local.opt | 4 +++- mysql-test/suite/galera/t/galera_flush_local.test | 1 + 5 files changed, 10 insertions(+), 2 deletions(-) create mode 100644 mysql-test/suite/galera/include/have_wsrep_replicate_myisam.inc create mode 100644 mysql-test/suite/galera/r/have_wsrep_replicate_myisam.require diff --git a/mysql-test/suite/galera/disabled.def b/mysql-test/suite/galera/disabled.def index 19105e88881..da268abe0d2 100644 --- a/mysql-test/suite/galera/disabled.def +++ b/mysql-test/suite/galera/disabled.def @@ -10,7 +10,6 @@ # ############################################################################## galera_gra_log : MDEV-13549 Galera test failures -galera_flush_local : MDEV-13549 Galera test failures galera_flush : MDEV-13549 Galera test failures galera_account_management : MariaDB 10.0 does not support ALTER USER galera_binlog_row_image : MariaDB 10.0 does not support binlog_row_image diff --git a/mysql-test/suite/galera/include/have_wsrep_replicate_myisam.inc b/mysql-test/suite/galera/include/have_wsrep_replicate_myisam.inc new file mode 100644 index 00000000000..726fc6e2b18 --- /dev/null +++ 
b/mysql-test/suite/galera/include/have_wsrep_replicate_myisam.inc @@ -0,0 +1,4 @@ +--require suite/galera/r/have_wsrep_replicate_myisam.require +disable_query_log; +SHOW VARIABLES LIKE 'wsrep_replicate_myisam'; +enable_query_log; diff --git a/mysql-test/suite/galera/r/have_wsrep_replicate_myisam.require b/mysql-test/suite/galera/r/have_wsrep_replicate_myisam.require new file mode 100644 index 00000000000..c55610fd049 --- /dev/null +++ b/mysql-test/suite/galera/r/have_wsrep_replicate_myisam.require @@ -0,0 +1,2 @@ +Variable_name Value +wsrep_replicate_myisam ON diff --git a/mysql-test/suite/galera/t/galera_flush_local.opt b/mysql-test/suite/galera/t/galera_flush_local.opt index 5a1fb6748d9..a084db15c5d 100644 --- a/mysql-test/suite/galera/t/galera_flush_local.opt +++ b/mysql-test/suite/galera/t/galera_flush_local.opt @@ -1 +1,3 @@ ---query_cache_type=1 --query_cache_size=1000000 +--query_cache_type=1 +--query_cache_size=1000000 +--wsrep_replicate_myisam=ON diff --git a/mysql-test/suite/galera/t/galera_flush_local.test b/mysql-test/suite/galera/t/galera_flush_local.test index 768f4ea4f1b..24acd9ec4ff 100644 --- a/mysql-test/suite/galera/t/galera_flush_local.test +++ b/mysql-test/suite/galera/t/galera_flush_local.test @@ -5,6 +5,7 @@ --source include/galera_cluster.inc --source include/have_innodb.inc --source include/have_query_cache.inc +--source include/have_wsrep_replicate_myisam.inc --disable_warnings DROP TABLE IF EXISTS t1, t2, x1, x2; From f538a64817ce583fcce558303ae9d9b6aeecf838 Mon Sep 17 00:00:00 2001 From: Alexander Barkov Date: Mon, 19 Mar 2018 13:07:41 +0400 Subject: [PATCH 076/139] MDEV-15005 ASAN: stack-buffer-overflow in my_strnncollsp_simple cmp_item_sort_string::store_value() did not cache the string returned from item->val_str(), whose result can point to various private members such as Item_char_typecast::tmp_value. 
- cmp_item_sort_string::store_value() remembered the pointer returned from item->val_str() poiting to tmp_value into cmp_item_string::value_res. - Later, cmp_item_real::store_value() was called, which called Item_str_func::val_real(), which called Item_char_typecast::val_str(&tmp) using a local stack variable "String tmp". Item_char_typecast::tmp_value was overwritten and become a link to "tmp": tmp_value.Ptr freed its own buffer and set to point to the buffer owned by "tmp". - On return from Item_str_func::val_real(), "String tmp" was destructed, but "tmp_value" still pointed to the buffer owned by "tmp", So tmp_value.Ptr became invalid. - Then cmp_item_sort_string() passed cmp_item_string::value_res to sortcmp(). At this point, value_res still pointed to an invalid value of Item_char_typecast::tmp_value. Fix: changing cmp_item_sort_string::store_value() to force copying to cmp_item_string::value if item->val_str(&value) returned a different pointer (instead of &value). --- mysql-test/r/ctype_latin1.result | 13 +++++++++++++ mysql-test/r/ctype_utf8.result | 13 +++++++++++++ mysql-test/t/ctype_latin1.test | 9 +++++++++ mysql-test/t/ctype_utf8.test | 10 ++++++++++ sql/item_cmpfunc.h | 7 +++++++ 5 files changed, 52 insertions(+) diff --git a/mysql-test/r/ctype_latin1.result b/mysql-test/r/ctype_latin1.result index 85035982cf9..66c5a37750d 100644 --- a/mysql-test/r/ctype_latin1.result +++ b/mysql-test/r/ctype_latin1.result @@ -8208,5 +8208,18 @@ Warnings: Note 1003 select `test`.`t1`.`a` AS `a`,`test`.`t1`.`b` AS `b`,`test`.`t1`.`c` AS `c`,`test`.`t1`.`d` AS `d` from `test`.`t1` where ((coalesce(`test`.`t1`.`c`,0) = '3 ') and (coalesce(`test`.`t1`.`d`,0) = '3 ')) DROP TABLE t1; # +# MDEV-15005 ASAN: stack-buffer-overflow in my_strnncollsp_simple +# +SET NAMES latin1; +SELECT CONVERT(1, CHAR) IN ('100', 10, '101'); +CONVERT(1, CHAR) IN ('100', 10, '101') +0 +SELECT CONVERT(1, CHAR) IN ('100', 10, '1'); +CONVERT(1, CHAR) IN ('100', 10, '1') +1 +SELECT CONVERT(1, CHAR) 
IN ('100', '10', '1'); +CONVERT(1, CHAR) IN ('100', '10', '1') +1 +# # End of 10.1 tests # diff --git a/mysql-test/r/ctype_utf8.result b/mysql-test/r/ctype_utf8.result index 5a77ea3aea5..3da74d12877 100644 --- a/mysql-test/r/ctype_utf8.result +++ b/mysql-test/r/ctype_utf8.result @@ -10535,5 +10535,18 @@ CAST(_utf8 0xC499 AS CHAR CHARACTER SET latin1) Warnings: Warning 1977 Cannot convert 'utf8' character 0xC499 to 'latin1' # +# MDEV-15005 ASAN: stack-buffer-overflow in my_strnncollsp_simple +# +SET NAMES utf8; +SELECT CONVERT(1, CHAR) IN ('100', 10, '101'); +CONVERT(1, CHAR) IN ('100', 10, '101') +0 +SELECT CONVERT(1, CHAR) IN ('100', 10, '1'); +CONVERT(1, CHAR) IN ('100', 10, '1') +1 +SELECT CONVERT(1, CHAR) IN ('100', '10', '1'); +CONVERT(1, CHAR) IN ('100', '10', '1') +1 +# # End of 10.1 tests # diff --git a/mysql-test/t/ctype_latin1.test b/mysql-test/t/ctype_latin1.test index 1ee48eed18c..3455b0437c1 100644 --- a/mysql-test/t/ctype_latin1.test +++ b/mysql-test/t/ctype_latin1.test @@ -389,6 +389,15 @@ EXPLAIN EXTENDED SELECT * FROM t1 WHERE COALESCE(c,0)='3 ' AND COALESCE(d,0)=COALESCE(c,0); DROP TABLE t1; +--echo # +--echo # MDEV-15005 ASAN: stack-buffer-overflow in my_strnncollsp_simple +--echo # + +SET NAMES latin1; +SELECT CONVERT(1, CHAR) IN ('100', 10, '101'); +SELECT CONVERT(1, CHAR) IN ('100', 10, '1'); +SELECT CONVERT(1, CHAR) IN ('100', '10', '1'); + --echo # --echo # End of 10.1 tests --echo # diff --git a/mysql-test/t/ctype_utf8.test b/mysql-test/t/ctype_utf8.test index e013109d0a7..be17eb461be 100644 --- a/mysql-test/t/ctype_utf8.test +++ b/mysql-test/t/ctype_utf8.test @@ -2024,6 +2024,16 @@ SELECT CONVERT(_utf8 0xC499 USING latin1); SELECT CAST(_utf8 0xC499 AS CHAR CHARACTER SET latin1); +--echo # +--echo # MDEV-15005 ASAN: stack-buffer-overflow in my_strnncollsp_simple +--echo # + +SET NAMES utf8; +SELECT CONVERT(1, CHAR) IN ('100', 10, '101'); +SELECT CONVERT(1, CHAR) IN ('100', 10, '1'); +SELECT CONVERT(1, CHAR) IN ('100', '10', '1'); + + --echo 
# --echo # End of 10.1 tests --echo # diff --git a/sql/item_cmpfunc.h b/sql/item_cmpfunc.h index 076e6da953c..41d5ce25fd4 100644 --- a/sql/item_cmpfunc.h +++ b/sql/item_cmpfunc.h @@ -1312,6 +1312,13 @@ public: { value_res= item->val_str(&value); m_null_value= item->null_value; + // Make sure to cache the result String inside "value" + if (value_res && value_res != &value) + { + if (value.copy(*value_res)) + value.set("", 0, item->collation.collation); + value_res= &value; + } } int cmp(Item *arg) { From 75c76dbb06a99359d867e2a516f3244bf41fde96 Mon Sep 17 00:00:00 2001 From: Eugene Kosov Date: Mon, 19 Mar 2018 16:18:53 +0300 Subject: [PATCH 077/139] MDEV-15030 Add ASAN instrumentation Learn both valgrind and asan to catch this bug: mem_heap_t* heap = mem_heap_create(1024); byte* p = reinterpret_cast(heap) + sizeof(mem_heap_t); *p = 123; Overflows of the last allocation in a block will be catched too. mem_heap_create_block(): poison newly allocated memory --- storage/innobase/mem/mem0mem.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/storage/innobase/mem/mem0mem.c b/storage/innobase/mem/mem0mem.c index 6e9a39d329f..31f235b1960 100644 --- a/storage/innobase/mem/mem0mem.c +++ b/storage/innobase/mem/mem0mem.c @@ -404,6 +404,8 @@ mem_heap_create_block( heap->total_size += len; } + UNIV_MEM_FREE(block + 1, len - MEM_BLOCK_HEADER_SIZE); + ut_ad((ulint)MEM_BLOCK_HEADER_SIZE < len); return(block); From 7cf2428db31f0f3812e7c1b0923f61077717bb23 Mon Sep 17 00:00:00 2001 From: Jacob Mathew Date: Mon, 19 Mar 2018 12:49:22 -0700 Subject: [PATCH 078/139] MDEV-10991: Server crashes in spider_udf_direct_sql_create_conn - tests in spider/oracle* suites crash the server The crash occurs due to code that is #ifdef'd out with HAVE_ORACLE_OCI that pertains to the use of Spider with an Oracle data tier. Enabling this code eliminates the crash. The reason that MariaDB needs to support Oracle storage at the data tier is to help customers migrate from Oracle. 
It is necessary to build Spider with the additional build flag -DHAVE_ORACLE_OCI, and install and start Oracle before running the Oracle test suite or any tests within it. Nevertheless, if Spider is built normally and Oracle has not been started, these tests should not cause the MariaDB server to crash. The bug fix replaces the crash with the following error: ERROR 12501 (HY000) at line 4: The connect info 'ORACLE' is invalid Author: Jacob Mathew. Reviewer: Kentoku Shiba. --- storage/spider/spd_db_oracle.cc | 6 +++++- storage/spider/spd_direct_sql.cc | 16 ++++++++++++++++ 2 files changed, 21 insertions(+), 1 deletion(-) diff --git a/storage/spider/spd_db_oracle.cc b/storage/spider/spd_db_oracle.cc index c3dfe8b8cf2..8d7b9126570 100644 --- a/storage/spider/spd_db_oracle.cc +++ b/storage/spider/spd_db_oracle.cc @@ -29,6 +29,10 @@ #endif #ifdef HAVE_ORACLE_OCI +#if (defined(WIN32) || defined(_WIN32) || defined(WINDOWS) || defined(_WINDOWS)) +#include +#define strcasestr StrStr +#endif #include #include "spd_err.h" #include "spd_param.h" @@ -3817,7 +3821,7 @@ int spider_db_oracle_util::open_item_func( { Item_func_conv_charset *item_func_conv_charset = (Item_func_conv_charset *)item_func; - CHARSET_INFO *conv_charset = item_func_conv_charset->conv_charset; + CHARSET_INFO *conv_charset = item_func_conv_charset->collation.collation; uint cset_length = strlen(conv_charset->csname); if (str->reserve(SPIDER_SQL_USING_LEN + cset_length)) DBUG_RETURN(HA_ERR_OUT_OF_MEM); diff --git a/storage/spider/spd_direct_sql.cc b/storage/spider/spd_direct_sql.cc index 8f892869679..5c3ef9cabdf 100644 --- a/storage/spider/spd_direct_sql.cc +++ b/storage/spider/spd_direct_sql.cc @@ -371,6 +371,14 @@ SPIDER_CONN *spider_udf_direct_sql_create_conn( if (direct_sql->access_mode == 0) { #endif + if (direct_sql->dbton_id == SPIDER_DBTON_SIZE) + { + /* Invalid target wrapper */ + *error_num = ER_SPIDER_INVALID_CONNECT_INFO_NUM; + my_printf_error(*error_num, ER_SPIDER_INVALID_CONNECT_INFO_STR, + 
MYF(0), direct_sql->tgt_wrapper); + goto error_alloc_conn; + } if (!(conn = (SPIDER_CONN *) spider_bulk_malloc(spider_current_trx, 32, MYF(MY_WME | MY_ZEROFILL), &conn, sizeof(*conn), @@ -398,6 +406,14 @@ SPIDER_CONN *spider_udf_direct_sql_create_conn( conn->default_database.init_calc_mem(138); #if defined(HS_HAS_SQLCOM) && defined(HAVE_HANDLERSOCKET) } else { + if (direct_sql->dbton_id == SPIDER_DBTON_SIZE) + { + /* Invalid target wrapper */ + *error_num = ER_SPIDER_NOSQL_WRAPPER_IS_INVALID_NUM; + my_printf_error(*error_num, ER_SPIDER_NOSQL_WRAPPER_IS_INVALID_STR, + MYF(0), direct_sql->tgt_wrapper); + goto error_alloc_conn; + } if (!(conn = (SPIDER_CONN *) spider_bulk_malloc(spider_current_trx, 33, MYF(MY_WME | MY_ZEROFILL), &conn, sizeof(*conn), From e390f7b67509e324033644d017e8d6408d277d02 Mon Sep 17 00:00:00 2001 From: Varun Gupta Date: Tue, 20 Mar 2018 11:05:28 +0530 Subject: [PATCH 079/139] MDEV-12737: tokudb_mariadb.mdev6657 fails in buildbot with different plan, and outside with valgrind warnings Fixing the test by adding replace column for the rows column in the explain. 
--- storage/tokudb/mysql-test/tokudb_mariadb/r/mdev6657.result | 6 +++--- storage/tokudb/mysql-test/tokudb_mariadb/t/mdev6657.test | 4 +++- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/storage/tokudb/mysql-test/tokudb_mariadb/r/mdev6657.result b/storage/tokudb/mysql-test/tokudb_mariadb/r/mdev6657.result index 2e9faddbaff..3804a583dc3 100644 --- a/storage/tokudb/mysql-test/tokudb_mariadb/r/mdev6657.result +++ b/storage/tokudb/mysql-test/tokudb_mariadb/r/mdev6657.result @@ -10,7 +10,7 @@ col3 smallint(5) NOT NULL DEFAULT '1', filler varchar(255) DEFAULT NULL, KEY pk_ersatz(col1,col2,col3), KEY key1 (col1,col2) USING BTREE -) ENGINE=TokuDB DEFAULT CHARSET=latin1 PACK_KEYS=1 COMPRESSION=TOKUDB_LZMA; +) ENGINE=TokuDB DEFAULT CHARSET=latin1 PACK_KEYS=1; insert into t3 select 1300000000+a, 12345, 7890, 'data' from t2; insert into t3 select 1400000000+a, 12345, 7890, 'data' from t2; insert into t3 select 1410799999+a, 12345, 7890, 'data' from t2; @@ -34,7 +34,7 @@ from t3 where col1 <= 1410799999 order by col1 desc,col2 desc,col3 desc limit 1; id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t3 range pk_ersatz,key1 pk_ersatz 4 NULL 2001 Using where; Using index +1 SIMPLE t3 range pk_ersatz,key1 pk_ersatz 4 NULL # Using where; Using index # The same query but the constant is bigger. 
# The query should use range(PRIMARY), not full index scan: explain @@ -43,5 +43,5 @@ from t3 where col1 <= 1412199999 order by col1 desc, col2 desc, col3 desc limit 1; id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t3 range pk_ersatz,key1 pk_ersatz 4 NULL 15001 Using where; Using index +1 SIMPLE t3 range pk_ersatz,key1 pk_ersatz 4 NULL # Using where; Using index drop table t1,t2,t3; diff --git a/storage/tokudb/mysql-test/tokudb_mariadb/t/mdev6657.test b/storage/tokudb/mysql-test/tokudb_mariadb/t/mdev6657.test index a809c3faf06..b723a5d72e8 100644 --- a/storage/tokudb/mysql-test/tokudb_mariadb/t/mdev6657.test +++ b/storage/tokudb/mysql-test/tokudb_mariadb/t/mdev6657.test @@ -15,7 +15,7 @@ CREATE TABLE t3 ( filler varchar(255) DEFAULT NULL, KEY pk_ersatz(col1,col2,col3), KEY key1 (col1,col2) USING BTREE -) ENGINE=TokuDB DEFAULT CHARSET=latin1 PACK_KEYS=1 COMPRESSION=TOKUDB_LZMA; +) ENGINE=TokuDB DEFAULT CHARSET=latin1 PACK_KEYS=1; insert into t3 select 1300000000+a, 12345, 7890, 'data' from t2; insert into t3 select 1400000000+a, 12345, 7890, 'data' from t2; @@ -35,6 +35,7 @@ insert into t3 select 1412099999+a, 12345, 7890, 'data' from t2; insert into t3 select 1412199999+a, 12345, 7890, 'data' from t2; --echo # The following must use range(PRIMARY): +--replace_column 9 # explain select col1,col2,col3 from t3 @@ -43,6 +44,7 @@ order by col1 desc,col2 desc,col3 desc limit 1; --echo # The same query but the constant is bigger. 
--echo # The query should use range(PRIMARY), not full index scan: +--replace_column 9 # explain select col1,col2,col3 from t3 From 5a8f8f89d65b75e51048288a49c86a8d974a8543 Mon Sep 17 00:00:00 2001 From: Eugene Kosov Date: Tue, 20 Mar 2018 10:46:57 +0300 Subject: [PATCH 080/139] honor alignment rules and xtradb too --- include/my_valgrind.h | 2 ++ storage/innobase/mem/mem0mem.c | 5 ++++- storage/xtradb/mem/mem0mem.c | 5 +++++ 3 files changed, 11 insertions(+), 1 deletion(-) diff --git a/include/my_valgrind.h b/include/my_valgrind.h index b76f5607bb5..8dde079b976 100644 --- a/include/my_valgrind.h +++ b/include/my_valgrind.h @@ -35,6 +35,8 @@ # define MEM_CHECK_DEFINED(a,len) VALGRIND_CHECK_MEM_IS_DEFINED(a,len) #elif defined(__SANITIZE_ADDRESS__) # include +/* How to do manual poisoning: +https://github.com/google/sanitizers/wiki/AddressSanitizerManualPoisoning */ # define MEM_UNDEFINED(a,len) ASAN_UNPOISON_MEMORY_REGION(a,len) # define MEM_NOACCESS(a,len) ASAN_POISON_MEMORY_REGION(a,len) # define MEM_CHECK_ADDRESSABLE(a,len) ((void) 0) diff --git a/storage/innobase/mem/mem0mem.c b/storage/innobase/mem/mem0mem.c index 31f235b1960..924231470aa 100644 --- a/storage/innobase/mem/mem0mem.c +++ b/storage/innobase/mem/mem0mem.c @@ -404,7 +404,10 @@ mem_heap_create_block( heap->total_size += len; } - UNIV_MEM_FREE(block + 1, len - MEM_BLOCK_HEADER_SIZE); + /* Poison all available memory. Individual chunks will be unpoisoned on + every mem_heap_alloc() call. */ + compile_time_assert(MEM_BLOCK_HEADER_SIZE >= sizeof *block); + UNIV_MEM_FREE(block + 1, len - sizeof *block); ut_ad((ulint)MEM_BLOCK_HEADER_SIZE < len); diff --git a/storage/xtradb/mem/mem0mem.c b/storage/xtradb/mem/mem0mem.c index 6e9a39d329f..924231470aa 100644 --- a/storage/xtradb/mem/mem0mem.c +++ b/storage/xtradb/mem/mem0mem.c @@ -404,6 +404,11 @@ mem_heap_create_block( heap->total_size += len; } + /* Poison all available memory. Individual chunks will be unpoisoned on + every mem_heap_alloc() call. 
*/ + compile_time_assert(MEM_BLOCK_HEADER_SIZE >= sizeof *block); + UNIV_MEM_FREE(block + 1, len - sizeof *block); + ut_ad((ulint)MEM_BLOCK_HEADER_SIZE < len); return(block); From 2a729b5f4b14f9f04cf81e1d8dd4eec4ad6cb7cd Mon Sep 17 00:00:00 2001 From: sjaakola Date: Wed, 17 Feb 2016 11:20:48 +0200 Subject: [PATCH 081/139] refs MW-245 - merged wsrep_dirty_reads and wsrep_reject_queries from PXC --- .../suite/galera/r/galera_defaults.result | 3 +- .../galera/r/galera_schema_dirty_reads.result | 13 ++++++ .../r/galera_var_cluster_address.result | 3 +- .../galera/r/galera_var_dirty_reads.result | 7 ++-- .../galera/t/galera_schema_dirty_reads.test | 13 ++++++ .../galera/t/galera_var_cluster_address.test | 1 - .../galera/t/galera_var_dirty_reads.test | 3 -- .../suite/sys_vars/r/sysvars_wsrep.result | 14 +++++++ sql/sql_class.h | 8 ++++ sql/sql_parse.cc | 41 +++++++++++++++---- sql/sys_vars.cc | 10 ++++- sql/wsrep_mysqld.h | 7 ++++ sql/wsrep_var.cc | 25 +++++++++++ sql/wsrep_var.h | 1 + 14 files changed, 131 insertions(+), 18 deletions(-) create mode 100644 mysql-test/suite/galera/r/galera_schema_dirty_reads.result create mode 100644 mysql-test/suite/galera/t/galera_schema_dirty_reads.test diff --git a/mysql-test/suite/galera/r/galera_defaults.result b/mysql-test/suite/galera/r/galera_defaults.result index d3004735a0a..5c5fdabf432 100644 --- a/mysql-test/suite/galera/r/galera_defaults.result +++ b/mysql-test/suite/galera/r/galera_defaults.result @@ -1,6 +1,6 @@ SELECT COUNT(*) = 43 FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES WHERE VARIABLE_NAME LIKE 'wsrep_%'; COUNT(*) = 43 -1 +0 SELECT VARIABLE_NAME, VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_VARIABLES WHERE VARIABLE_NAME LIKE 'wsrep_%' @@ -40,6 +40,7 @@ WSREP_NOTIFY_CMD WSREP_ON ON WSREP_OSU_METHOD TOI WSREP_RECOVER OFF +WSREP_REJECT_QUERIES NONE WSREP_REPLICATE_MYISAM OFF WSREP_RESTART_SLAVE OFF WSREP_RETRY_AUTOCOMMIT 1 diff --git a/mysql-test/suite/galera/r/galera_schema_dirty_reads.result 
b/mysql-test/suite/galera/r/galera_schema_dirty_reads.result new file mode 100644 index 00000000000..edf20da92c6 --- /dev/null +++ b/mysql-test/suite/galera/r/galera_schema_dirty_reads.result @@ -0,0 +1,13 @@ +USE information_schema; +SELECT * FROM SESSION_VARIABLES WHERE VARIABLE_NAME LIKE "wsrep_dirty_reads"; +VARIABLE_NAME VARIABLE_VALUE +WSREP_DIRTY_READS OFF +SET GLOBAL wsrep_reject_queries=ALL; +SELECT * FROM SESSION_VARIABLES WHERE VARIABLE_NAME LIKE "wsrep_dirty_reads"; +VARIABLE_NAME VARIABLE_VALUE +WSREP_DIRTY_READS OFF +SET GLOBAL wsrep_reject_queries=NONE; +SET SESSION wsrep_dirty_reads=TRUE; +SELECT * FROM SESSION_VARIABLES WHERE VARIABLE_NAME LIKE "wsrep_dirty_reads"; +VARIABLE_NAME VARIABLE_VALUE +WSREP_DIRTY_READS ON diff --git a/mysql-test/suite/galera/r/galera_var_cluster_address.result b/mysql-test/suite/galera/r/galera_var_cluster_address.result index 7c56e22eca2..8245cdf6093 100644 --- a/mysql-test/suite/galera/r/galera_var_cluster_address.result +++ b/mysql-test/suite/galera/r/galera_var_cluster_address.result @@ -1,7 +1,8 @@ SET GLOBAL wsrep_cluster_address = 'foo://'; SET SESSION wsrep_sync_wait=0; SELECT COUNT(*) > 0 FROM INFORMATION_SCHEMA.GLOBAL_STATUS; -ERROR 08S01: WSREP has not yet prepared node for application use +COUNT(*) > 0 +1 SHOW STATUS LIKE 'wsrep_ready'; Variable_name Value wsrep_ready OFF diff --git a/mysql-test/suite/galera/r/galera_var_dirty_reads.result b/mysql-test/suite/galera/r/galera_var_dirty_reads.result index f141b332b6c..5a108ddfcaa 100644 --- a/mysql-test/suite/galera/r/galera_var_dirty_reads.result +++ b/mysql-test/suite/galera/r/galera_var_dirty_reads.result @@ -33,11 +33,12 @@ SELECT 1; 1 1 USE information_schema; -ERROR 08S01: WSREP has not yet prepared node for application use SELECT * FROM information_schema.session_variables WHERE variable_name LIKE "wsrep_dirty_reads"; -ERROR 08S01: WSREP has not yet prepared node for application use +VARIABLE_NAME VARIABLE_VALUE +WSREP_DIRTY_READS OFF SELECT COUNT(*) >= 
10 FROM performance_schema.events_statements_history; -ERROR 08S01: WSREP has not yet prepared node for application use +COUNT(*) >= 10 +1 USE test; SELECT * FROM t1; i diff --git a/mysql-test/suite/galera/t/galera_schema_dirty_reads.test b/mysql-test/suite/galera/t/galera_schema_dirty_reads.test new file mode 100644 index 00000000000..93e24244611 --- /dev/null +++ b/mysql-test/suite/galera/t/galera_schema_dirty_reads.test @@ -0,0 +1,13 @@ +# +# Dirty reads from INFORMATION_SCHEMA tables. +# +--source include/galera_cluster.inc +--source include/have_innodb.inc +--disable_info +USE information_schema; +SELECT * FROM SESSION_VARIABLES WHERE VARIABLE_NAME LIKE "wsrep_dirty_reads"; +SET GLOBAL wsrep_reject_queries=ALL; +SELECT * FROM SESSION_VARIABLES WHERE VARIABLE_NAME LIKE "wsrep_dirty_reads"; +SET GLOBAL wsrep_reject_queries=NONE; +SET SESSION wsrep_dirty_reads=TRUE; +SELECT * FROM SESSION_VARIABLES WHERE VARIABLE_NAME LIKE "wsrep_dirty_reads"; diff --git a/mysql-test/suite/galera/t/galera_var_cluster_address.test b/mysql-test/suite/galera/t/galera_var_cluster_address.test index 03706bbbb12..6d99d35cdac 100644 --- a/mysql-test/suite/galera/t/galera_var_cluster_address.test +++ b/mysql-test/suite/galera/t/galera_var_cluster_address.test @@ -24,7 +24,6 @@ SET GLOBAL wsrep_cluster_address = 'foo://'; SET SESSION wsrep_sync_wait=0; ---error ER_UNKNOWN_COM_ERROR SELECT COUNT(*) > 0 FROM INFORMATION_SCHEMA.GLOBAL_STATUS; # Must return 'OFF' diff --git a/mysql-test/suite/galera/t/galera_var_dirty_reads.test b/mysql-test/suite/galera/t/galera_var_dirty_reads.test index cba8488b879..138b7c1c703 100644 --- a/mysql-test/suite/galera/t/galera_var_dirty_reads.test +++ b/mysql-test/suite/galera/t/galera_var_dirty_reads.test @@ -49,13 +49,10 @@ SELECT i, variable_name, variable_value FROM t1, information_schema.session_vari SELECT 1; ---error ER_UNKNOWN_COM_ERROR USE information_schema; ---error ER_UNKNOWN_COM_ERROR SELECT * FROM information_schema.session_variables WHERE 
variable_name LIKE "wsrep_dirty_reads"; ---error ER_UNKNOWN_COM_ERROR SELECT COUNT(*) >= 10 FROM performance_schema.events_statements_history; --disable_query_log diff --git a/mysql-test/suite/sys_vars/r/sysvars_wsrep.result b/mysql-test/suite/sys_vars/r/sysvars_wsrep.result index 0c206975c29..db932ae8223 100644 --- a/mysql-test/suite/sys_vars/r/sysvars_wsrep.result +++ b/mysql-test/suite/sys_vars/r/sysvars_wsrep.result @@ -421,6 +421,20 @@ NUMERIC_BLOCK_SIZE NULL ENUM_VALUE_LIST OFF,ON READ_ONLY YES COMMAND_LINE_ARGUMENT OPTIONAL +VARIABLE_NAME WSREP_REJECT_QUERIES +SESSION_VALUE NULL +GLOBAL_VALUE NONE +GLOBAL_VALUE_ORIGIN COMPILE-TIME +DEFAULT_VALUE NONE +VARIABLE_SCOPE GLOBAL +VARIABLE_TYPE ENUM +VARIABLE_COMMENT Variable to set to reject queries +NUMERIC_MIN_VALUE NULL +NUMERIC_MAX_VALUE NULL +NUMERIC_BLOCK_SIZE NULL +ENUM_VALUE_LIST NONE,ALL,ALL_KILL +READ_ONLY NO +COMMAND_LINE_ARGUMENT OPTIONAL VARIABLE_NAME WSREP_REPLICATE_MYISAM SESSION_VALUE NULL GLOBAL_VALUE OFF diff --git a/sql/sql_class.h b/sql/sql_class.h index 91030145022..b42b6ece9d4 100644 --- a/sql/sql_class.h +++ b/sql/sql_class.h @@ -5475,6 +5475,14 @@ public: sent by the user (ie: stored procedure). */ #define CF_SKIP_QUESTIONS (1U << 1) +#ifdef WITH_WSREP +/** + Do not check that wsrep snapshot is ready before allowing this command +*/ +#define CF_SKIP_WSREP_CHECK (1U << 2) +#else +#define CF_SKIP_WSREP_CHECK 0 +#endif /* WITH_WSREP */ /** Do not check that wsrep snapshot is ready before allowing this command diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc index 97d743d9a42..6a10f50813b 100644 --- a/sql/sql_parse.cc +++ b/sql/sql_parse.cc @@ -1,5 +1,5 @@ /* Copyright (c) 2000, 2017, Oracle and/or its affiliates. 
- Copyright (c) 2008, 2017, MariaDB + Copyright (c) 2008, 2018, MariaDB This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -284,6 +284,8 @@ void init_update_queries(void) server_command_flags[COM_QUERY]= CF_SKIP_WSREP_CHECK; server_command_flags[COM_SET_OPTION]= CF_SKIP_WSREP_CHECK; server_command_flags[COM_STMT_PREPARE]= CF_SKIP_QUESTIONS | CF_SKIP_WSREP_CHECK; + server_command_flags[COM_STMT_EXECUTE]= CF_SKIP_WSREP_CHECK; + server_command_flags[COM_STMT_FETCH]= CF_SKIP_WSREP_CHECK; server_command_flags[COM_STMT_CLOSE]= CF_SKIP_QUESTIONS | CF_SKIP_WSREP_CHECK; server_command_flags[COM_STMT_RESET]= CF_SKIP_QUESTIONS | CF_SKIP_WSREP_CHECK; server_command_flags[COM_STMT_EXECUTE]= CF_SKIP_WSREP_CHECK; @@ -887,6 +889,25 @@ void cleanup_items(Item *item) } +#ifdef WITH_WSREP +static bool wsrep_tables_accessible_when_detached(const TABLE_LIST *tables) +{ + for (const TABLE_LIST *table= tables; table; table= table->next_global) + { + TABLE_CATEGORY c; + LEX_STRING db, tn; + lex_string_set(&db, table->db); + lex_string_set(&tn, table->table_name); + c= get_table_category(&db, &tn); + if (c != TABLE_CATEGORY_INFORMATION && + c != TABLE_CATEGORY_PERFORMANCE) + { + return false; + } + } + return true; +} +#endif /* WITH_WSREP */ #ifndef EMBEDDED_LIBRARY #ifdef WITH_WSREP @@ -2659,14 +2680,18 @@ mysql_execute_command(THD *thd) We additionally allow all other commands that do not change data in case wsrep_dirty_reads is enabled. 
*/ - if (lex->sql_command != SQLCOM_SET_OPTION && - !wsrep_is_show_query(lex->sql_command) && - !(thd->variables.wsrep_dirty_reads && - !is_update_query(lex->sql_command)) && - !(lex->sql_command == SQLCOM_SELECT && - !all_tables) && - !wsrep_node_is_ready(thd)) + if (thd->variables.wsrep_on && !thd->wsrep_applier && + !(wsrep_ready || + (thd->variables.wsrep_dirty_reads && + (sql_command_flags[lex->sql_command] & CF_CHANGES_DATA) == 0) || + wsrep_tables_accessible_when_detached(all_tables)) && + lex->sql_command != SQLCOM_SET_OPTION && + !wsrep_is_show_query(lex->sql_command)) + { + my_message(ER_UNKNOWN_COM_ERROR, + "WSREP has not yet prepared node for application use", MYF(0)); goto error; + } } #endif /* WITH_WSREP */ status_var_increment(thd->status_var.com_stat[lex->sql_command]); diff --git a/sql/sys_vars.cc b/sql/sys_vars.cc index 303633939c3..2ec886f0528 100644 --- a/sql/sys_vars.cc +++ b/sql/sys_vars.cc @@ -1,5 +1,5 @@ /* Copyright (c) 2002, 2015, Oracle and/or its affiliates. 
- Copyright (c) 2012, 2015, MariaDB + Copyright (c) 2012, 2018, MariaDB This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -4967,6 +4967,14 @@ static Sys_var_mybool Sys_wsrep_desync ( ON_CHECK(wsrep_desync_check), ON_UPDATE(wsrep_desync_update)); +static const char *wsrep_reject_queries_names[]= { "NONE", "ALL", "ALL_KILL", NullS }; +static Sys_var_enum Sys_wsrep_reject_queries( + "wsrep_reject_queries", "Variable to set to reject queries", + GLOBAL_VAR(wsrep_reject_queries), CMD_LINE(OPT_ARG), + wsrep_reject_queries_names, DEFAULT(WSREP_REJECT_NONE), + NO_MUTEX_GUARD, NOT_IN_BINLOG, ON_CHECK(0), + ON_UPDATE(wsrep_reject_queries_update)); + static const char *wsrep_binlog_format_names[]= {"MIXED", "STATEMENT", "ROW", "NONE", NullS}; static Sys_var_enum Sys_wsrep_forced_binlog_format( diff --git a/sql/wsrep_mysqld.h b/sql/wsrep_mysqld.h index fd68fab991c..33af697a19b 100644 --- a/sql/wsrep_mysqld.h +++ b/sql/wsrep_mysqld.h @@ -80,6 +80,7 @@ extern const char* wsrep_notify_cmd; extern long wsrep_max_protocol_version; extern ulong wsrep_forced_binlog_format; extern my_bool wsrep_desync; +extern ulong wsrep_reject_queries; extern my_bool wsrep_replicate_myisam; extern ulong wsrep_mysql_replication_bundle; extern my_bool wsrep_restart_slave; @@ -92,6 +93,12 @@ extern bool wsrep_gtid_mode; extern uint32 wsrep_gtid_domain_id; extern bool wsrep_dirty_reads; +enum enum_wsrep_reject_types { + WSREP_REJECT_NONE, /* nothing rejected */ + WSREP_REJECT_ALL, /* reject all queries, with UNKNOWN_COMMAND error */ + WSREP_REJECT_ALL_KILL /* kill existing connections and reject all queries*/ +}; + enum enum_wsrep_OSU_method { WSREP_OSU_TOI, WSREP_OSU_RSU, diff --git a/sql/wsrep_var.cc b/sql/wsrep_var.cc index ad1f4ec0eac..da53c0981af 100644 --- a/sql/wsrep_var.cc +++ b/sql/wsrep_var.cc @@ -34,6 +34,7 @@ const char* wsrep_node_name = 0; const char* wsrep_node_address = 0; const char* 
wsrep_node_incoming_address = 0; const char* wsrep_start_position = 0; +ulong wsrep_reject_queries; static long wsrep_prev_slave_threads = wsrep_slave_threads; @@ -384,6 +385,30 @@ void wsrep_provider_options_init(const char* value) wsrep_provider_options = (value) ? my_strdup(value, MYF(0)) : NULL; } +bool wsrep_reject_queries_update(sys_var *self, THD* thd, enum_var_type type) +{ + switch (wsrep_reject_queries) { + case WSREP_REJECT_NONE: + wsrep_ready_set(TRUE); + WSREP_INFO("Allowing client queries due to manual setting"); + break; + case WSREP_REJECT_ALL: + wsrep_ready_set(FALSE); + WSREP_INFO("Rejecting client queries due to manual setting"); + break; + case WSREP_REJECT_ALL_KILL: + wsrep_ready_set(FALSE); + wsrep_close_client_connections(FALSE); + WSREP_INFO("Rejecting client queries and killing connections due to manual setting"); + break; + default: + WSREP_INFO("Unknown value for wsrep_reject_queries: %lu", + wsrep_reject_queries); + return true; + } + return false; +} + static int wsrep_cluster_address_verify (const char* cluster_address_str) { /* There is no predefined address format, it depends on provider. 
*/ diff --git a/sql/wsrep_var.h b/sql/wsrep_var.h index 55eb2fbc501..53952173c83 100644 --- a/sql/wsrep_var.h +++ b/sql/wsrep_var.h @@ -93,6 +93,7 @@ extern bool wsrep_desync_update UPDATE_ARGS; extern bool wsrep_max_ws_size_check CHECK_ARGS; extern bool wsrep_max_ws_size_update UPDATE_ARGS; +extern bool wsrep_reject_queries_update UPDATE_ARGS; #else /* WITH_WSREP */ From 8f717ed36002e6257b0ba659863f3a48d5c1f579 Mon Sep 17 00:00:00 2001 From: sjaakola Date: Fri, 19 Feb 2016 00:14:26 +0200 Subject: [PATCH 082/139] refs MW-245 - allowing USE with dirty reads configuration - fix for logic of setting wsrep ready status --- sql/sql_parse.cc | 34 ++++++++++++++++++---------------- sql/wsrep_var.cc | 3 --- 2 files changed, 18 insertions(+), 19 deletions(-) diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc index 6a10f50813b..c6adfd0999a 100644 --- a/sql/sql_parse.cc +++ b/sql/sql_parse.cc @@ -275,6 +275,7 @@ void init_update_queries(void) server_command_flags[COM_SHUTDOWN]= CF_SKIP_WSREP_CHECK; server_command_flags[COM_SLEEP]= CF_SKIP_WSREP_CHECK; server_command_flags[COM_TIME]= CF_SKIP_WSREP_CHECK; + server_command_flags[COM_INIT_DB]= CF_SKIP_WSREP_CHECK; server_command_flags[COM_END]= CF_SKIP_WSREP_CHECK; /* @@ -913,7 +914,8 @@ static bool wsrep_tables_accessible_when_detached(const TABLE_LIST *tables) #ifdef WITH_WSREP static bool wsrep_node_is_ready(THD *thd) { - if (thd->variables.wsrep_on && !thd->wsrep_applier && !wsrep_ready) + if (thd->variables.wsrep_on && !thd->wsrep_applier && + (!wsrep_ready || wsrep_reject_queries != WSREP_REJECT_NONE)) { my_message(ER_UNKNOWN_COM_ERROR, "WSREP has not yet prepared node for application use", @@ -1104,8 +1106,9 @@ bool do_command(THD *thd) /* Bail out if DB snapshot has not been installed. 
*/ - if (!(server_command_flags[command] & CF_SKIP_WSREP_CHECK) && - !wsrep_node_is_ready(thd)) + if (thd->variables.wsrep_on && !thd->wsrep_applier && + (!wsrep_ready || wsrep_reject_queries != WSREP_REJECT_NONE) && + (server_command_flags[command] & CF_SKIP_WSREP_CHECK) == 0) { thd->protocol->end_statement(); @@ -2674,19 +2677,18 @@ mysql_execute_command(THD *thd) } /* - Bail out if DB snapshot has not been installed. SET and SHOW commands, - however, are always allowed. - Select query is also allowed if it does not access any table. - We additionally allow all other commands that do not change data in - case wsrep_dirty_reads is enabled. - */ - if (thd->variables.wsrep_on && !thd->wsrep_applier && - !(wsrep_ready || - (thd->variables.wsrep_dirty_reads && - (sql_command_flags[lex->sql_command] & CF_CHANGES_DATA) == 0) || - wsrep_tables_accessible_when_detached(all_tables)) && - lex->sql_command != SQLCOM_SET_OPTION && - !wsrep_is_show_query(lex->sql_command)) + * Bail out if DB snapshot has not been installed. 
We however, + * allow SET and SHOW queries and reads from information schema + * and dirty reads (if configured) + */ + if (thd->variables.wsrep_on && + !thd->wsrep_applier && + !(wsrep_ready && wsrep_reject_queries == WSREP_REJECT_NONE) && + !(thd->variables.wsrep_dirty_reads && + (sql_command_flags[lex->sql_command] & CF_CHANGES_DATA) == 0) && + !wsrep_tables_accessible_when_detached(all_tables) && + lex->sql_command != SQLCOM_SET_OPTION && + !wsrep_is_show_query(lex->sql_command)) { my_message(ER_UNKNOWN_COM_ERROR, "WSREP has not yet prepared node for application use", MYF(0)); diff --git a/sql/wsrep_var.cc b/sql/wsrep_var.cc index da53c0981af..2d52396331d 100644 --- a/sql/wsrep_var.cc +++ b/sql/wsrep_var.cc @@ -389,15 +389,12 @@ bool wsrep_reject_queries_update(sys_var *self, THD* thd, enum_var_type type) { switch (wsrep_reject_queries) { case WSREP_REJECT_NONE: - wsrep_ready_set(TRUE); WSREP_INFO("Allowing client queries due to manual setting"); break; case WSREP_REJECT_ALL: - wsrep_ready_set(FALSE); WSREP_INFO("Rejecting client queries due to manual setting"); break; case WSREP_REJECT_ALL_KILL: - wsrep_ready_set(FALSE); wsrep_close_client_connections(FALSE); WSREP_INFO("Rejecting client queries and killing connections due to manual setting"); break; From 84d4ab5be1d6490742353c9c7c7ae198ceb66b42 Mon Sep 17 00:00:00 2001 From: Philip Stoev Date: Fri, 19 Feb 2016 00:16:36 -0800 Subject: [PATCH 083/139] refs MW-245: Galera MTR Tests: additional tests for wsrep_reject_queries, wsrep_dirty_reads --- .../galera/r/galera_var_reject_queries.result | 21 +++++++++ .../galera/t/galera_var_reject_queries.test | 44 +++++++++++++++++++ 2 files changed, 65 insertions(+) create mode 100644 mysql-test/suite/galera/r/galera_var_reject_queries.result create mode 100644 mysql-test/suite/galera/t/galera_var_reject_queries.test diff --git a/mysql-test/suite/galera/r/galera_var_reject_queries.result b/mysql-test/suite/galera/r/galera_var_reject_queries.result new file mode 100644 
index 00000000000..e4b906f1717 --- /dev/null +++ b/mysql-test/suite/galera/r/galera_var_reject_queries.result @@ -0,0 +1,21 @@ +CREATE TABLE t1 (f1 INTEGER); +SET SESSION wsrep_reject_queries = ALL; +ERROR HY000: Variable 'wsrep_reject_queries' is a GLOBAL variable and should be set with SET GLOBAL +SET GLOBAL wsrep_reject_queries = ALL; +SELECT * FROM t1; +ERROR 08S01: WSREP has not yet prepared node for application use +SET GLOBAL wsrep_reject_queries = ALL_KILL; +ERROR HY000: Lost connection to MySQL server during query +SELECT * FROM t1; +ERROR 70100: Connection was killed +SELECT * FROM t1; +ERROR 08S01: WSREP has not yet prepared node for application use +SELECT VARIABLE_VALUE = 2 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size'; +VARIABLE_VALUE = 2 +1 +INSERT INTO t1 VALUES (1); +SET GLOBAL wsrep_reject_queries = NONE; +SELECT COUNT(*) = 1 FROM t1; +COUNT(*) = 1 +1 +DROP TABLE t1; diff --git a/mysql-test/suite/galera/t/galera_var_reject_queries.test b/mysql-test/suite/galera/t/galera_var_reject_queries.test new file mode 100644 index 00000000000..b89cae78aa6 --- /dev/null +++ b/mysql-test/suite/galera/t/galera_var_reject_queries.test @@ -0,0 +1,44 @@ +# +# Test wsrep_reject_queries +# + +--source include/galera_cluster.inc +--source include/have_innodb.inc + +CREATE TABLE t1 (f1 INTEGER); + +--connect node_1a, 127.0.0.1, root, , test, $NODE_MYPORT_1 + +--connection node_1 +--error ER_GLOBAL_VARIABLE +SET SESSION wsrep_reject_queries = ALL; + +SET GLOBAL wsrep_reject_queries = ALL; + +--error ER_UNKNOWN_COM_ERROR +SELECT * FROM t1; + +# Lost connection +--error 2013 +SET GLOBAL wsrep_reject_queries = ALL_KILL; + +--connection node_1a +--error ER_CONNECTION_KILLED +SELECT * FROM t1; + +--connect node_1b, 127.0.0.1, root, , test, $NODE_MYPORT_1 +--error ER_UNKNOWN_COM_ERROR +SELECT * FROM t1; + +# Confirm that replication continues + +--connection node_2 +SELECT VARIABLE_VALUE = 2 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE 
VARIABLE_NAME = 'wsrep_cluster_size'; +INSERT INTO t1 VALUES (1); + +--connect node_1c, 127.0.0.1, root, , test, $NODE_MYPORT_1 +SET GLOBAL wsrep_reject_queries = NONE; + +SELECT COUNT(*) = 1 FROM t1; + +DROP TABLE t1; From c5dd2abf4ca26cb4ac6bfc3123500e5c2f46cbff Mon Sep 17 00:00:00 2001 From: sjaakola Date: Wed, 2 Mar 2016 21:32:06 +0200 Subject: [PATCH 084/139] Refs MW-245 - logic was wrong in detecting if queries are allowed in non primary node. it allowed select with no table list to execute even if dirty reads was not specified --- sql/sql_parse.cc | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc index c6adfd0999a..c1962508864 100644 --- a/sql/sql_parse.cc +++ b/sql/sql_parse.cc @@ -2686,7 +2686,8 @@ mysql_execute_command(THD *thd) !(wsrep_ready && wsrep_reject_queries == WSREP_REJECT_NONE) && !(thd->variables.wsrep_dirty_reads && (sql_command_flags[lex->sql_command] & CF_CHANGES_DATA) == 0) && - !wsrep_tables_accessible_when_detached(all_tables) && + !(thd->variables.wsrep_dirty_reads && + wsrep_tables_accessible_when_detached(all_tables)) && lex->sql_command != SQLCOM_SET_OPTION && !wsrep_is_show_query(lex->sql_command)) { From 9a8961485763c057620a24e60713d15d22ab41b1 Mon Sep 17 00:00:00 2001 From: sjaakola Date: Thu, 3 Mar 2016 09:35:52 +0200 Subject: [PATCH 085/139] Refs: MW-245 - changed logic so that in non primary node it is possible to do SET + SHOW + SELECT from information and pfs schema, when dirty reads are not enabled - however, non table selects are not allowed (e.g. 
SELECT 1) --- sql/sql_parse.cc | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc index c1962508864..affadf01d2c 100644 --- a/sql/sql_parse.cc +++ b/sql/sql_parse.cc @@ -893,6 +893,7 @@ void cleanup_items(Item *item) #ifdef WITH_WSREP static bool wsrep_tables_accessible_when_detached(const TABLE_LIST *tables) { + bool has_tables = false; for (const TABLE_LIST *table= tables; table; table= table->next_global) { TABLE_CATEGORY c; @@ -905,8 +906,9 @@ static bool wsrep_tables_accessible_when_detached(const TABLE_LIST *tables) { return false; } + has_tables = true; } - return true; + return has_tables; } #endif /* WITH_WSREP */ #ifndef EMBEDDED_LIBRARY @@ -2686,8 +2688,7 @@ mysql_execute_command(THD *thd) !(wsrep_ready && wsrep_reject_queries == WSREP_REJECT_NONE) && !(thd->variables.wsrep_dirty_reads && (sql_command_flags[lex->sql_command] & CF_CHANGES_DATA) == 0) && - !(thd->variables.wsrep_dirty_reads && - wsrep_tables_accessible_when_detached(all_tables)) && + !wsrep_tables_accessible_when_detached(all_tables) && lex->sql_command != SQLCOM_SET_OPTION && !wsrep_is_show_query(lex->sql_command)) { From 33028f7c4bc4b702a9dd2d8462f2f79b8f37dc1f Mon Sep 17 00:00:00 2001 From: Philip Stoev Date: Thu, 3 Mar 2016 02:57:21 -0800 Subject: [PATCH 086/139] Refs: MW-245 - Adjust tests to account for the new behavior. 
--- mysql-test/suite/galera/r/galera_var_dirty_reads.result | 4 ++-- mysql-test/suite/galera/r/galera_var_reject_queries.result | 2 +- mysql-test/suite/galera/t/galera_as_slave_nonprim.test | 7 ++++--- mysql-test/suite/galera/t/galera_var_dirty_reads.test | 2 ++ mysql-test/suite/galera/t/galera_var_reject_queries.test | 2 +- 5 files changed, 10 insertions(+), 7 deletions(-) diff --git a/mysql-test/suite/galera/r/galera_var_dirty_reads.result b/mysql-test/suite/galera/r/galera_var_dirty_reads.result index 5a108ddfcaa..da842e7254a 100644 --- a/mysql-test/suite/galera/r/galera_var_dirty_reads.result +++ b/mysql-test/suite/galera/r/galera_var_dirty_reads.result @@ -30,9 +30,9 @@ SET @@session.wsrep_dirty_reads=OFF; SELECT i, variable_name, variable_value FROM t1, information_schema.session_variables WHERE variable_name LIKE "wsrep_dirty_reads" AND i = 1; ERROR 08S01: WSREP has not yet prepared node for application use SELECT 1; -1 -1 +ERROR 08S01: WSREP has not yet prepared node for application use USE information_schema; +ERROR 08S01: WSREP has not yet prepared node for application use SELECT * FROM information_schema.session_variables WHERE variable_name LIKE "wsrep_dirty_reads"; VARIABLE_NAME VARIABLE_VALUE WSREP_DIRTY_READS OFF diff --git a/mysql-test/suite/galera/r/galera_var_reject_queries.result b/mysql-test/suite/galera/r/galera_var_reject_queries.result index e4b906f1717..5958e6ae981 100644 --- a/mysql-test/suite/galera/r/galera_var_reject_queries.result +++ b/mysql-test/suite/galera/r/galera_var_reject_queries.result @@ -7,7 +7,7 @@ ERROR 08S01: WSREP has not yet prepared node for application use SET GLOBAL wsrep_reject_queries = ALL_KILL; ERROR HY000: Lost connection to MySQL server during query SELECT * FROM t1; -ERROR 70100: Connection was killed +Got one of the listed errors SELECT * FROM t1; ERROR 08S01: WSREP has not yet prepared node for application use SELECT VARIABLE_VALUE = 2 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 
'wsrep_cluster_size'; diff --git a/mysql-test/suite/galera/t/galera_as_slave_nonprim.test b/mysql-test/suite/galera/t/galera_as_slave_nonprim.test index 46a93458271..26fec05dfe5 100644 --- a/mysql-test/suite/galera/t/galera_as_slave_nonprim.test +++ b/mysql-test/suite/galera/t/galera_as_slave_nonprim.test @@ -51,7 +51,7 @@ INSERT INTO t1 VALUES (1),(2),(3),(4),(5); --let $value = query_get_value(SHOW SLAVE STATUS, Last_SQL_Error, 1) --connection node_3 --disable_query_log ---eval SELECT "$value" IN ("Error 'Unknown command' on query. Default database: 'test'. Query: 'BEGIN'", "Node has dropped from cluster") AS expected_error +--eval SELECT "$value" IN ("Error 'WSREP has not yet prepared node for application use' on query. Default database: 'test'. Query: 'BEGIN'", "Node has dropped from cluster") AS expected_error --enable_query_log # Step #4. Bring back the async slave and restart replication @@ -85,8 +85,9 @@ DROP TABLE t1; STOP SLAVE; RESET SLAVE ALL; -CALL mtr.add_suppression("Slave SQL: Error 'Unknown command' on query"); -CALL mtr.add_suppression("Slave: Unknown command Error_code: 1047"); +CALL mtr.add_suppression("Slave SQL: Error 'WSREP has not yet prepared node for application use' on query"); +CALL mtr.add_suppression("Slave: WSREP has not yet prepared node for application use Error_code: 1047"); +CALL mtr.add_suppression("TORDERED} returned -107 \\(Transport endpoint is not connected\\)"); CALL mtr.add_suppression("Transport endpoint is not connected"); CALL mtr.add_suppression("Slave SQL: Error in Xid_log_event: Commit could not be completed, 'Deadlock found when trying to get lock; try restarting transaction', Error_code: 1213"); CALL mtr.add_suppression("Slave SQL: Node has dropped from cluster, Error_code: 1047"); diff --git a/mysql-test/suite/galera/t/galera_var_dirty_reads.test b/mysql-test/suite/galera/t/galera_var_dirty_reads.test index 138b7c1c703..85d759e4a27 100644 --- a/mysql-test/suite/galera/t/galera_var_dirty_reads.test +++ 
b/mysql-test/suite/galera/t/galera_var_dirty_reads.test @@ -47,8 +47,10 @@ SET @@session.wsrep_dirty_reads=OFF; --error ER_UNKNOWN_COM_ERROR SELECT i, variable_name, variable_value FROM t1, information_schema.session_variables WHERE variable_name LIKE "wsrep_dirty_reads" AND i = 1; +--error ER_UNKNOWN_COM_ERROR SELECT 1; +--error ER_UNKNOWN_COM_ERROR USE information_schema; SELECT * FROM information_schema.session_variables WHERE variable_name LIKE "wsrep_dirty_reads"; diff --git a/mysql-test/suite/galera/t/galera_var_reject_queries.test b/mysql-test/suite/galera/t/galera_var_reject_queries.test index b89cae78aa6..b1af9d8aa2b 100644 --- a/mysql-test/suite/galera/t/galera_var_reject_queries.test +++ b/mysql-test/suite/galera/t/galera_var_reject_queries.test @@ -23,7 +23,7 @@ SELECT * FROM t1; SET GLOBAL wsrep_reject_queries = ALL_KILL; --connection node_1a ---error ER_CONNECTION_KILLED +--error ER_CONNECTION_KILLED,2013 SELECT * FROM t1; --connect node_1b, 127.0.0.1, root, , test, $NODE_MYPORT_1 From bc2e7d7889e35f30390d1ef8653f6ac9c038b5b2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jan=20Lindstr=C3=B6m?= Date: Tue, 20 Mar 2018 12:10:17 +0200 Subject: [PATCH 087/139] Fix test case MW-329. 
--- mysql-test/suite/galera/r/MW-329.result | 1 + mysql-test/suite/galera/t/MW-329.test | 2 ++ 2 files changed, 3 insertions(+) diff --git a/mysql-test/suite/galera/r/MW-329.result b/mysql-test/suite/galera/r/MW-329.result index a79ba598d69..4666d131c7d 100644 --- a/mysql-test/suite/galera/r/MW-329.result +++ b/mysql-test/suite/galera/r/MW-329.result @@ -19,3 +19,4 @@ VARIABLE_VALUE > 0 DROP PROCEDURE proc_insert; DROP TABLE t1; CALL mtr.add_suppression("conflict state 3 after post commit"); +set global innodb_status_output=Default; diff --git a/mysql-test/suite/galera/t/MW-329.test b/mysql-test/suite/galera/t/MW-329.test index d9f9a787442..bf045832113 100644 --- a/mysql-test/suite/galera/t/MW-329.test +++ b/mysql-test/suite/galera/t/MW-329.test @@ -83,3 +83,5 @@ DROP TABLE t1; # Due to MW-330, Multiple "conflict state 3 after post commit" warnings if table is dropped while SP is running CALL mtr.add_suppression("conflict state 3 after post commit"); + +set global innodb_status_output=Default; \ No newline at end of file From eee73ddfbb29816320c9fc78c8ff1012cac6567a Mon Sep 17 00:00:00 2001 From: Thirunarayanan Balathandayuthapani Date: Tue, 20 Mar 2018 17:51:57 +0530 Subject: [PATCH 088/139] MDEV-12255 innodb_prefix_index_cluster_optimization hits debug build assert on UTF-8 columns Problem: ======= (1) Multi-byte character cases are not considered during prefix index cluster optimization check. It leads to fetch of improper results during read operation. (2) Strict assert in row_sel_field_store_in_mysql_format_func and it asserts for prefix index record to mysql conversion. Solution: ======== (1) Consider the case of multi-byte character during prefix index cluster optimization check. (2) Relax the assert in row_sel_field_store_in_mysql_format_func to allow prefix index record to mysql format conversion. 
The patch is taken from https://github.com/laurynas-biveinis/percona-server/commit/1eee538087ffcf121c37f844b447ba5480faf081 --- .../r/fast_prefix_index_fetch_innodb.result | 361 +++++++++- .../t/fast_prefix_index_fetch_innodb.test | 664 ++++++++++++++++-- storage/innobase/row/row0sel.cc | 183 +++-- storage/xtradb/row/row0sel.cc | 180 +++-- 4 files changed, 1155 insertions(+), 233 deletions(-) diff --git a/mysql-test/r/fast_prefix_index_fetch_innodb.result b/mysql-test/r/fast_prefix_index_fetch_innodb.result index 92af85f7fdb..c6d96389b08 100644 --- a/mysql-test/r/fast_prefix_index_fetch_innodb.result +++ b/mysql-test/r/fast_prefix_index_fetch_innodb.result @@ -30,73 +30,372 @@ id fake_id bigfield 33 1033 yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy 128 1128 zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz # Baseline sanity check: 0, 0. +select "no-op query"; no-op query no-op query -cluster_lookups_matched -1 -cluster_lookups_avoided_matched -1 +select @cluster_lookups; +@cluster_lookups +0 +select @cluster_lookups_avoided; +@cluster_lookups_avoided +0 # Eligible for optimization. +select id, bigfield from prefixinno where bigfield = repeat('d', 31); id bigfield 31 ddddddddddddddddddddddddddddddd -cluster_lookups_matched -1 -cluster_lookups_avoided_matched +select @cluster_lookups; +@cluster_lookups +0 +select @cluster_lookups_avoided; +@cluster_lookups_avoided 1 # Eligible for optimization, access via fake_id only. +select id, bigfield from prefixinno where fake_id = 1031; id bigfield 31 ddddddddddddddddddddddddddddddd -cluster_lookups_matched -1 -cluster_lookups_avoided_matched +select @cluster_lookups; +@cluster_lookups +0 +select @cluster_lookups_avoided; +@cluster_lookups_avoided 1 # Not eligible for optimization, access via fake_id of big row. 
+select id, bigfield from prefixinno where fake_id = 1033; id bigfield 33 yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy -cluster_lookups_matched -1 -cluster_lookups_avoided_matched +select @cluster_lookups; +@cluster_lookups 1 +select @cluster_lookups_avoided; +@cluster_lookups_avoided +0 # Not eligible for optimization. +select id, bigfield from prefixinno where bigfield = repeat('x', 32); id bigfield 32 xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx -cluster_lookups_matched -1 -cluster_lookups_avoided_matched +select @cluster_lookups; +@cluster_lookups 1 +select @cluster_lookups_avoided; +@cluster_lookups_avoided +0 # Not eligible for optimization. +select id, bigfield from prefixinno where bigfield = repeat('y', 33); id bigfield 33 yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy -cluster_lookups_matched -1 -cluster_lookups_avoided_matched +select @cluster_lookups; +@cluster_lookups 1 +select @cluster_lookups_avoided; +@cluster_lookups_avoided +0 # Eligible, should not increment lookup counter. +select id, bigfield from prefixinno where bigfield = repeat('b', 8); id bigfield 8 bbbbbbbb -cluster_lookups_matched -1 -cluster_lookups_avoided_matched +select @cluster_lookups; +@cluster_lookups +0 +select @cluster_lookups_avoided; +@cluster_lookups_avoided 1 # Eligible, should not increment lookup counter. +select id, bigfield from prefixinno where bigfield = repeat('c', 24); id bigfield 24 cccccccccccccccccccccccc -cluster_lookups_matched -1 -cluster_lookups_avoided_matched +select @cluster_lookups; +@cluster_lookups +0 +select @cluster_lookups_avoided; +@cluster_lookups_avoided 1 # Should increment lookup counter. 
+select id, bigfield from prefixinno where bigfield = repeat('z', 128); id bigfield 128 zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz -cluster_lookups_matched -1 -cluster_lookups_avoided_matched +select @cluster_lookups; +@cluster_lookups 1 +select @cluster_lookups_avoided; +@cluster_lookups_avoided +0 # Disable optimization, confirm we still increment counter. +set global innodb_prefix_index_cluster_optimization = OFF; +select id, bigfield from prefixinno where fake_id = 1033; id bigfield 33 yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy -cluster_lookups_matched +select @cluster_lookups; +@cluster_lookups 1 -cluster_lookups_avoided_matched +select @cluster_lookups_avoided; +@cluster_lookups_avoided +0 +drop table prefixinno; +# Multi-byte handling case +set global innodb_prefix_index_cluster_optimization = ON; +SET NAMES utf8mb4; +CREATE TABLE t1( +f1 varchar(10) CHARACTER SET UTF8MB4 COLLATE UTF8MB4_BIN, +INDEX (f1(3)))ENGINE=INNODB; +INSERT INTO t1 VALUES('a'), ('cccc'), ('až'), ('cčc'), ('ggᵷg'), ('¢¢'); +INSERT INTO t1 VALUES('தமிழ்'), ('🐱🌑'), ('🌒'), ('🌑'); +INSERT INTO t1 VALUES('😊me'), ('eu€'), ('ls¢'); +# Eligible - record length is shorter than prefix +SELECT f1 FROM t1 FORCE INDEX (`f1`) WHERE f1 = 'a'; +f1 +a +select @cluster_lookups; +@cluster_lookups +0 +select @cluster_lookups_avoided; +@cluster_lookups_avoided 1 -# make test suite happy by cleaning up our mess +# Not eligible - record length longer than prefix length +SELECT f1 FROM t1 FORCE INDEX (`f1`) WHERE f1 like 'c%'; +f1 +cccc +cčc +select @cluster_lookups; +@cluster_lookups +3 +select @cluster_lookups_avoided; +@cluster_lookups_avoided +0 +# Eligible - record length shorter than prefix length +SELECT f1 FROM t1 FORCE INDEX (`f1`) WHERE f1 = 'až'; +f1 +až +select @cluster_lookups; +@cluster_lookups +0 +select @cluster_lookups_avoided; +@cluster_lookups_avoided +1 +# Not eligible - record length longer than prefix length 
+SELECT f1 FROM t1 FORCE INDEX (`f1`) WHERE f1 = 'தமிழ்'; +f1 +தமிழ் +select @cluster_lookups; +@cluster_lookups +1 +select @cluster_lookups_avoided; +@cluster_lookups_avoided +0 +# Not eligible - record length longer than prefix length +SELECT f1 FROM t1 FORCE INDEX (`f1`) WHERE f1 like 'ggᵷ%'; +f1 +ggᵷg +select @cluster_lookups; +@cluster_lookups +1 +select @cluster_lookups_avoided; +@cluster_lookups_avoided +0 +# Not eligible - record length longer than prefix length +SELECT f1 FROM t1 FORCE INDEX (`f1`) WHERE f1 like '😊%'; +f1 +😊me +select @cluster_lookups; +@cluster_lookups +1 +select @cluster_lookups_avoided; +@cluster_lookups_avoided +0 +# Not eligible - record length longer than prefix length +SELECT f1 FROM t1 FORCE INDEX (`f1`) WHERE f1 = 'ls¢'; +f1 +ls¢ +select @cluster_lookups; +@cluster_lookups +1 +select @cluster_lookups_avoided; +@cluster_lookups_avoided +0 +# Eligible - record length shorter than prefix length +SELECT f1 FROM t1 FORCE INDEX (`f1`) WHERE f1 like '¢¢%'; +f1 +¢¢ +select @cluster_lookups; +@cluster_lookups +1 +select @cluster_lookups_avoided; +@cluster_lookups_avoided +1 +# Eligible - record length shorter than prefix length +SELECT f1 FROM t1 FORCE INDEX (`f1`) WHERE f1 like '🐱🌑%'; +f1 +🐱🌑 +select @cluster_lookups; +@cluster_lookups +1 +select @cluster_lookups_avoided; +@cluster_lookups_avoided +1 +# Not eligible - record length longer than prefix length +SELECT f1 FROM t1 FORCE INDEX (`f1`) WHERE f1 like '🌑%'; +f1 +🌑 +select @cluster_lookups; +@cluster_lookups +0 +select @cluster_lookups_avoided; +@cluster_lookups_avoided +2 +# Not eligible - record length longer than prefix length +SELECT f1 FROM t1 FORCE INDEX (`f1`) WHERE f1 like '🌒%'; +f1 +🌒 +select @cluster_lookups; +@cluster_lookups +0 +select @cluster_lookups_avoided; +@cluster_lookups_avoided +2 +DROP TABLE t1; +# Multi-byte with minimum character length > 1 bytes +CREATE TABLE t1( +f1 varchar(10) CHARACTER SET UTF16 COLLATE UTF16_BIN, +INDEX (f1(3)))ENGINE=INNODB; +INSERT 
INTO t1 VALUES('a'), ('cccc'), ('až'), ('cčc'), ('ggᵷg'), ('¢¢'); +INSERT INTO t1 VALUES('தமிழ்'), ('🐱🌑'), ('🌒'), ('🌑'); +INSERT INTO t1 VALUES('😊me'), ('eu€'), ('ls¢'); +# Eligible - record length is shorter than prefix +SELECT f1 FROM t1 FORCE INDEX (`f1`) WHERE f1 = 'a'; +f1 +a +select @cluster_lookups; +@cluster_lookups +0 +select @cluster_lookups_avoided; +@cluster_lookups_avoided +1 +# Not eligible - record length longer than prefix length +SELECT f1 FROM t1 FORCE INDEX (`f1`) WHERE f1 like 'c%'; +f1 +cccc +cčc +select @cluster_lookups; +@cluster_lookups +3 +select @cluster_lookups_avoided; +@cluster_lookups_avoided +0 +# Eligible - record length shorter than prefix length +SELECT f1 FROM t1 FORCE INDEX (`f1`) WHERE f1 = 'až'; +f1 +až +select @cluster_lookups; +@cluster_lookups +0 +select @cluster_lookups_avoided; +@cluster_lookups_avoided +1 +# Not eligible - record length longer than prefix length +SELECT f1 FROM t1 FORCE INDEX (`f1`) WHERE f1 = 'தமிழ்'; +f1 +தமிழ் +select @cluster_lookups; +@cluster_lookups +1 +select @cluster_lookups_avoided; +@cluster_lookups_avoided +0 +# Not eligible - record length longer than prefix length +SELECT f1 FROM t1 FORCE INDEX (`f1`) WHERE f1 like 'ggᵷ%'; +f1 +ggᵷg +select @cluster_lookups; +@cluster_lookups +2 +select @cluster_lookups_avoided; +@cluster_lookups_avoided +0 +# Not eligible - record length longer than prefix length +SELECT f1 FROM t1 FORCE INDEX (`f1`) WHERE f1 like '😊%'; +f1 +😊me +select @cluster_lookups; +@cluster_lookups +1 +select @cluster_lookups_avoided; +@cluster_lookups_avoided +0 +# Not eligible - record length longer than prefix length +SELECT f1 FROM t1 FORCE INDEX (`f1`) WHERE f1 = 'ls¢'; +f1 +ls¢ +select @cluster_lookups; +@cluster_lookups +1 +select @cluster_lookups_avoided; +@cluster_lookups_avoided +0 +# Eligible - record length shorter than prefix length +SELECT f1 FROM t1 FORCE INDEX(`f1`) WHERE f1 like '¢¢%'; +f1 +¢¢ +select @cluster_lookups; +@cluster_lookups +1 +select 
@cluster_lookups_avoided; +@cluster_lookups_avoided +1 +# Eligible - record length shorter than prefix length +SELECT f1 FROM t1 FORCE INDEX (`f1`) WHERE f1 like '🐱🌑%'; +f1 +🐱🌑 +select @cluster_lookups; +@cluster_lookups +2 +select @cluster_lookups_avoided; +@cluster_lookups_avoided +0 +# Eligible - record length is shorter than prefix length +SELECT f1 FROM t1 FORCE INDEX (`f1`) WHERE f1 like '🌑%'; +f1 +🌑 +select @cluster_lookups; +@cluster_lookups +0 +select @cluster_lookups_avoided; +@cluster_lookups_avoided +2 +# Eligible - record length is shorter than prefix length +SELECT f1 FROM t1 FORCE INDEX (`f1`) WHERE f1 like '🌒%'; +f1 +🌒 +select @cluster_lookups; +@cluster_lookups +1 +select @cluster_lookups_avoided; +@cluster_lookups_avoided +1 +DROP TABLE t1; +CREATE TABLE t1( +col1 INT, +col2 BLOB DEFAULT NULL, +INDEX `idx1`(col2(4), col1))ENGINE=INNODB; +INSERT INTO t1 VALUES (2, 'test'), (3, repeat('test1', 2000)); +INSERT INTO t1(col1) VALUES(1); +# Eligible - record length is shorter than prefix length +SELECT col1 FROM t1 FORCE INDEX (`idx1`) WHERE col2 is NULL; +col1 +1 +select @cluster_lookups; +@cluster_lookups +0 +select @cluster_lookups_avoided; +@cluster_lookups_avoided +1 +# Not eligible - record length longer than prefix index +SELECT col1 FROM t1 FORCE INDEX (`idx1`) WHERE col2 like 'test1%'; +col1 +3 +select @cluster_lookups; +@cluster_lookups +2 +select @cluster_lookups_avoided; +@cluster_lookups_avoided +0 +DROP TABLE t1; +set global innodb_prefix_index_cluster_optimization = OFF; diff --git a/mysql-test/t/fast_prefix_index_fetch_innodb.test b/mysql-test/t/fast_prefix_index_fetch_innodb.test index e563e65ec2a..c3b3440d82d 100644 --- a/mysql-test/t/fast_prefix_index_fetch_innodb.test +++ b/mysql-test/t/fast_prefix_index_fetch_innodb.test @@ -31,120 +31,638 @@ select * from prefixinno; let $show_count_statement = show status like 'innodb_secondary_index_triggered_cluster_reads'; let $show_opt_statement = show status like 
'innodb_secondary_index_triggered_cluster_reads_avoided'; ---disable_query_log - --echo # Baseline sanity check: 0, 0. ---let $base_count = query_get_value($show_count_statement, Value, 1) ---let $base_opt = query_get_value($show_opt_statement, Value, 1) +let $base_count = query_get_value($show_count_statement, Value, 1); +let $base_opt = query_get_value($show_opt_statement, Value, 1); + select "no-op query"; ---let $count = query_get_value($show_count_statement, Value, 1) + +let $count = query_get_value($show_count_statement, Value, 1); +let $opt = query_get_value($show_opt_statement, Value, 1); + +--disable_query_log eval select $count - $base_count into @cluster_lookups; -select @cluster_lookups = 0 as cluster_lookups_matched; ---let $opt = query_get_value($show_opt_statement, Value, 1) -eval select $opt - $base_opt into @cluster_lookups; -select @cluster_lookups = 0 as cluster_lookups_avoided_matched; +eval select $opt - $base_opt into @cluster_lookups_avoided; +--enable_query_log + +select @cluster_lookups; +select @cluster_lookups_avoided; --echo # Eligible for optimization. 
---let $base_count = query_get_value($show_count_statement, Value, 1) ---let $base_opt = query_get_value($show_opt_statement, Value, 1) +let $base_count = query_get_value($show_count_statement, Value, 1); +let $base_opt = query_get_value($show_opt_statement, Value, 1); + select id, bigfield from prefixinno where bigfield = repeat('d', 31); ---let $count = query_get_value($show_count_statement, Value, 1) + +let $count = query_get_value($show_count_statement, Value, 1); +let $opt = query_get_value($show_opt_statement, Value, 1); + +--disable_query_log eval select $count - $base_count into @cluster_lookups; -select @cluster_lookups = 0 as cluster_lookups_matched; ---let $opt = query_get_value($show_opt_statement, Value, 1) -eval select $opt - $base_opt into @cluster_lookups; -select @cluster_lookups = 1 as cluster_lookups_avoided_matched; +eval select $opt - $base_opt into @cluster_lookups_avoided; +--enable_query_log + +select @cluster_lookups; +select @cluster_lookups_avoided; --echo # Eligible for optimization, access via fake_id only. 
---let $base_count = query_get_value($show_count_statement, Value, 1) ---let $base_opt = query_get_value($show_opt_statement, Value, 1) +let $base_count = query_get_value($show_count_statement, Value, 1); +let $base_opt = query_get_value($show_opt_statement, Value, 1); + select id, bigfield from prefixinno where fake_id = 1031; ---let $count = query_get_value($show_count_statement, Value, 1) + +let $count = query_get_value($show_count_statement, Value, 1); +let $opt = query_get_value($show_opt_statement, Value, 1); + +--disable_query_log eval select $count - $base_count into @cluster_lookups; -select @cluster_lookups = 0 as cluster_lookups_matched; ---let $opt = query_get_value($show_opt_statement, Value, 1) -eval select $opt - $base_opt into @cluster_lookups; -select @cluster_lookups = 1 as cluster_lookups_avoided_matched; +eval select $opt - $base_opt into @cluster_lookups_avoided; +--enable_query_log + +select @cluster_lookups; +select @cluster_lookups_avoided; --echo # Not eligible for optimization, access via fake_id of big row. 
---let $base_count = query_get_value($show_count_statement, Value, 1) ---let $base_opt = query_get_value($show_opt_statement, Value, 1) +let $base_count = query_get_value($show_count_statement, Value, 1); +let $base_opt = query_get_value($show_opt_statement, Value, 1); + select id, bigfield from prefixinno where fake_id = 1033; ---let $count = query_get_value($show_count_statement, Value, 1) + +let $count = query_get_value($show_count_statement, Value, 1); +let $opt = query_get_value($show_opt_statement, Value, 1); + +--disable_query_log eval select $count - $base_count into @cluster_lookups; -select @cluster_lookups = 1 as cluster_lookups_matched; ---let $opt = query_get_value($show_opt_statement, Value, 1) -eval select $opt - $base_opt into @cluster_lookups; -select @cluster_lookups = 0 as cluster_lookups_avoided_matched; +eval select $opt - $base_opt into @cluster_lookups_avoided; +--enable_query_log + +select @cluster_lookups; +select @cluster_lookups_avoided; --echo # Not eligible for optimization. 
---let $base_count = query_get_value($show_count_statement, Value, 1) ---let $base_opt = query_get_value($show_opt_statement, Value, 1) +let $base_count = query_get_value($show_count_statement, Value, 1); +let $base_opt = query_get_value($show_opt_statement, Value, 1); + select id, bigfield from prefixinno where bigfield = repeat('x', 32); ---let $count = query_get_value($show_count_statement, Value, 1) + +let $count = query_get_value($show_count_statement, Value, 1); +let $opt = query_get_value($show_opt_statement, Value, 1); + +--disable_query_log eval select $count - $base_count into @cluster_lookups; -select @cluster_lookups = 1 as cluster_lookups_matched; ---let $opt = query_get_value($show_opt_statement, Value, 1) -eval select $opt - $base_opt into @cluster_lookups; -select @cluster_lookups = 0 as cluster_lookups_avoided_matched; +eval select $opt - $base_opt into @cluster_lookups_avoided; +--enable_query_log + +select @cluster_lookups; +select @cluster_lookups_avoided; --echo # Not eligible for optimization. 
---let $base_count = query_get_value($show_count_statement, Value, 1) ---let $base_opt = query_get_value($show_opt_statement, Value, 1) +let $base_count = query_get_value($show_count_statement, Value, 1); +let $base_opt = query_get_value($show_opt_statement, Value, 1); + select id, bigfield from prefixinno where bigfield = repeat('y', 33); ---let $count = query_get_value($show_count_statement, Value, 1) + +let $count = query_get_value($show_count_statement, Value, 1); +let $opt = query_get_value($show_opt_statement, Value, 1); + +--disable_query_log eval select $count - $base_count into @cluster_lookups; -select @cluster_lookups = 1 as cluster_lookups_matched; ---let $opt = query_get_value($show_opt_statement, Value, 1) -eval select $opt - $base_opt into @cluster_lookups; -select @cluster_lookups = 0 as cluster_lookups_avoided_matched; +eval select $opt - $base_opt into @cluster_lookups_avoided; +--enable_query_log + +select @cluster_lookups; +select @cluster_lookups_avoided; --echo # Eligible, should not increment lookup counter. 
---let $base_count = query_get_value($show_count_statement, Value, 1) ---let $base_opt = query_get_value($show_opt_statement, Value, 1) +let $base_count = query_get_value($show_count_statement, Value, 1); +let $base_opt = query_get_value($show_opt_statement, Value, 1); + select id, bigfield from prefixinno where bigfield = repeat('b', 8); ---let $count = query_get_value($show_count_statement, Value, 1) + +let $count = query_get_value($show_count_statement, Value, 1); +let $opt = query_get_value($show_opt_statement, Value, 1); + +--disable_query_log eval select $count - $base_count into @cluster_lookups; -select @cluster_lookups = 0 as cluster_lookups_matched; ---let $opt = query_get_value($show_opt_statement, Value, 1) -eval select $opt - $base_opt into @cluster_lookups; -select @cluster_lookups = 1 as cluster_lookups_avoided_matched; +eval select $opt - $base_opt into @cluster_lookups_avoided; +--enable_query_log + +select @cluster_lookups; +select @cluster_lookups_avoided; --echo # Eligible, should not increment lookup counter. 
---let $base_count = query_get_value($show_count_statement, Value, 1) ---let $base_opt = query_get_value($show_opt_statement, Value, 1) +let $base_count = query_get_value($show_count_statement, Value, 1); +let $base_opt = query_get_value($show_opt_statement, Value, 1); + select id, bigfield from prefixinno where bigfield = repeat('c', 24); ---let $count = query_get_value($show_count_statement, Value, 1) + +let $count = query_get_value($show_count_statement, Value, 1); +let $opt = query_get_value($show_opt_statement, Value, 1); + +--disable_query_log eval select $count - $base_count into @cluster_lookups; -select @cluster_lookups = 0 as cluster_lookups_matched; ---let $opt = query_get_value($show_opt_statement, Value, 1) -eval select $opt - $base_opt into @cluster_lookups; -select @cluster_lookups = 1 as cluster_lookups_avoided_matched; +eval select $opt - $base_opt into @cluster_lookups_avoided; +--enable_query_log + +select @cluster_lookups; +select @cluster_lookups_avoided; --echo # Should increment lookup counter. 
---let $base_count = query_get_value($show_count_statement, Value, 1) ---let $base_opt = query_get_value($show_opt_statement, Value, 1) +let $base_count = query_get_value($show_count_statement, Value, 1); +let $base_opt = query_get_value($show_opt_statement, Value, 1); + select id, bigfield from prefixinno where bigfield = repeat('z', 128); ---let $count = query_get_value($show_count_statement, Value, 1) + +let $count = query_get_value($show_count_statement, Value, 1); +let $opt = query_get_value($show_opt_statement, Value, 1); + +--disable_query_log eval select $count - $base_count into @cluster_lookups; -select @cluster_lookups = 1 as cluster_lookups_matched; ---let $opt = query_get_value($show_opt_statement, Value, 1) -eval select $opt - $base_opt into @cluster_lookups; -select @cluster_lookups = 0 as cluster_lookups_avoided_matched; +eval select $opt - $base_opt into @cluster_lookups_avoided; +--enable_query_log + +select @cluster_lookups; +select @cluster_lookups_avoided; --echo # Disable optimization, confirm we still increment counter. 
---let $base_count = query_get_value($show_count_statement, Value, 1) ---let $base_opt = query_get_value($show_opt_statement, Value, 1) +let $base_count = query_get_value($show_count_statement, Value, 1); +let $base_opt = query_get_value($show_opt_statement, Value, 1); + set global innodb_prefix_index_cluster_optimization = OFF; select id, bigfield from prefixinno where fake_id = 1033; ---let $count = query_get_value($show_count_statement, Value, 1) + +let $count = query_get_value($show_count_statement, Value, 1); +let $opt = query_get_value($show_opt_statement, Value, 1); + +--disable_query_log eval select $count - $base_count into @cluster_lookups; -select @cluster_lookups = 1 as cluster_lookups_matched; ---let $opt = query_get_value($show_opt_statement, Value, 1) -eval select $opt - $base_opt into @cluster_lookups; -select @cluster_lookups = 0 as cluster_lookups_avoided_matched; +eval select $opt - $base_opt into @cluster_lookups_avoided; +--enable_query_log +select @cluster_lookups; +select @cluster_lookups_avoided; ---echo # make test suite happy by cleaning up our mess drop table prefixinno; + +--echo # Multi-byte handling case + +set global innodb_prefix_index_cluster_optimization = ON; +SET NAMES utf8mb4; +CREATE TABLE t1( + f1 varchar(10) CHARACTER SET UTF8MB4 COLLATE UTF8MB4_BIN, + INDEX (f1(3)))ENGINE=INNODB; + +INSERT INTO t1 VALUES('a'), ('cccc'), ('až'), ('cčc'), ('ggᵷg'), ('¢¢'); +INSERT INTO t1 VALUES('தமிழ்'), ('🐱🌑'), ('🌒'), ('🌑'); +INSERT INTO t1 VALUES('😊me'), ('eu€'), ('ls¢'); + +--echo # Eligible - record length is shorter than prefix +let $base_count = query_get_value($show_count_statement, Value, 1); +let $base_opt = query_get_value($show_opt_statement, Value, 1); + +SELECT f1 FROM t1 FORCE INDEX (`f1`) WHERE f1 = 'a'; + +let $count = query_get_value($show_count_statement, Value, 1); +let $opt = query_get_value($show_opt_statement, Value, 1); + +--disable_query_log +eval set @cluster_lookups = $count - $base_count; +eval set 
@cluster_lookups_avoided = $opt - $base_opt; +--enable_query_log + +select @cluster_lookups; +select @cluster_lookups_avoided; + +--echo # Not eligible - record length longer than prefix length +let $base_count = query_get_value($show_count_statement, Value, 1); +let $base_opt = query_get_value($show_opt_statement, Value, 1); + +SELECT f1 FROM t1 FORCE INDEX (`f1`) WHERE f1 like 'c%'; + +let $count = query_get_value($show_count_statement, Value, 1); +let $opt = query_get_value($show_opt_statement, Value, 1); + +--disable_query_log +eval set @cluster_lookups = $count - $base_count; +eval set @cluster_lookups_avoided = $opt - $base_opt; +--enable_query_log + +select @cluster_lookups; +select @cluster_lookups_avoided; + +--echo # Eligible - record length shorter than prefix length +let $base_count = query_get_value($show_count_statement, Value, 1); +let $base_opt = query_get_value($show_opt_statement, Value, 1); + +SELECT f1 FROM t1 FORCE INDEX (`f1`) WHERE f1 = 'až'; + +let $count = query_get_value($show_count_statement, Value, 1); +let $opt = query_get_value($show_opt_statement, Value, 1); + +--disable_query_log +eval set @cluster_lookups = $count - $base_count; +eval set @cluster_lookups_avoided = $opt - $base_opt; +--enable_query_log + +select @cluster_lookups; +select @cluster_lookups_avoided; + +--echo # Not eligible - record length longer than prefix length + +let $base_count = query_get_value($show_count_statement, Value, 1); +let $base_opt = query_get_value($show_opt_statement, Value, 1); + +SELECT f1 FROM t1 FORCE INDEX (`f1`) WHERE f1 = 'தமிழ்'; + +let $count = query_get_value($show_count_statement, Value, 1); +let $opt = query_get_value($show_opt_statement, Value, 1); + +--disable_query_log +eval set @cluster_lookups = $count - $base_count; +eval set @cluster_lookups_avoided = $opt - $base_opt; +--enable_query_log + +select @cluster_lookups; +select @cluster_lookups_avoided; + +--echo # Not eligible - record length longer than prefix length + +let 
$base_count = query_get_value($show_count_statement, Value, 1); +let $base_opt = query_get_value($show_opt_statement, Value, 1); + +SELECT f1 FROM t1 FORCE INDEX (`f1`) WHERE f1 like 'ggᵷ%'; + +let $count = query_get_value($show_count_statement, Value, 1); +let $opt = query_get_value($show_opt_statement, Value, 1); + +--disable_query_log +eval set @cluster_lookups = $count - $base_count; +eval set @cluster_lookups_avoided = $opt - $base_opt; +--enable_query_log + +select @cluster_lookups; +select @cluster_lookups_avoided; + +--echo # Not eligible - record length longer than prefix length + +let $base_count = query_get_value($show_count_statement, Value, 1); +let $base_opt = query_get_value($show_opt_statement, Value, 1); + +SELECT f1 FROM t1 FORCE INDEX (`f1`) WHERE f1 like '😊%'; + +let $count = query_get_value($show_count_statement, Value, 1); +let $opt = query_get_value($show_opt_statement, Value, 1); + +--disable_query_log +eval set @cluster_lookups = $count - $base_count; +eval set @cluster_lookups_avoided = $opt - $base_opt; +--enable_query_log + +select @cluster_lookups; +select @cluster_lookups_avoided; + +--echo # Not eligible - record length longer than prefix length + +let $base_count = query_get_value($show_count_statement, Value, 1); +let $base_opt = query_get_value($show_opt_statement, Value, 1); + +SELECT f1 FROM t1 FORCE INDEX (`f1`) WHERE f1 = 'ls¢'; + +let $count = query_get_value($show_count_statement, Value, 1); +let $opt = query_get_value($show_opt_statement, Value, 1); + +--disable_query_log +eval set @cluster_lookups = $count - $base_count; +eval set @cluster_lookups_avoided = $opt - $base_opt; +--enable_query_log + +select @cluster_lookups; +select @cluster_lookups_avoided; + +--echo # Eligible - record length shorter than prefix length + +let $base_count = query_get_value($show_count_statement, Value, 1); +let $base_opt = query_get_value($show_opt_statement, Value, 1); + +SELECT f1 FROM t1 FORCE INDEX (`f1`) WHERE f1 like '¢¢%'; + +let 
$count = query_get_value($show_count_statement, Value, 1); +let $opt = query_get_value($show_opt_statement, Value, 1); + +--disable_query_log +eval set @cluster_lookups = $count - $base_count; +eval set @cluster_lookups_avoided = $opt - $base_opt; +--enable_query_log + +select @cluster_lookups; +select @cluster_lookups_avoided; + +--echo # Eligible - record length shorter than prefix length + +let $base_count = query_get_value($show_count_statement, Value, 1); +let $base_opt = query_get_value($show_opt_statement, Value, 1); + +SELECT f1 FROM t1 FORCE INDEX (`f1`) WHERE f1 like '🐱🌑%'; + +let $count = query_get_value($show_count_statement, Value, 1); +let $opt = query_get_value($show_opt_statement, Value, 1); + +--disable_query_log +eval set @cluster_lookups = $count - $base_count; +eval set @cluster_lookups_avoided = $opt - $base_opt; +--enable_query_log + +select @cluster_lookups; +select @cluster_lookups_avoided; + +--echo # Not eligible - record length longer than prefix length + +let $base_count = query_get_value($show_count_statement, Value, 1); +let $base_opt = query_get_value($show_opt_statement, Value, 1); + +SELECT f1 FROM t1 FORCE INDEX (`f1`) WHERE f1 like '🌑%'; + +let $count = query_get_value($show_count_statement, Value, 1); +let $opt = query_get_value($show_opt_statement, Value, 1); + +--disable_query_log +eval set @cluster_lookups = $count - $base_count; +eval set @cluster_lookups_avoided = $opt - $base_opt; +--enable_query_log + +select @cluster_lookups; +select @cluster_lookups_avoided; + +--echo # Not eligible - record length longer than prefix length + +let $base_count = query_get_value($show_count_statement, Value, 1); +let $base_opt = query_get_value($show_opt_statement, Value, 1); + +SELECT f1 FROM t1 FORCE INDEX (`f1`) WHERE f1 like '🌒%'; + +let $count = query_get_value($show_count_statement, Value, 1); +let $opt = query_get_value($show_opt_statement, Value, 1); + +--disable_query_log +eval set @cluster_lookups = $count - $base_count; +eval 
set @cluster_lookups_avoided = $opt - $base_opt; +--enable_query_log + +select @cluster_lookups; +select @cluster_lookups_avoided; + +DROP TABLE t1; + +--echo # Multi-byte with minimum character length > 1 bytes + +CREATE TABLE t1( + f1 varchar(10) CHARACTER SET UTF16 COLLATE UTF16_BIN, + INDEX (f1(3)))ENGINE=INNODB; + +INSERT INTO t1 VALUES('a'), ('cccc'), ('až'), ('cčc'), ('ggᵷg'), ('¢¢'); +INSERT INTO t1 VALUES('தமிழ்'), ('🐱🌑'), ('🌒'), ('🌑'); +INSERT INTO t1 VALUES('😊me'), ('eu€'), ('ls¢'); + +--echo # Eligible - record length is shorter than prefix +let $base_count = query_get_value($show_count_statement, Value, 1); +let $base_opt = query_get_value($show_opt_statement, Value, 1); + +SELECT f1 FROM t1 FORCE INDEX (`f1`) WHERE f1 = 'a'; + +let $count = query_get_value($show_count_statement, Value, 1); +let $opt = query_get_value($show_opt_statement, Value, 1); + +--disable_query_log +eval set @cluster_lookups = $count - $base_count; +eval set @cluster_lookups_avoided = $opt - $base_opt; +--enable_query_log + +select @cluster_lookups; +select @cluster_lookups_avoided; + +--echo # Not eligible - record length longer than prefix length +let $base_count = query_get_value($show_count_statement, Value, 1); +let $base_opt = query_get_value($show_opt_statement, Value, 1); + +SELECT f1 FROM t1 FORCE INDEX (`f1`) WHERE f1 like 'c%'; + +let $count = query_get_value($show_count_statement, Value, 1); +let $opt = query_get_value($show_opt_statement, Value, 1); + +--disable_query_log +eval set @cluster_lookups = $count - $base_count; +eval set @cluster_lookups_avoided = $opt - $base_opt; +--enable_query_log + +select @cluster_lookups; +select @cluster_lookups_avoided; + +--echo # Eligible - record length shorter than prefix length + +let $base_count = query_get_value($show_count_statement, Value, 1); +let $base_opt = query_get_value($show_opt_statement, Value, 1); + +SELECT f1 FROM t1 FORCE INDEX (`f1`) WHERE f1 = 'až'; + +let $count = query_get_value($show_count_statement, 
Value, 1); +let $opt = query_get_value($show_opt_statement, Value, 1); + +--disable_query_log +eval set @cluster_lookups = $count - $base_count; +eval set @cluster_lookups_avoided = $opt - $base_opt; +--enable_query_log + +select @cluster_lookups; +select @cluster_lookups_avoided; + +--echo # Not eligible - record length longer than prefix length + +let $base_count = query_get_value($show_count_statement, Value, 1); +let $base_opt = query_get_value($show_opt_statement, Value, 1); + +SELECT f1 FROM t1 FORCE INDEX (`f1`) WHERE f1 = 'தமிழ்'; + +let $count = query_get_value($show_count_statement, Value, 1); +let $opt = query_get_value($show_opt_statement, Value, 1); + +--disable_query_log +eval set @cluster_lookups = $count - $base_count; +eval set @cluster_lookups_avoided = $opt - $base_opt; +--enable_query_log + +select @cluster_lookups; +select @cluster_lookups_avoided; + +--echo # Not eligible - record length longer than prefix length + +let $base_count = query_get_value($show_count_statement, Value, 1); +let $base_opt = query_get_value($show_opt_statement, Value, 1); + +SELECT f1 FROM t1 FORCE INDEX (`f1`) WHERE f1 like 'ggᵷ%'; + +let $count = query_get_value($show_count_statement, Value, 1); +let $opt = query_get_value($show_opt_statement, Value, 1); + +--disable_query_log +eval set @cluster_lookups = $count - $base_count; +eval set @cluster_lookups_avoided = $opt - $base_opt; +--enable_query_log + +select @cluster_lookups; +select @cluster_lookups_avoided; + +--echo # Not eligible - record length longer than prefix length + +let $base_count = query_get_value($show_count_statement, Value, 1); +let $base_opt = query_get_value($show_opt_statement, Value, 1); + +SELECT f1 FROM t1 FORCE INDEX (`f1`) WHERE f1 like '😊%'; + +let $count = query_get_value($show_count_statement, Value, 1); +let $opt = query_get_value($show_opt_statement, Value, 1); + +--disable_query_log +eval set @cluster_lookups = $count - $base_count; +eval set @cluster_lookups_avoided = $opt - 
$base_opt; +--enable_query_log + +select @cluster_lookups; +select @cluster_lookups_avoided; + +--echo # Not eligible - record length longer than prefix length + +let $base_count = query_get_value($show_count_statement, Value, 1); +let $base_opt = query_get_value($show_opt_statement, Value, 1); + +SELECT f1 FROM t1 FORCE INDEX (`f1`) WHERE f1 = 'ls¢'; + +let $count = query_get_value($show_count_statement, Value, 1); +let $opt = query_get_value($show_opt_statement, Value, 1); + +--disable_query_log +eval set @cluster_lookups = $count - $base_count; +eval set @cluster_lookups_avoided = $opt - $base_opt; +--enable_query_log + +select @cluster_lookups; +select @cluster_lookups_avoided; + +--echo # Eligible - record length shorter than prefix length + +let $base_count = query_get_value($show_count_statement, Value, 1); +let $base_opt = query_get_value($show_opt_statement, Value, 1); + +SELECT f1 FROM t1 FORCE INDEX(`f1`) WHERE f1 like '¢¢%'; + +let $count = query_get_value($show_count_statement, Value, 1); +let $opt = query_get_value($show_opt_statement, Value, 1); + +--disable_query_log +eval set @cluster_lookups = $count - $base_count; +eval set @cluster_lookups_avoided = $opt - $base_opt; +--enable_query_log + +select @cluster_lookups; +select @cluster_lookups_avoided; + +--echo # Eligible - record length shorter than prefix length + +let $base_count = query_get_value($show_count_statement, Value, 1); +let $base_opt = query_get_value($show_opt_statement, Value, 1); + +SELECT f1 FROM t1 FORCE INDEX (`f1`) WHERE f1 like '🐱🌑%'; + +let $count = query_get_value($show_count_statement, Value, 1); +let $opt = query_get_value($show_opt_statement, Value, 1); + +--disable_query_log +eval set @cluster_lookups = $count - $base_count; +eval set @cluster_lookups_avoided = $opt - $base_opt; +--enable_query_log + +select @cluster_lookups; +select @cluster_lookups_avoided; + +--echo # Eligible - record length is shorter than prefix length + +let $base_count = 
query_get_value($show_count_statement, Value, 1); +let $base_opt = query_get_value($show_opt_statement, Value, 1); + +SELECT f1 FROM t1 FORCE INDEX (`f1`) WHERE f1 like '🌑%'; + +let $count = query_get_value($show_count_statement, Value, 1); +let $opt = query_get_value($show_opt_statement, Value, 1); + +--disable_query_log +eval set @cluster_lookups = $count - $base_count; +eval set @cluster_lookups_avoided = $opt - $base_opt; +--enable_query_log + +select @cluster_lookups; +select @cluster_lookups_avoided; + +--echo # Eligible - record length is shorter than prefix length + +let $base_count = query_get_value($show_count_statement, Value, 1); +let $base_opt = query_get_value($show_opt_statement, Value, 1); + +SELECT f1 FROM t1 FORCE INDEX (`f1`) WHERE f1 like '🌒%'; + +let $count = query_get_value($show_count_statement, Value, 1); +let $opt = query_get_value($show_opt_statement, Value, 1); + +--disable_query_log +eval set @cluster_lookups = $count - $base_count; +eval set @cluster_lookups_avoided = $opt - $base_opt; +--enable_query_log + +select @cluster_lookups; +select @cluster_lookups_avoided; + +DROP TABLE t1; + +CREATE TABLE t1( + col1 INT, + col2 BLOB DEFAULT NULL, + INDEX `idx1`(col2(4), col1))ENGINE=INNODB; +INSERT INTO t1 VALUES (2, 'test'), (3, repeat('test1', 2000)); +INSERT INTO t1(col1) VALUES(1); + +--echo # Eligible - record length is shorter than prefix length + +let $base_count = query_get_value($show_count_statement, Value, 1); +let $base_opt = query_get_value($show_opt_statement, Value, 1); + +SELECT col1 FROM t1 FORCE INDEX (`idx1`) WHERE col2 is NULL; + +let $count = query_get_value($show_count_statement, Value, 1); +let $opt = query_get_value($show_opt_statement, Value, 1); + +--disable_query_log +eval set @cluster_lookups = $count - $base_count; +eval set @cluster_lookups_avoided = $opt - $base_opt; +--enable_query_log + +select @cluster_lookups; +select @cluster_lookups_avoided; + +--echo # Not eligible - record length longer than prefix index 
+ +let $base_count = query_get_value($show_count_statement, Value, 1); +let $base_opt = query_get_value($show_opt_statement, Value, 1); + +SELECT col1 FROM t1 FORCE INDEX (`idx1`) WHERE col2 like 'test1%'; + +let $count = query_get_value($show_count_statement, Value, 1); +let $opt = query_get_value($show_opt_statement, Value, 1); + +--disable_query_log +eval set @cluster_lookups = $count - $base_count; +eval set @cluster_lookups_avoided = $opt - $base_opt; +--enable_query_log + +select @cluster_lookups; +select @cluster_lookups_avoided; + +DROP TABLE t1; set global innodb_prefix_index_cluster_optimization = OFF; diff --git a/storage/innobase/row/row0sel.cc b/storage/innobase/row/row0sel.cc index 3cf7bc6ee80..7af788973f2 100644 --- a/storage/innobase/row/row0sel.cc +++ b/storage/innobase/row/row0sel.cc @@ -2707,7 +2707,9 @@ row_sel_field_store_in_mysql_format_func( || !(templ->mysql_col_len % templ->mbmaxlen)); ut_ad(len * templ->mbmaxlen >= templ->mysql_col_len || (field_no == templ->icp_rec_field_no - && field->prefix_len > 0)); + && field->prefix_len > 0) + || templ->rec_field_is_prefix); + ut_ad(!(field->prefix_len % templ->mbmaxlen)); if (templ->mbminlen == 1 && templ->mbmaxlen != 1) { @@ -3667,6 +3669,118 @@ row_search_idx_cond_check( return(result); } +/** Return the record field length in characters. +@param[in] col table column of the field +@param[in] field_no field number +@param[in] rec physical record +@param[in] offsets field offsets in the physical record +@return field length in characters. 
*/ +static +size_t +rec_field_len_in_chars( + const dict_col_t* col, + const ulint field_no, + const rec_t* rec, + const ulint* offsets) +{ + const ulint cset = dtype_get_charset_coll(col->prtype); + const CHARSET_INFO* cs = all_charsets[cset]; + ulint rec_field_len; + const char* rec_field = reinterpret_cast( + rec_get_nth_field( + rec, offsets, field_no, &rec_field_len)); + + if (UNIV_UNLIKELY(!cs)) { + ib_logf(IB_LOG_LEVEL_WARN, "Missing collation " ULINTPF, cset); + return SIZE_T_MAX; + } + + return(cs->cset->numchars(cs, rec_field, rec_field + rec_field_len)); +} + +/** Avoid the clustered index lookup if all the following conditions +are true: +1) all columns are in secondary index +2) all values for columns that are prefix-only indexes are shorter +than the prefix size. This optimization can avoid many IOs for certain schemas. +@return true, to avoid clustered index lookup. */ +static +bool row_search_with_covering_prefix( + row_prebuilt_t* prebuilt, + const rec_t* rec, + const ulint* offsets) +{ + const dict_index_t* index = prebuilt->index; + ut_ad(!dict_index_is_clust(index)); + + if (!srv_prefix_index_cluster_optimization) { + return false; + } + + /** Optimization only applicable if there the number of secondary index + fields are greater than or equal to number of clustered index fields. */ + if (prebuilt->n_template > index->n_fields) { + return false; + } + + for (ulint i = 0; i < prebuilt->n_template; i++) { + mysql_row_templ_t* templ = prebuilt->mysql_template + i; + ulint j = templ->rec_prefix_field_no; + + /** Condition (1) : is the field in the index. */ + if (j == ULINT_UNDEFINED) { + return false; + } + + /** Condition (2): If this is a prefix index then + row's value size shorter than prefix length. 
*/ + + if (!templ->rec_field_is_prefix) { + continue; + } + + ulint rec_size = rec_offs_nth_size(offsets, j); + const dict_field_t* field = dict_index_get_nth_field(index, j); + ulint max_chars = field->prefix_len / templ->mbmaxlen; + + ut_a(field->prefix_len > 0); + + if (rec_size < max_chars) { + /* Record in bytes shorter than the index + prefix length in char. */ + continue; + } + + if (rec_size * templ->mbminlen >= field->prefix_len) { + /* Shortest representation string by the + byte length of the record is longer than the + maximum possible index prefix. */ + return false; + } + + size_t num_chars = rec_field_len_in_chars( + field->col, j, rec, offsets); + + if (num_chars >= max_chars) { + /* No of chars to store the record exceeds + the index prefix character length. */ + return false; + } + } + + /* If prefix index optimization condition satisfied then + for all columns above, use rec_prefix_field_no instead of + rec_field_no, and skip the clustered lookup below. */ + for (ulint i = 0; i < prebuilt->n_template; i++) { + mysql_row_templ_t* templ = prebuilt->mysql_template + i; + templ->rec_field_no = templ->rec_prefix_field_no; + ut_a(templ->rec_field_no != ULINT_UNDEFINED); + } + + srv_stats.n_sec_rec_cluster_reads_avoided.inc(); + return true; +} + /********************************************************************//** Searches for rows in the database. This is used in the interface to MySQL. This function opens a cursor, and also implements fetch next @@ -3729,7 +3843,6 @@ row_search_for_mysql( ulint* offsets = offsets_; ibool table_lock_waited = FALSE; byte* next_buf = 0; - ibool use_clustered_index = FALSE; rec_offs_init(offsets_); @@ -4790,69 +4903,10 @@ locks_ok: break; } - /* Get the clustered index record if needed, if we did not do the - search using the clustered index... 
*/ - - use_clustered_index = - (index != clust_index && prebuilt->need_to_access_clustered); - - if (use_clustered_index && srv_prefix_index_cluster_optimization - && prebuilt->n_template <= index->n_fields) { - /* ...but, perhaps avoid the clustered index lookup if - all of the following are true: - 1) all columns are in the secondary index - 2) all values for columns that are prefix-only - indexes are shorter than the prefix size - This optimization can avoid many IOs for certain schemas. - */ - ibool row_contains_all_values = TRUE; - int i; - for (i = 0; i < prebuilt->n_template; i++) { - /* Condition (1) from above: is the field in the - index (prefix or not)? */ - mysql_row_templ_t* templ = - prebuilt->mysql_template + i; - ulint secondary_index_field_no = - templ->rec_prefix_field_no; - if (secondary_index_field_no == ULINT_UNDEFINED) { - row_contains_all_values = FALSE; - break; - } - /* Condition (2) from above: if this is a - prefix, is this row's value size shorter - than the prefix? */ - if (templ->rec_field_is_prefix) { - ulint record_size = rec_offs_nth_size( - offsets, - secondary_index_field_no); - const dict_field_t *field = - dict_index_get_nth_field( - index, - secondary_index_field_no); - ut_a(field->prefix_len > 0); - if (record_size >= field->prefix_len) { - row_contains_all_values = FALSE; - break; - } - } + if (index != clust_index && prebuilt->need_to_access_clustered) { + if (row_search_with_covering_prefix(prebuilt, rec, offsets)) { + goto use_covering_index; } - /* If (1) and (2) were true for all columns above, use - rec_prefix_field_no instead of rec_field_no, and skip - the clustered lookup below. 
*/ - if (row_contains_all_values) { - for (i = 0; i < prebuilt->n_template; i++) { - mysql_row_templ_t* templ = - prebuilt->mysql_template + i; - templ->rec_field_no = - templ->rec_prefix_field_no; - ut_a(templ->rec_field_no != ULINT_UNDEFINED); - } - use_clustered_index = FALSE; - srv_stats.n_sec_rec_cluster_reads_avoided.inc(); - } - } - - if (use_clustered_index) { requires_clust_rec: ut_ad(index != clust_index); /* We use a 'goto' to the preceding label if a consistent @@ -4938,6 +4992,7 @@ requires_clust_rec: } } } else { +use_covering_index: result_rec = rec; } diff --git a/storage/xtradb/row/row0sel.cc b/storage/xtradb/row/row0sel.cc index b81ea60a413..97007c1107c 100644 --- a/storage/xtradb/row/row0sel.cc +++ b/storage/xtradb/row/row0sel.cc @@ -3685,6 +3685,117 @@ row_search_idx_cond_check( return(result); } +/** Return the record field length in characters. +@param[in] col table column of the field +@param[in] field_no field number +@param[in] rec physical record +@param[in] offsets field offsets in the physical record +@return field length in characters. */ +static +size_t +rec_field_len_in_chars( + const dict_col_t* col, + const ulint field_no, + const rec_t* rec, + const ulint* offsets) +{ + const ulint cset = dtype_get_charset_coll(col->prtype); + const CHARSET_INFO* cs = all_charsets[cset]; + ulint rec_field_len; + const char* rec_field = reinterpret_cast( + rec_get_nth_field( + rec, offsets, field_no, &rec_field_len)); + + if (UNIV_UNLIKELY(!cs)) { + ib_logf(IB_LOG_LEVEL_WARN, "Missing collation " ULINTPF, cset); + return SIZE_T_MAX; + } + + return(cs->cset->numchars(cs, rec_field, rec_field + rec_field_len)); +} + + +/** Avoid the clustered index lookup if all the following conditions +are true: +1) all columns are in secondary index +2) all values for columns that are prefix-only indexes are shorter +than the prefix size. This optimization can avoid many IOs for certain schemas. +@return true, to avoid clustered index lookup. 
*/ +static +bool row_search_with_covering_prefix( + row_prebuilt_t* prebuilt, + const rec_t* rec, + const ulint* offsets) +{ + const dict_index_t* index = prebuilt->index; + ut_ad(!dict_index_is_clust(index)); + + if (!srv_prefix_index_cluster_optimization) { + return false; + } + + /** Optimization only applicable if the number of secondary index + fields are greater than or equal to number of clustered index fields. */ + if (prebuilt->n_template > index->n_fields) { + return false; + } + + for (ulint i = 0; i < prebuilt->n_template; i++) { + mysql_row_templ_t* templ = prebuilt->mysql_template + i; + ulint j = templ->rec_prefix_field_no; + + /** Condition (1) : is the field in the index. */ + if (j == ULINT_UNDEFINED) { + return false; + } + + /** Condition (2): If this is a prefix index then + row's value size shorter than prefix length. */ + + if (!templ->rec_field_is_prefix) { + continue; + } + + ulint rec_size = rec_offs_nth_size(offsets, j); + const dict_field_t* field = dict_index_get_nth_field(index, j); + ulint max_chars = field->prefix_len / templ->mbmaxlen; + + ut_a(field->prefix_len > 0); + + if (rec_size < max_chars) { + /* Record in bytes shorter than the index + prefix length in char. */ + continue; + } + + if (rec_size * templ->mbminlen >= field->prefix_len) { + /* Shortest representation string by the + byte length of the record is longer than the + maximum possible index prefix. */ + return false; + } + + + size_t num_chars = rec_field_len_in_chars( + field->col, j, rec, offsets); + + if (num_chars >= max_chars) { + /* No of chars to store the record exceeds + the index prefix character length. 
*/ + return false; + } + } + + for (ulint i = 0; i < prebuilt->n_template; i++) { + mysql_row_templ_t* templ = prebuilt->mysql_template + i; + templ->rec_field_no = templ->rec_prefix_field_no; + ut_a(templ->rec_field_no != ULINT_UNDEFINED); + } + + srv_stats.n_sec_rec_cluster_reads_avoided.inc(); + return true; +} + /********************************************************************//** Searches for rows in the database. This is used in the interface to MySQL. This function opens a cursor, and also implements fetch next @@ -3748,7 +3859,6 @@ row_search_for_mysql( ulint* offsets = offsets_; ibool table_lock_waited = FALSE; byte* next_buf = 0; - bool use_clustered_index = false; rec_offs_init(offsets_); @@ -4810,71 +4920,10 @@ locks_ok: break; } - /* Get the clustered index record if needed, if we did not do the - search using the clustered index... */ - - use_clustered_index = - (index != clust_index && prebuilt->need_to_access_clustered); - - if (use_clustered_index && srv_prefix_index_cluster_optimization - && prebuilt->n_template <= index->n_fields) { - /* ...but, perhaps avoid the clustered index lookup if - all of the following are true: - 1) all columns are in the secondary index - 2) all values for columns that are prefix-only - indexes are shorter than the prefix size - This optimization can avoid many IOs for certain schemas. - */ - bool row_contains_all_values = true; - unsigned int i; - for (i = 0; i < prebuilt->n_template; i++) { - /* Condition (1) from above: is the field in the - index (prefix or not)? */ - const mysql_row_templ_t* templ = - prebuilt->mysql_template + i; - ulint secondary_index_field_no = - templ->rec_prefix_field_no; - if (secondary_index_field_no == ULINT_UNDEFINED) { - row_contains_all_values = false; - break; - } - /* Condition (2) from above: if this is a - prefix, is this row's value size shorter - than the prefix? 
*/ - if (templ->rec_field_is_prefix) { - ulint record_size = rec_offs_nth_size( - offsets, - secondary_index_field_no); - const dict_field_t *field = - dict_index_get_nth_field( - index, - secondary_index_field_no); - ut_a(field->prefix_len > 0); - if (record_size >= field->prefix_len - / templ->mbmaxlen) { - row_contains_all_values = false; - break; - } - } + if (index != clust_index && prebuilt->need_to_access_clustered) { + if (row_search_with_covering_prefix(prebuilt, rec, offsets)) { + goto use_covering_index; } - /* If (1) and (2) were true for all columns above, use - rec_prefix_field_no instead of rec_field_no, and skip - the clustered lookup below. */ - if (row_contains_all_values) { - for (i = 0; i < prebuilt->n_template; i++) { - mysql_row_templ_t* templ = - prebuilt->mysql_template + i; - templ->rec_field_no = - templ->rec_prefix_field_no; - ut_a(templ->rec_field_no != ULINT_UNDEFINED); - } - use_clustered_index = false; - srv_stats.n_sec_rec_cluster_reads_avoided.inc(); - } - } - - if (use_clustered_index) { - requires_clust_rec: ut_ad(index != clust_index); /* We use a 'goto' to the preceding label if a consistent @@ -4960,6 +5009,7 @@ requires_clust_rec: } } } else { +use_covering_index: result_rec = rec; } From 6247c64c2aac8143f436456ca5f8d33da2b0d365 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Mon, 19 Mar 2018 13:11:54 +0200 Subject: [PATCH 089/139] MDEV-12396 IMPORT TABLESPACE cleanup Reduce unnecessary inter-module calls for IMPORT TABLESPACE. Move some IMPORT-related code from fil0fil.cc to row0import.cc. PageCallback: Remove. Make AbstractCallback the base class. PageConverter: Define some member functions inline. 
--- storage/innobase/fil/fil0fil.cc | 485 +-------------------------- storage/innobase/include/fil0fil.h | 101 ------ storage/innobase/row/row0import.cc | 507 +++++++++++++++++++++++++++- storage/xtradb/fil/fil0fil.cc | 485 +-------------------------- storage/xtradb/include/fil0fil.h | 101 ------ storage/xtradb/row/row0import.cc | 512 ++++++++++++++++++++++++++++- 6 files changed, 1008 insertions(+), 1183 deletions(-) diff --git a/storage/innobase/fil/fil0fil.cc b/storage/innobase/fil/fil0fil.cc index 0cdbee09548..738afe4ab86 100644 --- a/storage/innobase/fil/fil0fil.cc +++ b/storage/innobase/fil/fil0fil.cc @@ -25,8 +25,6 @@ Created 10/25/1995 Heikki Tuuri *******************************************************/ #include "fil0fil.h" -#include "fil0pagecompress.h" -#include "fsp0pagecompress.h" #include "fil0crypt.h" #include @@ -49,12 +47,10 @@ Created 10/25/1995 Heikki Tuuri #include "page0zip.h" #include "trx0sys.h" #include "row0mysql.h" -#include "os0file.h" #ifndef UNIV_HOTBACKUP # include "buf0lru.h" # include "ibuf0ibuf.h" # include "sync0sync.h" -# include "os0sync.h" #else /* !UNIV_HOTBACKUP */ # include "srv0srv.h" static ulint srv_data_read, srv_data_written; @@ -696,7 +692,7 @@ add_size: space->size += node->size; } - ulint atomic_writes = fsp_flags_get_atomic_writes(space->flags); + ulint atomic_writes = FSP_FLAGS_GET_ATOMIC_WRITES(space->flags); /* printf("Opening file %s\n", node->name); */ @@ -3921,7 +3917,6 @@ fil_open_single_table_tablespace( fsp_open_info remote; ulint tablespaces_found = 0; ulint valid_tablespaces_found = 0; - ulint atomic_writes = 0; fil_space_crypt_t* crypt_data = NULL; #ifdef UNIV_SYNC_DEBUG @@ -3936,7 +3931,7 @@ fil_open_single_table_tablespace( } ut_ad(fsp_flags_is_valid(flags & ~FSP_FLAGS_MEM_MASK, id)); - atomic_writes = fsp_flags_get_atomic_writes(flags); + const ulint atomic_writes = FSP_FLAGS_GET_ATOMIC_WRITES(flags); memset(&def, 0, sizeof(def)); memset(&dict, 0, sizeof(dict)); @@ -5841,7 +5836,8 @@ fil_io( } else if 
(type == OS_FILE_WRITE) { ut_ad(!srv_read_only_mode); srv_stats.data_written.add(len); - if (fil_page_is_index_page((byte *)buf)) { + if (mach_read_from_2(static_cast(buf) + + FIL_PAGE_TYPE) == FIL_PAGE_INDEX) { srv_stats.index_pages_written.inc(); } else { srv_stats.non_index_pages_written.inc(); @@ -6338,479 +6334,6 @@ fil_close(void) fil_system = NULL; } -/********************************************************************//** -Initializes a buffer control block when the buf_pool is created. */ -static -void -fil_buf_block_init( -/*===============*/ - buf_block_t* block, /*!< in: pointer to control block */ - byte* frame) /*!< in: pointer to buffer frame */ -{ - UNIV_MEM_DESC(frame, UNIV_PAGE_SIZE); - - block->frame = frame; - - block->page.io_fix = BUF_IO_NONE; - /* There are assertions that check for this. */ - block->page.buf_fix_count = 1; - block->page.state = BUF_BLOCK_READY_FOR_USE; - - page_zip_des_init(&block->page.zip); -} - -struct fil_iterator_t { - pfs_os_file_t file; /*!< File handle */ - const char* filepath; /*!< File path name */ - os_offset_t start; /*!< From where to start */ - os_offset_t end; /*!< Where to stop */ - os_offset_t file_size; /*!< File size in bytes */ - ulint page_size; /*!< Page size */ - ulint n_io_buffers; /*!< Number of pages to use - for IO */ - byte* io_buffer; /*!< Buffer to use for IO */ - fil_space_crypt_t *crypt_data; /*!< Crypt data (if encrypted) */ - byte* crypt_io_buffer; /*!< IO buffer when encrypted */ -}; - -/********************************************************************//** -TODO: This can be made parallel trivially by chunking up the file and creating -a callback per thread. . Main benefit will be to use multiple CPUs for -checksums and compressed tables. We have to do compressed tables block by -block right now. Secondly we need to decompress/compress and copy too much -of data. These are CPU intensive. - -Iterate over all the pages in the tablespace. 
-@param iter - Tablespace iterator -@param block - block to use for IO -@param callback - Callback to inspect and update page contents -@retval DB_SUCCESS or error code */ -static -dberr_t -fil_iterate( -/*========*/ - const fil_iterator_t& iter, - buf_block_t* block, - PageCallback& callback) -{ - os_offset_t offset; - ulint page_no = 0; - ulint space_id = callback.get_space_id(); - ulint n_bytes = iter.n_io_buffers * iter.page_size; - - ut_ad(!srv_read_only_mode); - - /* TODO: For compressed tables we do a lot of useless - copying for non-index pages. Unfortunately, it is - required by buf_zip_decompress() */ - const bool row_compressed = callback.get_zip_size() > 0; - - for (offset = iter.start; offset < iter.end; offset += n_bytes) { - - byte* io_buffer = iter.io_buffer; - - block->frame = io_buffer; - - if (row_compressed) { - page_zip_des_init(&block->page.zip); - page_zip_set_size(&block->page.zip, iter.page_size); - block->page.zip.data = block->frame + UNIV_PAGE_SIZE; - ut_d(block->page.zip.m_external = true); - ut_ad(iter.page_size == callback.get_zip_size()); - - /* Zip IO is done in the compressed page buffer. */ - io_buffer = block->page.zip.data; - } - - /* We have to read the exact number of bytes. Otherwise the - InnoDB IO functions croak on failed reads. */ - - n_bytes = static_cast( - ut_min(static_cast(n_bytes), - iter.end - offset)); - - ut_ad(n_bytes > 0); - ut_ad(!(n_bytes % iter.page_size)); - - const bool encrypted = iter.crypt_data != NULL - && iter.crypt_data->should_encrypt(); - /* Use additional crypt io buffer if tablespace is encrypted */ - byte* const readptr = encrypted - ? 
iter.crypt_io_buffer : io_buffer; - byte* const writeptr = readptr; - - if (!os_file_read(iter.file, readptr, offset, (ulint) n_bytes)) { - - ib_logf(IB_LOG_LEVEL_ERROR, "os_file_read() failed"); - - return(DB_IO_ERROR); - } - - bool updated = false; - os_offset_t page_off = offset; - ulint n_pages_read = (ulint) n_bytes / iter.page_size; - bool decrypted = false; - - for (ulint i = 0; i < n_pages_read; ++i) { - ulint size = iter.page_size; - dberr_t err = DB_SUCCESS; - byte* src = readptr + (i * size); - byte* dst = io_buffer + (i * size); - bool frame_changed = false; - - ulint page_type = mach_read_from_2(src+FIL_PAGE_TYPE); - - const bool page_compressed - = page_type == FIL_PAGE_PAGE_COMPRESSED_ENCRYPTED - || page_type == FIL_PAGE_PAGE_COMPRESSED; - - /* If tablespace is encrypted, we need to decrypt - the page. Note that tablespaces are not in - fil_system during import. */ - if (encrypted) { - decrypted = fil_space_decrypt( - iter.crypt_data, - dst, //dst - iter.page_size, - src, // src - &err); // src - - if (err != DB_SUCCESS) { - return(err); - } - - if (decrypted) { - updated = true; - } else { - if (!page_compressed && !row_compressed) { - block->frame = src; - frame_changed = true; - } else { - memcpy(dst, src, size); - } - } - } - - /* If the original page is page_compressed, we need - to decompress page before we can update it. */ - if (page_compressed) { - fil_decompress_page(NULL, dst, ulong(size), - NULL); - updated = true; - } - - buf_block_set_file_page(block, space_id, page_no++); - - if ((err = callback(page_off, block)) != DB_SUCCESS) { - - return(err); - - } else if (!updated) { - updated = buf_block_get_state(block) - == BUF_BLOCK_FILE_PAGE; - } - - buf_block_set_state(block, BUF_BLOCK_NOT_USED); - buf_block_set_state(block, BUF_BLOCK_READY_FOR_USE); - - /* If tablespace is encrypted we use additional - temporary scratch area where pages are read - for decrypting readptr == crypt_io_buffer != io_buffer. 
- - Destination for decryption is a buffer pool block - block->frame == dst == io_buffer that is updated. - Pages that did not require decryption even when - tablespace is marked as encrypted are not copied - instead block->frame is set to src == readptr. - - For encryption we again use temporary scratch area - writeptr != io_buffer == dst - that is then written to the tablespace - - (1) For normal tables io_buffer == dst == writeptr - (2) For only page compressed tables - io_buffer == dst == writeptr - (3) For encrypted (and page compressed) - readptr != io_buffer == dst != writeptr - */ - - ut_ad(!encrypted && !page_compressed ? - src == dst && dst == writeptr + (i * size):1); - ut_ad(page_compressed && !encrypted ? - src == dst && dst == writeptr + (i * size):1); - ut_ad(encrypted ? - src != dst && dst != writeptr + (i * size):1); - - if (encrypted) { - memcpy(writeptr + (i * size), - row_compressed ? block->page.zip.data : - block->frame, size); - } - - if (frame_changed) { - block->frame = dst; - } - - src = io_buffer + (i * size); - - if (page_compressed) { - ulint len = 0; - - fil_compress_page( - NULL, - src, - NULL, - size, - 0,/* FIXME: compression level */ - 512,/* FIXME: use proper block size */ - encrypted, - &len); - - updated = true; - } - - /* If tablespace is encrypted, encrypt page before we - write it back. Note that we should not encrypt the - buffer that is in buffer pool. */ - /* NOTE: At this stage of IMPORT the - buffer pool is not being used at all! */ - if (decrypted && encrypted) { - byte *dest = writeptr + (i * size); - ulint space = mach_read_from_4( - src + FIL_PAGE_ARCH_LOG_NO_OR_SPACE_ID); - ulint offset = mach_read_from_4(src + FIL_PAGE_OFFSET); - ib_uint64_t lsn = mach_read_from_8(src + FIL_PAGE_LSN); - - byte* tmp = fil_encrypt_buf( - iter.crypt_data, - space, - offset, - lsn, - src, - iter.page_size == UNIV_PAGE_SIZE ? 
0 : iter.page_size, - dest); - - if (tmp == src) { - /* TODO: remove unnecessary memcpy's */ - memcpy(dest, src, size); - } - - updated = true; - } - - page_off += iter.page_size; - block->frame += iter.page_size; - } - - /* A page was updated in the set, write back to disk. */ - if (updated - && !os_file_write( - iter.filepath, iter.file, writeptr, - offset, (ulint) n_bytes)) { - - ib_logf(IB_LOG_LEVEL_ERROR, "os_file_write() failed"); - - return(DB_IO_ERROR); - } - } - - return(DB_SUCCESS); -} - -/********************************************************************//** -Iterate over all the pages in the tablespace. -@param table - the table definiton in the server -@param n_io_buffers - number of blocks to read and write together -@param callback - functor that will do the page updates -@return DB_SUCCESS or error code */ -UNIV_INTERN -dberr_t -fil_tablespace_iterate( -/*===================*/ - dict_table_t* table, - ulint n_io_buffers, - PageCallback& callback) -{ - dberr_t err; - pfs_os_file_t file; - char* filepath; - - ut_a(n_io_buffers > 0); - ut_ad(!srv_read_only_mode); - - DBUG_EXECUTE_IF("ib_import_trigger_corruption_1", - return(DB_CORRUPTION);); - - if (DICT_TF_HAS_DATA_DIR(table->flags)) { - dict_get_and_save_data_dir_path(table, false); - ut_a(table->data_dir_path); - - filepath = os_file_make_remote_pathname( - table->data_dir_path, table->name, "ibd"); - } else { - filepath = fil_make_ibd_name(table->name, false); - } - - { - ibool success; - - file = os_file_create_simple_no_error_handling( - innodb_file_data_key, filepath, - OS_FILE_OPEN, OS_FILE_READ_WRITE, &success, FALSE); - - DBUG_EXECUTE_IF("fil_tablespace_iterate_failure", - { - static bool once; - - if (!once || ut_rnd_interval(0, 10) == 5) { - once = true; - success = FALSE; - os_file_close(file); - } - }); - - if (!success) { - /* The following call prints an error message */ - os_file_get_last_error(true); - - ib_logf(IB_LOG_LEVEL_ERROR, - "Trying to import a tablespace, but could not " 
- "open the tablespace file %s", filepath); - - mem_free(filepath); - - return(DB_TABLESPACE_NOT_FOUND); - - } else { - err = DB_SUCCESS; - } - } - - callback.set_file(filepath, file); - - os_offset_t file_size = os_file_get_size(file); - ut_a(file_size != (os_offset_t) -1); - - /* The block we will use for every physical page */ - buf_block_t block; - - memset(&block, 0x0, sizeof(block)); - - /* Allocate a page to read in the tablespace header, so that we - can determine the page size and zip_size (if it is compressed). - We allocate an extra page in case it is a compressed table. One - page is to ensure alignement. */ - - void* page_ptr = mem_alloc(3 * UNIV_PAGE_SIZE); - byte* page = static_cast(ut_align(page_ptr, UNIV_PAGE_SIZE)); - - fil_buf_block_init(&block, page); - - /* Read the first page and determine the page and zip size. */ - - if (!os_file_read(file, page, 0, UNIV_PAGE_SIZE)) { - - err = DB_IO_ERROR; - - } else if ((err = callback.init(file_size, &block)) == DB_SUCCESS) { - fil_iterator_t iter; - - iter.file = file; - iter.start = 0; - iter.end = file_size; - iter.filepath = filepath; - iter.file_size = file_size; - iter.n_io_buffers = n_io_buffers; - iter.page_size = callback.get_page_size(); - - /* In MariaDB/MySQL 5.6 tablespace does not exist - during import, therefore we can't use space directly - here. */ - ulint crypt_data_offset = fsp_header_get_crypt_offset( - callback.get_zip_size()); - - /* read (optional) crypt data */ - iter.crypt_data = fil_space_read_crypt_data( - 0, page, crypt_data_offset); - - /* Compressed pages can't be optimised for block IO for now. - We do the IMPORT page by page. 
*/ - - if (callback.get_zip_size() > 0) { - iter.n_io_buffers = 1; - ut_a(iter.page_size == callback.get_zip_size()); - } - - /** If tablespace is encrypted, it needs extra buffers */ - if (iter.crypt_data != NULL) { - /* decrease io buffers so that memory - * consumption doesnt double - * note: the +1 is to avoid n_io_buffers getting down to 0 */ - iter.n_io_buffers = (iter.n_io_buffers + 1) / 2; - } - - /** Add an extra page for compressed page scratch area. */ - - void* io_buffer = mem_alloc( - (2 + iter.n_io_buffers) * UNIV_PAGE_SIZE); - - iter.io_buffer = static_cast( - ut_align(io_buffer, UNIV_PAGE_SIZE)); - - void* crypt_io_buffer = NULL; - if (iter.crypt_data != NULL) { - crypt_io_buffer = mem_alloc( - (2 + iter.n_io_buffers) * UNIV_PAGE_SIZE); - iter.crypt_io_buffer = static_cast( - ut_align(crypt_io_buffer, UNIV_PAGE_SIZE)); - } - - err = fil_iterate(iter, &block, callback); - - mem_free(io_buffer); - - if (crypt_io_buffer != NULL) { - mem_free(crypt_io_buffer); - iter.crypt_io_buffer = NULL; - fil_space_destroy_crypt_data(&iter.crypt_data); - } - } - - if (err == DB_SUCCESS) { - - ib_logf(IB_LOG_LEVEL_INFO, "Sync to disk"); - - if (!os_file_flush(file)) { - ib_logf(IB_LOG_LEVEL_INFO, "os_file_flush() failed!"); - err = DB_IO_ERROR; - } else { - ib_logf(IB_LOG_LEVEL_INFO, "Sync to disk - done!"); - } - } - - os_file_close(file); - - mem_free(page_ptr); - mem_free(filepath); - - return(err); -} - -/** -Set the tablespace compressed table size. -@return DB_SUCCESS if it is valie or DB_CORRUPTION if not */ -dberr_t -PageCallback::set_zip_size(const buf_frame_t* page) UNIV_NOTHROW -{ - m_zip_size = fsp_header_get_zip_size(page); - - if (!ut_is_2pow(m_zip_size) || m_zip_size > UNIV_ZIP_SIZE_MAX) { - return(DB_CORRUPTION); - } - - return(DB_SUCCESS); -} - /********************************************************************//** Delete the tablespace file and any related files like .cfg. This should not be called for temporary tables. 
*/ diff --git a/storage/innobase/include/fil0fil.h b/storage/innobase/include/fil0fil.h index 312b09e1f2d..f658f273136 100644 --- a/storage/innobase/include/fil0fil.h +++ b/storage/innobase/include/fil0fil.h @@ -1292,107 +1292,6 @@ fil_delete_file( /*============*/ const char* path); /*!< in: filepath of the ibd tablespace */ -/** Callback functor. */ -struct PageCallback { - - /** - Default constructor */ - PageCallback() - : - m_zip_size(), - m_page_size(), - m_filepath() UNIV_NOTHROW {} - - virtual ~PageCallback() UNIV_NOTHROW {} - - /** - Called for page 0 in the tablespace file at the start. - @param file_size - size of the file in bytes - @param block - contents of the first page in the tablespace file - @retval DB_SUCCESS or error code.*/ - virtual dberr_t init( - os_offset_t file_size, - const buf_block_t* block) UNIV_NOTHROW = 0; - - /** - Called for every page in the tablespace. If the page was not - updated then its state must be set to BUF_PAGE_NOT_USED. For - compressed tables the page descriptor memory will be at offset: - block->frame + UNIV_PAGE_SIZE; - @param offset - physical offset within the file - @param block - block read from file, note it is not from the buffer pool - @retval DB_SUCCESS or error code. */ - virtual dberr_t operator()( - os_offset_t offset, - buf_block_t* block) UNIV_NOTHROW = 0; - - /** - Set the name of the physical file and the file handle that is used - to open it for the file that is being iterated over. - @param filename - then physical name of the tablespace file. - @param file - OS file handle */ - void set_file(const char* filename, pfs_os_file_t file) UNIV_NOTHROW - { - m_file = file; - m_filepath = filename; - } - - /** - @return the space id of the tablespace */ - virtual ulint get_space_id() const UNIV_NOTHROW = 0; - - /** The compressed page size - @return the compressed page size */ - ulint get_zip_size() const - { - return(m_zip_size); - } - - /** - Set the tablespace compressed table size. 
- @return DB_SUCCESS if it is valie or DB_CORRUPTION if not */ - dberr_t set_zip_size(const buf_frame_t* page) UNIV_NOTHROW; - - /** The compressed page size - @return the compressed page size */ - ulint get_page_size() const - { - return(m_page_size); - } - - /** Compressed table page size */ - ulint m_zip_size; - - /** The tablespace page size. */ - ulint m_page_size; - - /** File handle to the tablespace */ - pfs_os_file_t m_file; - - /** Physical file path. */ - const char* m_filepath; - -protected: - // Disable copying - PageCallback(const PageCallback&); - PageCallback& operator=(const PageCallback&); -}; - -/********************************************************************//** -Iterate over all the pages in the tablespace. -@param table - the table definiton in the server -@param n_io_buffers - number of blocks to read and write together -@param callback - functor that will do the page updates -@return DB_SUCCESS or error code */ -UNIV_INTERN -dberr_t -fil_tablespace_iterate( -/*===================*/ - dict_table_t* table, - ulint n_io_buffers, - PageCallback& callback) - MY_ATTRIBUTE((nonnull, warn_unused_result)); - /*******************************************************************//** Checks if a single-table tablespace for a given table name exists in the tablespace memory cache. diff --git a/storage/innobase/row/row0import.cc b/storage/innobase/row/row0import.cc index 020a814c4eb..b8e8e076b68 100644 --- a/storage/innobase/row/row0import.cc +++ b/storage/innobase/row/row0import.cc @@ -40,6 +40,7 @@ Created 2012-02-08 by Sunny Bains. #include "row0mysql.h" #include "srv0start.h" #include "row0quiesce.h" +#include "fil0pagecompress.h" #include @@ -361,7 +362,8 @@ private: /** Functor that is called for each physical page that is read from the tablespace file. 
*/ -class AbstractCallback : public PageCallback { +class AbstractCallback +{ public: /** Constructor @param trx - covering transaction */ @@ -394,6 +396,47 @@ public: return(get_zip_size() > 0); } + /** + Set the name of the physical file and the file handle that is used + to open it for the file that is being iterated over. + @param filename - then physical name of the tablespace file. + @param file - OS file handle */ + void set_file(const char* filename, pfs_os_file_t file) UNIV_NOTHROW + { + m_file = file; + m_filepath = filename; + } + + /** The compressed page size + @return the compressed page size */ + ulint get_zip_size() const + { + return(m_zip_size); + } + + /** The compressed page size + @return the compressed page size */ + ulint get_page_size() const + { + return(m_page_size); + } + + /** + Called for every page in the tablespace. If the page was not + updated then its state must be set to BUF_PAGE_NOT_USED. For + compressed tables the page descriptor memory will be at offset: + block->frame + UNIV_PAGE_SIZE; + @param offset - physical offset within the file + @param block - block read from file, note it is not from the buffer pool + @retval DB_SUCCESS or error code. */ + virtual dberr_t operator()( + os_offset_t offset, + buf_block_t* block) UNIV_NOTHROW = 0; + + /** + @return the space id of the tablespace */ + virtual ulint get_space_id() const UNIV_NOTHROW = 0; + protected: /** Get the data page depending on the table type, compressed or not. @@ -509,6 +552,18 @@ protected: } protected: + /** Compressed table page size */ + ulint m_zip_size; + + /** The tablespace page size. */ + ulint m_page_size; + + /** File handle to the tablespace */ + pfs_os_file_t m_file; + + /** Physical file path. */ + const char* m_filepath; + /** Covering transaction. */ trx_t* m_trx; @@ -565,9 +620,9 @@ AbstractCallback::init( /* Since we don't know whether it is a compressed table or not, the data is always read into the block->frame. 
*/ - dberr_t err = set_zip_size(block->frame); + m_zip_size = fsp_header_get_zip_size(page); - if (err != DB_SUCCESS) { + if (!ut_is_2pow(m_zip_size) || m_zip_size > UNIV_ZIP_SIZE_MAX) { return(DB_CORRUPTION); } @@ -604,11 +659,7 @@ AbstractCallback::init( m_free_limit = mach_read_from_4(page + FSP_FREE_LIMIT); m_space = mach_read_from_4(page + FSP_HEADER_OFFSET + FSP_SPACE_ID); - if ((err = set_current_xdes(0, page)) != DB_SUCCESS) { - return(err); - } - - return(DB_SUCCESS); + return set_current_xdes(0, page); } /** @@ -1596,6 +1647,7 @@ IndexPurge::purge() UNIV_NOTHROW Constructor * @param cfg - config of table being imported. * @param trx - transaction covering the import */ +inline PageConverter::PageConverter( row_import* cfg, trx_t* trx) @@ -1620,6 +1672,7 @@ Adjust the BLOB reference for a single column that is externally stored @param offsets - column offsets for the record @param i - column ordinal value @return DB_SUCCESS or error code */ +inline dberr_t PageConverter::adjust_cluster_index_blob_column( rec_t* rec, @@ -1672,6 +1725,7 @@ stored columns. @param rec - record to update @param offsets - column offsets for the record @return DB_SUCCESS or error code */ +inline dberr_t PageConverter::adjust_cluster_index_blob_columns( rec_t* rec, @@ -1705,6 +1759,7 @@ BLOB reference, write the new space id. @param rec - record to update @param offsets - column offsets for the record @return DB_SUCCESS or error code */ +inline dberr_t PageConverter::adjust_cluster_index_blob_ref( rec_t* rec, @@ -1728,6 +1783,7 @@ Purge delete-marked records, only if it is possible to do so without re-organising the B+tree. @param offsets - current row offsets. @return true if purge succeeded */ +inline bool PageConverter::purge(const ulint* offsets) UNIV_NOTHROW { @@ -1752,6 +1808,7 @@ Adjust the BLOB references and sys fields for the current record. @param offsets - column offsets for the record @param deleted - true if row is delete marked @return DB_SUCCESS or error code. 
*/ +inline dberr_t PageConverter::adjust_cluster_record( const dict_index_t* index, @@ -1780,6 +1837,7 @@ Update the BLOB refrences and write UNDO log entries for rows that can't be purged optimistically. @param block - block to update @retval DB_SUCCESS or error code */ +inline dberr_t PageConverter::update_records( buf_block_t* block) UNIV_NOTHROW @@ -1845,6 +1903,7 @@ PageConverter::update_records( /** Update the space, index id, trx id. @return DB_SUCCESS or error code */ +inline dberr_t PageConverter::update_index_page( buf_block_t* block) UNIV_NOTHROW @@ -1914,6 +1973,7 @@ PageConverter::update_index_page( Validate the space flags and update tablespace header page. @param block - block read from file, not from the buffer pool. @retval DB_SUCCESS or error code */ +inline dberr_t PageConverter::update_header( buf_block_t* block) UNIV_NOTHROW @@ -1953,6 +2013,7 @@ PageConverter::update_header( Update the page, set the space id, max trx id and index id. @param block - block read from file @retval DB_SUCCESS or error code */ +inline dberr_t PageConverter::update_page( buf_block_t* block, @@ -3423,6 +3484,436 @@ row_import_update_discarded_flag( return(err); } +struct fil_iterator_t { + pfs_os_file_t file; /*!< File handle */ + const char* filepath; /*!< File path name */ + os_offset_t start; /*!< From where to start */ + os_offset_t end; /*!< Where to stop */ + os_offset_t file_size; /*!< File size in bytes */ + ulint page_size; /*!< Page size */ + ulint n_io_buffers; /*!< Number of pages to use + for IO */ + byte* io_buffer; /*!< Buffer to use for IO */ + fil_space_crypt_t *crypt_data; /*!< Crypt data (if encrypted) */ + byte* crypt_io_buffer; /*!< IO buffer when encrypted */ +}; + +/********************************************************************//** +TODO: This can be made parallel trivially by chunking up the file and creating +a callback per thread. . Main benefit will be to use multiple CPUs for +checksums and compressed tables. 
We have to do compressed tables block by +block right now. Secondly we need to decompress/compress and copy too much +of data. These are CPU intensive. + +Iterate over all the pages in the tablespace. +@param iter - Tablespace iterator +@param block - block to use for IO +@param callback - Callback to inspect and update page contents +@retval DB_SUCCESS or error code */ +static +dberr_t +fil_iterate( +/*========*/ + const fil_iterator_t& iter, + buf_block_t* block, + AbstractCallback& callback) +{ + os_offset_t offset; + ulint page_no = 0; + ulint space_id = callback.get_space_id(); + ulint n_bytes = iter.n_io_buffers * iter.page_size; + + ut_ad(!srv_read_only_mode); + + /* TODO: For compressed tables we do a lot of useless + copying for non-index pages. Unfortunately, it is + required by buf_zip_decompress() */ + const bool row_compressed = callback.get_zip_size() > 0; + + for (offset = iter.start; offset < iter.end; offset += n_bytes) { + + byte* io_buffer = iter.io_buffer; + + block->frame = io_buffer; + + if (row_compressed) { + page_zip_des_init(&block->page.zip); + page_zip_set_size(&block->page.zip, iter.page_size); + block->page.zip.data = block->frame + UNIV_PAGE_SIZE; + ut_d(block->page.zip.m_external = true); + ut_ad(iter.page_size == callback.get_zip_size()); + + /* Zip IO is done in the compressed page buffer. */ + io_buffer = block->page.zip.data; + } + + /* We have to read the exact number of bytes. Otherwise the + InnoDB IO functions croak on failed reads. */ + + n_bytes = ulint(ut_min(os_offset_t(n_bytes), + iter.end - offset)); + + ut_ad(n_bytes > 0); + ut_ad(!(n_bytes % iter.page_size)); + + const bool encrypted = iter.crypt_data != NULL + && iter.crypt_data->should_encrypt(); + /* Use additional crypt io buffer if tablespace is encrypted */ + byte* const readptr = encrypted + ? 
iter.crypt_io_buffer : io_buffer; + byte* const writeptr = readptr; + + if (!os_file_read(iter.file, readptr, offset, n_bytes)) { + ib_logf(IB_LOG_LEVEL_ERROR, "os_file_read() failed"); + return DB_IO_ERROR; + } + + bool updated = false; + os_offset_t page_off = offset; + ulint n_pages_read = (ulint) n_bytes / iter.page_size; + bool decrypted = false; + + for (ulint i = 0; i < n_pages_read; ++i) { + ulint size = iter.page_size; + dberr_t err = DB_SUCCESS; + byte* src = readptr + (i * size); + byte* dst = io_buffer + (i * size); + bool frame_changed = false; + + ulint page_type = mach_read_from_2(src+FIL_PAGE_TYPE); + + const bool page_compressed + = page_type == FIL_PAGE_PAGE_COMPRESSED_ENCRYPTED + || page_type == FIL_PAGE_PAGE_COMPRESSED; + + /* If tablespace is encrypted, we need to decrypt + the page. Note that tablespaces are not in + fil_system during import. */ + if (encrypted) { + decrypted = fil_space_decrypt( + iter.crypt_data, + dst, //dst + iter.page_size, + src, // src + &err); + + if (err != DB_SUCCESS) { + return err; + } + + if (decrypted) { + updated = true; + } else { + if (!page_compressed && !row_compressed) { + block->frame = src; + frame_changed = true; + } else { + memcpy(dst, src, size); + } + } + } + + /* If the original page is page_compressed, we need + to decompress it before adjusting further. */ + if (page_compressed) { + fil_decompress_page(NULL, dst, ulong(size), + NULL); + updated = true; + } + + buf_block_set_file_page(block, space_id, page_no++); + + if ((err = callback(page_off, block)) != DB_SUCCESS) { + return err; + } else if (!updated) { + updated = buf_block_get_state(block) + == BUF_BLOCK_FILE_PAGE; + } + + /* If tablespace is encrypted we use additional + temporary scratch area where pages are read + for decrypting readptr == crypt_io_buffer != io_buffer. + + Destination for decryption is a buffer pool block + block->frame == dst == io_buffer that is updated. 
+ Pages that did not require decryption even when + tablespace is marked as encrypted are not copied + instead block->frame is set to src == readptr. + + For encryption we again use temporary scratch area + writeptr != io_buffer == dst + that is then written to the tablespace + + (1) For normal tables io_buffer == dst == writeptr + (2) For only page compressed tables + io_buffer == dst == writeptr + (3) For encrypted (and page compressed) + readptr != io_buffer == dst != writeptr + */ + + ut_ad(!encrypted && !page_compressed ? + src == dst && dst == writeptr + (i * size):1); + ut_ad(page_compressed && !encrypted ? + src == dst && dst == writeptr + (i * size):1); + ut_ad(encrypted ? + src != dst && dst != writeptr + (i * size):1); + + if (encrypted) { + memcpy(writeptr + (i * size), + row_compressed ? block->page.zip.data : + block->frame, size); + } + + if (frame_changed) { + block->frame = dst; + } + + src = io_buffer + (i * size); + + if (page_compressed) { + ulint len = 0; + + fil_compress_page( + NULL, + src, + NULL, + size, + 0,/* FIXME: compression level */ + 512,/* FIXME: use proper block size */ + encrypted, + &len); + + updated = true; + } + + /* If tablespace is encrypted, encrypt page before we + write it back. Note that we should not encrypt the + buffer that is in buffer pool. */ + /* NOTE: At this stage of IMPORT the + buffer pool is not being used at all! */ + if (decrypted && encrypted) { + byte *dest = writeptr + (i * size); + ulint space = mach_read_from_4( + src + FIL_PAGE_ARCH_LOG_NO_OR_SPACE_ID); + ulint offset = mach_read_from_4(src + FIL_PAGE_OFFSET); + ib_uint64_t lsn = mach_read_from_8(src + FIL_PAGE_LSN); + + byte* tmp = fil_encrypt_buf( + iter.crypt_data, + space, + offset, + lsn, + src, + iter.page_size == UNIV_PAGE_SIZE ? 
0 : iter.page_size, + dest); + + if (tmp == src) { + /* TODO: remove unnecessary memcpy's */ + memcpy(dest, src, size); + } + + updated = true; + } + + page_off += iter.page_size; + block->frame += iter.page_size; + } + + /* A page was updated in the set, write back to disk. */ + if (updated + && !os_file_write( + iter.filepath, iter.file, writeptr, + offset, (ulint) n_bytes)) { + + ib_logf(IB_LOG_LEVEL_ERROR, "os_file_write() failed"); + return DB_IO_ERROR; + } + } + + return DB_SUCCESS; +} + +/********************************************************************//** +Iterate over all the pages in the tablespace. +@param table - the table definiton in the server +@param n_io_buffers - number of blocks to read and write together +@param callback - functor that will do the page updates +@return DB_SUCCESS or error code */ +static +dberr_t +fil_tablespace_iterate( +/*===================*/ + dict_table_t* table, + ulint n_io_buffers, + AbstractCallback& callback) +{ + dberr_t err; + pfs_os_file_t file; + char* filepath; + + ut_a(n_io_buffers > 0); + ut_ad(!srv_read_only_mode); + + DBUG_EXECUTE_IF("ib_import_trigger_corruption_1", + return(DB_CORRUPTION);); + + if (DICT_TF_HAS_DATA_DIR(table->flags)) { + dict_get_and_save_data_dir_path(table, false); + ut_a(table->data_dir_path); + + filepath = os_file_make_remote_pathname( + table->data_dir_path, table->name, "ibd"); + } else { + filepath = fil_make_ibd_name(table->name, false); + } + + { + ibool success; + + file = os_file_create_simple_no_error_handling( + innodb_file_data_key, filepath, + OS_FILE_OPEN, OS_FILE_READ_WRITE, &success, FALSE); + + DBUG_EXECUTE_IF("fil_tablespace_iterate_failure", + { + static bool once; + + if (!once || ut_rnd_interval(0, 10) == 5) { + once = true; + success = FALSE; + os_file_close(file); + } + }); + + if (!success) { + /* The following call prints an error message */ + os_file_get_last_error(true); + + ib_logf(IB_LOG_LEVEL_ERROR, + "Trying to import a tablespace, but could not " + 
"open the tablespace file %s", filepath); + + mem_free(filepath); + + return(DB_TABLESPACE_NOT_FOUND); + + } else { + err = DB_SUCCESS; + } + } + + callback.set_file(filepath, file); + + os_offset_t file_size = os_file_get_size(file); + ut_a(file_size != (os_offset_t) -1); + + /* Allocate a page to read in the tablespace header, so that we + can determine the page size and zip_size (if it is compressed). + We allocate an extra page in case it is a compressed table. One + page is to ensure alignement. */ + + void* page_ptr = mem_alloc(3 * UNIV_PAGE_SIZE); + byte* page = static_cast(ut_align(page_ptr, UNIV_PAGE_SIZE)); + + /* The block we will use for every physical page */ + buf_block_t block; + + memset(&block, 0, sizeof block); + block.frame = page; + block.page.io_fix = BUF_IO_NONE; + block.page.buf_fix_count = 1; + block.page.state = BUF_BLOCK_FILE_PAGE; + + /* Read the first page and determine the page and zip size. */ + + if (!os_file_read(file, page, 0, UNIV_PAGE_SIZE)) { + + err = DB_IO_ERROR; + + } else if ((err = callback.init(file_size, &block)) == DB_SUCCESS) { + fil_iterator_t iter; + + iter.file = file; + iter.start = 0; + iter.end = file_size; + iter.filepath = filepath; + iter.file_size = file_size; + iter.n_io_buffers = n_io_buffers; + iter.page_size = callback.get_page_size(); + + /* In MariaDB/MySQL 5.6 tablespace does not exist + during import, therefore we can't use space directly + here. */ + ulint crypt_data_offset = fsp_header_get_crypt_offset( + callback.get_zip_size()); + + /* read (optional) crypt data */ + iter.crypt_data = fil_space_read_crypt_data( + 0, page, crypt_data_offset); + + /* Compressed pages can't be optimised for block IO for now. + We do the IMPORT page by page. 
*/ + + if (callback.get_zip_size() > 0) { + iter.n_io_buffers = 1; + ut_a(iter.page_size == callback.get_zip_size()); + } + + /** If tablespace is encrypted, it needs extra buffers */ + if (iter.crypt_data != NULL) { + /* decrease io buffers so that memory + * consumption doesnt double + * note: the +1 is to avoid n_io_buffers getting down to 0 */ + iter.n_io_buffers = (iter.n_io_buffers + 1) / 2; + } + + /** Add an extra page for compressed page scratch area. */ + + void* io_buffer = mem_alloc( + (2 + iter.n_io_buffers) * UNIV_PAGE_SIZE); + + iter.io_buffer = static_cast( + ut_align(io_buffer, UNIV_PAGE_SIZE)); + + void* crypt_io_buffer = NULL; + if (iter.crypt_data != NULL) { + crypt_io_buffer = mem_alloc( + (2 + iter.n_io_buffers) * UNIV_PAGE_SIZE); + iter.crypt_io_buffer = static_cast( + ut_align(crypt_io_buffer, UNIV_PAGE_SIZE)); + } + + err = fil_iterate(iter, &block, callback); + + mem_free(io_buffer); + + if (crypt_io_buffer != NULL) { + mem_free(crypt_io_buffer); + iter.crypt_io_buffer = NULL; + fil_space_destroy_crypt_data(&iter.crypt_data); + } + } + + if (err == DB_SUCCESS) { + + ib_logf(IB_LOG_LEVEL_INFO, "Sync to disk"); + + if (!os_file_flush(file)) { + ib_logf(IB_LOG_LEVEL_INFO, "os_file_flush() failed!"); + err = DB_IO_ERROR; + } else { + ib_logf(IB_LOG_LEVEL_INFO, "Sync to disk - done!"); + } + } + + os_file_close(file); + + mem_free(page_ptr); + mem_free(filepath); + + return(err); +} + /*****************************************************************//** Imports a tablespace. The space id in the .ibd file must match the space id of the table in the data dictionary. 
diff --git a/storage/xtradb/fil/fil0fil.cc b/storage/xtradb/fil/fil0fil.cc index e3a5a351edf..a24b319fda6 100644 --- a/storage/xtradb/fil/fil0fil.cc +++ b/storage/xtradb/fil/fil0fil.cc @@ -25,8 +25,6 @@ Created 10/25/1995 Heikki Tuuri *******************************************************/ #include "fil0fil.h" -#include "fil0pagecompress.h" -#include "fsp0pagecompress.h" #include "fil0crypt.h" #include @@ -49,12 +47,10 @@ Created 10/25/1995 Heikki Tuuri #include "page0zip.h" #include "trx0sys.h" #include "row0mysql.h" -#include "os0file.h" #ifndef UNIV_HOTBACKUP # include "buf0lru.h" # include "ibuf0ibuf.h" # include "sync0sync.h" -# include "os0sync.h" #else /* !UNIV_HOTBACKUP */ # include "srv0srv.h" static ulint srv_data_read, srv_data_written; @@ -704,7 +700,7 @@ add_size: space->size += node->size; } - ulint atomic_writes = fsp_flags_get_atomic_writes(space->flags); + ulint atomic_writes = FSP_FLAGS_GET_ATOMIC_WRITES(space->flags); /* printf("Opening file %s\n", node->name); */ @@ -4110,7 +4106,6 @@ fil_open_single_table_tablespace( fsp_open_info remote; ulint tablespaces_found = 0; ulint valid_tablespaces_found = 0; - ulint atomic_writes = 0; fil_space_crypt_t* crypt_data = NULL; #ifdef UNIV_SYNC_DEBUG @@ -4125,7 +4120,7 @@ fil_open_single_table_tablespace( } ut_ad(fsp_flags_is_valid(flags & ~FSP_FLAGS_MEM_MASK, id)); - atomic_writes = fsp_flags_get_atomic_writes(flags); + const ulint atomic_writes = FSP_FLAGS_GET_ATOMIC_WRITES(flags); memset(&def, 0, sizeof(def)); memset(&dict, 0, sizeof(dict)); @@ -6151,7 +6146,8 @@ fil_io( } else if (type == OS_FILE_WRITE) { ut_ad(!srv_read_only_mode); srv_stats.data_written.add(len); - if (fil_page_is_index_page((byte *)buf)) { + if (mach_read_from_2(static_cast(buf) + + FIL_PAGE_TYPE) == FIL_PAGE_INDEX) { srv_stats.index_pages_written.inc(); } else { srv_stats.non_index_pages_written.inc(); @@ -6683,479 +6679,6 @@ fil_close(void) fil_system = NULL; } 
-/********************************************************************//** -Initializes a buffer control block when the buf_pool is created. */ -static -void -fil_buf_block_init( -/*===============*/ - buf_block_t* block, /*!< in: pointer to control block */ - byte* frame) /*!< in: pointer to buffer frame */ -{ - UNIV_MEM_DESC(frame, UNIV_PAGE_SIZE); - - block->frame = frame; - - block->page.io_fix = BUF_IO_NONE; - /* There are assertions that check for this. */ - block->page.buf_fix_count = 1; - block->page.state = BUF_BLOCK_READY_FOR_USE; - - page_zip_des_init(&block->page.zip); -} - -struct fil_iterator_t { - pfs_os_file_t file; /*!< File handle */ - const char* filepath; /*!< File path name */ - os_offset_t start; /*!< From where to start */ - os_offset_t end; /*!< Where to stop */ - os_offset_t file_size; /*!< File size in bytes */ - ulint page_size; /*!< Page size */ - ulint n_io_buffers; /*!< Number of pages to use - for IO */ - byte* io_buffer; /*!< Buffer to use for IO */ - fil_space_crypt_t *crypt_data; /*!< Crypt data (if encrypted) */ - byte* crypt_io_buffer; /*!< IO buffer when encrypted */ -}; - -/********************************************************************//** -TODO: This can be made parallel trivially by chunking up the file and creating -a callback per thread. . Main benefit will be to use multiple CPUs for -checksums and compressed tables. We have to do compressed tables block by -block right now. Secondly we need to decompress/compress and copy too much -of data. These are CPU intensive. - -Iterate over all the pages in the tablespace. 
-@param iter - Tablespace iterator -@param block - block to use for IO -@param callback - Callback to inspect and update page contents -@retval DB_SUCCESS or error code */ -static -dberr_t -fil_iterate( -/*========*/ - const fil_iterator_t& iter, - buf_block_t* block, - PageCallback& callback) -{ - os_offset_t offset; - ulint page_no = 0; - ulint space_id = callback.get_space_id(); - ulint n_bytes = iter.n_io_buffers * iter.page_size; - - ut_ad(!srv_read_only_mode); - - /* TODO: For compressed tables we do a lot of useless - copying for non-index pages. Unfortunately, it is - required by buf_zip_decompress() */ - const bool row_compressed = callback.get_zip_size() > 0; - - for (offset = iter.start; offset < iter.end; offset += n_bytes) { - - byte* io_buffer = iter.io_buffer; - - block->frame = io_buffer; - - if (row_compressed) { - page_zip_des_init(&block->page.zip); - page_zip_set_size(&block->page.zip, iter.page_size); - block->page.zip.data = block->frame + UNIV_PAGE_SIZE; - ut_d(block->page.zip.m_external = true); - ut_ad(iter.page_size == callback.get_zip_size()); - - /* Zip IO is done in the compressed page buffer. */ - io_buffer = block->page.zip.data; - } - - /* We have to read the exact number of bytes. Otherwise the - InnoDB IO functions croak on failed reads. */ - - n_bytes = static_cast( - ut_min(static_cast(n_bytes), - iter.end - offset)); - - ut_ad(n_bytes > 0); - ut_ad(!(n_bytes % iter.page_size)); - - const bool encrypted = iter.crypt_data != NULL - && iter.crypt_data->should_encrypt(); - /* Use additional crypt io buffer if tablespace is encrypted */ - byte* const readptr = encrypted - ? 
iter.crypt_io_buffer : io_buffer; - byte* const writeptr = readptr; - - if (!os_file_read(iter.file, readptr, offset, (ulint) n_bytes)) { - - ib_logf(IB_LOG_LEVEL_ERROR, "os_file_read() failed"); - - return(DB_IO_ERROR); - } - - bool updated = false; - os_offset_t page_off = offset; - ulint n_pages_read = (ulint) n_bytes / iter.page_size; - bool decrypted = false; - - for (ulint i = 0; i < n_pages_read; ++i) { - ulint size = iter.page_size; - dberr_t err = DB_SUCCESS; - byte* src = readptr + (i * size); - byte* dst = io_buffer + (i * size); - bool frame_changed = false; - - ulint page_type = mach_read_from_2(src+FIL_PAGE_TYPE); - - const bool page_compressed - = page_type == FIL_PAGE_PAGE_COMPRESSED_ENCRYPTED - || page_type == FIL_PAGE_PAGE_COMPRESSED; - - /* If tablespace is encrypted, we need to decrypt - the page. Note that tablespaces are not in - fil_system during import. */ - if (encrypted) { - decrypted = fil_space_decrypt( - iter.crypt_data, - dst, //dst - iter.page_size, - src, // src - &err); // src - - if (err != DB_SUCCESS) { - return(err); - } - - if (decrypted) { - updated = true; - } else { - if (!page_compressed && !row_compressed) { - block->frame = src; - frame_changed = true; - } else { - memcpy(dst, src, size); - } - } - } - - /* If the original page is page_compressed, we need - to decompress page before we can update it. */ - if (page_compressed) { - fil_decompress_page(NULL, dst, ulong(size), - NULL); - updated = true; - } - - buf_block_set_file_page(block, space_id, page_no++); - - if ((err = callback(page_off, block)) != DB_SUCCESS) { - - return(err); - - } else if (!updated) { - updated = buf_block_get_state(block) - == BUF_BLOCK_FILE_PAGE; - } - - buf_block_set_state(block, BUF_BLOCK_NOT_USED); - buf_block_set_state(block, BUF_BLOCK_READY_FOR_USE); - - /* If tablespace is encrypted we use additional - temporary scratch area where pages are read - for decrypting readptr == crypt_io_buffer != io_buffer. 
- - Destination for decryption is a buffer pool block - block->frame == dst == io_buffer that is updated. - Pages that did not require decryption even when - tablespace is marked as encrypted are not copied - instead block->frame is set to src == readptr. - - For encryption we again use temporary scratch area - writeptr != io_buffer == dst - that is then written to the tablespace - - (1) For normal tables io_buffer == dst == writeptr - (2) For only page compressed tables - io_buffer == dst == writeptr - (3) For encrypted (and page compressed) - readptr != io_buffer == dst != writeptr - */ - - ut_ad(!encrypted && !page_compressed ? - src == dst && dst == writeptr + (i * size):1); - ut_ad(page_compressed && !encrypted ? - src == dst && dst == writeptr + (i * size):1); - ut_ad(encrypted ? - src != dst && dst != writeptr + (i * size):1); - - if (encrypted) { - memcpy(writeptr + (i * size), - row_compressed ? block->page.zip.data : - block->frame, size); - } - - if (frame_changed) { - block->frame = dst; - } - - src = io_buffer + (i * size); - - if (page_compressed) { - ulint len = 0; - - fil_compress_page( - NULL, - src, - NULL, - size, - 0,/* FIXME: compression level */ - 512,/* FIXME: use proper block size */ - encrypted, - &len); - - updated = true; - } - - /* If tablespace is encrypted, encrypt page before we - write it back. Note that we should not encrypt the - buffer that is in buffer pool. */ - /* NOTE: At this stage of IMPORT the - buffer pool is not being used at all! */ - if (decrypted && encrypted) { - byte *dest = writeptr + (i * size); - ulint space = mach_read_from_4( - src + FIL_PAGE_ARCH_LOG_NO_OR_SPACE_ID); - ulint offset = mach_read_from_4(src + FIL_PAGE_OFFSET); - ib_uint64_t lsn = mach_read_from_8(src + FIL_PAGE_LSN); - - byte* tmp = fil_encrypt_buf( - iter.crypt_data, - space, - offset, - lsn, - src, - iter.page_size == UNIV_PAGE_SIZE ? 
0 : iter.page_size, - dest); - - if (tmp == src) { - /* TODO: remove unnecessary memcpy's */ - memcpy(dest, src, size); - } - - updated = true; - } - - page_off += iter.page_size; - block->frame += iter.page_size; - } - - /* A page was updated in the set, write back to disk. */ - if (updated - && !os_file_write( - iter.filepath, iter.file, writeptr, - offset, (ulint) n_bytes)) { - - ib_logf(IB_LOG_LEVEL_ERROR, "os_file_write() failed"); - - return(DB_IO_ERROR); - } - } - - return(DB_SUCCESS); -} - -/********************************************************************//** -Iterate over all the pages in the tablespace. -@param table - the table definiton in the server -@param n_io_buffers - number of blocks to read and write together -@param callback - functor that will do the page updates -@return DB_SUCCESS or error code */ -UNIV_INTERN -dberr_t -fil_tablespace_iterate( -/*===================*/ - dict_table_t* table, - ulint n_io_buffers, - PageCallback& callback) -{ - dberr_t err; - pfs_os_file_t file; - char* filepath; - - ut_a(n_io_buffers > 0); - ut_ad(!srv_read_only_mode); - - DBUG_EXECUTE_IF("ib_import_trigger_corruption_1", - return(DB_CORRUPTION);); - - if (DICT_TF_HAS_DATA_DIR(table->flags)) { - dict_get_and_save_data_dir_path(table, false); - ut_a(table->data_dir_path); - - filepath = os_file_make_remote_pathname( - table->data_dir_path, table->name, "ibd"); - } else { - filepath = fil_make_ibd_name(table->name, false); - } - - { - ibool success; - - file = os_file_create_simple_no_error_handling( - innodb_file_data_key, filepath, - OS_FILE_OPEN, OS_FILE_READ_WRITE, &success, FALSE); - - DBUG_EXECUTE_IF("fil_tablespace_iterate_failure", - { - static bool once; - - if (!once || ut_rnd_interval(0, 10) == 5) { - once = true; - success = FALSE; - os_file_close(file); - } - }); - - if (!success) { - /* The following call prints an error message */ - os_file_get_last_error(true); - - ib_logf(IB_LOG_LEVEL_ERROR, - "Trying to import a tablespace, but could not " 
- "open the tablespace file %s", filepath); - - mem_free(filepath); - - return(DB_TABLESPACE_NOT_FOUND); - - } else { - err = DB_SUCCESS; - } - } - - callback.set_file(filepath, file); - - os_offset_t file_size = os_file_get_size(file); - ut_a(file_size != (os_offset_t) -1); - - /* The block we will use for every physical page */ - buf_block_t block; - - memset(&block, 0x0, sizeof(block)); - - /* Allocate a page to read in the tablespace header, so that we - can determine the page size and zip_size (if it is compressed). - We allocate an extra page in case it is a compressed table. One - page is to ensure alignement. */ - - void* page_ptr = mem_alloc(3 * UNIV_PAGE_SIZE); - byte* page = static_cast(ut_align(page_ptr, UNIV_PAGE_SIZE)); - - fil_buf_block_init(&block, page); - - /* Read the first page and determine the page and zip size. */ - - if (!os_file_read(file, page, 0, UNIV_PAGE_SIZE)) { - - err = DB_IO_ERROR; - - } else if ((err = callback.init(file_size, &block)) == DB_SUCCESS) { - fil_iterator_t iter; - - iter.file = file; - iter.start = 0; - iter.end = file_size; - iter.filepath = filepath; - iter.file_size = file_size; - iter.n_io_buffers = n_io_buffers; - iter.page_size = callback.get_page_size(); - - /* In MariaDB/MySQL 5.6 tablespace does not exist - during import, therefore we can't use space directly - here. */ - ulint crypt_data_offset = fsp_header_get_crypt_offset( - callback.get_zip_size()); - - /* read (optional) crypt data */ - iter.crypt_data = fil_space_read_crypt_data( - 0, page, crypt_data_offset); - - /* Compressed pages can't be optimised for block IO for now. - We do the IMPORT page by page. 
*/ - - if (callback.get_zip_size() > 0) { - iter.n_io_buffers = 1; - ut_a(iter.page_size == callback.get_zip_size()); - } - - /** If tablespace is encrypted, it needs extra buffers */ - if (iter.crypt_data != NULL) { - /* decrease io buffers so that memory - * consumption doesnt double - * note: the +1 is to avoid n_io_buffers getting down to 0 */ - iter.n_io_buffers = (iter.n_io_buffers + 1) / 2; - } - - /** Add an extra page for compressed page scratch area. */ - - void* io_buffer = mem_alloc( - (2 + iter.n_io_buffers) * UNIV_PAGE_SIZE); - - iter.io_buffer = static_cast( - ut_align(io_buffer, UNIV_PAGE_SIZE)); - - void* crypt_io_buffer = NULL; - if (iter.crypt_data != NULL) { - crypt_io_buffer = mem_alloc( - (2 + iter.n_io_buffers) * UNIV_PAGE_SIZE); - iter.crypt_io_buffer = static_cast( - ut_align(crypt_io_buffer, UNIV_PAGE_SIZE)); - } - - err = fil_iterate(iter, &block, callback); - - mem_free(io_buffer); - - if (crypt_io_buffer != NULL) { - mem_free(crypt_io_buffer); - iter.crypt_io_buffer = NULL; - fil_space_destroy_crypt_data(&iter.crypt_data); - } - } - - if (err == DB_SUCCESS) { - - ib_logf(IB_LOG_LEVEL_INFO, "Sync to disk"); - - if (!os_file_flush(file)) { - ib_logf(IB_LOG_LEVEL_INFO, "os_file_flush() failed!"); - err = DB_IO_ERROR; - } else { - ib_logf(IB_LOG_LEVEL_INFO, "Sync to disk - done!"); - } - } - - os_file_close(file); - - mem_free(page_ptr); - mem_free(filepath); - - return(err); -} - -/** -Set the tablespace compressed table size. -@return DB_SUCCESS if it is valie or DB_CORRUPTION if not */ -dberr_t -PageCallback::set_zip_size(const buf_frame_t* page) UNIV_NOTHROW -{ - m_zip_size = fsp_header_get_zip_size(page); - - if (!ut_is_2pow(m_zip_size) || m_zip_size > UNIV_ZIP_SIZE_MAX) { - return(DB_CORRUPTION); - } - - return(DB_SUCCESS); -} - /********************************************************************//** Delete the tablespace file and any related files like .cfg. This should not be called for temporary tables. 
*/ diff --git a/storage/xtradb/include/fil0fil.h b/storage/xtradb/include/fil0fil.h index 8c3bf7d2b06..8a4aa9b7dff 100644 --- a/storage/xtradb/include/fil0fil.h +++ b/storage/xtradb/include/fil0fil.h @@ -1309,107 +1309,6 @@ fil_delete_file( /*============*/ const char* path); /*!< in: filepath of the ibd tablespace */ -/** Callback functor. */ -struct PageCallback { - - /** - Default constructor */ - PageCallback() - : - m_zip_size(), - m_page_size(), - m_filepath() UNIV_NOTHROW {} - - virtual ~PageCallback() UNIV_NOTHROW {} - - /** - Called for page 0 in the tablespace file at the start. - @param file_size - size of the file in bytes - @param block - contents of the first page in the tablespace file - @retval DB_SUCCESS or error code.*/ - virtual dberr_t init( - os_offset_t file_size, - const buf_block_t* block) UNIV_NOTHROW = 0; - - /** - Called for every page in the tablespace. If the page was not - updated then its state must be set to BUF_PAGE_NOT_USED. For - compressed tables the page descriptor memory will be at offset: - block->frame + UNIV_PAGE_SIZE; - @param offset - physical offset within the file - @param block - block read from file, note it is not from the buffer pool - @retval DB_SUCCESS or error code. */ - virtual dberr_t operator()( - os_offset_t offset, - buf_block_t* block) UNIV_NOTHROW = 0; - - /** - Set the name of the physical file and the file handle that is used - to open it for the file that is being iterated over. - @param filename - then physical name of the tablespace file. - @param file - OS file handle */ - void set_file(const char* filename, pfs_os_file_t file) UNIV_NOTHROW - { - m_file = file; - m_filepath = filename; - } - - /** - @return the space id of the tablespace */ - virtual ulint get_space_id() const UNIV_NOTHROW = 0; - - /** The compressed page size - @return the compressed page size */ - ulint get_zip_size() const - { - return(m_zip_size); - } - - /** - Set the tablespace compressed table size. 
- @return DB_SUCCESS if it is valie or DB_CORRUPTION if not */ - dberr_t set_zip_size(const buf_frame_t* page) UNIV_NOTHROW; - - /** The compressed page size - @return the compressed page size */ - ulint get_page_size() const - { - return(m_page_size); - } - - /** Compressed table page size */ - ulint m_zip_size; - - /** The tablespace page size. */ - ulint m_page_size; - - /** File handle to the tablespace */ - pfs_os_file_t m_file; - - /** Physical file path. */ - const char* m_filepath; - -protected: - // Disable copying - PageCallback(const PageCallback&); - PageCallback& operator=(const PageCallback&); -}; - -/********************************************************************//** -Iterate over all the pages in the tablespace. -@param table - the table definiton in the server -@param n_io_buffers - number of blocks to read and write together -@param callback - functor that will do the page updates -@return DB_SUCCESS or error code */ -UNIV_INTERN -dberr_t -fil_tablespace_iterate( -/*===================*/ - dict_table_t* table, - ulint n_io_buffers, - PageCallback& callback) - MY_ATTRIBUTE((nonnull, warn_unused_result)); - /*******************************************************************//** Checks if a single-table tablespace for a given table name exists in the tablespace memory cache. diff --git a/storage/xtradb/row/row0import.cc b/storage/xtradb/row/row0import.cc index 8a06342cafd..b8e8e076b68 100644 --- a/storage/xtradb/row/row0import.cc +++ b/storage/xtradb/row/row0import.cc @@ -40,7 +40,7 @@ Created 2012-02-08 by Sunny Bains. #include "row0mysql.h" #include "srv0start.h" #include "row0quiesce.h" -#include "buf0buf.h" +#include "fil0pagecompress.h" #include @@ -362,7 +362,8 @@ private: /** Functor that is called for each physical page that is read from the tablespace file. 
*/ -class AbstractCallback : public PageCallback { +class AbstractCallback +{ public: /** Constructor @param trx - covering transaction */ @@ -395,6 +396,47 @@ public: return(get_zip_size() > 0); } + /** + Set the name of the physical file and the file handle that is used + to open it for the file that is being iterated over. + @param filename - then physical name of the tablespace file. + @param file - OS file handle */ + void set_file(const char* filename, pfs_os_file_t file) UNIV_NOTHROW + { + m_file = file; + m_filepath = filename; + } + + /** The compressed page size + @return the compressed page size */ + ulint get_zip_size() const + { + return(m_zip_size); + } + + /** The compressed page size + @return the compressed page size */ + ulint get_page_size() const + { + return(m_page_size); + } + + /** + Called for every page in the tablespace. If the page was not + updated then its state must be set to BUF_PAGE_NOT_USED. For + compressed tables the page descriptor memory will be at offset: + block->frame + UNIV_PAGE_SIZE; + @param offset - physical offset within the file + @param block - block read from file, note it is not from the buffer pool + @retval DB_SUCCESS or error code. */ + virtual dberr_t operator()( + os_offset_t offset, + buf_block_t* block) UNIV_NOTHROW = 0; + + /** + @return the space id of the tablespace */ + virtual ulint get_space_id() const UNIV_NOTHROW = 0; + protected: /** Get the data page depending on the table type, compressed or not. @@ -510,6 +552,18 @@ protected: } protected: + /** Compressed table page size */ + ulint m_zip_size; + + /** The tablespace page size. */ + ulint m_page_size; + + /** File handle to the tablespace */ + pfs_os_file_t m_file; + + /** Physical file path. */ + const char* m_filepath; + /** Covering transaction. */ trx_t* m_trx; @@ -566,9 +620,9 @@ AbstractCallback::init( /* Since we don't know whether it is a compressed table or not, the data is always read into the block->frame. 
*/ - dberr_t err = set_zip_size(block->frame); + m_zip_size = fsp_header_get_zip_size(page); - if (err != DB_SUCCESS) { + if (!ut_is_2pow(m_zip_size) || m_zip_size > UNIV_ZIP_SIZE_MAX) { return(DB_CORRUPTION); } @@ -605,11 +659,7 @@ AbstractCallback::init( m_free_limit = mach_read_from_4(page + FSP_FREE_LIMIT); m_space = mach_read_from_4(page + FSP_HEADER_OFFSET + FSP_SPACE_ID); - if ((err = set_current_xdes(0, page)) != DB_SUCCESS) { - return(err); - } - - return(DB_SUCCESS); + return set_current_xdes(0, page); } /** @@ -1316,8 +1366,8 @@ row_import::match_schema( return(DB_ERROR); } else if (m_table->n_cols != m_n_cols) { ib_errf(thd, IB_LOG_LEVEL_ERROR, ER_TABLE_SCHEMA_MISMATCH, - "Number of columns don't match, table has %u " - "columns but the tablespace meta-data file has " + "Number of columns don't match, table has %u" + " columns but the tablespace meta-data file has " ULINTPF " columns", m_table->n_cols, m_n_cols); @@ -1597,6 +1647,7 @@ IndexPurge::purge() UNIV_NOTHROW Constructor * @param cfg - config of table being imported. * @param trx - transaction covering the import */ +inline PageConverter::PageConverter( row_import* cfg, trx_t* trx) @@ -1621,6 +1672,7 @@ Adjust the BLOB reference for a single column that is externally stored @param offsets - column offsets for the record @param i - column ordinal value @return DB_SUCCESS or error code */ +inline dberr_t PageConverter::adjust_cluster_index_blob_column( rec_t* rec, @@ -1673,6 +1725,7 @@ stored columns. @param rec - record to update @param offsets - column offsets for the record @return DB_SUCCESS or error code */ +inline dberr_t PageConverter::adjust_cluster_index_blob_columns( rec_t* rec, @@ -1706,6 +1759,7 @@ BLOB reference, write the new space id. 
@param rec - record to update @param offsets - column offsets for the record @return DB_SUCCESS or error code */ +inline dberr_t PageConverter::adjust_cluster_index_blob_ref( rec_t* rec, @@ -1729,6 +1783,7 @@ Purge delete-marked records, only if it is possible to do so without re-organising the B+tree. @param offsets - current row offsets. @return true if purge succeeded */ +inline bool PageConverter::purge(const ulint* offsets) UNIV_NOTHROW { @@ -1753,6 +1808,7 @@ Adjust the BLOB references and sys fields for the current record. @param offsets - column offsets for the record @param deleted - true if row is delete marked @return DB_SUCCESS or error code. */ +inline dberr_t PageConverter::adjust_cluster_record( const dict_index_t* index, @@ -1781,6 +1837,7 @@ Update the BLOB refrences and write UNDO log entries for rows that can't be purged optimistically. @param block - block to update @retval DB_SUCCESS or error code */ +inline dberr_t PageConverter::update_records( buf_block_t* block) UNIV_NOTHROW @@ -1846,6 +1903,7 @@ PageConverter::update_records( /** Update the space, index id, trx id. @return DB_SUCCESS or error code */ +inline dberr_t PageConverter::update_index_page( buf_block_t* block) UNIV_NOTHROW @@ -1915,6 +1973,7 @@ PageConverter::update_index_page( Validate the space flags and update tablespace header page. @param block - block read from file, not from the buffer pool. @retval DB_SUCCESS or error code */ +inline dberr_t PageConverter::update_header( buf_block_t* block) UNIV_NOTHROW @@ -1954,6 +2013,7 @@ PageConverter::update_header( Update the page, set the space id, max trx id and index id. 
@param block - block read from file @retval DB_SUCCESS or error code */ +inline dberr_t PageConverter::update_page( buf_block_t* block, @@ -3424,6 +3484,436 @@ row_import_update_discarded_flag( return(err); } +struct fil_iterator_t { + pfs_os_file_t file; /*!< File handle */ + const char* filepath; /*!< File path name */ + os_offset_t start; /*!< From where to start */ + os_offset_t end; /*!< Where to stop */ + os_offset_t file_size; /*!< File size in bytes */ + ulint page_size; /*!< Page size */ + ulint n_io_buffers; /*!< Number of pages to use + for IO */ + byte* io_buffer; /*!< Buffer to use for IO */ + fil_space_crypt_t *crypt_data; /*!< Crypt data (if encrypted) */ + byte* crypt_io_buffer; /*!< IO buffer when encrypted */ +}; + +/********************************************************************//** +TODO: This can be made parallel trivially by chunking up the file and creating +a callback per thread. . Main benefit will be to use multiple CPUs for +checksums and compressed tables. We have to do compressed tables block by +block right now. Secondly we need to decompress/compress and copy too much +of data. These are CPU intensive. + +Iterate over all the pages in the tablespace. +@param iter - Tablespace iterator +@param block - block to use for IO +@param callback - Callback to inspect and update page contents +@retval DB_SUCCESS or error code */ +static +dberr_t +fil_iterate( +/*========*/ + const fil_iterator_t& iter, + buf_block_t* block, + AbstractCallback& callback) +{ + os_offset_t offset; + ulint page_no = 0; + ulint space_id = callback.get_space_id(); + ulint n_bytes = iter.n_io_buffers * iter.page_size; + + ut_ad(!srv_read_only_mode); + + /* TODO: For compressed tables we do a lot of useless + copying for non-index pages. 
Unfortunately, it is + required by buf_zip_decompress() */ + const bool row_compressed = callback.get_zip_size() > 0; + + for (offset = iter.start; offset < iter.end; offset += n_bytes) { + + byte* io_buffer = iter.io_buffer; + + block->frame = io_buffer; + + if (row_compressed) { + page_zip_des_init(&block->page.zip); + page_zip_set_size(&block->page.zip, iter.page_size); + block->page.zip.data = block->frame + UNIV_PAGE_SIZE; + ut_d(block->page.zip.m_external = true); + ut_ad(iter.page_size == callback.get_zip_size()); + + /* Zip IO is done in the compressed page buffer. */ + io_buffer = block->page.zip.data; + } + + /* We have to read the exact number of bytes. Otherwise the + InnoDB IO functions croak on failed reads. */ + + n_bytes = ulint(ut_min(os_offset_t(n_bytes), + iter.end - offset)); + + ut_ad(n_bytes > 0); + ut_ad(!(n_bytes % iter.page_size)); + + const bool encrypted = iter.crypt_data != NULL + && iter.crypt_data->should_encrypt(); + /* Use additional crypt io buffer if tablespace is encrypted */ + byte* const readptr = encrypted + ? iter.crypt_io_buffer : io_buffer; + byte* const writeptr = readptr; + + if (!os_file_read(iter.file, readptr, offset, n_bytes)) { + ib_logf(IB_LOG_LEVEL_ERROR, "os_file_read() failed"); + return DB_IO_ERROR; + } + + bool updated = false; + os_offset_t page_off = offset; + ulint n_pages_read = (ulint) n_bytes / iter.page_size; + bool decrypted = false; + + for (ulint i = 0; i < n_pages_read; ++i) { + ulint size = iter.page_size; + dberr_t err = DB_SUCCESS; + byte* src = readptr + (i * size); + byte* dst = io_buffer + (i * size); + bool frame_changed = false; + + ulint page_type = mach_read_from_2(src+FIL_PAGE_TYPE); + + const bool page_compressed + = page_type == FIL_PAGE_PAGE_COMPRESSED_ENCRYPTED + || page_type == FIL_PAGE_PAGE_COMPRESSED; + + /* If tablespace is encrypted, we need to decrypt + the page. Note that tablespaces are not in + fil_system during import. 
*/ + if (encrypted) { + decrypted = fil_space_decrypt( + iter.crypt_data, + dst, //dst + iter.page_size, + src, // src + &err); + + if (err != DB_SUCCESS) { + return err; + } + + if (decrypted) { + updated = true; + } else { + if (!page_compressed && !row_compressed) { + block->frame = src; + frame_changed = true; + } else { + memcpy(dst, src, size); + } + } + } + + /* If the original page is page_compressed, we need + to decompress it before adjusting further. */ + if (page_compressed) { + fil_decompress_page(NULL, dst, ulong(size), + NULL); + updated = true; + } + + buf_block_set_file_page(block, space_id, page_no++); + + if ((err = callback(page_off, block)) != DB_SUCCESS) { + return err; + } else if (!updated) { + updated = buf_block_get_state(block) + == BUF_BLOCK_FILE_PAGE; + } + + /* If tablespace is encrypted we use additional + temporary scratch area where pages are read + for decrypting readptr == crypt_io_buffer != io_buffer. + + Destination for decryption is a buffer pool block + block->frame == dst == io_buffer that is updated. + Pages that did not require decryption even when + tablespace is marked as encrypted are not copied + instead block->frame is set to src == readptr. + + For encryption we again use temporary scratch area + writeptr != io_buffer == dst + that is then written to the tablespace + + (1) For normal tables io_buffer == dst == writeptr + (2) For only page compressed tables + io_buffer == dst == writeptr + (3) For encrypted (and page compressed) + readptr != io_buffer == dst != writeptr + */ + + ut_ad(!encrypted && !page_compressed ? + src == dst && dst == writeptr + (i * size):1); + ut_ad(page_compressed && !encrypted ? + src == dst && dst == writeptr + (i * size):1); + ut_ad(encrypted ? + src != dst && dst != writeptr + (i * size):1); + + if (encrypted) { + memcpy(writeptr + (i * size), + row_compressed ? 
block->page.zip.data : + block->frame, size); + } + + if (frame_changed) { + block->frame = dst; + } + + src = io_buffer + (i * size); + + if (page_compressed) { + ulint len = 0; + + fil_compress_page( + NULL, + src, + NULL, + size, + 0,/* FIXME: compression level */ + 512,/* FIXME: use proper block size */ + encrypted, + &len); + + updated = true; + } + + /* If tablespace is encrypted, encrypt page before we + write it back. Note that we should not encrypt the + buffer that is in buffer pool. */ + /* NOTE: At this stage of IMPORT the + buffer pool is not being used at all! */ + if (decrypted && encrypted) { + byte *dest = writeptr + (i * size); + ulint space = mach_read_from_4( + src + FIL_PAGE_ARCH_LOG_NO_OR_SPACE_ID); + ulint offset = mach_read_from_4(src + FIL_PAGE_OFFSET); + ib_uint64_t lsn = mach_read_from_8(src + FIL_PAGE_LSN); + + byte* tmp = fil_encrypt_buf( + iter.crypt_data, + space, + offset, + lsn, + src, + iter.page_size == UNIV_PAGE_SIZE ? 0 : iter.page_size, + dest); + + if (tmp == src) { + /* TODO: remove unnecessary memcpy's */ + memcpy(dest, src, size); + } + + updated = true; + } + + page_off += iter.page_size; + block->frame += iter.page_size; + } + + /* A page was updated in the set, write back to disk. */ + if (updated + && !os_file_write( + iter.filepath, iter.file, writeptr, + offset, (ulint) n_bytes)) { + + ib_logf(IB_LOG_LEVEL_ERROR, "os_file_write() failed"); + return DB_IO_ERROR; + } + } + + return DB_SUCCESS; +} + +/********************************************************************//** +Iterate over all the pages in the tablespace. 
+@param table - the table definiton in the server +@param n_io_buffers - number of blocks to read and write together +@param callback - functor that will do the page updates +@return DB_SUCCESS or error code */ +static +dberr_t +fil_tablespace_iterate( +/*===================*/ + dict_table_t* table, + ulint n_io_buffers, + AbstractCallback& callback) +{ + dberr_t err; + pfs_os_file_t file; + char* filepath; + + ut_a(n_io_buffers > 0); + ut_ad(!srv_read_only_mode); + + DBUG_EXECUTE_IF("ib_import_trigger_corruption_1", + return(DB_CORRUPTION);); + + if (DICT_TF_HAS_DATA_DIR(table->flags)) { + dict_get_and_save_data_dir_path(table, false); + ut_a(table->data_dir_path); + + filepath = os_file_make_remote_pathname( + table->data_dir_path, table->name, "ibd"); + } else { + filepath = fil_make_ibd_name(table->name, false); + } + + { + ibool success; + + file = os_file_create_simple_no_error_handling( + innodb_file_data_key, filepath, + OS_FILE_OPEN, OS_FILE_READ_WRITE, &success, FALSE); + + DBUG_EXECUTE_IF("fil_tablespace_iterate_failure", + { + static bool once; + + if (!once || ut_rnd_interval(0, 10) == 5) { + once = true; + success = FALSE; + os_file_close(file); + } + }); + + if (!success) { + /* The following call prints an error message */ + os_file_get_last_error(true); + + ib_logf(IB_LOG_LEVEL_ERROR, + "Trying to import a tablespace, but could not " + "open the tablespace file %s", filepath); + + mem_free(filepath); + + return(DB_TABLESPACE_NOT_FOUND); + + } else { + err = DB_SUCCESS; + } + } + + callback.set_file(filepath, file); + + os_offset_t file_size = os_file_get_size(file); + ut_a(file_size != (os_offset_t) -1); + + /* Allocate a page to read in the tablespace header, so that we + can determine the page size and zip_size (if it is compressed). + We allocate an extra page in case it is a compressed table. One + page is to ensure alignement. 
*/ + + void* page_ptr = mem_alloc(3 * UNIV_PAGE_SIZE); + byte* page = static_cast(ut_align(page_ptr, UNIV_PAGE_SIZE)); + + /* The block we will use for every physical page */ + buf_block_t block; + + memset(&block, 0, sizeof block); + block.frame = page; + block.page.io_fix = BUF_IO_NONE; + block.page.buf_fix_count = 1; + block.page.state = BUF_BLOCK_FILE_PAGE; + + /* Read the first page and determine the page and zip size. */ + + if (!os_file_read(file, page, 0, UNIV_PAGE_SIZE)) { + + err = DB_IO_ERROR; + + } else if ((err = callback.init(file_size, &block)) == DB_SUCCESS) { + fil_iterator_t iter; + + iter.file = file; + iter.start = 0; + iter.end = file_size; + iter.filepath = filepath; + iter.file_size = file_size; + iter.n_io_buffers = n_io_buffers; + iter.page_size = callback.get_page_size(); + + /* In MariaDB/MySQL 5.6 tablespace does not exist + during import, therefore we can't use space directly + here. */ + ulint crypt_data_offset = fsp_header_get_crypt_offset( + callback.get_zip_size()); + + /* read (optional) crypt data */ + iter.crypt_data = fil_space_read_crypt_data( + 0, page, crypt_data_offset); + + /* Compressed pages can't be optimised for block IO for now. + We do the IMPORT page by page. */ + + if (callback.get_zip_size() > 0) { + iter.n_io_buffers = 1; + ut_a(iter.page_size == callback.get_zip_size()); + } + + /** If tablespace is encrypted, it needs extra buffers */ + if (iter.crypt_data != NULL) { + /* decrease io buffers so that memory + * consumption doesnt double + * note: the +1 is to avoid n_io_buffers getting down to 0 */ + iter.n_io_buffers = (iter.n_io_buffers + 1) / 2; + } + + /** Add an extra page for compressed page scratch area. 
*/ + + void* io_buffer = mem_alloc( + (2 + iter.n_io_buffers) * UNIV_PAGE_SIZE); + + iter.io_buffer = static_cast( + ut_align(io_buffer, UNIV_PAGE_SIZE)); + + void* crypt_io_buffer = NULL; + if (iter.crypt_data != NULL) { + crypt_io_buffer = mem_alloc( + (2 + iter.n_io_buffers) * UNIV_PAGE_SIZE); + iter.crypt_io_buffer = static_cast( + ut_align(crypt_io_buffer, UNIV_PAGE_SIZE)); + } + + err = fil_iterate(iter, &block, callback); + + mem_free(io_buffer); + + if (crypt_io_buffer != NULL) { + mem_free(crypt_io_buffer); + iter.crypt_io_buffer = NULL; + fil_space_destroy_crypt_data(&iter.crypt_data); + } + } + + if (err == DB_SUCCESS) { + + ib_logf(IB_LOG_LEVEL_INFO, "Sync to disk"); + + if (!os_file_flush(file)) { + ib_logf(IB_LOG_LEVEL_INFO, "os_file_flush() failed!"); + err = DB_IO_ERROR; + } else { + ib_logf(IB_LOG_LEVEL_INFO, "Sync to disk - done!"); + } + } + + os_file_close(file); + + mem_free(page_ptr); + mem_free(filepath); + + return(err); +} + /*****************************************************************//** Imports a tablespace. The space id in the .ibd file must match the space id of the table in the data dictionary. From eaa7bfb59fdea130dbc5072ad677a5ba34152d53 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Tue, 20 Mar 2018 12:55:00 +0200 Subject: [PATCH 090/139] MDEV-12396 IMPORT TABLESPACE: Simplify validation fil_iterate(): Validate the pages directly. import_page_status_t, PageConverter::validate(): Remove. AbstractCallback::filename(): New accessor. AbstractCallback::is_interrupted(): Replaces periodic_check(). PageConverter::trigger_corruption(): Remove. 
--- storage/innobase/row/row0import.cc | 215 ++++++++--------------------- storage/xtradb/row/row0import.cc | 215 ++++++++--------------------- 2 files changed, 112 insertions(+), 318 deletions(-) diff --git a/storage/innobase/row/row0import.cc b/storage/innobase/row/row0import.cc index b8e8e076b68..24dc3a66f1f 100644 --- a/storage/innobase/row/row0import.cc +++ b/storage/innobase/row/row0import.cc @@ -421,6 +421,8 @@ public: return(m_page_size); } + const char* filename() const { return m_filepath; } + /** Called for every page in the tablespace. If the page was not updated then its state must be set to BUF_PAGE_NOT_USED. For @@ -437,6 +439,8 @@ public: @return the space id of the tablespace */ virtual ulint get_space_id() const UNIV_NOTHROW = 0; + bool is_interrupted() const { return trx_is_interrupted(m_trx); } + protected: /** Get the data page depending on the table type, compressed or not. @@ -451,18 +455,6 @@ protected: return(buf_block_get_frame(block)); } - /** Check for session interrupt. If required we could - even flush to disk here every N pages. - @retval DB_SUCCESS or error code */ - dberr_t periodic_check() UNIV_NOTHROW - { - if (trx_is_interrupted(m_trx)) { - return(DB_INTERRUPTED); - } - - return(DB_SUCCESS); - } - /** Get the physical offset of the extent descriptor within the page. 
@param page_no - page number of the extent descriptor @@ -732,11 +724,7 @@ FetchIndexRootPages::operator() ( os_offset_t offset, buf_block_t* block) UNIV_NOTHROW { - dberr_t err; - - if ((err = periodic_check()) != DB_SUCCESS) { - return(err); - } + if (is_interrupted()) return DB_INTERRUPTED; const page_t* page = get_frame(block); @@ -749,9 +737,9 @@ FetchIndexRootPages::operator() ( block->page.offset, (ulint) (offset / m_page_size)); - err = DB_CORRUPTION; + return DB_CORRUPTION; } else if (page_type == FIL_PAGE_TYPE_XDES) { - err = set_current_xdes(block->page.offset, page); + return set_current_xdes(block->page.offset, page); } else if (page_type == FIL_PAGE_INDEX && !is_free(block->page.offset) && is_root_page(page)) { @@ -776,7 +764,7 @@ FetchIndexRootPages::operator() ( } } - return(err); + return DB_SUCCESS; } /** @@ -900,14 +888,6 @@ public: os_offset_t offset, buf_block_t* block) UNIV_NOTHROW; private: - - /** Status returned by PageConverter::validate() */ - enum import_page_status_t { - IMPORT_PAGE_STATUS_OK, /*!< Page is OK */ - IMPORT_PAGE_STATUS_ALL_ZERO, /*!< Page is all zeros */ - IMPORT_PAGE_STATUS_CORRUPTED /*!< Page is corrupted */ - }; - /** Update the page, set the space id, max trx id and index id. @param block - block read from file @@ -917,17 +897,6 @@ private: buf_block_t* block, ulint& page_type) UNIV_NOTHROW; -#if defined UNIV_DEBUG - /** - @return true error condition is enabled. */ - bool trigger_corruption() UNIV_NOTHROW - { - return(false); - } - #else -#define trigger_corruption() (false) -#endif /* UNIV_DEBUG */ - /** Update the space, index id, trx id. @param block - block to convert @@ -940,15 +909,6 @@ private: @retval DB_SUCCESS or error code */ dberr_t update_records(buf_block_t* block) UNIV_NOTHROW; - /** - Validate the page, check for corruption. - @param offset - physical offset within file. - @param page - page read from file. 
- @return 0 on success, 1 if all zero, 2 if corrupted */ - import_page_status_t validate( - os_offset_t offset, - buf_block_t* page) UNIV_NOTHROW; - /** Validate the space flags and update tablespace header page. @param block - block read from file, not from the buffer pool. @@ -2075,80 +2035,15 @@ PageConverter::update_page( return(DB_CORRUPTION); } -/** -Validate the page -@param offset - physical offset within file. -@param page - page read from file. -@return status */ -PageConverter::import_page_status_t -PageConverter::validate( - os_offset_t offset, - buf_block_t* block) UNIV_NOTHROW -{ - buf_frame_t* page = get_frame(block); - - /* Check that the page number corresponds to the offset in - the file. Flag as corrupt if it doesn't. Disable the check - for LSN in buf_page_is_corrupted() */ - - if (buf_page_is_corrupted(false, page, get_zip_size(), NULL) - || (page_get_page_no(page) != offset / m_page_size - && page_get_page_no(page) != 0)) { - - return(IMPORT_PAGE_STATUS_CORRUPTED); - - } else if (offset > 0 && page_get_page_no(page) == 0) { - ulint checksum; - - checksum = mach_read_from_4(page + FIL_PAGE_SPACE_OR_CHKSUM); - if (checksum != 0) { - /* Checksum check passed in buf_page_is_corrupted(). */ - ib_logf(IB_LOG_LEVEL_WARN, - "%s: Page %lu checksum " ULINTPF - " should be zero.", - m_filepath, (ulong) (offset / m_page_size), - checksum); - } - - const byte* b = page + FIL_PAGE_OFFSET; - const byte* e = page + m_page_size - - FIL_PAGE_END_LSN_OLD_CHKSUM; - - /* If the page number is zero and offset > 0 then - the entire page MUST consist of zeroes. If not then - we flag it as corrupt. */ - - while (b != e) { - - if (*b++ && !trigger_corruption()) { - return(IMPORT_PAGE_STATUS_CORRUPTED); - } - } - - /* The page is all zero: do nothing. */ - return(IMPORT_PAGE_STATUS_ALL_ZERO); - } - - return(IMPORT_PAGE_STATUS_OK); -} - /** Called for every page in the tablespace. If the page was not updated then its state must be set to BUF_PAGE_NOT_USED. 
-@param offset - physical offset within the file @param block - block read from file, note it is not from the buffer pool @retval DB_SUCCESS or error code. */ dberr_t -PageConverter::operator() ( - os_offset_t offset, - buf_block_t* block) UNIV_NOTHROW +PageConverter::operator() (os_offset_t, buf_block_t* block) UNIV_NOTHROW { ulint page_type; - dberr_t err = DB_SUCCESS; - - if ((err = periodic_check()) != DB_SUCCESS) { - return(err); - } if (is_compressed_table()) { m_page_zip_ptr = &block->page.zip; @@ -2156,16 +2051,8 @@ PageConverter::operator() ( ut_ad(m_page_zip_ptr == 0); } - switch(validate(offset, block)) { - case IMPORT_PAGE_STATUS_OK: - - /* We have to decompress the compressed pages before - we can work on them */ - - if ((err = update_page(block, page_type)) != DB_SUCCESS) { - break; - } - + dberr_t err = update_page(block, page_type); + if (err == DB_SUCCESS) { /* Note: For compressed pages this function will write to the zip descriptor and for uncompressed pages it will write to page (ie. the block->frame). Therefore the caller should write @@ -2187,23 +2074,9 @@ PageConverter::operator() ( get_frame(block), get_zip_size(), m_current_lsn); } - - break; - - case IMPORT_PAGE_STATUS_ALL_ZERO: - /* The page is all zero: leave it as is. */ - break; - - case IMPORT_PAGE_STATUS_CORRUPTED: - - ib_logf(IB_LOG_LEVEL_WARN, - "%s: Page %lu at offset " UINT64PF " looks corrupted.", - m_filepath, (ulong) (offset / m_page_size), offset); - - err = DB_CORRUPTION; } - /* If we already had and old page with matching number + /* If we already had an old page with matching number in the buffer pool, evict it now, because we no longer evict the pages on DISCARD TABLESPACE. 
*/ buf_page_get_gen(get_space_id(), get_zip_size(), block->page.offset, @@ -3519,8 +3392,6 @@ fil_iterate( AbstractCallback& callback) { os_offset_t offset; - ulint page_no = 0; - ulint space_id = callback.get_space_id(); ulint n_bytes = iter.n_io_buffers * iter.page_size; ut_ad(!srv_read_only_mode); @@ -3531,6 +3402,9 @@ fil_iterate( const bool row_compressed = callback.get_zip_size() > 0; for (offset = iter.start; offset < iter.end; offset += n_bytes) { + if (callback.is_interrupted()) { + return DB_INTERRUPTED; + } byte* io_buffer = iter.io_buffer; @@ -3572,30 +3446,45 @@ fil_iterate( os_offset_t page_off = offset; ulint n_pages_read = (ulint) n_bytes / iter.page_size; bool decrypted = false; + const ulint size = iter.page_size; + block->page.offset = page_off / size; - for (ulint i = 0; i < n_pages_read; ++i) { - ulint size = iter.page_size; + for (ulint i = 0; i < n_pages_read; + ++i, page_off += size, block->frame += size, + block->page.offset++) { dberr_t err = DB_SUCCESS; byte* src = readptr + (i * size); byte* dst = io_buffer + (i * size); bool frame_changed = false; - ulint page_type = mach_read_from_2(src+FIL_PAGE_TYPE); - const bool page_compressed - = page_type == FIL_PAGE_PAGE_COMPRESSED_ENCRYPTED + = page_type + == FIL_PAGE_PAGE_COMPRESSED_ENCRYPTED || page_type == FIL_PAGE_PAGE_COMPRESSED; + const ulint page_no = page_get_page_no(src); + if (!page_no && page_off) { + const ulint* b = reinterpret_cast + (src); + const ulint* const e = b + size / sizeof *b; + do { + if (*b++) { + goto page_corrupted; + } + } while (b != e); + + /* Proceed to the next page, + because this one is all zero. */ + continue; + } + + if (page_no != page_off / size) { + goto page_corrupted; + } - /* If tablespace is encrypted, we need to decrypt - the page. Note that tablespaces are not in - fil_system during import. 
*/ if (encrypted) { decrypted = fil_space_decrypt( - iter.crypt_data, - dst, //dst - iter.page_size, - src, // src - &err); + iter.crypt_data, dst, + iter.page_size, src, &err); if (err != DB_SUCCESS) { return err; @@ -3619,10 +3508,20 @@ fil_iterate( fil_decompress_page(NULL, dst, ulong(size), NULL); updated = true; + } else if (buf_page_is_corrupted( + false, + encrypted && !frame_changed + ? dst : src, + callback.get_zip_size(), NULL)) { +page_corrupted: + ib_logf(IB_LOG_LEVEL_WARN, + "%s: Page %lu at offset " + UINT64PF " looks corrupted.", + callback.filename(), + ulong(offset / size), offset); + return DB_CORRUPTION; } - buf_block_set_file_page(block, space_id, page_no++); - if ((err = callback(page_off, block)) != DB_SUCCESS) { return err; } else if (!updated) { @@ -3714,9 +3613,6 @@ fil_iterate( updated = true; } - - page_off += iter.page_size; - block->frame += iter.page_size; } /* A page was updated in the set, write back to disk. */ @@ -3820,6 +3716,7 @@ fil_tablespace_iterate( memset(&block, 0, sizeof block); block.frame = page; + block.page.space = callback.get_space_id(); block.page.io_fix = BUF_IO_NONE; block.page.buf_fix_count = 1; block.page.state = BUF_BLOCK_FILE_PAGE; diff --git a/storage/xtradb/row/row0import.cc b/storage/xtradb/row/row0import.cc index b8e8e076b68..24dc3a66f1f 100644 --- a/storage/xtradb/row/row0import.cc +++ b/storage/xtradb/row/row0import.cc @@ -421,6 +421,8 @@ public: return(m_page_size); } + const char* filename() const { return m_filepath; } + /** Called for every page in the tablespace. If the page was not updated then its state must be set to BUF_PAGE_NOT_USED. For @@ -437,6 +439,8 @@ public: @return the space id of the tablespace */ virtual ulint get_space_id() const UNIV_NOTHROW = 0; + bool is_interrupted() const { return trx_is_interrupted(m_trx); } + protected: /** Get the data page depending on the table type, compressed or not. 
@@ -451,18 +455,6 @@ protected: return(buf_block_get_frame(block)); } - /** Check for session interrupt. If required we could - even flush to disk here every N pages. - @retval DB_SUCCESS or error code */ - dberr_t periodic_check() UNIV_NOTHROW - { - if (trx_is_interrupted(m_trx)) { - return(DB_INTERRUPTED); - } - - return(DB_SUCCESS); - } - /** Get the physical offset of the extent descriptor within the page. @param page_no - page number of the extent descriptor @@ -732,11 +724,7 @@ FetchIndexRootPages::operator() ( os_offset_t offset, buf_block_t* block) UNIV_NOTHROW { - dberr_t err; - - if ((err = periodic_check()) != DB_SUCCESS) { - return(err); - } + if (is_interrupted()) return DB_INTERRUPTED; const page_t* page = get_frame(block); @@ -749,9 +737,9 @@ FetchIndexRootPages::operator() ( block->page.offset, (ulint) (offset / m_page_size)); - err = DB_CORRUPTION; + return DB_CORRUPTION; } else if (page_type == FIL_PAGE_TYPE_XDES) { - err = set_current_xdes(block->page.offset, page); + return set_current_xdes(block->page.offset, page); } else if (page_type == FIL_PAGE_INDEX && !is_free(block->page.offset) && is_root_page(page)) { @@ -776,7 +764,7 @@ FetchIndexRootPages::operator() ( } } - return(err); + return DB_SUCCESS; } /** @@ -900,14 +888,6 @@ public: os_offset_t offset, buf_block_t* block) UNIV_NOTHROW; private: - - /** Status returned by PageConverter::validate() */ - enum import_page_status_t { - IMPORT_PAGE_STATUS_OK, /*!< Page is OK */ - IMPORT_PAGE_STATUS_ALL_ZERO, /*!< Page is all zeros */ - IMPORT_PAGE_STATUS_CORRUPTED /*!< Page is corrupted */ - }; - /** Update the page, set the space id, max trx id and index id. @param block - block read from file @@ -917,17 +897,6 @@ private: buf_block_t* block, ulint& page_type) UNIV_NOTHROW; -#if defined UNIV_DEBUG - /** - @return true error condition is enabled. 
*/ - bool trigger_corruption() UNIV_NOTHROW - { - return(false); - } - #else -#define trigger_corruption() (false) -#endif /* UNIV_DEBUG */ - /** Update the space, index id, trx id. @param block - block to convert @@ -940,15 +909,6 @@ private: @retval DB_SUCCESS or error code */ dberr_t update_records(buf_block_t* block) UNIV_NOTHROW; - /** - Validate the page, check for corruption. - @param offset - physical offset within file. - @param page - page read from file. - @return 0 on success, 1 if all zero, 2 if corrupted */ - import_page_status_t validate( - os_offset_t offset, - buf_block_t* page) UNIV_NOTHROW; - /** Validate the space flags and update tablespace header page. @param block - block read from file, not from the buffer pool. @@ -2075,80 +2035,15 @@ PageConverter::update_page( return(DB_CORRUPTION); } -/** -Validate the page -@param offset - physical offset within file. -@param page - page read from file. -@return status */ -PageConverter::import_page_status_t -PageConverter::validate( - os_offset_t offset, - buf_block_t* block) UNIV_NOTHROW -{ - buf_frame_t* page = get_frame(block); - - /* Check that the page number corresponds to the offset in - the file. Flag as corrupt if it doesn't. Disable the check - for LSN in buf_page_is_corrupted() */ - - if (buf_page_is_corrupted(false, page, get_zip_size(), NULL) - || (page_get_page_no(page) != offset / m_page_size - && page_get_page_no(page) != 0)) { - - return(IMPORT_PAGE_STATUS_CORRUPTED); - - } else if (offset > 0 && page_get_page_no(page) == 0) { - ulint checksum; - - checksum = mach_read_from_4(page + FIL_PAGE_SPACE_OR_CHKSUM); - if (checksum != 0) { - /* Checksum check passed in buf_page_is_corrupted(). 
*/ - ib_logf(IB_LOG_LEVEL_WARN, - "%s: Page %lu checksum " ULINTPF - " should be zero.", - m_filepath, (ulong) (offset / m_page_size), - checksum); - } - - const byte* b = page + FIL_PAGE_OFFSET; - const byte* e = page + m_page_size - - FIL_PAGE_END_LSN_OLD_CHKSUM; - - /* If the page number is zero and offset > 0 then - the entire page MUST consist of zeroes. If not then - we flag it as corrupt. */ - - while (b != e) { - - if (*b++ && !trigger_corruption()) { - return(IMPORT_PAGE_STATUS_CORRUPTED); - } - } - - /* The page is all zero: do nothing. */ - return(IMPORT_PAGE_STATUS_ALL_ZERO); - } - - return(IMPORT_PAGE_STATUS_OK); -} - /** Called for every page in the tablespace. If the page was not updated then its state must be set to BUF_PAGE_NOT_USED. -@param offset - physical offset within the file @param block - block read from file, note it is not from the buffer pool @retval DB_SUCCESS or error code. */ dberr_t -PageConverter::operator() ( - os_offset_t offset, - buf_block_t* block) UNIV_NOTHROW +PageConverter::operator() (os_offset_t, buf_block_t* block) UNIV_NOTHROW { ulint page_type; - dberr_t err = DB_SUCCESS; - - if ((err = periodic_check()) != DB_SUCCESS) { - return(err); - } if (is_compressed_table()) { m_page_zip_ptr = &block->page.zip; @@ -2156,16 +2051,8 @@ PageConverter::operator() ( ut_ad(m_page_zip_ptr == 0); } - switch(validate(offset, block)) { - case IMPORT_PAGE_STATUS_OK: - - /* We have to decompress the compressed pages before - we can work on them */ - - if ((err = update_page(block, page_type)) != DB_SUCCESS) { - break; - } - + dberr_t err = update_page(block, page_type); + if (err == DB_SUCCESS) { /* Note: For compressed pages this function will write to the zip descriptor and for uncompressed pages it will write to page (ie. the block->frame). 
Therefore the caller should write @@ -2187,23 +2074,9 @@ PageConverter::operator() ( get_frame(block), get_zip_size(), m_current_lsn); } - - break; - - case IMPORT_PAGE_STATUS_ALL_ZERO: - /* The page is all zero: leave it as is. */ - break; - - case IMPORT_PAGE_STATUS_CORRUPTED: - - ib_logf(IB_LOG_LEVEL_WARN, - "%s: Page %lu at offset " UINT64PF " looks corrupted.", - m_filepath, (ulong) (offset / m_page_size), offset); - - err = DB_CORRUPTION; } - /* If we already had and old page with matching number + /* If we already had an old page with matching number in the buffer pool, evict it now, because we no longer evict the pages on DISCARD TABLESPACE. */ buf_page_get_gen(get_space_id(), get_zip_size(), block->page.offset, @@ -3519,8 +3392,6 @@ fil_iterate( AbstractCallback& callback) { os_offset_t offset; - ulint page_no = 0; - ulint space_id = callback.get_space_id(); ulint n_bytes = iter.n_io_buffers * iter.page_size; ut_ad(!srv_read_only_mode); @@ -3531,6 +3402,9 @@ fil_iterate( const bool row_compressed = callback.get_zip_size() > 0; for (offset = iter.start; offset < iter.end; offset += n_bytes) { + if (callback.is_interrupted()) { + return DB_INTERRUPTED; + } byte* io_buffer = iter.io_buffer; @@ -3572,30 +3446,45 @@ fil_iterate( os_offset_t page_off = offset; ulint n_pages_read = (ulint) n_bytes / iter.page_size; bool decrypted = false; + const ulint size = iter.page_size; + block->page.offset = page_off / size; - for (ulint i = 0; i < n_pages_read; ++i) { - ulint size = iter.page_size; + for (ulint i = 0; i < n_pages_read; + ++i, page_off += size, block->frame += size, + block->page.offset++) { dberr_t err = DB_SUCCESS; byte* src = readptr + (i * size); byte* dst = io_buffer + (i * size); bool frame_changed = false; - ulint page_type = mach_read_from_2(src+FIL_PAGE_TYPE); - const bool page_compressed - = page_type == FIL_PAGE_PAGE_COMPRESSED_ENCRYPTED + = page_type + == FIL_PAGE_PAGE_COMPRESSED_ENCRYPTED || page_type == FIL_PAGE_PAGE_COMPRESSED; + const ulint 
page_no = page_get_page_no(src); + if (!page_no && page_off) { + const ulint* b = reinterpret_cast + (src); + const ulint* const e = b + size / sizeof *b; + do { + if (*b++) { + goto page_corrupted; + } + } while (b != e); + + /* Proceed to the next page, + because this one is all zero. */ + continue; + } + + if (page_no != page_off / size) { + goto page_corrupted; + } - /* If tablespace is encrypted, we need to decrypt - the page. Note that tablespaces are not in - fil_system during import. */ if (encrypted) { decrypted = fil_space_decrypt( - iter.crypt_data, - dst, //dst - iter.page_size, - src, // src - &err); + iter.crypt_data, dst, + iter.page_size, src, &err); if (err != DB_SUCCESS) { return err; @@ -3619,10 +3508,20 @@ fil_iterate( fil_decompress_page(NULL, dst, ulong(size), NULL); updated = true; + } else if (buf_page_is_corrupted( + false, + encrypted && !frame_changed + ? dst : src, + callback.get_zip_size(), NULL)) { +page_corrupted: + ib_logf(IB_LOG_LEVEL_WARN, + "%s: Page %lu at offset " + UINT64PF " looks corrupted.", + callback.filename(), + ulong(offset / size), offset); + return DB_CORRUPTION; } - buf_block_set_file_page(block, space_id, page_no++); - if ((err = callback(page_off, block)) != DB_SUCCESS) { return err; } else if (!updated) { @@ -3714,9 +3613,6 @@ fil_iterate( updated = true; } - - page_off += iter.page_size; - block->frame += iter.page_size; } /* A page was updated in the set, write back to disk. */ @@ -3820,6 +3716,7 @@ fil_tablespace_iterate( memset(&block, 0, sizeof block); block.frame = page; + block.page.space = callback.get_space_id(); block.page.io_fix = BUF_IO_NONE; block.page.buf_fix_count = 1; block.page.state = BUF_BLOCK_FILE_PAGE; From a80af35a85d329f3ac1456b5931ffc541c8e54c2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Tue, 20 Mar 2018 13:03:01 +0200 Subject: [PATCH 091/139] MDEV-12396 IMPORT cleanup: ROW_FORMAT=COMPRESSED Initialize block.page.zip only once. 
PageConverter::update(): Initialize m_page_zip_ptr as late as possible. (We should really remove it at some point.) PageConverter::operator(): Refer to block->page.zip instead of m_page_zip_ptr. AbstractCallback::get_frame(): Define static. Refer to block->page.zip.data directly. fil_iterate(): Refer to block->page.zip.data directly. fil_tablespace_iterate(): Initialize block.page.zip.data as soon as possible. --- storage/innobase/row/row0import.cc | 120 ++++++++++++++--------------- storage/xtradb/row/row0import.cc | 120 ++++++++++++++--------------- 2 files changed, 116 insertions(+), 124 deletions(-) diff --git a/storage/innobase/row/row0import.cc b/storage/innobase/row/row0import.cc index 24dc3a66f1f..5977bd1a77b 100644 --- a/storage/innobase/row/row0import.cc +++ b/storage/innobase/row/row0import.cc @@ -441,20 +441,17 @@ public: bool is_interrupted() const { return trx_is_interrupted(m_trx); } -protected: /** Get the data page depending on the table type, compressed or not. @param block - block read from disk @retval the buffer frame */ - buf_frame_t* get_frame(buf_block_t* block) const UNIV_NOTHROW + static byte* get_frame(const buf_block_t* block) { - if (is_compressed_table()) { - return(block->page.zip.data); - } - - return(buf_block_get_frame(block)); + return block->page.zip.data + ? block->page.zip.data : block->frame; } +protected: /** Get the physical offset of the extent descriptor within the page. @param page_no - page number of the extent descriptor @@ -1981,6 +1978,14 @@ PageConverter::update_page( { dberr_t err = DB_SUCCESS; + ut_ad(!block->page.zip.data == !is_compressed_table()); + + if (block->page.zip.data) { + m_page_zip_ptr = &block->page.zip; + } else { + ut_ad(!m_page_zip_ptr); + } + switch (page_type = fil_page_get_type(get_frame(block))) { case FIL_PAGE_TYPE_FSP_HDR: /* Work directly on the uncompressed page headers. */ @@ -2043,46 +2048,38 @@ updated then its state must be set to BUF_PAGE_NOT_USED. 
dberr_t PageConverter::operator() (os_offset_t, buf_block_t* block) UNIV_NOTHROW { - ulint page_type; - - if (is_compressed_table()) { - m_page_zip_ptr = &block->page.zip; - } else { - ut_ad(m_page_zip_ptr == 0); - } - - dberr_t err = update_page(block, page_type); - if (err == DB_SUCCESS) { - /* Note: For compressed pages this function will write to the - zip descriptor and for uncompressed pages it will write to - page (ie. the block->frame). Therefore the caller should write - out the descriptor contents and not block->frame for compressed - pages. */ - - if (!is_compressed_table() || page_type == FIL_PAGE_INDEX) { - - buf_flush_init_for_writing( - !is_compressed_table() - ? block->frame : block->page.zip.data, - !is_compressed_table() ? 0 : m_page_zip_ptr, - m_current_lsn); - } else { - /* Calculate and update the checksum of non-btree - pages for compressed tables explicitly here. */ - - buf_flush_update_zip_checksum( - get_frame(block), get_zip_size(), - m_current_lsn); - } - } - /* If we already had an old page with matching number in the buffer pool, evict it now, because we no longer evict the pages on DISCARD TABLESPACE. */ buf_page_get_gen(get_space_id(), get_zip_size(), block->page.offset, RW_NO_LATCH, NULL, BUF_EVICT_IF_IN_POOL, __FILE__, __LINE__, NULL); - return(err); + + ulint page_type; + + dberr_t err = update_page(block, page_type); + if (err != DB_SUCCESS) return err; + + /* Note: For compressed pages this function will write to the + zip descriptor and for uncompressed pages it will write to + page (ie. the block->frame). Therefore the caller should write + out the descriptor contents and not block->frame for compressed + pages. */ + + if (!is_compressed_table() || page_type == FIL_PAGE_INDEX) { + buf_flush_init_for_writing( + get_frame(block), + block->page.zip.data ? &block->page.zip : NULL, + m_current_lsn); + } else { + /* Calculate and update the checksum of non-btree + pages for compressed tables explicitly here. 
*/ + buf_flush_update_zip_checksum( + get_frame(block), get_zip_size(), + m_current_lsn); + } + + return DB_SUCCESS; } /*****************************************************************//** @@ -3396,10 +3393,9 @@ fil_iterate( ut_ad(!srv_read_only_mode); - /* TODO: For compressed tables we do a lot of useless + /* TODO: For ROW_FORMAT=COMPRESSED tables we do a lot of useless copying for non-index pages. Unfortunately, it is required by buf_zip_decompress() */ - const bool row_compressed = callback.get_zip_size() > 0; for (offset = iter.start; offset < iter.end; offset += n_bytes) { if (callback.is_interrupted()) { @@ -3407,18 +3403,12 @@ fil_iterate( } byte* io_buffer = iter.io_buffer; - block->frame = io_buffer; - if (row_compressed) { - page_zip_des_init(&block->page.zip); - page_zip_set_size(&block->page.zip, iter.page_size); - block->page.zip.data = block->frame + UNIV_PAGE_SIZE; - ut_d(block->page.zip.m_external = true); - ut_ad(iter.page_size == callback.get_zip_size()); - + if (block->page.zip.data) { /* Zip IO is done in the compressed page buffer. */ io_buffer = block->page.zip.data; + ut_ad(PAGE_ZIP_MATCH(block->frame, &block->page.zip)); } /* We have to read the exact number of bytes. Otherwise the @@ -3493,7 +3483,8 @@ fil_iterate( if (decrypted) { updated = true; } else { - if (!page_compressed && !row_compressed) { + if (!page_compressed + && !block->page.zip.data) { block->frame = src; frame_changed = true; } else { @@ -3559,8 +3550,7 @@ page_corrupted: if (encrypted) { memcpy(writeptr + (i * size), - row_compressed ? block->page.zip.data : - block->frame, size); + callback.get_frame(block), size); } if (frame_changed) { @@ -3728,6 +3718,13 @@ fil_tablespace_iterate( err = DB_IO_ERROR; } else if ((err = callback.init(file_size, &block)) == DB_SUCCESS) { + if (const ulint zip_size = callback.get_zip_size()) { + page_zip_set_size(&block.page.zip, zip_size); + /* ROW_FORMAT=COMPRESSED is not optimised for block IO + for now. 
We do the IMPORT page by page. */ + n_io_buffers = 1; + } + fil_iterator_t iter; iter.file = file; @@ -3748,14 +3745,6 @@ fil_tablespace_iterate( iter.crypt_data = fil_space_read_crypt_data( 0, page, crypt_data_offset); - /* Compressed pages can't be optimised for block IO for now. - We do the IMPORT page by page. */ - - if (callback.get_zip_size() > 0) { - iter.n_io_buffers = 1; - ut_a(iter.page_size == callback.get_zip_size()); - } - /** If tablespace is encrypted, it needs extra buffers */ if (iter.crypt_data != NULL) { /* decrease io buffers so that memory @@ -3780,6 +3769,13 @@ fil_tablespace_iterate( ut_align(crypt_io_buffer, UNIV_PAGE_SIZE)); } + if (block.page.zip.ssize) { + ut_ad(iter.n_io_buffers == 1); + block.frame = iter.io_buffer; + block.page.zip.data = block.frame + UNIV_PAGE_SIZE; + ut_d(block.page.zip.m_external = true); + } + err = fil_iterate(iter, &block, callback); mem_free(io_buffer); diff --git a/storage/xtradb/row/row0import.cc b/storage/xtradb/row/row0import.cc index 24dc3a66f1f..5977bd1a77b 100644 --- a/storage/xtradb/row/row0import.cc +++ b/storage/xtradb/row/row0import.cc @@ -441,20 +441,17 @@ public: bool is_interrupted() const { return trx_is_interrupted(m_trx); } -protected: /** Get the data page depending on the table type, compressed or not. @param block - block read from disk @retval the buffer frame */ - buf_frame_t* get_frame(buf_block_t* block) const UNIV_NOTHROW + static byte* get_frame(const buf_block_t* block) { - if (is_compressed_table()) { - return(block->page.zip.data); - } - - return(buf_block_get_frame(block)); + return block->page.zip.data + ? block->page.zip.data : block->frame; } +protected: /** Get the physical offset of the extent descriptor within the page. 
@param page_no - page number of the extent descriptor @@ -1981,6 +1978,14 @@ PageConverter::update_page( { dberr_t err = DB_SUCCESS; + ut_ad(!block->page.zip.data == !is_compressed_table()); + + if (block->page.zip.data) { + m_page_zip_ptr = &block->page.zip; + } else { + ut_ad(!m_page_zip_ptr); + } + switch (page_type = fil_page_get_type(get_frame(block))) { case FIL_PAGE_TYPE_FSP_HDR: /* Work directly on the uncompressed page headers. */ @@ -2043,46 +2048,38 @@ updated then its state must be set to BUF_PAGE_NOT_USED. dberr_t PageConverter::operator() (os_offset_t, buf_block_t* block) UNIV_NOTHROW { - ulint page_type; - - if (is_compressed_table()) { - m_page_zip_ptr = &block->page.zip; - } else { - ut_ad(m_page_zip_ptr == 0); - } - - dberr_t err = update_page(block, page_type); - if (err == DB_SUCCESS) { - /* Note: For compressed pages this function will write to the - zip descriptor and for uncompressed pages it will write to - page (ie. the block->frame). Therefore the caller should write - out the descriptor contents and not block->frame for compressed - pages. */ - - if (!is_compressed_table() || page_type == FIL_PAGE_INDEX) { - - buf_flush_init_for_writing( - !is_compressed_table() - ? block->frame : block->page.zip.data, - !is_compressed_table() ? 0 : m_page_zip_ptr, - m_current_lsn); - } else { - /* Calculate and update the checksum of non-btree - pages for compressed tables explicitly here. */ - - buf_flush_update_zip_checksum( - get_frame(block), get_zip_size(), - m_current_lsn); - } - } - /* If we already had an old page with matching number in the buffer pool, evict it now, because we no longer evict the pages on DISCARD TABLESPACE. 
*/ buf_page_get_gen(get_space_id(), get_zip_size(), block->page.offset, RW_NO_LATCH, NULL, BUF_EVICT_IF_IN_POOL, __FILE__, __LINE__, NULL); - return(err); + + ulint page_type; + + dberr_t err = update_page(block, page_type); + if (err != DB_SUCCESS) return err; + + /* Note: For compressed pages this function will write to the + zip descriptor and for uncompressed pages it will write to + page (ie. the block->frame). Therefore the caller should write + out the descriptor contents and not block->frame for compressed + pages. */ + + if (!is_compressed_table() || page_type == FIL_PAGE_INDEX) { + buf_flush_init_for_writing( + get_frame(block), + block->page.zip.data ? &block->page.zip : NULL, + m_current_lsn); + } else { + /* Calculate and update the checksum of non-btree + pages for compressed tables explicitly here. */ + buf_flush_update_zip_checksum( + get_frame(block), get_zip_size(), + m_current_lsn); + } + + return DB_SUCCESS; } /*****************************************************************//** @@ -3396,10 +3393,9 @@ fil_iterate( ut_ad(!srv_read_only_mode); - /* TODO: For compressed tables we do a lot of useless + /* TODO: For ROW_FORMAT=COMPRESSED tables we do a lot of useless copying for non-index pages. Unfortunately, it is required by buf_zip_decompress() */ - const bool row_compressed = callback.get_zip_size() > 0; for (offset = iter.start; offset < iter.end; offset += n_bytes) { if (callback.is_interrupted()) { @@ -3407,18 +3403,12 @@ fil_iterate( } byte* io_buffer = iter.io_buffer; - block->frame = io_buffer; - if (row_compressed) { - page_zip_des_init(&block->page.zip); - page_zip_set_size(&block->page.zip, iter.page_size); - block->page.zip.data = block->frame + UNIV_PAGE_SIZE; - ut_d(block->page.zip.m_external = true); - ut_ad(iter.page_size == callback.get_zip_size()); - + if (block->page.zip.data) { /* Zip IO is done in the compressed page buffer. 
*/ io_buffer = block->page.zip.data; + ut_ad(PAGE_ZIP_MATCH(block->frame, &block->page.zip)); } /* We have to read the exact number of bytes. Otherwise the @@ -3493,7 +3483,8 @@ fil_iterate( if (decrypted) { updated = true; } else { - if (!page_compressed && !row_compressed) { + if (!page_compressed + && !block->page.zip.data) { block->frame = src; frame_changed = true; } else { @@ -3559,8 +3550,7 @@ page_corrupted: if (encrypted) { memcpy(writeptr + (i * size), - row_compressed ? block->page.zip.data : - block->frame, size); + callback.get_frame(block), size); } if (frame_changed) { @@ -3728,6 +3718,13 @@ fil_tablespace_iterate( err = DB_IO_ERROR; } else if ((err = callback.init(file_size, &block)) == DB_SUCCESS) { + if (const ulint zip_size = callback.get_zip_size()) { + page_zip_set_size(&block.page.zip, zip_size); + /* ROW_FORMAT=COMPRESSED is not optimised for block IO + for now. We do the IMPORT page by page. */ + n_io_buffers = 1; + } + fil_iterator_t iter; iter.file = file; @@ -3748,14 +3745,6 @@ fil_tablespace_iterate( iter.crypt_data = fil_space_read_crypt_data( 0, page, crypt_data_offset); - /* Compressed pages can't be optimised for block IO for now. - We do the IMPORT page by page. 
*/ - - if (callback.get_zip_size() > 0) { - iter.n_io_buffers = 1; - ut_a(iter.page_size == callback.get_zip_size()); - } - /** If tablespace is encrypted, it needs extra buffers */ if (iter.crypt_data != NULL) { /* decrease io buffers so that memory @@ -3780,6 +3769,13 @@ fil_tablespace_iterate( ut_align(crypt_io_buffer, UNIV_PAGE_SIZE)); } + if (block.page.zip.ssize) { + ut_ad(iter.n_io_buffers == 1); + block.frame = iter.io_buffer; + block.page.zip.data = block.frame + UNIV_PAGE_SIZE; + ut_d(block.page.zip.m_external = true); + } + err = fil_iterate(iter, &block, callback); mem_free(io_buffer); From e0a0fe7d8124b9f395a6c97f538693e729a0b043 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Mon, 19 Mar 2018 18:12:37 +0200 Subject: [PATCH 092/139] MDEV-12396 IMPORT TABLESPACE: Do not retry partial reads fil_iterate(), fil_tablespace_iterate(): Replace os_file_read() with os_file_read_no_error_handling(). os_file_read_func(), os_file_read_no_error_handling_func(): Do not retry partial reads. There used to be an infinite amount of retries. Because InnoDB extends both data and log files upfront, partial reads should be impossible during normal operation. 
--- .../r/default_row_format_compatibility.result | 3 ++ .../t/default_row_format_compatibility.test | 7 +++ storage/innobase/os/os0file.cc | 22 +++++++-- storage/innobase/row/row0import.cc | 9 ++-- storage/xtradb/os/os0file.cc | 46 ++++++++++++------- storage/xtradb/row/row0import.cc | 9 ++-- 6 files changed, 66 insertions(+), 30 deletions(-) diff --git a/mysql-test/suite/innodb/r/default_row_format_compatibility.result b/mysql-test/suite/innodb/r/default_row_format_compatibility.result index 741241ddba0..6ba83f04136 100644 --- a/mysql-test/suite/innodb/r/default_row_format_compatibility.result +++ b/mysql-test/suite/innodb/r/default_row_format_compatibility.result @@ -39,6 +39,9 @@ SHOW TABLE STATUS LIKE 'tab'; Name Engine Version Row_format Rows Avg_row_length Data_length Max_data_length Index_length Data_free Auto_increment Create_time Update_time Check_time Collation Checksum Create_options Comment tab InnoDB # Compact # # # # # # NULL # NULL NULL latin1_swedish_ci NULL ALTER TABLE tab DISCARD TABLESPACE; +call mtr.add_suppression("InnoDB: Tried to read .* bytes at offset 0"); +ALTER TABLE tab IMPORT TABLESPACE; +ERROR HY000: Internal error: Cannot reset LSNs in table '"test"."tab"' : I/O error ALTER TABLE tab IMPORT TABLESPACE; SELECT * FROM tab; a diff --git a/mysql-test/suite/innodb/t/default_row_format_compatibility.test b/mysql-test/suite/innodb/t/default_row_format_compatibility.test index 17ab21ca06b..0f433b1fcfe 100644 --- a/mysql-test/suite/innodb/t/default_row_format_compatibility.test +++ b/mysql-test/suite/innodb/t/default_row_format_compatibility.test @@ -81,7 +81,14 @@ SHOW TABLE STATUS LIKE 'tab'; ALTER TABLE tab DISCARD TABLESPACE; # Move the *ibd,*.cfg file into orginal location +--copy_file $MYSQLD_DATADIR/tab.cfg $MYSQLD_DATADIR/test/tab.ibd --move_file $MYSQLD_DATADIR/tab.cfg $MYSQLD_DATADIR/test/tab.cfg + +call mtr.add_suppression("InnoDB: Tried to read .* bytes at offset 0"); + +--error ER_INTERNAL_ERROR +ALTER TABLE tab IMPORT 
TABLESPACE; +--remove_file $MYSQLD_DATADIR/test/tab.ibd --move_file $MYSQLD_DATADIR/tab.ibd $MYSQLD_DATADIR/test/tab.ibd # Check import is successful (because same row_format) diff --git a/storage/innobase/os/os0file.cc b/storage/innobase/os/os0file.cc index fb6ef815476..81614549702 100644 --- a/storage/innobase/os/os0file.cc +++ b/storage/innobase/os/os0file.cc @@ -2,7 +2,7 @@ Copyright (c) 1995, 2017, Oracle and/or its affiliates. All Rights Reserved. Copyright (c) 2009, Percona Inc. -Copyright (c) 2013, 2017, MariaDB Corporation. +Copyright (c) 2013, 2018, MariaDB Corporation. Portions of this file contain modifications contributed and copyrighted by Percona Inc.. Those modifications are @@ -2842,8 +2842,15 @@ try_again: MONITOR_ATOMIC_DEC_LOW(MONITOR_OS_PENDING_READS, monitor); - if (ret && len == n) { + if (!ret) { + } else if (len == n) { return(TRUE); + } else { + ib_logf(IB_LOG_LEVEL_ERROR, + "Tried to read " ULINTPF " bytes at offset " + UINT64PF ". Was only able to read %lu.", + n, offset, ret); + return FALSE; } #else /* __WIN__ */ ibool retry; @@ -2866,6 +2873,7 @@ try_again: "Tried to read " ULINTPF " bytes at offset " UINT64PF ". Was only able to read %ld.", n, offset, (lint) ret); + return FALSE; } #endif /* __WIN__ */ #ifdef __WIN__ @@ -2964,8 +2972,15 @@ try_again: MONITOR_ATOMIC_DEC_LOW(MONITOR_OS_PENDING_READS, monitor); - if (ret && len == n) { + if (!ret) { + } else if (len == n) { return(TRUE); + } else { + ib_logf(IB_LOG_LEVEL_ERROR, + "Tried to read " ULINTPF " bytes at offset " + UINT64PF ". Was only able to read %lu.", + n, offset, len); + return FALSE; } #else /* __WIN__ */ ibool retry; @@ -2988,6 +3003,7 @@ try_again: "Tried to read " ULINTPF " bytes at offset " UINT64PF ". 
Was only able to read %ld.", n, offset, (lint) ret); + return FALSE; } #endif /* __WIN__ */ #ifdef __WIN__ diff --git a/storage/innobase/row/row0import.cc b/storage/innobase/row/row0import.cc index 5977bd1a77b..c00eb57f91d 100644 --- a/storage/innobase/row/row0import.cc +++ b/storage/innobase/row/row0import.cc @@ -44,9 +44,7 @@ Created 2012-02-08 by Sunny Bains. #include -/** The size of the buffer to use for IO. Note: os_file_read() doesn't expect -reads to fail. If you set the buffer size to be greater than a multiple of the -file size then it will assert. TODO: Fix this limitation of the IO functions. +/** The size of the buffer to use for IO. @param n - page size of the tablespace. @retval number of pages */ #define IO_BUFFER_SIZE(n) ((1024 * 1024) / n) @@ -3427,7 +3425,8 @@ fil_iterate( ? iter.crypt_io_buffer : io_buffer; byte* const writeptr = readptr; - if (!os_file_read(iter.file, readptr, offset, n_bytes)) { + if (!os_file_read_no_error_handling(iter.file, readptr, + offset, n_bytes)) { ib_logf(IB_LOG_LEVEL_ERROR, "os_file_read() failed"); return DB_IO_ERROR; } @@ -3713,7 +3712,7 @@ fil_tablespace_iterate( /* Read the first page and determine the page and zip size. */ - if (!os_file_read(file, page, 0, UNIV_PAGE_SIZE)) { + if (!os_file_read_no_error_handling(file, page, 0, UNIV_PAGE_SIZE)) { err = DB_IO_ERROR; diff --git a/storage/xtradb/os/os0file.cc b/storage/xtradb/os/os0file.cc index 634ebb2af49..6a63f31b37a 100644 --- a/storage/xtradb/os/os0file.cc +++ b/storage/xtradb/os/os0file.cc @@ -2,7 +2,7 @@ Copyright (c) 1995, 2017, Oracle and/or its affiliates. All Rights Reserved. Copyright (c) 2009, Percona Inc. -Copyright (c) 2013, 2017, MariaDB Corporation. +Copyright (c) 2013, 2018, MariaDB Corporation. Portions of this file contain modifications contributed and copyrighted by Percona Inc.. 
Those modifications are @@ -3169,15 +3169,21 @@ try_again: overlapped.hEvent = win_get_syncio_event(); ret = ReadFile(file, buf, n, NULL, &overlapped); if (ret) { - ret = GetOverlappedResult(file, &overlapped, (DWORD *)&len, FALSE); - } - else if(GetLastError() == ERROR_IO_PENDING) { - ret = GetOverlappedResult(file, &overlapped, (DWORD *)&len, TRUE); + ret = GetOverlappedResult(file, &overlapped, &len, FALSE); + } else if (GetLastError() == ERROR_IO_PENDING) { + ret = GetOverlappedResult(file, &overlapped, &len, TRUE); } MONITOR_ATOMIC_DEC_LOW(MONITOR_OS_PENDING_READS, monitor); - if (ret && len == n) { + if (!ret) { + } else if (len == n) { return(TRUE); + } else { + ib_logf(IB_LOG_LEVEL_ERROR, + "Tried to read " ULINTPF " bytes at offset " + UINT64PF ". Was only able to read %lu.", + n, offset, ret); + return FALSE; } #else /* __WIN__ */ ibool retry; @@ -3204,6 +3210,7 @@ try_again: "Tried to read " ULINTPF " bytes at offset " UINT64PF ". Was only able to read %ld.", n, offset, (lint) ret); + return FALSE; } #endif /* __WIN__ */ retry = os_file_handle_error(NULL, "read", __FILE__, __LINE__); @@ -3272,15 +3279,21 @@ try_again: overlapped.hEvent = win_get_syncio_event(); ret = ReadFile(file, buf, n, NULL, &overlapped); if (ret) { - ret = GetOverlappedResult(file, &overlapped, (DWORD *)&len, FALSE); - } - else if(GetLastError() == ERROR_IO_PENDING) { - ret = GetOverlappedResult(file, &overlapped, (DWORD *)&len, TRUE); + ret = GetOverlappedResult(file, &overlapped, &len, FALSE); + } else if (GetLastError() == ERROR_IO_PENDING) { + ret = GetOverlappedResult(file, &overlapped, &len, TRUE); } MONITOR_ATOMIC_DEC_LOW(MONITOR_OS_PENDING_READS, monitor); - if (ret && len == n) { + if (!ret) { + } else if (len == n) { return(TRUE); + } else { + ib_logf(IB_LOG_LEVEL_ERROR, + "Tried to read " ULINTPF " bytes at offset " + UINT64PF ". 
Was only able to read %lu.", + n, offset, len); + return FALSE; } #else /* __WIN__ */ ibool retry; @@ -3303,6 +3316,7 @@ try_again: "Tried to read " ULINTPF " bytes at offset " UINT64PF ". Was only able to read %ld.", n, offset, (lint) ret); + return FALSE; } #endif /* __WIN__ */ retry = os_file_handle_error_no_exit(NULL, "read", FALSE, __FILE__, __LINE__); @@ -3383,10 +3397,9 @@ retry: overlapped.hEvent = win_get_syncio_event(); ret = WriteFile(file, buf, n, NULL, &overlapped); if (ret) { - ret = GetOverlappedResult(file, &overlapped, (DWORD *)&len, FALSE); - } - else if ( GetLastError() == ERROR_IO_PENDING) { - ret = GetOverlappedResult(file, &overlapped, (DWORD *)&len, TRUE); + ret = GetOverlappedResult(file, &overlapped, &len, FALSE); + } else if (GetLastError() == ERROR_IO_PENDING) { + ret = GetOverlappedResult(file, &overlapped, &len, TRUE); } MONITOR_ATOMIC_DEC_LOW(MONITOR_OS_PENDING_WRITES, monitor); @@ -6588,8 +6601,7 @@ os_file_trim( DWORD tmp; if (ret) { ret = GetOverlappedResult(slot->file, &overlapped, &tmp, FALSE); - } - else if (GetLastError() == ERROR_IO_PENDING) { + } else if (GetLastError() == ERROR_IO_PENDING) { ret = GetOverlappedResult(slot->file, &overlapped, &tmp, TRUE); } if (!ret) { diff --git a/storage/xtradb/row/row0import.cc b/storage/xtradb/row/row0import.cc index 5977bd1a77b..c00eb57f91d 100644 --- a/storage/xtradb/row/row0import.cc +++ b/storage/xtradb/row/row0import.cc @@ -44,9 +44,7 @@ Created 2012-02-08 by Sunny Bains. #include -/** The size of the buffer to use for IO. Note: os_file_read() doesn't expect -reads to fail. If you set the buffer size to be greater than a multiple of the -file size then it will assert. TODO: Fix this limitation of the IO functions. +/** The size of the buffer to use for IO. @param n - page size of the tablespace. @retval number of pages */ #define IO_BUFFER_SIZE(n) ((1024 * 1024) / n) @@ -3427,7 +3425,8 @@ fil_iterate( ? 
iter.crypt_io_buffer : io_buffer; byte* const writeptr = readptr; - if (!os_file_read(iter.file, readptr, offset, n_bytes)) { + if (!os_file_read_no_error_handling(iter.file, readptr, + offset, n_bytes)) { ib_logf(IB_LOG_LEVEL_ERROR, "os_file_read() failed"); return DB_IO_ERROR; } @@ -3713,7 +3712,7 @@ fil_tablespace_iterate( /* Read the first page and determine the page and zip size. */ - if (!os_file_read(file, page, 0, UNIV_PAGE_SIZE)) { + if (!os_file_read_no_error_handling(file, page, 0, UNIV_PAGE_SIZE)) { err = DB_IO_ERROR; From 15051ab14a7fa2d392e8d8068801010d10e36823 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Wed, 21 Mar 2018 08:13:43 +0200 Subject: [PATCH 093/139] Disable a failing test --- mysql-test/suite/galera/disabled.def | 1 + 1 file changed, 1 insertion(+) diff --git a/mysql-test/suite/galera/disabled.def b/mysql-test/suite/galera/disabled.def index da268abe0d2..1551fae072b 100644 --- a/mysql-test/suite/galera/disabled.def +++ b/mysql-test/suite/galera/disabled.def @@ -49,3 +49,4 @@ galera_suspend_slave : MDEV-13549 Galera test failures 10.1 galera_gtid : MDEV-13549 Galera test failures 10.1 galera_gtid_slave : MDEV-13549 Galera test failures 10.1 galera_unicode_identifiers : MDEV-13549 Galera test failures 10.1 +galera_var_reject_queries : MW-245 FIXME: fails on buildbot From 2dd4e50d5f74451a5f6bf56d1a36bafffcca878c Mon Sep 17 00:00:00 2001 From: Varun Gupta Date: Wed, 21 Mar 2018 01:34:45 +0530 Subject: [PATCH 094/139] MDEV-15555: select from DUAL where false yielding wrong result when in a IN For the query having an IN subquery with no tables, we were converting the subquery with an expression between the left part and the select list of the subquery . This can give incorrect results when we have a condition in the subquery with a dual table (as this is treated as a no table). The fix is that we don't do this conversion when we have conds in the subquery with a dual table. 
--- mysql-test/r/subselect4.result | 11 +++++++++++ mysql-test/t/subselect4.test | 8 ++++++++ sql/item_subselect.cc | 2 +- 3 files changed, 20 insertions(+), 1 deletion(-) diff --git a/mysql-test/r/subselect4.result b/mysql-test/r/subselect4.result index d3c63ff9a2f..c20c048b919 100644 --- a/mysql-test/r/subselect4.result +++ b/mysql-test/r/subselect4.result @@ -2498,5 +2498,16 @@ FROM t2 WHERE b <= 'quux' GROUP BY field; field COUNT(DISTINCT c) 0 1 drop table t1,t2; +# +# MDEV-15555: select from DUAL where false yielding wrong result when in a IN +# +explain +SELECT 2 IN (SELECT 2 from DUAL WHERE 1 != 1); +id select_type table type possible_keys key key_len ref rows Extra +1 PRIMARY NULL NULL NULL NULL NULL NULL NULL No tables used +2 SUBQUERY NULL NULL NULL NULL NULL NULL NULL Impossible WHERE +SELECT 2 IN (SELECT 2 from DUAL WHERE 1 != 1); +2 IN (SELECT 2 from DUAL WHERE 1 != 1) +0 SET optimizer_switch= @@global.optimizer_switch; set @@tmp_table_size= @@global.tmp_table_size; diff --git a/mysql-test/t/subselect4.test b/mysql-test/t/subselect4.test index f051c8eaaf2..673dc9be0b4 100644 --- a/mysql-test/t/subselect4.test +++ b/mysql-test/t/subselect4.test @@ -2035,5 +2035,13 @@ SELECT ( SELECT COUNT(*) FROM t1 WHERE a = c ) AS field, COUNT(DISTINCT c) FROM t2 WHERE b <= 'quux' GROUP BY field; drop table t1,t2; +--echo # +--echo # MDEV-15555: select from DUAL where false yielding wrong result when in a IN +--echo # + +explain +SELECT 2 IN (SELECT 2 from DUAL WHERE 1 != 1); +SELECT 2 IN (SELECT 2 from DUAL WHERE 1 != 1); + SET optimizer_switch= @@global.optimizer_switch; set @@tmp_table_size= @@global.tmp_table_size; diff --git a/sql/item_subselect.cc b/sql/item_subselect.cc index 46d41fd61e3..57dcbd4f540 100644 --- a/sql/item_subselect.cc +++ b/sql/item_subselect.cc @@ -1746,7 +1746,7 @@ Item_in_subselect::single_value_transformer(JOIN *join) Item* join_having= join->having ? 
join->having : join->tmp_having; if (!(join_having || select_lex->with_sum_func || select_lex->group_list.elements) && - select_lex->table_list.elements == 0 && + select_lex->table_list.elements == 0 && !join->conds && !select_lex->master_unit()->is_union()) { Item *where_item= (Item*) select_lex->item_list.head(); From 96520384536fdf62aa0b0f444dd1be2d36c3a39b Mon Sep 17 00:00:00 2001 From: Alexey Botchkov Date: Wed, 21 Mar 2018 12:33:38 +0400 Subject: [PATCH 095/139] MDEV-14533 Provide information_schema tables using which hardware information can be obtained. DISKS plugin implementation added to the tree. --- mysql-test/suite/plugins/r/disks.result | 14 ++ mysql-test/suite/plugins/t/disks.test | 11 ++ .../information_schema_disks/CMakeLists.txt | 5 + plugin/information_schema_disks/README.txt | 101 ++++++++++++ .../information_schema_disks.cc | 154 ++++++++++++++++++ 5 files changed, 285 insertions(+) create mode 100644 mysql-test/suite/plugins/r/disks.result create mode 100644 mysql-test/suite/plugins/t/disks.test create mode 100644 plugin/information_schema_disks/CMakeLists.txt create mode 100644 plugin/information_schema_disks/README.txt create mode 100644 plugin/information_schema_disks/information_schema_disks.cc diff --git a/mysql-test/suite/plugins/r/disks.result b/mysql-test/suite/plugins/r/disks.result new file mode 100644 index 00000000000..53e73ec6f66 --- /dev/null +++ b/mysql-test/suite/plugins/r/disks.result @@ -0,0 +1,14 @@ +install plugin DISKS soname 'disks'; +show create table information_schema.disks; +Table Create Table +DISKS CREATE TEMPORARY TABLE `DISKS` ( + `Disk` varchar(4096) NOT NULL DEFAULT '', + `Path` varchar(4096) NOT NULL DEFAULT '', + `Total` int(32) NOT NULL DEFAULT '0', + `Used` int(32) NOT NULL DEFAULT '0', + `Available` int(32) NOT NULL DEFAULT '0' +) ENGINE=MEMORY DEFAULT CHARSET=utf8 +select sum(Total) > sum(Available), sum(Total)>sum(Used) from information_schema.disks; +sum(Total) > sum(Available) sum(Total)>sum(Used) +1 1 
+uninstall plugin DISKS; diff --git a/mysql-test/suite/plugins/t/disks.test b/mysql-test/suite/plugins/t/disks.test new file mode 100644 index 00000000000..a2371b97584 --- /dev/null +++ b/mysql-test/suite/plugins/t/disks.test @@ -0,0 +1,11 @@ +--source include/not_windows.inc + +if (!$DISKS_SO) { + skip No DISKS plugin; +} + +install plugin DISKS soname 'disks'; +show create table information_schema.disks; +select sum(Total) > sum(Available), sum(Total)>sum(Used) from information_schema.disks; + +uninstall plugin DISKS; diff --git a/plugin/information_schema_disks/CMakeLists.txt b/plugin/information_schema_disks/CMakeLists.txt new file mode 100644 index 00000000000..a0ed929c62c --- /dev/null +++ b/plugin/information_schema_disks/CMakeLists.txt @@ -0,0 +1,5 @@ +IF(NOT WIN32) + INCLUDE_DIRECTORIES(${CMAKE_SOURCE_DIR}/sql) + MYSQL_ADD_PLUGIN(DISKS information_schema_disks.cc MODULE_ONLY RECOMPILE_FOR_EMBEDDED) +ENDIF() + diff --git a/plugin/information_schema_disks/README.txt b/plugin/information_schema_disks/README.txt new file mode 100644 index 00000000000..1c9b8fb6283 --- /dev/null +++ b/plugin/information_schema_disks/README.txt @@ -0,0 +1,101 @@ +Information Schema Disks +------------------------ +This is a proof-of-concept information schema plugin that allows the +disk space situation to be monitored. 
When installed, it can be used +as follows: + + > select * from information_schema.disks; + +-----------+-----------------------+-----------+----------+-----------+ + | Disk | Path | Total | Used | Available | + +-----------+-----------------------+-----------+----------+-----------+ + | /dev/sda3 | / | 47929956 | 30666304 | 14805864 | + | /dev/sda1 | /boot/efi | 191551 | 3461 | 188090 | + | /dev/sda4 | /home | 174679768 | 80335392 | 85448120 | + | /dev/sdb1 | /mnt/hdd | 961301832 | 83764 | 912363644 | + | /dev/sdb1 | /home/wikman/Music | 961301832 | 83764 | 912363644 | + | /dev/sdb1 | /home/wikman/Videos | 961301832 | 83764 | 912363644 | + | /dev/sdb1 | /home/wikman/hdd | 961301832 | 83764 | 912363644 | + | /dev/sdb1 | /home/wikman/Pictures | 961301832 | 83764 | 912363644 | + | /dev/sda3 | /var/lib/docker/aufs | 47929956 | 30666304 | 14805864 | + +-----------+-----------------------+-----------+----------+-----------+ + 9 rows in set (0.00 sec) + +- 'Disk' is the name of the disk itself. +- 'Path' is the mount point of the disk. +- 'Total' is the total space in KiB. +- 'Used' is the used amount of space in KiB, and +- 'Available' is the amount of space in KiB available to non-root users. + +Note that as the amount of space available to root may be more than what +is available to non-root users, 'available' + 'used' may be less than 'total'. + +All paths to which a particular disk has been mounted are reported. The +rationale is that someone might want to take different action e.g. depending +on which disk is relevant for a particular path. This leads to the same disk +being reported multiple times. An alternative to this would be to have two +tables; disks and mounts. 
+ + > select * from information_schema.disks; + +-----------+-----------+----------+-----------+ + | Disk | Total | Used | Available | + +-----------+-----------+----------+-----------+ + | /dev/sda3 | 47929956 | 30666304 | 14805864 | + | /dev/sda1 | 191551 | 3461 | 188090 | + | /dev/sda4 | 174679768 | 80335392 | 85448120 | + | /dev/sdb1 | 961301832 | 83764 | 912363644 | + +-----------+-----------+----------+-----------+ + + > select * from information_schema.mounts; + +-----------------------+-----------+ + | Path | Disk | + +-----------------------+-----------+ + | / | /dev/sda3 | + | /boot/efi | /dev/sda1 | + | /home | /dev/sda4 | + | /mnt/hdd | /dev/sdb1 | + | /home/wikman/Music | /dev/sdb1 | + ... + + +Building +-------- +- Ensure that the directory information_schema_disks is in the top-level + directory of the server. +- Add + + ADD_SUBDIRECTORY(information_schema_disks) + + to the top-level CMakeLists.txt + +- Invoke make + + $ make + +Installation +------------ +- Copy information_schema_disks/libinformation_schema_disks.so to the plugin + directory of the server: + + $ cd information_schema_disks + $ sudo cp libinformation_schema_disks.so plugin-directory-of-server + +- Using mysql, install the plugin: + + MariaDB [(none)]> install plugin disks soname 'libinformation_schema_disks.so'; + +Usage +----- +The plugin appears as the table 'disks' in 'information_schema'. 
+ + MariaDB [(none)]> select * from information_schema.disks; + +-----------+-----------------------+-----------+----------+-----------+ + | Disk | Path | Total | Used | Available | + +-----------+-----------------------+-----------+----------+-----------+ + | /dev/sda3 | / | 47929956 | 30666308 | 14805860 | + | /dev/sda1 | /boot/efi | 191551 | 3461 | 188090 | + | /dev/sda4 | /home | 174679768 | 80348148 | 85435364 | + | /dev/sdb1 | /mnt/hdd | 961301832 | 83764 | 912363644 | + | /dev/sdb1 | /home/wikman/Music | 961301832 | 83764 | 912363644 | + | /dev/sdb1 | /home/wikman/Videos | 961301832 | 83764 | 912363644 | + ... + diff --git a/plugin/information_schema_disks/information_schema_disks.cc b/plugin/information_schema_disks/information_schema_disks.cc new file mode 100644 index 00000000000..b5e3a6dc728 --- /dev/null +++ b/plugin/information_schema_disks/information_schema_disks.cc @@ -0,0 +1,154 @@ +/* + Copyright (c) 2017, MariaDB + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; version 2 of the License. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. 
+ + You should have received a copy of the GNU General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA */ + +#include +#include +#include +#include +#include +#include + +namespace +{ + +struct st_mysql_information_schema disks_table_info = { MYSQL_INFORMATION_SCHEMA_INTERFACE_VERSION }; + +ST_FIELD_INFO disks_table_fields[]= +{ + { "Disk", PATH_MAX, MYSQL_TYPE_STRING, 0, 0 ,0, 0 }, + { "Path", PATH_MAX, MYSQL_TYPE_STRING, 0, 0 ,0, 0 }, + { "Total", 32, MYSQL_TYPE_LONG, 0, 0 ,0 ,0 }, // Total amount available + { "Used", 32, MYSQL_TYPE_LONG, 0, 0 ,0 ,0 }, // Amount of space used + { "Available", 32, MYSQL_TYPE_LONG, 0, 0 ,0 ,0 }, // Amount available to users other than root. + { 0, 0, MYSQL_TYPE_NULL, 0, 0, 0, 0 } +}; + +int disks_table_add_row(THD* pThd, + TABLE* pTable, + const char* zDisk, + const char* zPath, + const struct statvfs& info) +{ + // From: http://pubs.opengroup.org/onlinepubs/009695399/basedefs/sys/statvfs.h.html + // + // f_frsize Fundamental file system block size. + // f_blocks Total number of blocks on file system in units of f_frsize. + // f_bfree Total number of free blocks. + // f_bavail Number of free blocks available to non-privileged process. + + size_t total = (info.f_frsize * info.f_blocks) / 1024; + size_t used = (info.f_frsize * (info.f_blocks - info.f_bfree)) / 1024; + size_t avail = (info.f_frsize * info.f_bavail) / 1024; + + pTable->field[0]->store(zDisk, strlen(zDisk), system_charset_info); + pTable->field[1]->store(zPath, strlen(zPath), system_charset_info); + pTable->field[2]->store(total); + pTable->field[3]->store(used); + pTable->field[4]->store(avail); + + // 0 means success. + return (schema_table_store_record(pThd, pTable) != 0) ? 
1 : 0; +} + +int disks_table_add_row(THD* pThd, TABLE* pTable, const char* zDisk, const char* zPath) +{ + int rv = 0; + + struct statvfs info; + + if (statvfs(zPath, &info) == 0) // We ignore failures. + { + rv = disks_table_add_row(pThd, pTable, zDisk, zPath, info); + } + + return rv; +} + +int disks_fill_table(THD* pThd, TABLE_LIST* pTables, Item* pCond) +{ + int rv = 1; + TABLE* pTable = pTables->table; + + FILE* pFile = setmntent("/etc/mtab", "r"); + + if (pFile) + { + const size_t BUFFER_SIZE = 4096; // 4K should be sufficient. + + char* pBuffer = new (std::nothrow) char [BUFFER_SIZE]; + + if (pBuffer) + { + rv = 0; + + struct mntent ent; + struct mntent* pEnt; + + while ((rv == 0) && (pEnt = getmntent_r(pFile, &ent, pBuffer, BUFFER_SIZE))) + { + // We only report the ones that refer to physical disks. + if (pEnt->mnt_fsname[0] == '/') + { + rv = disks_table_add_row(pThd, pTable, pEnt->mnt_fsname, pEnt->mnt_dir); + } + } + + delete [] pBuffer; + } + else + { + rv = 1; + } + + endmntent(pFile); + } + + return rv; +} + +int disks_table_init(void *ptr) +{ + ST_SCHEMA_TABLE* pSchema_table = (ST_SCHEMA_TABLE*)ptr; + + pSchema_table->fields_info = disks_table_fields; + pSchema_table->fill_table = disks_fill_table; + return 0; +} + +} + +extern "C" +{ + +mysql_declare_plugin(disks_library) +{ + MYSQL_INFORMATION_SCHEMA_PLUGIN, + &disks_table_info, /* type-specific descriptor */ + "DISKS", /* table name */ + "MariaDB", /* author */ + "Disk space information", /* description */ + PLUGIN_LICENSE_GPL, /* license type */ + disks_table_init, /* init function */ + NULL, + 0x0100, /* version = 1.0 */ + NULL, /* no status variables */ + NULL, /* no system variables */ + NULL, /* no reserved information */ + 0 /* no flags */ +} +mysql_declare_plugin_end; + +} From c704523195749285725797e2e1d4a955d47e364b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Wed, 21 Mar 2018 11:58:17 +0200 Subject: [PATCH 096/139] Remove orphan wsrep_node_is_ready() --- 
sql/sql_parse.cc | 15 --------------- 1 file changed, 15 deletions(-) diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc index affadf01d2c..0a68d8ccea8 100644 --- a/sql/sql_parse.cc +++ b/sql/sql_parse.cc @@ -913,21 +913,6 @@ static bool wsrep_tables_accessible_when_detached(const TABLE_LIST *tables) #endif /* WITH_WSREP */ #ifndef EMBEDDED_LIBRARY -#ifdef WITH_WSREP -static bool wsrep_node_is_ready(THD *thd) -{ - if (thd->variables.wsrep_on && !thd->wsrep_applier && - (!wsrep_ready || wsrep_reject_queries != WSREP_REJECT_NONE)) - { - my_message(ER_UNKNOWN_COM_ERROR, - "WSREP has not yet prepared node for application use", - MYF(0)); - return false; - } - return true; -} -#endif - /** Read one command from connection and execute it (query or simple command). This function is called in loop from thread function. From ca9d9029e607bb9cc09ecaa295e4a09682b9f485 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Wed, 21 Mar 2018 12:32:38 +0200 Subject: [PATCH 097/139] Partially revert commit 2a729b5f4b14f9f04cf81e1d8dd4eec4ad6cb7cd Define CF_SKIP_WSREP_CHECK only once. --- sql/sql_class.h | 8 -------- 1 file changed, 8 deletions(-) diff --git a/sql/sql_class.h b/sql/sql_class.h index b42b6ece9d4..91030145022 100644 --- a/sql/sql_class.h +++ b/sql/sql_class.h @@ -5475,14 +5475,6 @@ public: sent by the user (ie: stored procedure). */ #define CF_SKIP_QUESTIONS (1U << 1) -#ifdef WITH_WSREP -/** - Do not check that wsrep snapshot is ready before allowing this command -*/ -#define CF_SKIP_WSREP_CHECK (1U << 2) -#else -#define CF_SKIP_WSREP_CHECK 0 -#endif /* WITH_WSREP */ /** Do not check that wsrep snapshot is ready before allowing this command From 4629db0dd6442ea7c2d3ecd636060bc4d21f2d19 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jan=20Lindstr=C3=B6m?= Date: Wed, 21 Mar 2018 09:38:23 +0200 Subject: [PATCH 098/139] Fix test failure on galera_var_reject_queries. 
--- mysql-test/suite/galera/disabled.def | 2 +- mysql-test/suite/galera/t/galera_var_reject_queries.test | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/mysql-test/suite/galera/disabled.def b/mysql-test/suite/galera/disabled.def index 1551fae072b..49081918bfd 100644 --- a/mysql-test/suite/galera/disabled.def +++ b/mysql-test/suite/galera/disabled.def @@ -49,4 +49,4 @@ galera_suspend_slave : MDEV-13549 Galera test failures 10.1 galera_gtid : MDEV-13549 Galera test failures 10.1 galera_gtid_slave : MDEV-13549 Galera test failures 10.1 galera_unicode_identifiers : MDEV-13549 Galera test failures 10.1 -galera_var_reject_queries : MW-245 FIXME: fails on buildbot +galera.galera_gcs_fc_limit : MDEV-13549 Galera test failures 10.1 diff --git a/mysql-test/suite/galera/t/galera_var_reject_queries.test b/mysql-test/suite/galera/t/galera_var_reject_queries.test index b1af9d8aa2b..6859855c35f 100644 --- a/mysql-test/suite/galera/t/galera_var_reject_queries.test +++ b/mysql-test/suite/galera/t/galera_var_reject_queries.test @@ -23,7 +23,7 @@ SELECT * FROM t1; SET GLOBAL wsrep_reject_queries = ALL_KILL; --connection node_1a ---error ER_CONNECTION_KILLED,2013 +--error ER_CONNECTION_KILLED,2013,2006 SELECT * FROM t1; --connect node_1b, 127.0.0.1, root, , test, $NODE_MYPORT_1 From f3994b74327eef37fa6010368f7f8db044cf70f8 Mon Sep 17 00:00:00 2001 From: Oleksandr Byelkin Date: Wed, 21 Mar 2018 12:13:37 +0100 Subject: [PATCH 099/139] MDEV-15492: Subquery crash similar to MDEV-10050 Detection of first execution of PS fixed. More debug info. 
--- mysql-test/r/ps_qc_innodb.result | 23 +++++++++++++++++++++ mysql-test/t/ps_qc_innodb.test | 35 ++++++++++++++++++++++++++++++++ sql/sql_class.cc | 4 ++++ sql/sql_prepare.cc | 4 +++- 4 files changed, 65 insertions(+), 1 deletion(-) create mode 100644 mysql-test/r/ps_qc_innodb.result create mode 100644 mysql-test/t/ps_qc_innodb.test diff --git a/mysql-test/r/ps_qc_innodb.result b/mysql-test/r/ps_qc_innodb.result new file mode 100644 index 00000000000..775055e858f --- /dev/null +++ b/mysql-test/r/ps_qc_innodb.result @@ -0,0 +1,23 @@ +# +# MDEV-15492: Subquery crash similar to MDEV-10050 +# +SET @qcs.save= @@global.query_cache_size, @qct.save= @@global.query_cache_type; +SET GLOBAL query_cache_size= 512*1024*1024, query_cache_type= ON; +CREATE TABLE t1 (a INT) ENGINE=InnoDB; +CREATE TABLE t2 (b INT) ENGINE=InnoDB; +CREATE VIEW v AS select a from t1 join t2; +PREPARE stmt1 FROM "SELECT * FROM t1 WHERE a in (SELECT a FROM v)"; +PREPARE stmt2 FROM "SELECT * FROM t1 WHERE a in (SELECT a FROM v)"; +EXECUTE stmt2; +a +EXECUTE stmt1; +a +INSERT INTO t2 VALUES (0); +EXECUTE stmt1; +a +START TRANSACTION; +EXECUTE stmt1; +a +DROP VIEW v; +DROP TABLE t1, t2; +SET GLOBAL query_cache_size= @qcs.save, query_cache_type= @qct.save; diff --git a/mysql-test/t/ps_qc_innodb.test b/mysql-test/t/ps_qc_innodb.test new file mode 100644 index 00000000000..e09a2bf4070 --- /dev/null +++ b/mysql-test/t/ps_qc_innodb.test @@ -0,0 +1,35 @@ +--source include/have_query_cache.inc +--source include/have_innodb.inc + +--echo # +--echo # MDEV-15492: Subquery crash similar to MDEV-10050 +--echo # + +SET @qcs.save= @@global.query_cache_size, @qct.save= @@global.query_cache_type; +SET GLOBAL query_cache_size= 512*1024*1024, query_cache_type= ON; + +--connect (con1,localhost,root,,test) +CREATE TABLE t1 (a INT) ENGINE=InnoDB; +CREATE TABLE t2 (b INT) ENGINE=InnoDB; +CREATE VIEW v AS select a from t1 join t2; + +PREPARE stmt1 FROM "SELECT * FROM t1 WHERE a in (SELECT a FROM v)"; + +--connect 
(con2,localhost,root,,test) +PREPARE stmt2 FROM "SELECT * FROM t1 WHERE a in (SELECT a FROM v)"; +EXECUTE stmt2; + +--connection con1 +EXECUTE stmt1; +INSERT INTO t2 VALUES (0); +EXECUTE stmt1; +START TRANSACTION; +EXECUTE stmt1; + +# Cleanup +--disconnect con1 +--disconnect con2 +--connection default +DROP VIEW v; +DROP TABLE t1, t2; +SET GLOBAL query_cache_size= @qcs.save, query_cache_type= @qct.save; diff --git a/sql/sql_class.cc b/sql/sql_class.cc index c88c13b9524..ff06b7fb3dc 100644 --- a/sql/sql_class.cc +++ b/sql/sql_class.cc @@ -2252,15 +2252,19 @@ void THD::check_and_register_item_tree_change(Item **place, Item **new_value, void THD::rollback_item_tree_changes() { + DBUG_ENTER("THD::rollback_item_tree_changes"); I_List_iterator it(change_list); Item_change_record *change; while ((change= it++)) { + DBUG_PRINT("info", ("Rollback: %p (%p) <- %p", + *change->place, change->place, change->old_value)); *change->place= change->old_value; } /* We can forget about changes memory: it's allocated in runtime memroot */ change_list.empty(); + DBUG_VOID_RETURN; } diff --git a/sql/sql_prepare.cc b/sql/sql_prepare.cc index a3bf9d6c93c..6aa6aacc504 100644 --- a/sql/sql_prepare.cc +++ b/sql/sql_prepare.cc @@ -3819,6 +3819,7 @@ bool Prepared_statement::execute(String *expanded_query, bool open_cursor) Statement stmt_backup; Query_arena *old_stmt_arena; bool error= TRUE; + bool qc_executed= FALSE; char saved_cur_db_name_buf[SAFE_NAME_LEN+1]; LEX_STRING saved_cur_db_name= @@ -3937,6 +3938,7 @@ bool Prepared_statement::execute(String *expanded_query, bool open_cursor) thd->lex->sql_command= SQLCOM_SELECT; status_var_increment(thd->status_var.com_stat[SQLCOM_SELECT]); thd->update_stats(); + qc_executed= TRUE; } } @@ -3960,7 +3962,7 @@ bool Prepared_statement::execute(String *expanded_query, bool open_cursor) thd->set_statement(&stmt_backup); thd->stmt_arena= old_stmt_arena; - if (state == Query_arena::STMT_PREPARED) + if (state == Query_arena::STMT_PREPARED && !qc_executed) 
state= Query_arena::STMT_EXECUTED; if (error == 0 && this->lex->sql_command == SQLCOM_CALL) From 03a80e20f7ccb8b3215d08945fc3923010acada1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Wed, 21 Mar 2018 16:15:02 +0200 Subject: [PATCH 100/139] pfs_os_file_read_no_error_handling_int_fd_func(): Remove a variable --- storage/innobase/include/os0file.ic | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/storage/innobase/include/os0file.ic b/storage/innobase/include/os0file.ic index 6cb67af94ca..a7e4f2695da 100644 --- a/storage/innobase/include/os0file.ic +++ b/storage/innobase/include/os0file.ic @@ -372,9 +372,9 @@ pfs_os_file_read_no_error_handling_int_fd_func( locker, n, __FILE__, __LINE__); } - ulint fulfilled; + bool success = DB_SUCCESS == os_file_read_no_error_handling_func( - type, OS_FILE_FROM_FD(file), buf, offset, n, &fulfilled); + type, OS_FILE_FROM_FD(file), buf, offset, n, NULL); if (locker != NULL) { PSI_FILE_CALL(end_file_wait)(locker, n); From ddc5c65333a4add28907ccb82054ecba0ff6b873 Mon Sep 17 00:00:00 2001 From: Varun Gupta Date: Thu, 22 Mar 2018 03:01:53 +0530 Subject: [PATCH 101/139] MDEV-14779: using left join causes incorrect results with materialization and derived tables Conversion of a subquery to a semi-join is blocked when we have an IN subquery predicate in the on_expr of an outer join. Currently this scenario is handled but the cases when an IN subquery predicate is wrapped inside a Item_in_optimizer item then this blocking is not done. 
--- mysql-test/r/join_outer.result | 18 +++++++++++++++++- mysql-test/r/join_outer_jcl6.result | 18 +++++++++++++++++- mysql-test/t/join_outer.test | 18 +++++++++++++++++- sql/item_cmpfunc.h | 5 +++++ sql/item_func.h | 2 +- sql/opt_subselect.cc | 4 ++++ 6 files changed, 61 insertions(+), 4 deletions(-) diff --git a/mysql-test/r/join_outer.result b/mysql-test/r/join_outer.result index 74580e67499..67b22ca86b2 100644 --- a/mysql-test/r/join_outer.result +++ b/mysql-test/r/join_outer.result @@ -2346,11 +2346,27 @@ CREATE TABLE t1 (b1 BIT NOT NULL); INSERT INTO t1 VALUES (0),(1); CREATE TABLE t2 (b2 BIT NOT NULL); INSERT INTO t2 VALUES (0),(1); -SET SESSION JOIN_CACHE_LEVEL = 3; +set @save_join_cache_level= @@join_cache_level; +SET @@join_cache_level = 3; SELECT t1.b1+'0' , t2.b2 + '0' FROM t1 LEFT JOIN t2 ON b1 = b2; t1.b1+'0' t2.b2 + '0' 0 0 1 1 DROP TABLE t1, t2; +set @join_cache_level= @save_join_cache_level; +# +# MDEV-14779: using left join causes incorrect results with materialization and derived tables +# +create table t1(id int); +insert into t1 values (1),(2); +create table t2(sid int, id int); +insert into t2 values (1,1),(2,2); +select * from t1 t +left join (select * from t2 where sid in (select max(sid) from t2 where 0=1 group by id)) r +on t.id=r.id ; +id sid id +1 NULL NULL +2 NULL NULL +drop table t1, t2; # end of 5.5 tests SET optimizer_switch=@save_optimizer_switch; diff --git a/mysql-test/r/join_outer_jcl6.result b/mysql-test/r/join_outer_jcl6.result index d46a4ee6c7a..c019da6197b 100644 --- a/mysql-test/r/join_outer_jcl6.result +++ b/mysql-test/r/join_outer_jcl6.result @@ -2357,12 +2357,28 @@ CREATE TABLE t1 (b1 BIT NOT NULL); INSERT INTO t1 VALUES (0),(1); CREATE TABLE t2 (b2 BIT NOT NULL); INSERT INTO t2 VALUES (0),(1); -SET SESSION JOIN_CACHE_LEVEL = 3; +set @save_join_cache_level= @@join_cache_level; +SET @@join_cache_level = 3; SELECT t1.b1+'0' , t2.b2 + '0' FROM t1 LEFT JOIN t2 ON b1 = b2; t1.b1+'0' t2.b2 + '0' 0 0 1 1 DROP TABLE t1, t2; +set 
@join_cache_level= @save_join_cache_level; +# +# MDEV-14779: using left join causes incorrect results with materialization and derived tables +# +create table t1(id int); +insert into t1 values (1),(2); +create table t2(sid int, id int); +insert into t2 values (1,1),(2,2); +select * from t1 t +left join (select * from t2 where sid in (select max(sid) from t2 where 0=1 group by id)) r +on t.id=r.id ; +id sid id +1 NULL NULL +2 NULL NULL +drop table t1, t2; # end of 5.5 tests SET optimizer_switch=@save_optimizer_switch; set join_cache_level=default; diff --git a/mysql-test/t/join_outer.test b/mysql-test/t/join_outer.test index 896cc137e07..2769aea9969 100644 --- a/mysql-test/t/join_outer.test +++ b/mysql-test/t/join_outer.test @@ -1891,9 +1891,25 @@ INSERT INTO t1 VALUES (0),(1); CREATE TABLE t2 (b2 BIT NOT NULL); INSERT INTO t2 VALUES (0),(1); -SET SESSION JOIN_CACHE_LEVEL = 3; +set @save_join_cache_level= @@join_cache_level; +SET @@join_cache_level = 3; SELECT t1.b1+'0' , t2.b2 + '0' FROM t1 LEFT JOIN t2 ON b1 = b2; DROP TABLE t1, t2; +set @join_cache_level= @save_join_cache_level; + +--echo # +--echo # MDEV-14779: using left join causes incorrect results with materialization and derived tables +--echo # + +create table t1(id int); +insert into t1 values (1),(2); +create table t2(sid int, id int); +insert into t2 values (1,1),(2,2); + +select * from t1 t + left join (select * from t2 where sid in (select max(sid) from t2 where 0=1 group by id)) r + on t.id=r.id ; +drop table t1, t2; --echo # end of 5.5 tests diff --git a/sql/item_cmpfunc.h b/sql/item_cmpfunc.h index a045a08e4fd..3c8cc71370d 100644 --- a/sql/item_cmpfunc.h +++ b/sql/item_cmpfunc.h @@ -255,6 +255,7 @@ public: bool is_null(); longlong val_int(); void cleanup(); + enum Functype functype() const { return IN_OPTIMIZER_FUNC; } const char *func_name() const { return ""; } Item_cache **get_cache() { return &cache; } void keep_top_level_cache(); @@ -270,6 +271,10 @@ public: void 
fix_after_pullout(st_select_lex *new_parent, Item **ref); virtual void print(String *str, enum_query_type query_type); void restore_first_argument(); + Item* get_wrapped_in_subselect_item() + { + return args[1]; + } }; class Comp_creator diff --git a/sql/item_func.h b/sql/item_func.h index 2157c6b6b6d..60122f03e0b 100644 --- a/sql/item_func.h +++ b/sql/item_func.h @@ -66,7 +66,7 @@ public: NOW_FUNC, TRIG_COND_FUNC, SUSERVAR_FUNC, GUSERVAR_FUNC, COLLATE_FUNC, EXTRACT_FUNC, CHAR_TYPECAST_FUNC, FUNC_SP, UDF_FUNC, - NEG_FUNC, GSYSVAR_FUNC }; + NEG_FUNC, GSYSVAR_FUNC, IN_OPTIMIZER_FUNC }; enum optimize_type { OPTIMIZE_NONE,OPTIMIZE_KEY,OPTIMIZE_OP, OPTIMIZE_NULL, OPTIMIZE_EQUAL }; enum Type type() const { return FUNC_ITEM; } diff --git a/sql/opt_subselect.cc b/sql/opt_subselect.cc index 24f35a0c14c..c21541c4b97 100644 --- a/sql/opt_subselect.cc +++ b/sql/opt_subselect.cc @@ -1006,6 +1006,10 @@ bool check_for_outer_joins(List *join_list) void find_and_block_conversion_to_sj(Item *to_find, List_iterator_fast &li) { + if (to_find->type() == Item::FUNC_ITEM && + ((Item_func*)to_find)->functype() == Item_func::IN_OPTIMIZER_FUNC) + to_find= ((Item_in_optimizer*)to_find)->get_wrapped_in_subselect_item(); + if (to_find->type() != Item::SUBSELECT_ITEM || ((Item_subselect *) to_find)->substype() != Item_subselect::IN_SUBS) return; From b6d68c6aa3df0b8490dc92f2b22f9e92b67d6ef1 Mon Sep 17 00:00:00 2001 From: Thirunarayanan Balathandayuthapani Date: Thu, 22 Mar 2018 14:19:16 +0530 Subject: [PATCH 102/139] MDEV-13561 Mariabackup is incompatible with retroactively created innodb_undo_tablespaces - Mariabackup supports starting undo tablespace id which is greater than 1. 
--- extra/mariabackup/common.h | 4 +- extra/mariabackup/xtrabackup.cc | 85 +++++++++++++++++++ .../suite/mariabackup/undo_space_id.opt | 2 + .../suite/mariabackup/undo_space_id.result | 13 +++ .../suite/mariabackup/undo_space_id.test | 25 ++++++ storage/xtradb/srv/srv0start.cc | 6 +- 6 files changed, 130 insertions(+), 5 deletions(-) create mode 100644 mysql-test/suite/mariabackup/undo_space_id.opt create mode 100644 mysql-test/suite/mariabackup/undo_space_id.result create mode 100644 mysql-test/suite/mariabackup/undo_space_id.test diff --git a/extra/mariabackup/common.h b/extra/mariabackup/common.h index 7b1dfd7a0db..340ad66e28a 100644 --- a/extra/mariabackup/common.h +++ b/extra/mariabackup/common.h @@ -27,7 +27,9 @@ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA #include -# define fil_is_user_tablespace_id(i) ((i) > srv_undo_tablespaces_open) +/** Determine if (i) is a user tablespace id or not. */ +# define fil_is_user_tablespace_id(i) (i != 0 \ + && !srv_is_undo_tablespace(i)) #ifdef _MSC_VER #define stat _stati64 diff --git a/extra/mariabackup/xtrabackup.cc b/extra/mariabackup/xtrabackup.cc index 44214b0c207..b4931b5ebe7 100644 --- a/extra/mariabackup/xtrabackup.cc +++ b/extra/mariabackup/xtrabackup.cc @@ -3079,6 +3079,85 @@ xb_fil_io_init(void) fsp_init(); } +/** Assign srv_undo_space_id_start variable if there are undo tablespace present. +Read the TRX_SYS page from ibdata1 file and get the minimum space id from +the first slot rollback segments of TRX_SYS_PAGE_NO. +@retval DB_ERROR if file open or page read failed. +@retval DB_SUCCESS if srv_undo_space_id assigned successfully. 
*/ +static dberr_t xb_assign_undo_space_start() +{ + ulint dirnamelen; + char name[1000]; + pfs_os_file_t file; + byte* buf; + byte* page; + ibool ret; + dberr_t error = DB_SUCCESS; + ulint space, page_no; + + if (srv_undo_tablespaces == 0) { + return error; + } + + srv_normalize_path_for_win(srv_data_home); + dirnamelen = strlen(srv_data_home); + memcpy(name, srv_data_home, dirnamelen); + + if (dirnamelen && name[dirnamelen - 1] != SRV_PATH_SEPARATOR) { + name[dirnamelen++] = SRV_PATH_SEPARATOR; + } + + ut_snprintf(name + dirnamelen, strlen(name) + strlen("ibdata1"), + "%s", "ibdata1"); + + file = os_file_create(innodb_file_data_key, name, OS_FILE_OPEN, + OS_FILE_NORMAL, OS_DATA_FILE, &ret, 0); + + if (ret == FALSE) { + fprintf(stderr, "InnoDB: Error in opening %s\n", name); + return DB_ERROR; + } + + buf = static_cast(ut_malloc(2 * UNIV_PAGE_SIZE)); + page = static_cast(ut_align(buf, UNIV_PAGE_SIZE)); + +retry: + ret = os_file_read(file, page, TRX_SYS_PAGE_NO * UNIV_PAGE_SIZE, + UNIV_PAGE_SIZE); + + if (!ret) { + fprintf(stderr, "InnoDB: Reading TRX_SYS page failed."); + error = DB_ERROR; + goto func_exit; + } + + /* TRX_SYS page can't be compressed or encrypted. */ + if (buf_page_is_corrupted(false, page, 0, NULL)) { + goto retry; + } + + /* 0th slot always points to system tablespace. + 1st slot should point to first undotablespace which is minimum. */ + + page_no = mach_read_ulint(TRX_SYS + TRX_SYS_RSEGS + + TRX_SYS_RSEG_SLOT_SIZE + + TRX_SYS_RSEG_PAGE_NO + page, MLOG_4BYTES); + ut_ad(page_no != FIL_NULL); + + space = mach_read_ulint(TRX_SYS + TRX_SYS_RSEGS + + TRX_SYS_RSEG_SLOT_SIZE + + TRX_SYS_RSEG_SPACE + page, MLOG_4BYTES); + + srv_undo_space_id_start = space; + +func_exit: + ut_free(buf); + ret = os_file_close(file); + ut_a(ret); + + return error; +} + /**************************************************************************** Populates the tablespace memory cache by scanning for and opening data files. 
@returns DB_SUCCESS or error code.*/ @@ -3132,6 +3211,12 @@ xb_load_tablespaces(void) /* Add separate undo tablespaces to fil_system */ + err = xb_assign_undo_space_start(); + + if (err != DB_SUCCESS) { + return err; + } + err = srv_undo_tablespaces_init(FALSE, TRUE, srv_undo_tablespaces, diff --git a/mysql-test/suite/mariabackup/undo_space_id.opt b/mysql-test/suite/mariabackup/undo_space_id.opt new file mode 100644 index 00000000000..01b1d91e820 --- /dev/null +++ b/mysql-test/suite/mariabackup/undo_space_id.opt @@ -0,0 +1,2 @@ +--debug=d,innodb_undo_upgrade +--innodb_undo_tablespaces=2 diff --git a/mysql-test/suite/mariabackup/undo_space_id.result b/mysql-test/suite/mariabackup/undo_space_id.result new file mode 100644 index 00000000000..96d3e2a58f4 --- /dev/null +++ b/mysql-test/suite/mariabackup/undo_space_id.result @@ -0,0 +1,13 @@ +# Create 2 UNDO TABLESPACE(UNDO003, UNDO004) +CREATE TABLE t1(a varchar(60)) ENGINE INNODB; +start transaction; +INSERT INTO t1 VALUES(1); +# xtrabackup backup +# Display undo log files from target directory +undo003 +undo004 +# xtrabackup prepare +# Display undo log files from targer directory +undo003 +undo004 +DROP TABLE t1; diff --git a/mysql-test/suite/mariabackup/undo_space_id.test b/mysql-test/suite/mariabackup/undo_space_id.test new file mode 100644 index 00000000000..8adeb18e5a7 --- /dev/null +++ b/mysql-test/suite/mariabackup/undo_space_id.test @@ -0,0 +1,25 @@ +--source include/have_innodb.inc +--source include/have_debug.inc + +--echo # Create 2 UNDO TABLESPACE(UNDO003, UNDO004) + +let $basedir=$MYSQLTEST_VARDIR/tmp/backup; + +CREATE TABLE t1(a varchar(60)) ENGINE INNODB; +start transaction; +INSERT INTO t1 VALUES(1); + +--echo # xtrabackup backup +--disable_result_log +exec $XTRABACKUP --defaults-file=$MYSQLTEST_VARDIR/my.cnf --backup --target-dir=$basedir; +--enable_result_log +--echo # Display undo log files from target directory +list_files $basedir undo*; + +--echo # xtrabackup prepare +exec $XTRABACKUP --prepare 
--apply-log-only --target-dir=$basedir; +--echo # Display undo log files from targer directory +list_files $basedir undo*; + +DROP TABLE t1; +rmdir $basedir; diff --git a/storage/xtradb/srv/srv0start.cc b/storage/xtradb/srv/srv0start.cc index 2aa2426ca9c..4ed310089ac 100644 --- a/storage/xtradb/srv/srv0start.cc +++ b/storage/xtradb/srv/srv0start.cc @@ -1505,14 +1505,12 @@ srv_undo_tablespaces_init( if (backup_mode) { ut_ad(!create_new_db); - /* MDEV-13561 FIXME: Determine srv_undo_space_id_start - from the undo001 file. */ - srv_undo_space_id_start = 1; - for (i = 0; i < n_undo_tablespaces; i++) { undo_tablespace_ids[i] = i + srv_undo_space_id_start; } + + prev_space_id = srv_undo_space_id_start - 1; } } From 8d32959b091c8473cad706f94b6701f7f4eaf05b Mon Sep 17 00:00:00 2001 From: Eugene Kosov Date: Wed, 31 Jan 2018 20:30:46 +0300 Subject: [PATCH 103/139] fix data races srv_last_monitor_time: make all accesses relaxed atomical WARNING: ThreadSanitizer: data race (pid=12041) Write of size 8 at 0x000003949278 by thread T26 (mutexes: write M226445748578513120): #0 thd_destructor_proxy storage/innobase/handler/ha_innodb.cc:314:14 (mysqld+0x19b5505) Previous read of size 8 at 0x000003949278 by main thread: #0 innobase_init(void*) storage/innobase/handler/ha_innodb.cc:4180:11 (mysqld+0x1a03404) #1 ha_initialize_handlerton(st_plugin_int*) sql/handler.cc:522:31 (mysqld+0xc5ec73) #2 plugin_initialize(st_mem_root*, st_plugin_int*, int*, char**, bool) sql/sql_plugin.cc:1447:9 (mysqld+0x134908d) #3 plugin_init(int*, char**, int) sql/sql_plugin.cc:1729:15 (mysqld+0x13484f0) #4 init_server_components() sql/mysqld.cc:5345:7 (mysqld+0xbf720f) #5 mysqld_main(int, char**) sql/mysqld.cc:5940:7 (mysqld+0xbf107d) #6 main sql/main.cc:25:10 (mysqld+0xbe971b) Location is global 'srv_running' of size 8 at 0x000003949278 (mysqld+0x000003949278) WARNING: ThreadSanitizer: data race (pid=27869) Atomic write of size 4 at 0x7b4800000c00 by thread T8: #0 __tsan_atomic32_exchange 
llvm/projects/compiler-rt/lib/tsan/rtl/tsan_interface_atomic.cc:589 (mysqld+0xbd4eac) #1 TTASEventMutex::exit() storage/innobase/include/ib0mutex.h:467:7 (mysqld+0x1a8d4cb) #2 PolicyMutex >::exit() storage/innobase/include/ib0mutex.h:609:10 (mysqld+0x1a7839e) #3 fil_validate() storage/innobase/fil/fil0fil.cc:5535:2 (mysqld+0x1abd913) #4 fil_validate_skip() storage/innobase/fil/fil0fil.cc:204:9 (mysqld+0x1aba601) #5 fil_aio_wait(unsigned long) storage/innobase/fil/fil0fil.cc:5296:2 (mysqld+0x1abbae6) #6 io_handler_thread storage/innobase/srv/srv0start.cc:340:3 (mysqld+0x21abe1e) Previous read of size 4 at 0x7b4800000c00 by main thread (mutexes: write M1273, write M1271): #0 TTASEventMutex::state() const storage/innobase/include/ib0mutex.h:530:10 (mysqld+0x21c66e2) #1 sync_array_detect_deadlock(sync_array_t*, sync_cell_t*, sync_cell_t*, unsigned long) storage/innobase/sync/sync0arr.cc:746:14 (mysqld+0x21c1c7a) #2 sync_array_wait_event(sync_array_t*, sync_cell_t*&) storage/innobase/sync/sync0arr.cc:465:6 (mysqld+0x21c1708) #3 TTASEventMutex::enter(unsigned int, unsigned int, char const*, unsigned int) storage/innobase/include/ib0mutex.h:516:6 (mysqld+0x1a8c206) #4 PolicyMutex >::enter(unsigned int, unsigned int, char const*, unsigned int) storage/innobase/include/ib0mutex.h:635:10 (mysqld+0x1a782c3) #5 fil_mutex_enter_and_prepare_for_io(unsigned long) storage/innobase/fil/fil0fil.cc:1131:3 (mysqld+0x1a9a92e) #6 fil_io(IORequest const&, bool, page_id_t const&, page_size_t const&, unsigned long, unsigned long, void*, void*, bool) storage/innobase/fil/fil0fil.cc:5082:2 (mysqld+0x1ab8de2) #7 buf_flush_write_block_low(buf_page_t*, buf_flush_t, bool) storage/innobase/buf/buf0flu.cc:1112:3 (mysqld+0x1cb970a) #8 buf_flush_page(buf_pool_t*, buf_page_t*, buf_flush_t, bool) storage/innobase/buf/buf0flu.cc:1270:3 (mysqld+0x1cb7d70) #9 buf_flush_try_neighbors(page_id_t const&, buf_flush_t, unsigned long, unsigned long) storage/innobase/buf/buf0flu.cc:1493:9 (mysqld+0x1cc9674) #10 
buf_flush_page_and_try_neighbors(buf_page_t*, buf_flush_t, unsigned long, unsigned long*) storage/innobase/buf/buf0flu.cc:1565:13 (mysqld+0x1cbadf3) #11 buf_do_flush_list_batch(buf_pool_t*, unsigned long, unsigned long) storage/innobase/buf/buf0flu.cc:1825:3 (mysqld+0x1cbbcb8) #12 buf_flush_batch(buf_pool_t*, buf_flush_t, unsigned long, unsigned long, flush_counters_t*) storage/innobase/buf/buf0flu.cc:1895:16 (mysqld+0x1cbb459) #13 buf_flush_do_batch(buf_pool_t*, buf_flush_t, unsigned long, unsigned long, flush_counters_t*) storage/innobase/buf/buf0flu.cc:2065:2 (mysqld+0x1cbcfe1) #14 buf_flush_lists(unsigned long, unsigned long, unsigned long*) storage/innobase/buf/buf0flu.cc:2167:8 (mysqld+0x1cbd5a3) #15 log_preflush_pool_modified_pages(unsigned long) storage/innobase/log/log0log.cc:1400:13 (mysqld+0x1eefc3b) #16 log_make_checkpoint_at(unsigned long, bool) storage/innobase/log/log0log.cc:1751:10 (mysqld+0x1eefb16) #17 buf_dblwr_create() storage/innobase/buf/buf0dblwr.cc:335:2 (mysqld+0x1cd2141) #18 innobase_start_or_create_for_mysql() storage/innobase/srv/srv0start.cc:2539:10 (mysqld+0x21b4d8e) #19 innobase_init(void*) storage/innobase/handler/ha_innodb.cc:4193:8 (mysqld+0x1a5e3d7) #20 ha_initialize_handlerton(st_plugin_int*) sql/handler.cc:522:31 (mysqld+0xc74d33) #21 plugin_initialize(st_mem_root*, st_plugin_int*, int*, char**, bool) sql/sql_plugin.cc:1447:9 (mysqld+0x1376d5d) #22 plugin_init(int*, char**, int) sql/sql_plugin.cc:1729:15 (mysqld+0x13761c0) #23 init_server_components() sql/mysqld.cc:5348:7 (mysqld+0xc0d0ff) #24 mysqld_main(int, char**) sql/mysqld.cc:5943:7 (mysqld+0xc06f9d) #25 main sql/main.cc:25:10 (mysqld+0xbff71b) WARNING: ThreadSanitizer: data race (pid=29031) Write of size 8 at 0x0000039e48e0 by thread T15: #0 srv_monitor_thread storage/innobase/srv/srv0srv.cc:1699:24 (mysqld+0x21a254e) Previous write of size 8 at 0x0000039e48e0 by thread T14: #0 srv_refresh_innodb_monitor_stats() storage/innobase/srv/srv0srv.cc:1165:24 (mysqld+0x21a3124) 
#1 srv_error_monitor_thread storage/innobase/srv/srv0srv.cc:1836:3 (mysqld+0x21a2d40) Location is global 'srv_last_monitor_time' of size 8 at 0x0000039e48e0 (mysqld+0x0000039e48e0) --- storage/innobase/handler/ha_innodb.cc | 23 +++++++++++++++-------- storage/innobase/include/ib0mutex.h | 3 ++- storage/innobase/include/srv0srv.h | 2 +- storage/innobase/srv/srv0srv.cc | 23 ++++++++++++++--------- storage/innobase/srv/srv0start.cc | 3 ++- 5 files changed, 34 insertions(+), 20 deletions(-) diff --git a/storage/innobase/handler/ha_innodb.cc b/storage/innobase/handler/ha_innodb.cc index b8e72f398f1..89bd78c9db0 100644 --- a/storage/innobase/handler/ha_innodb.cc +++ b/storage/innobase/handler/ha_innodb.cc @@ -305,7 +305,7 @@ is_partition( /** Signal to shut down InnoDB (NULL if shutdown was signaled, or if running in innodb_read_only mode, srv_read_only_mode) */ -volatile st_my_thread_var *srv_running; +st_my_thread_var *srv_running; /** Service thread that waits for the server shutdown and stops purge threads. Purge workers have THDs that are needed to calculate virtual columns. This THDs must be destroyed rather early in the server shutdown sequence. 
@@ -332,12 +332,14 @@ thd_destructor_proxy(void *) myvar->current_cond = &thd_destructor_cond; mysql_mutex_lock(&thd_destructor_mutex); - srv_running = myvar; + my_atomic_storeptr_explicit(&srv_running, myvar, + MY_MEMORY_ORDER_RELAXED); /* wait until the server wakes the THD to abort and die */ while (!srv_running->abort) mysql_cond_wait(&thd_destructor_cond, &thd_destructor_mutex); mysql_mutex_unlock(&thd_destructor_mutex); - srv_running = NULL; + my_atomic_storeptr_explicit(&srv_running, NULL, + MY_MEMORY_ORDER_RELAXED); while (srv_fast_shutdown == 0 && (trx_sys_any_active_transactions() || @@ -4333,7 +4335,8 @@ innobase_change_buffering_inited_ok: mysql_thread_create(thd_destructor_thread_key, &thd_destructor_thread, NULL, thd_destructor_proxy, NULL); - while (!srv_running) + while (!my_atomic_loadptr_explicit(&srv_running, + MY_MEMORY_ORDER_RELAXED)) os_thread_sleep(20); } @@ -4427,10 +4430,12 @@ innobase_end(handlerton*, ha_panic_function) hash_table_free(innobase_open_tables); innobase_open_tables = NULL; - if (!abort_loop && srv_running) { + st_my_thread_var* running = my_atomic_loadptr_explicit( + &srv_running, MY_MEMORY_ORDER_RELAXED); + if (!abort_loop && running) { // may be UNINSTALL PLUGIN statement - srv_running->abort = 1; - mysql_cond_broadcast(srv_running->current_cond); + running->abort = 1; + mysql_cond_broadcast(running->current_cond); } if (!srv_read_only_mode) { @@ -17764,7 +17769,9 @@ fast_shutdown_validate( uint new_val = *reinterpret_cast(save); - if (srv_fast_shutdown && !new_val && !srv_running) { + if (srv_fast_shutdown && !new_val + && !my_atomic_loadptr_explicit(&srv_running, + MY_MEMORY_ORDER_RELAXED)) { return(1); } diff --git a/storage/innobase/include/ib0mutex.h b/storage/innobase/include/ib0mutex.h index 76f02cc1521..afa8f295e1c 100644 --- a/storage/innobase/include/ib0mutex.h +++ b/storage/innobase/include/ib0mutex.h @@ -527,7 +527,8 @@ struct TTASEventMutex { int32 state() const UNIV_NOTHROW { - return(m_lock_word); + 
return(my_atomic_load32_explicit(&m_lock_word, + MY_MEMORY_ORDER_RELAXED)); } /** The event that the mutex will wait in sync0arr.cc diff --git a/storage/innobase/include/srv0srv.h b/storage/innobase/include/srv0srv.h index e24aa89f046..9ba65d8097e 100644 --- a/storage/innobase/include/srv0srv.h +++ b/storage/innobase/include/srv0srv.h @@ -476,7 +476,7 @@ extern uint srv_fast_shutdown; /*!< If this is 1, do not do a /** Signal to shut down InnoDB (NULL if shutdown was signaled, or if running in innodb_read_only mode, srv_read_only_mode) */ -extern volatile st_my_thread_var *srv_running; +extern st_my_thread_var *srv_running; extern ibool srv_innodb_status; diff --git a/storage/innobase/srv/srv0srv.cc b/storage/innobase/srv/srv0srv.cc index 90196516651..c415b0a031e 100644 --- a/storage/innobase/srv/srv0srv.cc +++ b/storage/innobase/srv/srv0srv.cc @@ -1170,7 +1170,16 @@ srv_refresh_innodb_monitor_stats(void) { mutex_enter(&srv_innodb_monitor_mutex); - srv_last_monitor_time = time(NULL); + time_t current_time = time(NULL); + + if (difftime(current_time, srv_last_monitor_time) <= 60) { + /* We referesh InnoDB Monitor values so that averages are + printed from at most 60 last seconds */ + mutex_exit(&srv_innodb_monitor_mutex); + return; + } + + srv_last_monitor_time = current_time; os_aio_refresh_stats(); @@ -1792,6 +1801,8 @@ loop: } } + srv_refresh_innodb_monitor_stats(); + if (srv_shutdown_state != SRV_SHUTDOWN_NONE) { goto exit_func; } @@ -1863,13 +1874,6 @@ loop: old_lsn = new_lsn; } - if (difftime(time(NULL), srv_last_monitor_time) > 60) { - /* We referesh InnoDB Monitor values so that averages are - printed from at most 60 last seconds */ - - srv_refresh_innodb_monitor_stats(); - } - /* Update the statistics collected for deciding LRU eviction policy. 
*/ buf_LRU_stat_update(); @@ -2938,7 +2942,8 @@ srv_purge_wakeup() srv_release_threads(SRV_WORKER, n_workers); } - } while (!srv_running + } while (!my_atomic_loadptr_explicit(&srv_running, + MY_MEMORY_ORDER_RELAXED) && (srv_sys.n_threads_active[SRV_WORKER] || srv_sys.n_threads_active[SRV_PURGE])); } diff --git a/storage/innobase/srv/srv0start.cc b/storage/innobase/srv/srv0start.cc index 8c2d0dc8871..424b45e8def 100644 --- a/storage/innobase/srv/srv0start.cc +++ b/storage/innobase/srv/srv0start.cc @@ -2840,7 +2840,8 @@ srv_shutdown_bg_undo_sources() void innodb_shutdown() { - ut_ad(!srv_running); + ut_ad(!my_atomic_loadptr_explicit(&srv_running, + MY_MEMORY_ORDER_RELAXED)); ut_ad(!srv_undo_sources); switch (srv_operation) { From 0cba2c1ccb28bd9ef65926f3c91d5f6cc9f08cf9 Mon Sep 17 00:00:00 2001 From: Alexander Barkov Date: Thu, 22 Mar 2018 16:23:37 +0400 Subject: [PATCH 104/139] MDEV-15633 Memory leak after MDEV-15005 --- sql/item_cmpfunc.h | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/sql/item_cmpfunc.h b/sql/item_cmpfunc.h index 41d5ce25fd4..f35859fd3c1 100644 --- a/sql/item_cmpfunc.h +++ b/sql/item_cmpfunc.h @@ -2265,6 +2265,11 @@ public: void sort(Item_field_cmpfunc compare, void *arg); void fix_length_and_dec(); bool fix_fields(THD *thd, Item **ref); + void cleanup() + { + delete eval_item; + eval_item= NULL; + } void update_used_tables(); COND *build_equal_items(THD *thd, COND_EQUAL *inherited, bool link_item_fields, From b98d80eb84f9b2e433314fa60c2d3c5b968e580d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Thu, 22 Mar 2018 10:58:22 +0200 Subject: [PATCH 105/139] Correct a wait condition in a disabled test The test will still time out. That should be addressed in MDEV-8139. 
--- mysql-test/suite/encryption/t/innodb_scrub_background.test | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mysql-test/suite/encryption/t/innodb_scrub_background.test b/mysql-test/suite/encryption/t/innodb_scrub_background.test index c705ee51006..3843e9d16eb 100644 --- a/mysql-test/suite/encryption/t/innodb_scrub_background.test +++ b/mysql-test/suite/encryption/t/innodb_scrub_background.test @@ -96,7 +96,7 @@ SET GLOBAL innodb_encryption_threads=5; let $cnt=600; while ($cnt) { - let $success=`SELECT COUNT(*) = 0 FROM INFORMATION_SCHEMA.INNODB_TABLESPACES_SCRUBBING WHERE LAST_SCRUB_COMPLETED IS NULL AND ( NAME in ('test/t1', 'test/t2', 'test/t3') OR SPACE = 0 )`; + let $success=`SELECT COUNT(*) = 0 FROM INFORMATION_SCHEMA.INNODB_TABLESPACES_SCRUBBING WHERE LAST_SCRUB_COMPLETED IS NULL AND (NAME LIKE 'test/%' OR SPACE = 0)`; if ($success) { let $cnt=0; From 2fb31821deb98d4324b1555ff5ede21feba77f6c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Thu, 22 Mar 2018 11:26:38 +0200 Subject: [PATCH 106/139] MDEV-11984 Avoid accessing SYS_TABLESPACES unnecessarily The following INFORMATION_SCHEMA views were unnecessarily retrieving the data from the SYS_TABLESPACES table instead of directly fetching it from the fil_system cache: information_schema.innodb_tablespaces_encryption information_schema.innodb_tablespaces_scrubbing InnoDB always loads all tablespace metadata into memory at startup and never evicts it while the tablespace exists. With this fix, accessing these views will be much faster and use less memory, and include data about all tablespaces, including undo tablespaces. The view information_schema.innodb_sys_tablespaces will still reflect the contents of the SYS_TABLESPACES table. 
--- .../encryption/r/encrypt_and_grep.result | 12 +- .../encryption/r/innodb-spatial-index.result | 4 +- .../encryption/r/innodb_encryption.result | 40 ++--- .../suite/encryption/t/encrypt_and_grep.test | 6 + .../encryption/t/innodb-spatial-index.test | 2 + .../suite/encryption/t/innodb_encryption.test | 40 +++-- storage/innobase/handler/i_s.cc | 157 ++++-------------- 7 files changed, 95 insertions(+), 166 deletions(-) diff --git a/mysql-test/suite/encryption/r/encrypt_and_grep.result b/mysql-test/suite/encryption/r/encrypt_and_grep.result index 38dc11e7850..7068d1d3502 100644 --- a/mysql-test/suite/encryption/r/encrypt_and_grep.result +++ b/mysql-test/suite/encryption/r/encrypt_and_grep.result @@ -14,11 +14,11 @@ NAME test/t3 SELECT NAME FROM INFORMATION_SCHEMA.INNODB_TABLESPACES_ENCRYPTION WHERE MIN_KEY_VERSION <> 0; NAME -mysql/innodb_table_stats +innodb_system mysql/innodb_index_stats +mysql/innodb_table_stats test/t1 test/t2 -innodb_system # t1 yes on expecting NOT FOUND NOT FOUND /foobarsecret/ in t1.ibd # t2 ... on expecting NOT FOUND @@ -32,11 +32,11 @@ SET GLOBAL innodb_encrypt_tables = off; # Wait max 10 min for key encryption threads to decrypt all spaces SELECT NAME FROM INFORMATION_SCHEMA.INNODB_TABLESPACES_ENCRYPTION WHERE MIN_KEY_VERSION = 0; NAME -mysql/innodb_table_stats +innodb_system mysql/innodb_index_stats +mysql/innodb_table_stats test/t2 test/t3 -innodb_system SELECT NAME FROM INFORMATION_SCHEMA.INNODB_TABLESPACES_ENCRYPTION WHERE MIN_KEY_VERSION <> 0; NAME test/t1 @@ -56,11 +56,11 @@ NAME test/t3 SELECT NAME FROM INFORMATION_SCHEMA.INNODB_TABLESPACES_ENCRYPTION WHERE MIN_KEY_VERSION <> 0; NAME -mysql/innodb_table_stats +innodb_system mysql/innodb_index_stats +mysql/innodb_table_stats test/t1 test/t2 -innodb_system # t1 yes on expecting NOT FOUND NOT FOUND /foobarsecret/ in t1.ibd # t2 ... 
on expecting NOT FOUND diff --git a/mysql-test/suite/encryption/r/innodb-spatial-index.result b/mysql-test/suite/encryption/r/innodb-spatial-index.result index d8f76988f9e..6856d1eecc9 100644 --- a/mysql-test/suite/encryption/r/innodb-spatial-index.result +++ b/mysql-test/suite/encryption/r/innodb-spatial-index.result @@ -36,11 +36,11 @@ INSERT INTO t2 values(1, 'secret', ST_GeomFromText('POINT(903994614 180726515)') # Success! SELECT NAME FROM INFORMATION_SCHEMA.INNODB_TABLESPACES_ENCRYPTION WHERE MIN_KEY_VERSION > 0; NAME -mysql/innodb_table_stats +innodb_system mysql/innodb_index_stats +mysql/innodb_table_stats test/t1 test/t2 -innodb_system SELECT NAME FROM INFORMATION_SCHEMA.INNODB_TABLESPACES_ENCRYPTION WHERE MIN_KEY_VERSION = 0; NAME DROP TABLE t1, t2; diff --git a/mysql-test/suite/encryption/r/innodb_encryption.result b/mysql-test/suite/encryption/r/innodb_encryption.result index 72f2632ba26..7b7601a289c 100644 --- a/mysql-test/suite/encryption/r/innodb_encryption.result +++ b/mysql-test/suite/encryption/r/innodb_encryption.result @@ -8,23 +8,23 @@ innodb_encryption_rotation_iops 100 innodb_encryption_threads 4 SET GLOBAL innodb_encrypt_tables = ON; # Wait max 10 min for key encryption threads to encrypt all spaces -SELECT NAME FROM INFORMATION_SCHEMA.INNODB_TABLESPACES_ENCRYPTION WHERE MIN_KEY_VERSION = 0; +SELECT NAME FROM INFORMATION_SCHEMA.INNODB_TABLESPACES_ENCRYPTION WHERE MIN_KEY_VERSION = 0 +AND NAME NOT LIKE 'innodb_undo%' AND NAME NOT LIKE 'mysql/innodb_%_stats'; NAME -SELECT NAME FROM INFORMATION_SCHEMA.INNODB_TABLESPACES_ENCRYPTION WHERE MIN_KEY_VERSION <> 0; +SELECT NAME FROM INFORMATION_SCHEMA.INNODB_TABLESPACES_ENCRYPTION WHERE MIN_KEY_VERSION <> 0 +AND NAME NOT LIKE 'innodb_undo%' AND NAME NOT LIKE 'mysql/innodb_%_stats'; NAME -mysql/innodb_table_stats -mysql/innodb_index_stats innodb_system # Success! 
# Now turn off encryption and wait for threads to decrypt everything SET GLOBAL innodb_encrypt_tables = off; # Wait max 10 min for key encryption threads to encrypt all spaces -SELECT NAME FROM INFORMATION_SCHEMA.INNODB_TABLESPACES_ENCRYPTION WHERE MIN_KEY_VERSION = 0; +SELECT NAME FROM INFORMATION_SCHEMA.INNODB_TABLESPACES_ENCRYPTION WHERE MIN_KEY_VERSION = 0 +AND NAME NOT LIKE 'innodb_undo%' AND NAME NOT LIKE 'mysql/innodb_%_stats'; NAME -mysql/innodb_table_stats -mysql/innodb_index_stats innodb_system -SELECT NAME FROM INFORMATION_SCHEMA.INNODB_TABLESPACES_ENCRYPTION WHERE MIN_KEY_VERSION <> 0; +SELECT NAME FROM INFORMATION_SCHEMA.INNODB_TABLESPACES_ENCRYPTION WHERE MIN_KEY_VERSION <> 0 +AND NAME NOT LIKE 'innodb_undo%' AND NAME NOT LIKE 'mysql/innodb_%_stats'; NAME # Success! # Shutdown innodb_encryption_threads @@ -33,23 +33,23 @@ SET GLOBAL innodb_encryption_threads=0; # since threads are off tables should remain unencrypted SET GLOBAL innodb_encrypt_tables = on; # Wait 15s to check that nothing gets encrypted -SELECT NAME FROM INFORMATION_SCHEMA.INNODB_TABLESPACES_ENCRYPTION WHERE MIN_KEY_VERSION = 0; +SELECT NAME FROM INFORMATION_SCHEMA.INNODB_TABLESPACES_ENCRYPTION WHERE MIN_KEY_VERSION = 0 +AND NAME NOT LIKE 'innodb_undo%' AND NAME NOT LIKE 'mysql/innodb_%_stats'; NAME -mysql/innodb_table_stats -mysql/innodb_index_stats innodb_system -SELECT NAME FROM INFORMATION_SCHEMA.INNODB_TABLESPACES_ENCRYPTION WHERE MIN_KEY_VERSION <> 0; +SELECT NAME FROM INFORMATION_SCHEMA.INNODB_TABLESPACES_ENCRYPTION WHERE MIN_KEY_VERSION <> 0 +AND NAME NOT LIKE 'innodb_undo%' AND NAME NOT LIKE 'mysql/innodb_%_stats'; NAME # Success! 
# Startup innodb_encryption_threads SET GLOBAL innodb_encryption_threads=@start_global_value; # Wait max 10 min for key encryption threads to encrypt all spaces -SELECT NAME FROM INFORMATION_SCHEMA.INNODB_TABLESPACES_ENCRYPTION WHERE MIN_KEY_VERSION = 0; +SELECT NAME FROM INFORMATION_SCHEMA.INNODB_TABLESPACES_ENCRYPTION WHERE MIN_KEY_VERSION = 0 +AND NAME NOT LIKE 'innodb_undo%' AND NAME NOT LIKE 'mysql/innodb_%_stats'; NAME -SELECT NAME FROM INFORMATION_SCHEMA.INNODB_TABLESPACES_ENCRYPTION WHERE MIN_KEY_VERSION <> 0; +SELECT NAME FROM INFORMATION_SCHEMA.INNODB_TABLESPACES_ENCRYPTION WHERE MIN_KEY_VERSION <> 0 +AND NAME NOT LIKE 'innodb_undo%' AND NAME NOT LIKE 'mysql/innodb_%_stats'; NAME -mysql/innodb_table_stats -mysql/innodb_index_stats innodb_system # Success! # Restart mysqld --innodb_encrypt_tables=0 --innodb_encryption_threads=0 @@ -60,10 +60,10 @@ innodb_encrypt_tables OFF innodb_encryption_rotate_key_age 15 innodb_encryption_rotation_iops 100 innodb_encryption_threads 0 -SELECT NAME FROM INFORMATION_SCHEMA.INNODB_TABLESPACES_ENCRYPTION WHERE MIN_KEY_VERSION <> 0; +SELECT NAME FROM INFORMATION_SCHEMA.INNODB_TABLESPACES_ENCRYPTION WHERE MIN_KEY_VERSION <> 0 +AND NAME NOT LIKE 'innodb_undo%' AND NAME NOT LIKE 'mysql/innodb_%_stats'; NAME -mysql/innodb_table_stats -mysql/innodb_index_stats innodb_system -SELECT NAME FROM INFORMATION_SCHEMA.INNODB_TABLESPACES_ENCRYPTION WHERE MIN_KEY_VERSION = 0; +SELECT NAME FROM INFORMATION_SCHEMA.INNODB_TABLESPACES_ENCRYPTION WHERE MIN_KEY_VERSION = 0 +AND NAME NOT LIKE 'innodb_undo%' AND NAME NOT LIKE 'mysql/innodb_%_stats'; NAME diff --git a/mysql-test/suite/encryption/t/encrypt_and_grep.test b/mysql-test/suite/encryption/t/encrypt_and_grep.test index f7916843b89..eb587d7ea12 100644 --- a/mysql-test/suite/encryption/t/encrypt_and_grep.test +++ b/mysql-test/suite/encryption/t/encrypt_and_grep.test @@ -33,7 +33,9 @@ insert t3 values (repeat('dummysecret', 12)); --let $wait_condition=SELECT COUNT(*) = 1 FROM 
INFORMATION_SCHEMA.INNODB_TABLESPACES_ENCRYPTION WHERE MIN_KEY_VERSION = 0 --source include/wait_condition.inc +--sorted_result SELECT NAME FROM INFORMATION_SCHEMA.INNODB_TABLESPACES_ENCRYPTION WHERE MIN_KEY_VERSION = 0; +--sorted_result SELECT NAME FROM INFORMATION_SCHEMA.INNODB_TABLESPACES_ENCRYPTION WHERE MIN_KEY_VERSION <> 0; --source include/shutdown_mysqld.inc @@ -67,7 +69,9 @@ SET GLOBAL innodb_encrypt_tables = off; --let $wait_condition=SELECT COUNT(*) = $tables_count FROM INFORMATION_SCHEMA.INNODB_TABLESPACES_ENCRYPTION WHERE MIN_KEY_VERSION = 0 AND CURRENT_KEY_VERSION = 0; --source include/wait_condition.inc +--sorted_result SELECT NAME FROM INFORMATION_SCHEMA.INNODB_TABLESPACES_ENCRYPTION WHERE MIN_KEY_VERSION = 0; +--sorted_result SELECT NAME FROM INFORMATION_SCHEMA.INNODB_TABLESPACES_ENCRYPTION WHERE MIN_KEY_VERSION <> 0; --source include/shutdown_mysqld.inc @@ -100,7 +104,9 @@ SET GLOBAL innodb_encrypt_tables = on; --let $wait_condition=SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.INNODB_TABLESPACES_ENCRYPTION WHERE MIN_KEY_VERSION = 0; --source include/wait_condition.inc +--sorted_result SELECT NAME FROM INFORMATION_SCHEMA.INNODB_TABLESPACES_ENCRYPTION WHERE MIN_KEY_VERSION = 0; +--sorted_result SELECT NAME FROM INFORMATION_SCHEMA.INNODB_TABLESPACES_ENCRYPTION WHERE MIN_KEY_VERSION <> 0; --source include/shutdown_mysqld.inc diff --git a/mysql-test/suite/encryption/t/innodb-spatial-index.test b/mysql-test/suite/encryption/t/innodb-spatial-index.test index 8eb6a803765..28b35379a6b 100644 --- a/mysql-test/suite/encryption/t/innodb-spatial-index.test +++ b/mysql-test/suite/encryption/t/innodb-spatial-index.test @@ -69,7 +69,9 @@ INSERT INTO t2 values(1, 'secret', ST_GeomFromText('POINT(903994614 180726515)') --echo # Success! 
+--sorted_result SELECT NAME FROM INFORMATION_SCHEMA.INNODB_TABLESPACES_ENCRYPTION WHERE MIN_KEY_VERSION > 0; +--sorted_result SELECT NAME FROM INFORMATION_SCHEMA.INNODB_TABLESPACES_ENCRYPTION WHERE MIN_KEY_VERSION = 0; DROP TABLE t1, t2; diff --git a/mysql-test/suite/encryption/t/innodb_encryption.test b/mysql-test/suite/encryption/t/innodb_encryption.test index d183a2914bd..cd844af0649 100644 --- a/mysql-test/suite/encryption/t/innodb_encryption.test +++ b/mysql-test/suite/encryption/t/innodb_encryption.test @@ -21,8 +21,12 @@ SET GLOBAL innodb_encrypt_tables = ON; --let $wait_condition=SELECT COUNT(*) >= $tables_count FROM INFORMATION_SCHEMA.INNODB_TABLESPACES_ENCRYPTION WHERE MIN_KEY_VERSION <> 0; --source include/wait_condition.inc -SELECT NAME FROM INFORMATION_SCHEMA.INNODB_TABLESPACES_ENCRYPTION WHERE MIN_KEY_VERSION = 0; -SELECT NAME FROM INFORMATION_SCHEMA.INNODB_TABLESPACES_ENCRYPTION WHERE MIN_KEY_VERSION <> 0; +--sorted_result +SELECT NAME FROM INFORMATION_SCHEMA.INNODB_TABLESPACES_ENCRYPTION WHERE MIN_KEY_VERSION = 0 +AND NAME NOT LIKE 'innodb_undo%' AND NAME NOT LIKE 'mysql/innodb_%_stats'; +--sorted_result +SELECT NAME FROM INFORMATION_SCHEMA.INNODB_TABLESPACES_ENCRYPTION WHERE MIN_KEY_VERSION <> 0 +AND NAME NOT LIKE 'innodb_undo%' AND NAME NOT LIKE 'mysql/innodb_%_stats'; --echo # Success! 
@@ -34,8 +38,12 @@ SET GLOBAL innodb_encrypt_tables = off; --let $wait_condition=SELECT COUNT(*) = 0 FROM INFORMATION_SCHEMA.INNODB_TABLESPACES_ENCRYPTION WHERE MIN_KEY_VERSION <> 0; --source include/wait_condition.inc -SELECT NAME FROM INFORMATION_SCHEMA.INNODB_TABLESPACES_ENCRYPTION WHERE MIN_KEY_VERSION = 0; -SELECT NAME FROM INFORMATION_SCHEMA.INNODB_TABLESPACES_ENCRYPTION WHERE MIN_KEY_VERSION <> 0; +--sorted_result +SELECT NAME FROM INFORMATION_SCHEMA.INNODB_TABLESPACES_ENCRYPTION WHERE MIN_KEY_VERSION = 0 +AND NAME NOT LIKE 'innodb_undo%' AND NAME NOT LIKE 'mysql/innodb_%_stats'; +--sorted_result +SELECT NAME FROM INFORMATION_SCHEMA.INNODB_TABLESPACES_ENCRYPTION WHERE MIN_KEY_VERSION <> 0 +AND NAME NOT LIKE 'innodb_undo%' AND NAME NOT LIKE 'mysql/innodb_%_stats'; --echo # Success! @@ -51,8 +59,12 @@ SET GLOBAL innodb_encrypt_tables = on; --let $wait_condition=SELECT COUNT(*) = 0 FROM INFORMATION_SCHEMA.INNODB_TABLESPACES_ENCRYPTION WHERE MIN_KEY_VERSION <> 0; --source include/wait_condition.inc -SELECT NAME FROM INFORMATION_SCHEMA.INNODB_TABLESPACES_ENCRYPTION WHERE MIN_KEY_VERSION = 0; -SELECT NAME FROM INFORMATION_SCHEMA.INNODB_TABLESPACES_ENCRYPTION WHERE MIN_KEY_VERSION <> 0; +--sorted_result +SELECT NAME FROM INFORMATION_SCHEMA.INNODB_TABLESPACES_ENCRYPTION WHERE MIN_KEY_VERSION = 0 +AND NAME NOT LIKE 'innodb_undo%' AND NAME NOT LIKE 'mysql/innodb_%_stats'; +--sorted_result +SELECT NAME FROM INFORMATION_SCHEMA.INNODB_TABLESPACES_ENCRYPTION WHERE MIN_KEY_VERSION <> 0 +AND NAME NOT LIKE 'innodb_undo%' AND NAME NOT LIKE 'mysql/innodb_%_stats'; --echo # Success! 
@@ -64,8 +76,12 @@ SET GLOBAL innodb_encryption_threads=@start_global_value; --let $wait_condition=SELECT COUNT(*) >= $tables_count FROM INFORMATION_SCHEMA.INNODB_TABLESPACES_ENCRYPTION WHERE MIN_KEY_VERSION <> 0; --source include/wait_condition.inc -SELECT NAME FROM INFORMATION_SCHEMA.INNODB_TABLESPACES_ENCRYPTION WHERE MIN_KEY_VERSION = 0; -SELECT NAME FROM INFORMATION_SCHEMA.INNODB_TABLESPACES_ENCRYPTION WHERE MIN_KEY_VERSION <> 0; +--sorted_result +SELECT NAME FROM INFORMATION_SCHEMA.INNODB_TABLESPACES_ENCRYPTION WHERE MIN_KEY_VERSION = 0 +AND NAME NOT LIKE 'innodb_undo%' AND NAME NOT LIKE 'mysql/innodb_%_stats'; +--sorted_result +SELECT NAME FROM INFORMATION_SCHEMA.INNODB_TABLESPACES_ENCRYPTION WHERE MIN_KEY_VERSION <> 0 +AND NAME NOT LIKE 'innodb_undo%' AND NAME NOT LIKE 'mysql/innodb_%_stats'; --echo # Success! --echo # Restart mysqld --innodb_encrypt_tables=0 --innodb_encryption_threads=0 @@ -74,5 +90,9 @@ SELECT NAME FROM INFORMATION_SCHEMA.INNODB_TABLESPACES_ENCRYPTION WHERE MIN_KEY_ SHOW VARIABLES LIKE 'innodb_encrypt%'; -SELECT NAME FROM INFORMATION_SCHEMA.INNODB_TABLESPACES_ENCRYPTION WHERE MIN_KEY_VERSION <> 0; -SELECT NAME FROM INFORMATION_SCHEMA.INNODB_TABLESPACES_ENCRYPTION WHERE MIN_KEY_VERSION = 0; +--sorted_result +SELECT NAME FROM INFORMATION_SCHEMA.INNODB_TABLESPACES_ENCRYPTION WHERE MIN_KEY_VERSION <> 0 +AND NAME NOT LIKE 'innodb_undo%' AND NAME NOT LIKE 'mysql/innodb_%_stats'; +--sorted_result +SELECT NAME FROM INFORMATION_SCHEMA.INNODB_TABLESPACES_ENCRYPTION WHERE MIN_KEY_VERSION = 0 +AND NAME NOT LIKE 'innodb_undo%' AND NAME NOT LIKE 'mysql/innodb_%_stats'; diff --git a/storage/innobase/handler/i_s.cc b/storage/innobase/handler/i_s.cc index 7615dba9a0e..a7b678230ee 100644 --- a/storage/innobase/handler/i_s.cc +++ b/storage/innobase/handler/i_s.cc @@ -1,7 +1,7 @@ /***************************************************************************** Copyright (c) 2007, 2016, Oracle and/or its affiliates. All Rights Reserved. 
-Copyright (c) 2014, 2017, MariaDB Corporation. +Copyright (c) 2014, 2018, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -8658,12 +8658,6 @@ i_s_tablespaces_encryption_fill_table( TABLE_LIST* tables, /*!< in/out: tables to fill */ Item* ) /*!< in: condition (not used) */ { - btr_pcur_t pcur; - const rec_t* rec; - mem_heap_t* heap; - mtr_t mtr; - bool found_space_0 = false; - DBUG_ENTER("i_s_tablespaces_encryption_fill_table"); RETURN_IF_INNODB_NOT_STARTED(tables->schema_table_name); @@ -8672,68 +8666,24 @@ i_s_tablespaces_encryption_fill_table( DBUG_RETURN(0); } - heap = mem_heap_create(1000); - mutex_enter(&dict_sys->mutex); - mtr_start(&mtr); + mutex_enter(&fil_system->mutex); - rec = dict_startscan_system(&pcur, &mtr, SYS_TABLESPACES); - - while (rec) { - const char* err_msg; - ulint space_id; - const char* name; - ulint flags; - - /* Extract necessary information from a SYS_TABLESPACES row */ - err_msg = dict_process_sys_tablespaces( - heap, rec, &space_id, &name, &flags); - - mtr_commit(&mtr); - mutex_exit(&dict_sys->mutex); - - if (space_id == 0) { - found_space_0 = true; + for (fil_space_t* space = UT_LIST_GET_FIRST(fil_system->space_list); + space; space = UT_LIST_GET_NEXT(space_list, space)) { + if (space->purpose == FIL_TYPE_TABLESPACE) { + space->n_pending_ops++; + mutex_exit(&fil_system->mutex); + if (int err = i_s_dict_fill_tablespaces_encryption( + thd, space, tables->table)) { + fil_space_release(space); + DBUG_RETURN(err); + } + mutex_enter(&fil_system->mutex); + space->n_pending_ops--; } - - fil_space_t* space = fil_space_acquire_silent(space_id); - - if (!err_msg && space) { - i_s_dict_fill_tablespaces_encryption( - thd, space, tables->table); - } else { - push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, - ER_CANT_FIND_SYSTEM_REC, "%s", - err_msg); - } - - if (space) { - fil_space_release(space); - } - - 
mem_heap_empty(heap); - - /* Get the next record */ - mutex_enter(&dict_sys->mutex); - mtr_start(&mtr); - rec = dict_getnext_system(&pcur, &mtr); - } - - mtr_commit(&mtr); - mutex_exit(&dict_sys->mutex); - mem_heap_free(heap); - - if (found_space_0 == false) { - /* space 0 does for what ever unknown reason not show up - * in iteration above, add it manually */ - - fil_space_t* space = fil_space_acquire_silent(0); - - i_s_dict_fill_tablespaces_encryption( - thd, space, tables->table); - - fil_space_release(space); } + mutex_exit(&fil_system->mutex); DBUG_RETURN(0); } /*******************************************************************//** @@ -8979,12 +8929,6 @@ i_s_tablespaces_scrubbing_fill_table( TABLE_LIST* tables, /*!< in/out: tables to fill */ Item* ) /*!< in: condition (not used) */ { - btr_pcur_t pcur; - const rec_t* rec; - mem_heap_t* heap; - mtr_t mtr; - bool found_space_0 = false; - DBUG_ENTER("i_s_tablespaces_scrubbing_fill_table"); RETURN_IF_INNODB_NOT_STARTED(tables->schema_table_name); @@ -8993,67 +8937,24 @@ i_s_tablespaces_scrubbing_fill_table( DBUG_RETURN(0); } - heap = mem_heap_create(1000); - mutex_enter(&dict_sys->mutex); - mtr_start(&mtr); + mutex_enter(&fil_system->mutex); - rec = dict_startscan_system(&pcur, &mtr, SYS_TABLESPACES); - - while (rec) { - const char* err_msg; - ulint space_id; - const char* name; - ulint flags; - - /* Extract necessary information from a SYS_TABLESPACES row */ - err_msg = dict_process_sys_tablespaces( - heap, rec, &space_id, &name, &flags); - - mtr_commit(&mtr); - mutex_exit(&dict_sys->mutex); - - if (space_id == 0) { - found_space_0 = true; + for (fil_space_t* space = UT_LIST_GET_FIRST(fil_system->space_list); + space; space = UT_LIST_GET_NEXT(space_list, space)) { + if (space->purpose == FIL_TYPE_TABLESPACE) { + space->n_pending_ops++; + mutex_exit(&fil_system->mutex); + if (int err = i_s_dict_fill_tablespaces_scrubbing( + thd, space, tables->table)) { + fil_space_release(space); + DBUG_RETURN(err); + } + 
mutex_enter(&fil_system->mutex); + space->n_pending_ops--; } - - fil_space_t* space = fil_space_acquire_silent(space_id); - - if (!err_msg && space) { - i_s_dict_fill_tablespaces_scrubbing( - thd, space, tables->table); - } else { - push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, - ER_CANT_FIND_SYSTEM_REC, "%s", - err_msg); - } - - if (space) { - fil_space_release(space); - } - - mem_heap_empty(heap); - - /* Get the next record */ - mutex_enter(&dict_sys->mutex); - mtr_start(&mtr); - rec = dict_getnext_system(&pcur, &mtr); - } - - mtr_commit(&mtr); - mutex_exit(&dict_sys->mutex); - mem_heap_free(heap); - - if (found_space_0 == false) { - /* space 0 does for what ever unknown reason not show up - * in iteration above, add it manually */ - fil_space_t* space = fil_space_acquire_silent(0); - - i_s_dict_fill_tablespaces_scrubbing( - thd, space, tables->table); - - fil_space_release(space); } + mutex_exit(&fil_system->mutex); DBUG_RETURN(0); } /*******************************************************************//** From fc05777eac8defc15fd3cfa8ae55c7d2f2b8671a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Thu, 22 Mar 2018 17:56:45 +0200 Subject: [PATCH 107/139] Enable --suite=innodb_undo --- .../innodb_undo/include/have_undo_tablespaces.combinations | 2 ++ 1 file changed, 2 insertions(+) create mode 100644 mysql-test/suite/innodb_undo/include/have_undo_tablespaces.combinations diff --git a/mysql-test/suite/innodb_undo/include/have_undo_tablespaces.combinations b/mysql-test/suite/innodb_undo/include/have_undo_tablespaces.combinations new file mode 100644 index 00000000000..dbfe4e6c63a --- /dev/null +++ b/mysql-test/suite/innodb_undo/include/have_undo_tablespaces.combinations @@ -0,0 +1,2 @@ +[2] +innodb-undo-tablespaces=2 From 1123f87b5c463611ff325b97c428290d6685267c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Vicen=C8=9Biu=20Ciorbaru?= Date: Thu, 22 Mar 2018 21:01:20 +0200 Subject: [PATCH 108/139] Fix unused variable thd warning in 
embedded The warning came up post merge and is visible when compiling without WSREP. --- sql/handler.cc | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sql/handler.cc b/sql/handler.cc index a27778b8115..8093c17135b 100644 --- a/sql/handler.cc +++ b/sql/handler.cc @@ -5822,9 +5822,9 @@ static inline int binlog_log_row(TABLE* table, const uchar *after_record, Log_func *log_func) { +#ifdef WITH_WSREP THD *const thd= table->in_use; -#ifdef WITH_WSREP /* only InnoDB tables will be replicated through binlog emulation */ if ((WSREP_EMULATE_BINLOG(thd) && table->file->partition_ht()->db_type != DB_TYPE_INNODB) || From f5b2761c701fa971f0455c05f757c3a147632056 Mon Sep 17 00:00:00 2001 From: Alexey Botchkov Date: Fri, 23 Mar 2018 00:18:21 +0400 Subject: [PATCH 109/139] MDEV-10871 Add logging capability to pam_user_map.c. The 'debug' option implemented for the pam_user_map.so. --- plugin/auth_pam/mapper/pam_user_map.c | 84 ++++++++++++++++++++++++++- 1 file changed, 81 insertions(+), 3 deletions(-) diff --git a/plugin/auth_pam/mapper/pam_user_map.c b/plugin/auth_pam/mapper/pam_user_map.c index e62be946c4a..c03ea12be74 100644 --- a/plugin/auth_pam/mapper/pam_user_map.c +++ b/plugin/auth_pam/mapper/pam_user_map.c @@ -22,14 +22,24 @@ top: accounting @group_ro: readonly ========================================================= +If something doesn't work as expected you can get verbose +comments with the 'debug' option like this +========================================================= +auth required pam_user_map.so debug +========================================================= +These comments are written to the syslog as 'authpriv.debug' +and usually end up in /var/log/secure file. 
*/ #include #include +#include +#include #include #include #include +#include #include #define FILENAME "/etc/security/user_map.conf" @@ -90,9 +100,42 @@ static int user_in_group(const gid_t *user_groups, int ng,const char *group) } +static void print_groups(pam_handle_t *pamh, const gid_t *user_groups, int ng) +{ + char buf[256]; + char *c_buf= buf, *buf_end= buf+sizeof(buf)-2; + struct group *gr; + int cg; + + for (cg=0; cg < ng; cg++) + { + char *c; + if (c_buf == buf_end) + break; + *(c_buf++)= ','; + if (!(gr= getgrgid(user_groups[cg])) || + !(c= gr->gr_name)) + continue; + while (*c) + { + if (c_buf == buf_end) + break; + *(c_buf++)= *(c++); + } + } + c_buf[0]= c_buf[1]= 0; + pam_syslog(pamh, LOG_DEBUG, "User belongs to %d %s [%s].\n", + ng, (ng == 1) ? "group" : "groups", buf+1); +} + + +static const char debug_keyword[]= "debug"; +#define SYSLOG_DEBUG if (mode_debug) pam_syslog + int pam_sm_authenticate(pam_handle_t *pamh, int flags, int argc, const char *argv[]) { + int mode_debug= 0; int pam_err, line= 0; const char *username; char buf[256]; @@ -101,6 +144,14 @@ int pam_sm_authenticate(pam_handle_t *pamh, int flags, gid_t *groups= group_buffer; int n_groups= -1; + for (; argc > 0; argc--) + { + if (strcasecmp(argv[argc-1], debug_keyword) == 0) + mode_debug= 1; + } + + SYSLOG_DEBUG(pamh, LOG_DEBUG, "Opening file '%s'.\n", FILENAME); + f= fopen(FILENAME, "r"); if (f == NULL) { @@ -110,12 +161,18 @@ int pam_sm_authenticate(pam_handle_t *pamh, int flags, pam_err = pam_get_item(pamh, PAM_USER, (const void**)&username); if (pam_err != PAM_SUCCESS) + { + pam_syslog(pamh, LOG_ERR, "Cannot get username.\n"); goto ret; + } + + SYSLOG_DEBUG(pamh, LOG_DEBUG, "Incoming username '%s'.\n", username); while (fgets(buf, sizeof(buf), f) != NULL) { char *s= buf, *from, *to, *end_from, *end_to; int check_group; + int cmp_result; line++; @@ -124,7 +181,11 @@ int pam_sm_authenticate(pam_handle_t *pamh, int flags, if ((check_group= *s == '@')) { if (n_groups < 0) + { n_groups= 
populate_user_groups(username, &groups); + if (mode_debug) + print_groups(pamh, groups, n_groups); + } s++; } from= s; @@ -139,14 +200,30 @@ int pam_sm_authenticate(pam_handle_t *pamh, int flags, if (end_to == to) goto syntax_error; *end_from= *end_to= 0; - if (check_group ? - user_in_group(groups, n_groups, from) : - (strcmp(username, from) == 0)) + + if (check_group) + { + cmp_result= user_in_group(groups, n_groups, from); + SYSLOG_DEBUG(pamh, LOG_DEBUG, "Check if user is in group '%s': %s\n", + from, cmp_result ? "YES":"NO"); + } + else + { + cmp_result= (strcmp(username, from) == 0); + SYSLOG_DEBUG(pamh, LOG_DEBUG, "Check if username '%s': %s\n", + from, cmp_result ? "YES":"NO"); + } + if (cmp_result) { pam_err= pam_set_item(pamh, PAM_USER, to); + SYSLOG_DEBUG(pamh, LOG_DEBUG, + (pam_err == PAM_SUCCESS) ? "User mapped as '%s'\n" : + "Couldn't map as '%s'\n", to); goto ret; } } + + SYSLOG_DEBUG(pamh, LOG_DEBUG, "User not found in the list.\n"); pam_err= PAM_AUTH_ERR; goto ret; @@ -162,6 +239,7 @@ ret: return pam_err; } + int pam_sm_setcred(pam_handle_t *pamh, int flags, int argc, const char *argv[]) { From e147a4a0676633d08539f593d14c63493bf26ecc Mon Sep 17 00:00:00 2001 From: Sergey Vojtovich Date: Thu, 22 Mar 2018 22:47:40 +0400 Subject: [PATCH 110/139] Fixed build failure --- storage/innobase/handler/ha_innodb.cc | 18 ++++++++++++------ storage/innobase/include/ib0mutex.h | 3 ++- storage/innobase/srv/srv0srv.cc | 3 ++- storage/innobase/srv/srv0start.cc | 3 ++- 4 files changed, 18 insertions(+), 9 deletions(-) diff --git a/storage/innobase/handler/ha_innodb.cc b/storage/innobase/handler/ha_innodb.cc index 89bd78c9db0..82e3dfb9533 100644 --- a/storage/innobase/handler/ha_innodb.cc +++ b/storage/innobase/handler/ha_innodb.cc @@ -332,13 +332,15 @@ thd_destructor_proxy(void *) myvar->current_cond = &thd_destructor_cond; mysql_mutex_lock(&thd_destructor_mutex); - my_atomic_storeptr_explicit(&srv_running, myvar, + 
my_atomic_storeptr_explicit(reinterpret_cast(&srv_running), + myvar, MY_MEMORY_ORDER_RELAXED); /* wait until the server wakes the THD to abort and die */ while (!srv_running->abort) mysql_cond_wait(&thd_destructor_cond, &thd_destructor_mutex); mysql_mutex_unlock(&thd_destructor_mutex); - my_atomic_storeptr_explicit(&srv_running, NULL, + my_atomic_storeptr_explicit(reinterpret_cast(&srv_running), + NULL, MY_MEMORY_ORDER_RELAXED); while (srv_fast_shutdown == 0 && @@ -4335,7 +4337,8 @@ innobase_change_buffering_inited_ok: mysql_thread_create(thd_destructor_thread_key, &thd_destructor_thread, NULL, thd_destructor_proxy, NULL); - while (!my_atomic_loadptr_explicit(&srv_running, + while (!my_atomic_loadptr_explicit(reinterpret_cast + (&srv_running), MY_MEMORY_ORDER_RELAXED)) os_thread_sleep(20); } @@ -4430,8 +4433,10 @@ innobase_end(handlerton*, ha_panic_function) hash_table_free(innobase_open_tables); innobase_open_tables = NULL; - st_my_thread_var* running = my_atomic_loadptr_explicit( - &srv_running, MY_MEMORY_ORDER_RELAXED); + st_my_thread_var* running = reinterpret_cast( + my_atomic_loadptr_explicit( + reinterpret_cast(&srv_running), + MY_MEMORY_ORDER_RELAXED)); if (!abort_loop && running) { // may be UNINSTALL PLUGIN statement running->abort = 1; @@ -17770,7 +17775,8 @@ fast_shutdown_validate( uint new_val = *reinterpret_cast(save); if (srv_fast_shutdown && !new_val - && !my_atomic_loadptr_explicit(&srv_running, + && !my_atomic_loadptr_explicit(reinterpret_cast + (&srv_running), MY_MEMORY_ORDER_RELAXED)) { return(1); } diff --git a/storage/innobase/include/ib0mutex.h b/storage/innobase/include/ib0mutex.h index afa8f295e1c..7b289c7a98c 100644 --- a/storage/innobase/include/ib0mutex.h +++ b/storage/innobase/include/ib0mutex.h @@ -527,7 +527,8 @@ struct TTASEventMutex { int32 state() const UNIV_NOTHROW { - return(my_atomic_load32_explicit(&m_lock_word, + return(my_atomic_load32_explicit(const_cast + (&m_lock_word), MY_MEMORY_ORDER_RELAXED)); } diff --git 
a/storage/innobase/srv/srv0srv.cc b/storage/innobase/srv/srv0srv.cc index c415b0a031e..73bacb8c6d5 100644 --- a/storage/innobase/srv/srv0srv.cc +++ b/storage/innobase/srv/srv0srv.cc @@ -2942,7 +2942,8 @@ srv_purge_wakeup() srv_release_threads(SRV_WORKER, n_workers); } - } while (!my_atomic_loadptr_explicit(&srv_running, + } while (!my_atomic_loadptr_explicit(reinterpret_cast + (&srv_running), MY_MEMORY_ORDER_RELAXED) && (srv_sys.n_threads_active[SRV_WORKER] || srv_sys.n_threads_active[SRV_PURGE])); diff --git a/storage/innobase/srv/srv0start.cc b/storage/innobase/srv/srv0start.cc index 77fd93c6f56..ba4d82e9b0c 100644 --- a/storage/innobase/srv/srv0start.cc +++ b/storage/innobase/srv/srv0start.cc @@ -2839,7 +2839,8 @@ srv_shutdown_bg_undo_sources() void innodb_shutdown() { - ut_ad(!my_atomic_loadptr_explicit(&srv_running, + ut_ad(!my_atomic_loadptr_explicit(reinterpret_cast + (&srv_running), MY_MEMORY_ORDER_RELAXED)); ut_ad(!srv_undo_sources); From b6e2973ee66bad2e998cd2acd2c47fecaa1e6942 Mon Sep 17 00:00:00 2001 From: Sergei Golubchik Date: Wed, 21 Mar 2018 12:59:40 +0100 Subject: [PATCH 111/139] MDEV-14533 Provide information_schema tables using which hardware information can be obtained. update README, use maria_declare_plugin(), specify the author. 
--- .../CMakeLists.txt | 0 .../README.txt | 25 ++++--------------- .../information_schema_disks.cc | 13 +++++----- 3 files changed, 12 insertions(+), 26 deletions(-) rename plugin/{information_schema_disks => disks}/CMakeLists.txt (100%) rename plugin/{information_schema_disks => disks}/README.txt (86%) rename plugin/{information_schema_disks => disks}/information_schema_disks.cc (92%) diff --git a/plugin/information_schema_disks/CMakeLists.txt b/plugin/disks/CMakeLists.txt similarity index 100% rename from plugin/information_schema_disks/CMakeLists.txt rename to plugin/disks/CMakeLists.txt diff --git a/plugin/information_schema_disks/README.txt b/plugin/disks/README.txt similarity index 86% rename from plugin/information_schema_disks/README.txt rename to plugin/disks/README.txt index 1c9b8fb6283..b49db3c03b5 100644 --- a/plugin/information_schema_disks/README.txt +++ b/plugin/disks/README.txt @@ -57,31 +57,16 @@ tables; disks and mounts. ... -Building --------- -- Ensure that the directory information_schema_disks is in the top-level - directory of the server. 
-- Add - - ADD_SUBDIRECTORY(information_schema_disks) - - to the top-level CMakeLists.txt - -> Invoke make - - $ make - Installation ------------ -- Copy information_schema_disks/libinformation_schema_disks.so to the plugin - directory of the server: - $ cd information_schema_disks - $ sudo cp libinformation_schema_disks.so plugin-directory-of-server +- Use "install plugin" or "install soname" command: -- Using mysql, install the plugin: + MariaDB [(none)]> install plugin disks soname 'disks.so'; - MariaDB [(none)]> install plugin disks soname 'libinformation_schema_disks.so'; + or + + MariaDB [(none)]> install soname 'disks.so'; Usage ----- diff --git a/plugin/information_schema_disks/information_schema_disks.cc b/plugin/disks/information_schema_disks.cc similarity index 92% rename from plugin/information_schema_disks/information_schema_disks.cc rename to plugin/disks/information_schema_disks.cc index b5e3a6dc728..122b3d3f17f 100644 --- a/plugin/information_schema_disks/information_schema_disks.cc +++ b/plugin/disks/information_schema_disks.cc @@ -19,7 +19,8 @@ #include #include #include -#include + +bool schema_table_store_record(THD *thd, TABLE *table); namespace { @@ -133,21 +134,21 @@ int disks_table_init(void *ptr) extern "C" { -mysql_declare_plugin(disks_library) +maria_declare_plugin(disks) { MYSQL_INFORMATION_SCHEMA_PLUGIN, &disks_table_info, /* type-specific descriptor */ "DISKS", /* table name */ - "MariaDB", /* author */ + "Johan Wikman", /* author */ "Disk space information", /* description */ PLUGIN_LICENSE_GPL, /* license type */ disks_table_init, /* init function */ - NULL, + NULL, /* deinit function */ 0x0100, /* version = 1.0 */ NULL, /* no status variables */ NULL, /* no system variables */ - NULL, /* no reserved information */ - 0 /* no flags */ + "1.0", /* String version representation */ + MariaDB_PLUGIN_MATURITY_BETA /* Maturity (see include/mysql/plugin.h)*/ } mysql_declare_plugin_end; From de55a7d1f923849e221ca44d9f8e375569ffe5f0 Mon Sep 17 
00:00:00 2001 From: Sergei Golubchik Date: Tue, 20 Mar 2018 20:54:58 +0100 Subject: [PATCH 112/139] Allow table-less selects even when wsrep is not ready It doesn't make sense to allow selects from I_S but disallow selects that don't use any tables at all, because any (disallowed) select that doesn't use tables can be made allowed by adding "FROM I_S.COLLATIONS LIMIT 1" to the end. And it break mysql-test rather badly, even check-testcase.test fails on its first `SELECT '$tmp' = 'No such row'` This reverts 9a896148576, c5dd2abf4ca, and 33028f7c4bc: Refs: MW-245 - changed logic so that in non primary node it is possible to do SET + SHOW + SELECT from information and pfs schema, when dirty reads are not enabled - however, non table selects are not allowed (e.g. SELECT 1) Refs MW-245 - logic was wrong in detecting if queries are allowed in non primary node. it allowed select with no table list to execute even if dirty reads was not specified Refs: MW-245 - Adjust tests to account for the new behavior. 
--- mysql-test/suite/galera/r/galera_var_dirty_reads.result | 4 ++-- mysql-test/suite/galera/t/galera_as_slave_nonprim.test | 7 +++---- mysql-test/suite/galera/t/galera_var_dirty_reads.test | 2 -- sql/sql_parse.cc | 4 +--- 4 files changed, 6 insertions(+), 11 deletions(-) diff --git a/mysql-test/suite/galera/r/galera_var_dirty_reads.result b/mysql-test/suite/galera/r/galera_var_dirty_reads.result index da842e7254a..5a108ddfcaa 100644 --- a/mysql-test/suite/galera/r/galera_var_dirty_reads.result +++ b/mysql-test/suite/galera/r/galera_var_dirty_reads.result @@ -30,9 +30,9 @@ SET @@session.wsrep_dirty_reads=OFF; SELECT i, variable_name, variable_value FROM t1, information_schema.session_variables WHERE variable_name LIKE "wsrep_dirty_reads" AND i = 1; ERROR 08S01: WSREP has not yet prepared node for application use SELECT 1; -ERROR 08S01: WSREP has not yet prepared node for application use +1 +1 USE information_schema; -ERROR 08S01: WSREP has not yet prepared node for application use SELECT * FROM information_schema.session_variables WHERE variable_name LIKE "wsrep_dirty_reads"; VARIABLE_NAME VARIABLE_VALUE WSREP_DIRTY_READS OFF diff --git a/mysql-test/suite/galera/t/galera_as_slave_nonprim.test b/mysql-test/suite/galera/t/galera_as_slave_nonprim.test index 26fec05dfe5..46a93458271 100644 --- a/mysql-test/suite/galera/t/galera_as_slave_nonprim.test +++ b/mysql-test/suite/galera/t/galera_as_slave_nonprim.test @@ -51,7 +51,7 @@ INSERT INTO t1 VALUES (1),(2),(3),(4),(5); --let $value = query_get_value(SHOW SLAVE STATUS, Last_SQL_Error, 1) --connection node_3 --disable_query_log ---eval SELECT "$value" IN ("Error 'WSREP has not yet prepared node for application use' on query. Default database: 'test'. Query: 'BEGIN'", "Node has dropped from cluster") AS expected_error +--eval SELECT "$value" IN ("Error 'Unknown command' on query. Default database: 'test'. Query: 'BEGIN'", "Node has dropped from cluster") AS expected_error --enable_query_log # Step #4. 
Bring back the async slave and restart replication @@ -85,9 +85,8 @@ DROP TABLE t1; STOP SLAVE; RESET SLAVE ALL; -CALL mtr.add_suppression("Slave SQL: Error 'WSREP has not yet prepared node for application use' on query"); -CALL mtr.add_suppression("Slave: WSREP has not yet prepared node for application use Error_code: 1047"); -CALL mtr.add_suppression("TORDERED} returned -107 \\(Transport endpoint is not connected\\)"); +CALL mtr.add_suppression("Slave SQL: Error 'Unknown command' on query"); +CALL mtr.add_suppression("Slave: Unknown command Error_code: 1047"); CALL mtr.add_suppression("Transport endpoint is not connected"); CALL mtr.add_suppression("Slave SQL: Error in Xid_log_event: Commit could not be completed, 'Deadlock found when trying to get lock; try restarting transaction', Error_code: 1213"); CALL mtr.add_suppression("Slave SQL: Node has dropped from cluster, Error_code: 1047"); diff --git a/mysql-test/suite/galera/t/galera_var_dirty_reads.test b/mysql-test/suite/galera/t/galera_var_dirty_reads.test index 85d759e4a27..138b7c1c703 100644 --- a/mysql-test/suite/galera/t/galera_var_dirty_reads.test +++ b/mysql-test/suite/galera/t/galera_var_dirty_reads.test @@ -47,10 +47,8 @@ SET @@session.wsrep_dirty_reads=OFF; --error ER_UNKNOWN_COM_ERROR SELECT i, variable_name, variable_value FROM t1, information_schema.session_variables WHERE variable_name LIKE "wsrep_dirty_reads" AND i = 1; ---error ER_UNKNOWN_COM_ERROR SELECT 1; ---error ER_UNKNOWN_COM_ERROR USE information_schema; SELECT * FROM information_schema.session_variables WHERE variable_name LIKE "wsrep_dirty_reads"; diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc index 0a68d8ccea8..9271e231197 100644 --- a/sql/sql_parse.cc +++ b/sql/sql_parse.cc @@ -893,7 +893,6 @@ void cleanup_items(Item *item) #ifdef WITH_WSREP static bool wsrep_tables_accessible_when_detached(const TABLE_LIST *tables) { - bool has_tables = false; for (const TABLE_LIST *table= tables; table; table= table->next_global) { TABLE_CATEGORY 
c; @@ -906,9 +905,8 @@ static bool wsrep_tables_accessible_when_detached(const TABLE_LIST *tables) { return false; } - has_tables = true; } - return has_tables; + return true; } #endif /* WITH_WSREP */ #ifndef EMBEDDED_LIBRARY From 89b0d5cb6e3db2736cdc5b66fd1f24e4455bc634 Mon Sep 17 00:00:00 2001 From: Sergei Golubchik Date: Mon, 16 Oct 2017 17:49:52 +0200 Subject: [PATCH 113/139] MDEV-13968 sst fails with "WSREP_SST_OPT_PORT: readonly variable" Backport from 10.2: 4c2c057d404 and f7090df712d --- scripts/wsrep_sst_common.sh | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/scripts/wsrep_sst_common.sh b/scripts/wsrep_sst_common.sh index a8f671de224..db0543a4d0f 100644 --- a/scripts/wsrep_sst_common.sh +++ b/scripts/wsrep_sst_common.sh @@ -43,7 +43,7 @@ case "$1" in else readonly WSREP_SST_OPT_HOST_UNESCAPED=${WSREP_SST_OPT_HOST} fi - readonly WSREP_SST_OPT_PORT=$(echo $WSREP_SST_OPT_ADDR | \ + readonly WSREP_SST_OPT_ADDR_PORT=$(echo $WSREP_SST_OPT_ADDR | \ cut -d ']' -f 2 | cut -s -d ':' -f 2 | cut -d '/' -f 1) readonly WSREP_SST_OPT_PATH=${WSREP_SST_OPT_ADDR#*/} readonly WSREP_SST_OPT_MODULE=${WSREP_SST_OPT_PATH%%/*} @@ -126,6 +126,17 @@ done readonly WSREP_SST_OPT_BYPASS readonly WSREP_SST_OPT_BINLOG +if [ -n "${WSREP_SST_OPT_ADDR_PORT:-}" ]; then + if [ -n "${WSREP_SST_OPT_PORT:-}" ]; then + if [ "$WSREP_SST_OPT_PORT" != "$WSREP_SST_OPT_ADDR_PORT" ]; then + wsrep_log_error "port in --port=$WSREP_SST_OPT_PORT differs from port in --address=$WSREP_SST_OPT_ADDR" + exit 2 + fi + else + readonly WSREP_SST_OPT_PORT="$WSREP_SST_OPT_ADDR_PORT" + fi +fi + # try to use my_print_defaults, mysql and mysqldump that come with the sources # (for MTR suite) SCRIPTS_DIR="$(cd $(dirname "$0"); pwd -P)" From a15ab358fc1ea75634de266fa8150b3e89ac5593 Mon Sep 17 00:00:00 2001 From: Sergei Golubchik Date: Tue, 20 Mar 2018 22:35:19 +0100 Subject: [PATCH 114/139] wsrep_sst scripts: support traditional netcat --- scripts/wsrep_sst_mariabackup.sh | 18 
++++++++++++++++-- scripts/wsrep_sst_xtrabackup-v2.sh | 19 ++++++++++++++++--- scripts/wsrep_sst_xtrabackup.sh | 18 ++++++++++++++++-- 3 files changed, 48 insertions(+), 7 deletions(-) diff --git a/scripts/wsrep_sst_mariabackup.sh b/scripts/wsrep_sst_mariabackup.sh index 9e3fc54290d..549161aa8f3 100644 --- a/scripts/wsrep_sst_mariabackup.sh +++ b/scripts/wsrep_sst_mariabackup.sh @@ -179,12 +179,26 @@ get_transfer() wsrep_log_info "Using netcat as streamer" if [[ "$WSREP_SST_OPT_ROLE" == "joiner" ]];then if nc -h 2>&1 | grep -q ncat;then + # Ncat tcmd="nc -l ${TSST_PORT}" - else + elif nc -h 2>&1 | grep -q -- '-d\>';then + # Debian netcat tcmd="nc -dl ${TSST_PORT}" + else + # traditional netcat + tcmd="nc -l -p ${TSST_PORT}" fi else - tcmd="nc ${REMOTEIP} ${TSST_PORT}" + if nc -h 2>&1 | grep -q ncat;then + # Ncat + tcmd="nc ${REMOTEIP} ${TSST_PORT}" + elif nc -h 2>&1 | grep -q -- '-d\>';then + # Debian netcat + tcmd="nc ${REMOTEIP} ${TSST_PORT}" + else + # traditional netcat + tcmd="nc -q0 ${REMOTEIP} ${TSST_PORT}" + fi fi else tfmt='socat' diff --git a/scripts/wsrep_sst_xtrabackup-v2.sh b/scripts/wsrep_sst_xtrabackup-v2.sh index 9104daf19bc..00efdaeebcf 100644 --- a/scripts/wsrep_sst_xtrabackup-v2.sh +++ b/scripts/wsrep_sst_xtrabackup-v2.sh @@ -268,13 +268,26 @@ get_transfer() wsrep_log_info "Using netcat as streamer" if [[ "$WSREP_SST_OPT_ROLE" == "joiner" ]];then if nc -h 2>&1 | grep -q ncat; then + # Ncat tcmd="nc $sockopt -l ${TSST_PORT}" - else + elif nc -h 2>&1 | grep -q -- '-d\>';then + # Debian netcat tcmd="nc $sockopt -dl ${TSST_PORT}" + else + # traditional netcat + tcmd="nc $sockopt -l -p ${TSST_PORT}" fi else - # netcat doesn't understand [] around IPv6 address - tcmd="nc ${WSREP_SST_OPT_HOST_UNESCAPED} ${TSST_PORT}" + if nc -h 2>&1 | grep -q ncat;then + # Ncat + tcmd="nc ${WSREP_SST_OPT_HOST_UNESCAPED} ${TSST_PORT}" + elif nc -h 2>&1 | grep -q -- '-d\>';then + # Debian netcat + tcmd="nc ${WSREP_SST_OPT_HOST_UNESCAPED} ${TSST_PORT}" + else + # 
traditional netcat + tcmd="nc -q0 ${WSREP_SST_OPT_HOST_UNESCAPED} ${TSST_PORT}" + fi fi else tfmt='socat' diff --git a/scripts/wsrep_sst_xtrabackup.sh b/scripts/wsrep_sst_xtrabackup.sh index 867aab622ed..628c1f8b06c 100644 --- a/scripts/wsrep_sst_xtrabackup.sh +++ b/scripts/wsrep_sst_xtrabackup.sh @@ -144,12 +144,26 @@ get_transfer() wsrep_log_info "Using netcat as streamer" if [[ "$WSREP_SST_OPT_ROLE" == "joiner" ]];then if nc -h 2>&1 | grep -q ncat;then + # Ncat tcmd="nc -l ${TSST_PORT}" - else + elif nc -h 2>&1 | grep -q -- '-d\>';then + # Debian netcat tcmd="nc -dl ${TSST_PORT}" + else + # traditional netcat + tcmd="nc -l -p ${TSST_PORT}" fi else - tcmd="nc ${WSREP_SST_OPT_HOST_UNESCAPED} ${TSST_PORT}" + if nc -h 2>&1 | grep -q ncat;then + # Ncat + tcmd="nc ${REMOTEIP} ${TSST_PORT}" + elif nc -h 2>&1 | grep -q -- '-d\>';then + # Debian netcat + tcmd="nc ${REMOTEIP} ${TSST_PORT}" + else + # traditional netcat + tcmd="nc -q0 ${REMOTEIP} ${TSST_PORT}" + fi fi else tfmt='socat' From ccd5c9c64e17f33bec73f8dbee8f17cbc8a8f09d Mon Sep 17 00:00:00 2001 From: Sergei Golubchik Date: Mon, 19 Mar 2018 23:06:59 +0100 Subject: [PATCH 115/139] mysql: don't prepare strings if they won't be used in particular, don't call server_version_string() unnecessary, because it runs 'SELECT @@version_comment' and this might block under certain galera settings (wsrep_sync_wait). --- client/mysql.cc | 20 +++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) diff --git a/client/mysql.cc b/client/mysql.cc index 0b8be00c542..2cbb6ae25dd 100644 --- a/client/mysql.cc +++ b/client/mysql.cc @@ -1225,15 +1225,17 @@ int main(int argc,char *argv[]) window_resize(0); #endif - put_info("Welcome to the MariaDB monitor. 
Commands end with ; or \\g.", - INFO_INFO); - my_snprintf((char*) glob_buffer.ptr(), glob_buffer.alloced_length(), - "Your %s connection id is %lu\nServer version: %s\n", - mysql_get_server_name(&mysql), - mysql_thread_id(&mysql), server_version_string(&mysql)); - put_info((char*) glob_buffer.ptr(),INFO_INFO); - - put_info(ORACLE_WELCOME_COPYRIGHT_NOTICE("2000"), INFO_INFO); + if (!status.batch) + { + put_info("Welcome to the MariaDB monitor. Commands end with ; or \\g.", + INFO_INFO); + my_snprintf((char*) glob_buffer.ptr(), glob_buffer.alloced_length(), + "Your %s connection id is %lu\nServer version: %s\n", + mysql_get_server_name(&mysql), + mysql_thread_id(&mysql), server_version_string(&mysql)); + put_info((char*) glob_buffer.ptr(),INFO_INFO); + put_info(ORACLE_WELCOME_COPYRIGHT_NOTICE("2000"), INFO_INFO); + } #ifdef HAVE_READLINE initialize_readline((char*) my_progname); From 7e300424a39f8b99ef73e037861f454268ba6fe7 Mon Sep 17 00:00:00 2001 From: Sergei Golubchik Date: Mon, 19 Mar 2018 23:23:51 +0100 Subject: [PATCH 116/139] wsrep_sst_auth: fix a memory leak wsrep_sst_auth_init() is always invoked with value==wsrep_sst_auth. Old code was leaking value, because it was never freed. 
--- sql/wsrep_sst.cc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/sql/wsrep_sst.cc b/sql/wsrep_sst.cc index 94804a6d3c7..8db100f3dc1 100644 --- a/sql/wsrep_sst.cc +++ b/sql/wsrep_sst.cc @@ -182,8 +182,8 @@ bool wsrep_sst_auth_update (sys_var *self, THD* thd, enum_var_type type) void wsrep_sst_auth_init (const char* value) { - if (wsrep_sst_auth == value) wsrep_sst_auth = NULL; - if (value) sst_auth_real_set (value); + DBUG_ASSERT(wsrep_sst_auth == value); + sst_auth_real_set (wsrep_sst_auth); } bool wsrep_sst_donor_check (sys_var *self, THD* thd, set_var* var) From 8f1014e9a0b41c3893f4b15b9db4842d816d9f3f Mon Sep 17 00:00:00 2001 From: Sergei Golubchik Date: Mon, 19 Mar 2018 23:27:10 +0100 Subject: [PATCH 117/139] MDEV-15409 make sure every sst script is tested in buildbot fix galera.galera_sst_mysqldump test to work: * must connect to 127.0.0.1, where mysqld is listening * disable wsrep_sync_wait in wsrep_sst_mysqldump, otherwise sst can deadlock * allow 127.0.0.1 for bind_address and wsrep_sst_receive_address. (it's useful in tests, or when two nodes are on the same box, or when nodes are on different boxes, but the connection is tunelled, or whatever. Don't judge user's setup). MDEV-14070 * don't wait for client connections to die when doing mysqldump sst. they'll die in a due time, and if needed mysql will wait on locks until they do. 
MDEV-14069 Also don't mark it big, to make sure it's sufficiently tested --- .../include/galera_sst_set_mysqldump.inc | 5 +++- .../galera/r/galera_sst_mysqldump.result | 5 +++- .../suite/galera/t/galera_sst_mysqldump.test | 1 - .../r/wsrep_sst_receive_address_basic.result | 4 ---- .../t/wsrep_sst_receive_address_basic.test | 4 ---- scripts/wsrep_sst_mysqldump.sh | 4 ++-- sql/wsrep_check_opts.cc | 3 +-- sql/wsrep_mysqld.cc | 2 +- sql/wsrep_sst.cc | 23 ------------------- 9 files changed, 12 insertions(+), 39 deletions(-) diff --git a/mysql-test/suite/galera/include/galera_sst_set_mysqldump.inc b/mysql-test/suite/galera/include/galera_sst_set_mysqldump.inc index cbd2c1c817a..16af5742b9b 100644 --- a/mysql-test/suite/galera/include/galera_sst_set_mysqldump.inc +++ b/mysql-test/suite/galera/include/galera_sst_set_mysqldump.inc @@ -4,6 +4,9 @@ --echo Setting SST method to mysqldump ... +call mtr.add_suppression("WSREP: wsrep_sst_method is set to 'mysqldump' yet mysqld bind_address is set to '127.0.0.1'"); +call mtr.add_suppression("Failed to load slave replication state from table mysql.gtid_slave_pos"); + --connection node_1 # We need a user with a password to perform SST, otherwise we hit LP #1378253 CREATE USER 'sst'; @@ -19,6 +22,6 @@ SET GLOBAL wsrep_sst_auth = 'sst:'; --disable_query_log # Set wsrep_sst_receive_address to the SQL port ---eval SET GLOBAL wsrep_sst_receive_address = '127.0.0.2:$NODE_MYPORT_2'; +--eval SET GLOBAL wsrep_sst_receive_address = '127.0.0.1:$NODE_MYPORT_2'; --enable_query_log SET GLOBAL wsrep_sst_method = 'mysqldump'; diff --git a/mysql-test/suite/galera/r/galera_sst_mysqldump.result b/mysql-test/suite/galera/r/galera_sst_mysqldump.result index 5c0d9a45d41..aeada721adc 100644 --- a/mysql-test/suite/galera/r/galera_sst_mysqldump.result +++ b/mysql-test/suite/galera/r/galera_sst_mysqldump.result @@ -1,4 +1,6 @@ Setting SST method to mysqldump ... 
+call mtr.add_suppression("WSREP: wsrep_sst_method is set to 'mysqldump' yet mysqld bind_address is set to '127.0.0.1'"); +call mtr.add_suppression("Failed to load slave replication state from table mysql.gtid_slave_pos"); CREATE USER 'sst'; GRANT ALL PRIVILEGES ON *.* TO 'sst'; SET GLOBAL wsrep_sst_auth = 'sst:'; @@ -369,7 +371,7 @@ INSERT INTO t1 VALUES ('node2_committed_before'); INSERT INTO t1 VALUES ('node2_committed_before'); INSERT INTO t1 VALUES ('node2_committed_before'); COMMIT; -SET GLOBAL debug = 'd,sync.alter_opened_table'; +SET GLOBAL debug_dbug = 'd,sync.alter_opened_table'; ALTER TABLE t1 ADD COLUMN f2 INTEGER; SET wsrep_sync_wait = 0; Killing server ... @@ -448,6 +450,7 @@ COUNT(*) = 0 DROP TABLE t1; COMMIT; SET AUTOCOMMIT=ON; +SET GLOBAL debug_dbug = $debug_orig; CALL mtr.add_suppression("Slave SQL: Error 'The MySQL server is running with the --skip-grant-tables option so it cannot execute this statement' on query"); DROP USER sst; CALL mtr.add_suppression("Slave SQL: Error 'The MySQL server is running with the --skip-grant-tables option so it cannot execute this statement' on query"); diff --git a/mysql-test/suite/galera/t/galera_sst_mysqldump.test b/mysql-test/suite/galera/t/galera_sst_mysqldump.test index 0b7171597dd..ce112c57745 100644 --- a/mysql-test/suite/galera/t/galera_sst_mysqldump.test +++ b/mysql-test/suite/galera/t/galera_sst_mysqldump.test @@ -1,4 +1,3 @@ ---source include/big_test.inc --source include/galera_cluster.inc --source include/have_innodb.inc diff --git a/mysql-test/suite/sys_vars/r/wsrep_sst_receive_address_basic.result b/mysql-test/suite/sys_vars/r/wsrep_sst_receive_address_basic.result index 6db52eb8150..3e1fb6cad79 100644 --- a/mysql-test/suite/sys_vars/r/wsrep_sst_receive_address_basic.result +++ b/mysql-test/suite/sys_vars/r/wsrep_sst_receive_address_basic.result @@ -30,10 +30,6 @@ SELECT @@global.wsrep_sst_receive_address; 192.168.2.254 # invalid values -SET @@global.wsrep_sst_receive_address='127.0.0.1:4444'; 
-ERROR 42000: Variable 'wsrep_sst_receive_address' can't be set to the value of '127.0.0.1:4444' -SET @@global.wsrep_sst_receive_address='127.0.0.1'; -ERROR 42000: Variable 'wsrep_sst_receive_address' can't be set to the value of '127.0.0.1' SELECT @@global.wsrep_sst_receive_address; @@global.wsrep_sst_receive_address 192.168.2.254 diff --git a/mysql-test/suite/sys_vars/t/wsrep_sst_receive_address_basic.test b/mysql-test/suite/sys_vars/t/wsrep_sst_receive_address_basic.test index 9e50cbf8947..59f69c14dfb 100644 --- a/mysql-test/suite/sys_vars/t/wsrep_sst_receive_address_basic.test +++ b/mysql-test/suite/sys_vars/t/wsrep_sst_receive_address_basic.test @@ -27,10 +27,6 @@ SELECT @@global.wsrep_sst_receive_address; --echo --echo # invalid values ---error ER_WRONG_VALUE_FOR_VAR -SET @@global.wsrep_sst_receive_address='127.0.0.1:4444'; ---error ER_WRONG_VALUE_FOR_VAR -SET @@global.wsrep_sst_receive_address='127.0.0.1'; SELECT @@global.wsrep_sst_receive_address; --error ER_WRONG_VALUE_FOR_VAR SET @@global.wsrep_sst_receive_address=NULL; diff --git a/scripts/wsrep_sst_mysqldump.sh b/scripts/wsrep_sst_mysqldump.sh index 358247359e6..46edefd4829 100644 --- a/scripts/wsrep_sst_mysqldump.sh +++ b/scripts/wsrep_sst_mysqldump.sh @@ -119,11 +119,11 @@ MYSQL="$MYSQL_CLIENT $WSREP_SST_OPT_CONF "\ # Check if binary logging is enabled on the joiner node. # Note: SELECT cannot be used at this point. -LOG_BIN=$(echo "SHOW VARIABLES LIKE 'log_bin'" | $MYSQL |\ +LOG_BIN=$(echo "set statement wsrep_sync_wait=0 for SHOW VARIABLES LIKE 'log_bin'" | $MYSQL |\ tail -1 | awk -F ' ' '{ print $2 }') # Check the joiner node's server version. 
-SERVER_VERSION=$(echo "SHOW VARIABLES LIKE 'version'" | $MYSQL |\ +SERVER_VERSION=$(echo "set statement wsrep_sync_wait=0 for SHOW VARIABLES LIKE 'version'" | $MYSQL |\ tail -1 | awk -F ' ' '{ print $2 }') RESET_MASTER="" diff --git a/sql/wsrep_check_opts.cc b/sql/wsrep_check_opts.cc index 690c1a4b2a4..28bd3a4492b 100644 --- a/sql/wsrep_check_opts.cc +++ b/sql/wsrep_check_opts.cc @@ -51,7 +51,7 @@ int wsrep_check_opts() (!strcasecmp(my_bind_addr_str, "127.0.0.1") || !strcasecmp(my_bind_addr_str, "localhost"))) { - WSREP_ERROR("wsrep_sst_method is set to 'mysqldump' yet " + WSREP_WARN("wsrep_sst_method is set to 'mysqldump' yet " "mysqld bind_address is set to '%s', which makes it " "impossible to receive state transfer from another " "node, since mysqld won't accept such connections. " @@ -59,7 +59,6 @@ int wsrep_check_opts() "set bind_address to allow mysql client connections " "from other cluster members (e.g. 0.0.0.0).", my_bind_addr_str); - return 1; } } else diff --git a/sql/wsrep_mysqld.cc b/sql/wsrep_mysqld.cc index 9d4037082bc..f5ce14aed6a 100644 --- a/sql/wsrep_mysqld.cc +++ b/sql/wsrep_mysqld.cc @@ -369,7 +369,7 @@ wsrep_view_handler_cb (void* app_ctx, if (!wsrep_before_SE()) { WSREP_DEBUG("[debug]: closing client connections for PRIM"); - wsrep_close_client_connections(TRUE); + wsrep_close_client_connections(FALSE); } ssize_t const req_len= wsrep_sst_prepare (sst_req); diff --git a/sql/wsrep_sst.cc b/sql/wsrep_sst.cc index 8db100f3dc1..260755d08a8 100644 --- a/sql/wsrep_sst.cc +++ b/sql/wsrep_sst.cc @@ -85,37 +85,14 @@ static void make_wsrep_defaults_file() } -// TODO: Improve address verification. -static bool sst_receive_address_check (const char* str) -{ - if (!strncasecmp(str, "127.0.0.1", strlen("127.0.0.1")) || - !strncasecmp(str, "localhost", strlen("localhost"))) - { - return 1; - } - - return 0; -} - bool wsrep_sst_receive_address_check (sys_var *self, THD* thd, set_var* var) { - char addr_buf[FN_REFLEN]; - if ((! 
var->save_result.string_value.str) || (var->save_result.string_value.length > (FN_REFLEN - 1))) // safety { goto err; } - memcpy(addr_buf, var->save_result.string_value.str, - var->save_result.string_value.length); - addr_buf[var->save_result.string_value.length]= 0; - - if (sst_receive_address_check(addr_buf)) - { - goto err; - } - return 0; err: From 4b1cbff7a827f3f1cf87122a27770a12bcadf281 Mon Sep 17 00:00:00 2001 From: Sergei Golubchik Date: Tue, 20 Mar 2018 01:49:32 +0100 Subject: [PATCH 118/139] MDEV-15409 make sure every sst script is tested in buildbot make galera.galera_sst_rsync not big --- mysql-test/suite/galera/t/galera_sst_rsync.test | 1 - 1 file changed, 1 deletion(-) diff --git a/mysql-test/suite/galera/t/galera_sst_rsync.test b/mysql-test/suite/galera/t/galera_sst_rsync.test index c6823795e59..d8fa3efb5d7 100644 --- a/mysql-test/suite/galera/t/galera_sst_rsync.test +++ b/mysql-test/suite/galera/t/galera_sst_rsync.test @@ -1,4 +1,3 @@ ---source include/big_test.inc --source include/galera_cluster.inc --source include/have_innodb.inc From 60d4abc1e506e44708b024c34a3145b648841e69 Mon Sep 17 00:00:00 2001 From: Sergei Golubchik Date: Tue, 20 Mar 2018 13:51:55 +0100 Subject: [PATCH 119/139] MDEV-15409 make sure every sst script is tested in buildbot * make galera.galera_sst_xtrabackup* not big * auto-select between socat and nc, whatever available * auto-skip xtrabackup tests if no xtrabackup or neither socat nor nc --- .../suite/galera/include/have_xtrabackup.inc | 4 ++++ mysql-test/suite/galera/suite.pm | 20 +++++++++++++++++-- .../t/galera_autoinc_sst_xtrabackup.test | 1 + .../galera/t/galera_ist_xtrabackup-v2.test | 1 + .../t/galera_sst_xtrabackup-v2-options.cnf | 1 + .../t/galera_sst_xtrabackup-v2-options.test | 2 +- .../galera/t/galera_sst_xtrabackup-v2.cnf | 2 ++ .../galera/t/galera_sst_xtrabackup-v2.test | 2 +- ...era_sst_xtrabackup-v2_encrypt_with_key.cnf | 1 + ...ra_sst_xtrabackup-v2_encrypt_with_key.test | 1 + 10 files changed, 31 
insertions(+), 4 deletions(-) create mode 100644 mysql-test/suite/galera/include/have_xtrabackup.inc diff --git a/mysql-test/suite/galera/include/have_xtrabackup.inc b/mysql-test/suite/galera/include/have_xtrabackup.inc new file mode 100644 index 00000000000..0dd693f2c63 --- /dev/null +++ b/mysql-test/suite/galera/include/have_xtrabackup.inc @@ -0,0 +1,4 @@ +# +# suite.pm will make sure that all tests including this file +# will be skipped as needed +# diff --git a/mysql-test/suite/galera/suite.pm b/mysql-test/suite/galera/suite.pm index 361743f1243..b7d157186d8 100644 --- a/mysql-test/suite/galera/suite.pm +++ b/mysql-test/suite/galera/suite.pm @@ -1,4 +1,4 @@ -package My::Suite::GALERA; +package My::Suite::Galera; use File::Basename; use My::Find; @@ -25,6 +25,8 @@ return "No scritps" unless $cpath; my ($epath) = grep { -f "$_/my_print_defaults"; } "$::bindir/extra", $::path_client_bindir; return "No my_print_defaults" unless $epath; +sub which($) { return `sh -c "command -v $_[0]"` } + push @::global_suppressions, ( qr(WSREP: wsrep_sst_receive_address is set to '127.0.0.1), @@ -82,5 +84,19 @@ $ENV{PATH}="$epath:$ENV{PATH}"; $ENV{PATH}="$spath:$ENV{PATH}" unless $epath eq $spath; $ENV{PATH}="$cpath:$ENV{PATH}" unless $cpath eq $spath; -bless { }; +if (which(socat)) { + $ENV{MTR_GALERA_TFMT}='socat'; +} elsif (which(nc)) { + $ENV{MTR_GALERA_TFMT}='nc'; +} +sub skip_combinations { + my %skip = (); + $skip{'include/have_xtrabackup.inc'} = 'Need innobackupex' + unless which(innobackupex); + $skip{'include/have_xtrabackup.inc'} = 'Need socat or nc' + unless $ENV{MTR_GALERA_TFMT}; + %skip; +} + +bless { }; diff --git a/mysql-test/suite/galera/t/galera_autoinc_sst_xtrabackup.test b/mysql-test/suite/galera/t/galera_autoinc_sst_xtrabackup.test index 30ce9bc4ceb..cd7621bfa6e 100644 --- a/mysql-test/suite/galera/t/galera_autoinc_sst_xtrabackup.test +++ b/mysql-test/suite/galera/t/galera_autoinc_sst_xtrabackup.test @@ -6,6 +6,7 @@ --source include/big_test.inc --source 
include/galera_cluster.inc --source include/have_innodb.inc +--source include/have_xtrabackup.inc --connection node_1 --let $connection_id = `SELECT CONNECTION_ID()` diff --git a/mysql-test/suite/galera/t/galera_ist_xtrabackup-v2.test b/mysql-test/suite/galera/t/galera_ist_xtrabackup-v2.test index 8b399e77794..c44b0642342 100644 --- a/mysql-test/suite/galera/t/galera_ist_xtrabackup-v2.test +++ b/mysql-test/suite/galera/t/galera_ist_xtrabackup-v2.test @@ -1,6 +1,7 @@ --source include/big_test.inc --source include/galera_cluster.inc --source include/have_innodb.inc +--source include/have_xtrabackup.inc --source suite/galera/include/galera_st_disconnect_slave.inc --source suite/galera/include/galera_st_shutdown_slave.inc diff --git a/mysql-test/suite/galera/t/galera_sst_xtrabackup-v2-options.cnf b/mysql-test/suite/galera/t/galera_sst_xtrabackup-v2-options.cnf index 31bd1af07c2..1e29673c0ff 100644 --- a/mysql-test/suite/galera/t/galera_sst_xtrabackup-v2-options.cnf +++ b/mysql-test/suite/galera/t/galera_sst_xtrabackup-v2-options.cnf @@ -22,3 +22,4 @@ parallel=2 encrypt=1 encrypt-algo=AES256 encrypt-key=4FA92C5873672E20FB163A0BCB2BB4A4 +transferfmt=@ENV.MTR_GALERA_TFMT diff --git a/mysql-test/suite/galera/t/galera_sst_xtrabackup-v2-options.test b/mysql-test/suite/galera/t/galera_sst_xtrabackup-v2-options.test index 4573f176482..db2b706b6b8 100644 --- a/mysql-test/suite/galera/t/galera_sst_xtrabackup-v2-options.test +++ b/mysql-test/suite/galera/t/galera_sst_xtrabackup-v2-options.test @@ -3,9 +3,9 @@ # Initial SST happens via xtrabackup, so there is not much to do in the body of the test # ---source include/big_test.inc --source include/galera_cluster.inc --source include/have_innodb.inc +--source include/have_xtrabackup.inc SELECT 1; diff --git a/mysql-test/suite/galera/t/galera_sst_xtrabackup-v2.cnf b/mysql-test/suite/galera/t/galera_sst_xtrabackup-v2.cnf index 47cb3e02292..0025b259ec5 100644 --- a/mysql-test/suite/galera/t/galera_sst_xtrabackup-v2.cnf +++ 
b/mysql-test/suite/galera/t/galera_sst_xtrabackup-v2.cnf @@ -11,3 +11,5 @@ wsrep_provider_options='base_port=@mysqld.1.#galera_port;gcache.size=1;pc.ignore [mysqld.2] wsrep_provider_options='base_port=@mysqld.2.#galera_port;gcache.size=1;pc.ignore_sb=true' +[sst] +transferfmt=@ENV.MTR_GALERA_TFMT diff --git a/mysql-test/suite/galera/t/galera_sst_xtrabackup-v2.test b/mysql-test/suite/galera/t/galera_sst_xtrabackup-v2.test index aac6822170a..f1fd0f3ddf3 100644 --- a/mysql-test/suite/galera/t/galera_sst_xtrabackup-v2.test +++ b/mysql-test/suite/galera/t/galera_sst_xtrabackup-v2.test @@ -1,6 +1,6 @@ ---source include/big_test.inc --source include/galera_cluster.inc --source include/have_innodb.inc +--source include/have_xtrabackup.inc # Save original auto_increment_offset values. --let $node_1=node_1 diff --git a/mysql-test/suite/galera/t/galera_sst_xtrabackup-v2_encrypt_with_key.cnf b/mysql-test/suite/galera/t/galera_sst_xtrabackup-v2_encrypt_with_key.cnf index 969516f5f3b..63d05104a37 100644 --- a/mysql-test/suite/galera/t/galera_sst_xtrabackup-v2_encrypt_with_key.cnf +++ b/mysql-test/suite/galera/t/galera_sst_xtrabackup-v2_encrypt_with_key.cnf @@ -9,3 +9,4 @@ wsrep_debug=ON tkey=@ENV.MYSQL_TEST_DIR/std_data/galera-key.pem tcert=@ENV.MYSQL_TEST_DIR/std_data/galera-cert.pem encrypt=3 +transferfmt=@ENV.MTR_GALERA_TFMT diff --git a/mysql-test/suite/galera/t/galera_sst_xtrabackup-v2_encrypt_with_key.test b/mysql-test/suite/galera/t/galera_sst_xtrabackup-v2_encrypt_with_key.test index 24d9589d111..2f685ca7184 100644 --- a/mysql-test/suite/galera/t/galera_sst_xtrabackup-v2_encrypt_with_key.test +++ b/mysql-test/suite/galera/t/galera_sst_xtrabackup-v2_encrypt_with_key.test @@ -6,6 +6,7 @@ --source include/big_test.inc --source include/galera_cluster.inc --source include/have_innodb.inc +--source include/have_xtrabackup.inc SELECT 1; From 5ff7ed96d5e683fe689d101f9add4407c49f0e93 Mon Sep 17 00:00:00 2001 From: Sergei Golubchik Date: Tue, 20 Mar 2018 22:42:42 +0100 Subject: 
[PATCH 120/139] MDEV-15409 make sure every sst script is tested in buildbot create galera.galera_sst_mariabackup --- .../suite/galera/include/have_mariabackup.inc | 4 + .../galera/r/galera_sst_mariabackup.result | 359 ++++++++++++++++++ mysql-test/suite/galera/suite.pm | 10 +- .../suite/galera/t/galera_sst_mariabackup.cnf | 16 + .../galera/t/galera_sst_mariabackup.test | 19 + 5 files changed, 407 insertions(+), 1 deletion(-) create mode 100644 mysql-test/suite/galera/include/have_mariabackup.inc create mode 100644 mysql-test/suite/galera/r/galera_sst_mariabackup.result create mode 100644 mysql-test/suite/galera/t/galera_sst_mariabackup.cnf create mode 100644 mysql-test/suite/galera/t/galera_sst_mariabackup.test diff --git a/mysql-test/suite/galera/include/have_mariabackup.inc b/mysql-test/suite/galera/include/have_mariabackup.inc new file mode 100644 index 00000000000..0dd693f2c63 --- /dev/null +++ b/mysql-test/suite/galera/include/have_mariabackup.inc @@ -0,0 +1,4 @@ +# +# suite.pm will make sure that all tests including this file +# will be skipped as needed +# diff --git a/mysql-test/suite/galera/r/galera_sst_mariabackup.result b/mysql-test/suite/galera/r/galera_sst_mariabackup.result new file mode 100644 index 00000000000..df2d9190a4b --- /dev/null +++ b/mysql-test/suite/galera/r/galera_sst_mariabackup.result @@ -0,0 +1,359 @@ +Performing State Transfer on a server that has been shut down cleanly and restarted +CREATE TABLE t1 (f1 CHAR(255)) ENGINE=InnoDB; +SET AUTOCOMMIT=OFF; +START TRANSACTION; +INSERT INTO t1 VALUES ('node1_committed_before'); +INSERT INTO t1 VALUES ('node1_committed_before'); +INSERT INTO t1 VALUES ('node1_committed_before'); +INSERT INTO t1 VALUES ('node1_committed_before'); +INSERT INTO t1 VALUES ('node1_committed_before'); +COMMIT; +SET AUTOCOMMIT=OFF; +START TRANSACTION; +INSERT INTO t1 VALUES ('node2_committed_before'); +INSERT INTO t1 VALUES ('node2_committed_before'); +INSERT INTO t1 VALUES ('node2_committed_before'); +INSERT INTO t1 
VALUES ('node2_committed_before'); +INSERT INTO t1 VALUES ('node2_committed_before'); +COMMIT; +Shutting down server ... +SET AUTOCOMMIT=OFF; +START TRANSACTION; +INSERT INTO t1 VALUES ('node1_committed_during'); +INSERT INTO t1 VALUES ('node1_committed_during'); +INSERT INTO t1 VALUES ('node1_committed_during'); +INSERT INTO t1 VALUES ('node1_committed_during'); +INSERT INTO t1 VALUES ('node1_committed_during'); +COMMIT; +START TRANSACTION; +INSERT INTO t1 VALUES ('node1_to_be_committed_after'); +INSERT INTO t1 VALUES ('node1_to_be_committed_after'); +INSERT INTO t1 VALUES ('node1_to_be_committed_after'); +INSERT INTO t1 VALUES ('node1_to_be_committed_after'); +INSERT INTO t1 VALUES ('node1_to_be_committed_after'); +SET AUTOCOMMIT=OFF; +START TRANSACTION; +INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after'); +INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after'); +INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after'); +INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after'); +INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after'); +Starting server ... 
+SET AUTOCOMMIT=OFF; +START TRANSACTION; +INSERT INTO t1 VALUES ('node2_committed_after'); +INSERT INTO t1 VALUES ('node2_committed_after'); +INSERT INTO t1 VALUES ('node2_committed_after'); +INSERT INTO t1 VALUES ('node2_committed_after'); +INSERT INTO t1 VALUES ('node2_committed_after'); +COMMIT; +INSERT INTO t1 VALUES ('node1_to_be_committed_after'); +INSERT INTO t1 VALUES ('node1_to_be_committed_after'); +INSERT INTO t1 VALUES ('node1_to_be_committed_after'); +INSERT INTO t1 VALUES ('node1_to_be_committed_after'); +INSERT INTO t1 VALUES ('node1_to_be_committed_after'); +COMMIT; +SET AUTOCOMMIT=OFF; +START TRANSACTION; +INSERT INTO t1 VALUES ('node1_committed_after'); +INSERT INTO t1 VALUES ('node1_committed_after'); +INSERT INTO t1 VALUES ('node1_committed_after'); +INSERT INTO t1 VALUES ('node1_committed_after'); +INSERT INTO t1 VALUES ('node1_committed_after'); +COMMIT; +INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after'); +INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after'); +INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after'); +INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after'); +INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after'); +ROLLBACK; +SELECT COUNT(*) = 35 FROM t1; +COUNT(*) = 35 +1 +SELECT COUNT(*) = 0 FROM (SELECT COUNT(*) AS c, f1 FROM t1 GROUP BY f1 HAVING c NOT IN (5, 10)) AS a1; +COUNT(*) = 0 +1 +COMMIT; +SET AUTOCOMMIT=ON; +SELECT COUNT(*) = 35 FROM t1; +COUNT(*) = 35 +1 +SELECT COUNT(*) = 0 FROM (SELECT COUNT(*) AS c, f1 FROM t1 GROUP BY f1 HAVING c NOT IN (5, 10)) AS a1; +COUNT(*) = 0 +1 +DROP TABLE t1; +COMMIT; +SET AUTOCOMMIT=ON; +Performing State Transfer on a server that starts from a clean var directory +This is accomplished by shutting down node #2 and removing its var directory before restarting it +CREATE TABLE t1 (f1 CHAR(255)) ENGINE=InnoDB; +SET AUTOCOMMIT=OFF; +START TRANSACTION; +INSERT INTO t1 VALUES ('node1_committed_before'); +INSERT INTO t1 VALUES ('node1_committed_before'); +INSERT INTO t1 VALUES 
('node1_committed_before'); +INSERT INTO t1 VALUES ('node1_committed_before'); +INSERT INTO t1 VALUES ('node1_committed_before'); +COMMIT; +SET AUTOCOMMIT=OFF; +START TRANSACTION; +INSERT INTO t1 VALUES ('node2_committed_before'); +INSERT INTO t1 VALUES ('node2_committed_before'); +INSERT INTO t1 VALUES ('node2_committed_before'); +INSERT INTO t1 VALUES ('node2_committed_before'); +INSERT INTO t1 VALUES ('node2_committed_before'); +COMMIT; +Shutting down server ... +Cleaning var directory ... +SET AUTOCOMMIT=OFF; +START TRANSACTION; +INSERT INTO t1 VALUES ('node1_committed_during'); +INSERT INTO t1 VALUES ('node1_committed_during'); +INSERT INTO t1 VALUES ('node1_committed_during'); +INSERT INTO t1 VALUES ('node1_committed_during'); +INSERT INTO t1 VALUES ('node1_committed_during'); +COMMIT; +START TRANSACTION; +INSERT INTO t1 VALUES ('node1_to_be_committed_after'); +INSERT INTO t1 VALUES ('node1_to_be_committed_after'); +INSERT INTO t1 VALUES ('node1_to_be_committed_after'); +INSERT INTO t1 VALUES ('node1_to_be_committed_after'); +INSERT INTO t1 VALUES ('node1_to_be_committed_after'); +SET AUTOCOMMIT=OFF; +START TRANSACTION; +INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after'); +INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after'); +INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after'); +INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after'); +INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after'); +Starting server ... 
+SET AUTOCOMMIT=OFF; +START TRANSACTION; +INSERT INTO t1 VALUES ('node2_committed_after'); +INSERT INTO t1 VALUES ('node2_committed_after'); +INSERT INTO t1 VALUES ('node2_committed_after'); +INSERT INTO t1 VALUES ('node2_committed_after'); +INSERT INTO t1 VALUES ('node2_committed_after'); +COMMIT; +INSERT INTO t1 VALUES ('node1_to_be_committed_after'); +INSERT INTO t1 VALUES ('node1_to_be_committed_after'); +INSERT INTO t1 VALUES ('node1_to_be_committed_after'); +INSERT INTO t1 VALUES ('node1_to_be_committed_after'); +INSERT INTO t1 VALUES ('node1_to_be_committed_after'); +COMMIT; +SET AUTOCOMMIT=OFF; +START TRANSACTION; +INSERT INTO t1 VALUES ('node1_committed_after'); +INSERT INTO t1 VALUES ('node1_committed_after'); +INSERT INTO t1 VALUES ('node1_committed_after'); +INSERT INTO t1 VALUES ('node1_committed_after'); +INSERT INTO t1 VALUES ('node1_committed_after'); +COMMIT; +INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after'); +INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after'); +INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after'); +INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after'); +INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after'); +ROLLBACK; +SELECT COUNT(*) = 35 FROM t1; +COUNT(*) = 35 +1 +SELECT COUNT(*) = 0 FROM (SELECT COUNT(*) AS c, f1 FROM t1 GROUP BY f1 HAVING c NOT IN (5, 10)) AS a1; +COUNT(*) = 0 +1 +COMMIT; +SET AUTOCOMMIT=ON; +SELECT COUNT(*) = 35 FROM t1; +COUNT(*) = 35 +1 +SELECT COUNT(*) = 0 FROM (SELECT COUNT(*) AS c, f1 FROM t1 GROUP BY f1 HAVING c NOT IN (5, 10)) AS a1; +COUNT(*) = 0 +1 +DROP TABLE t1; +COMMIT; +SET AUTOCOMMIT=ON; +Performing State Transfer on a server that has been killed and restarted +CREATE TABLE t1 (f1 CHAR(255)) ENGINE=InnoDB; +SET AUTOCOMMIT=OFF; +START TRANSACTION; +INSERT INTO t1 VALUES ('node1_committed_before'); +INSERT INTO t1 VALUES ('node1_committed_before'); +INSERT INTO t1 VALUES ('node1_committed_before'); +INSERT INTO t1 VALUES ('node1_committed_before'); +INSERT INTO t1 VALUES 
('node1_committed_before'); +COMMIT; +SET AUTOCOMMIT=OFF; +START TRANSACTION; +INSERT INTO t1 VALUES ('node2_committed_before'); +INSERT INTO t1 VALUES ('node2_committed_before'); +INSERT INTO t1 VALUES ('node2_committed_before'); +INSERT INTO t1 VALUES ('node2_committed_before'); +INSERT INTO t1 VALUES ('node2_committed_before'); +COMMIT; +Killing server ... +SET AUTOCOMMIT=OFF; +START TRANSACTION; +INSERT INTO t1 VALUES ('node1_committed_during'); +INSERT INTO t1 VALUES ('node1_committed_during'); +INSERT INTO t1 VALUES ('node1_committed_during'); +INSERT INTO t1 VALUES ('node1_committed_during'); +INSERT INTO t1 VALUES ('node1_committed_during'); +COMMIT; +START TRANSACTION; +INSERT INTO t1 VALUES ('node1_to_be_committed_after'); +INSERT INTO t1 VALUES ('node1_to_be_committed_after'); +INSERT INTO t1 VALUES ('node1_to_be_committed_after'); +INSERT INTO t1 VALUES ('node1_to_be_committed_after'); +INSERT INTO t1 VALUES ('node1_to_be_committed_after'); +SET AUTOCOMMIT=OFF; +START TRANSACTION; +INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after'); +INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after'); +INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after'); +INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after'); +INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after'); +Performing --wsrep-recover ... +Starting server ... +Using --wsrep-start-position when starting mysqld ... 
+SET AUTOCOMMIT=OFF; +START TRANSACTION; +INSERT INTO t1 VALUES ('node2_committed_after'); +INSERT INTO t1 VALUES ('node2_committed_after'); +INSERT INTO t1 VALUES ('node2_committed_after'); +INSERT INTO t1 VALUES ('node2_committed_after'); +INSERT INTO t1 VALUES ('node2_committed_after'); +COMMIT; +INSERT INTO t1 VALUES ('node1_to_be_committed_after'); +INSERT INTO t1 VALUES ('node1_to_be_committed_after'); +INSERT INTO t1 VALUES ('node1_to_be_committed_after'); +INSERT INTO t1 VALUES ('node1_to_be_committed_after'); +INSERT INTO t1 VALUES ('node1_to_be_committed_after'); +COMMIT; +SET AUTOCOMMIT=OFF; +START TRANSACTION; +INSERT INTO t1 VALUES ('node1_committed_after'); +INSERT INTO t1 VALUES ('node1_committed_after'); +INSERT INTO t1 VALUES ('node1_committed_after'); +INSERT INTO t1 VALUES ('node1_committed_after'); +INSERT INTO t1 VALUES ('node1_committed_after'); +COMMIT; +INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after'); +INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after'); +INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after'); +INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after'); +INSERT INTO t1 VALUES ('node1_to_be_rollbacked_after'); +ROLLBACK; +SELECT COUNT(*) = 35 FROM t1; +COUNT(*) = 35 +1 +SELECT COUNT(*) = 0 FROM (SELECT COUNT(*) AS c, f1 FROM t1 GROUP BY f1 HAVING c NOT IN (5, 10)) AS a1; +COUNT(*) = 0 +1 +COMMIT; +SET AUTOCOMMIT=ON; +SELECT COUNT(*) = 35 FROM t1; +COUNT(*) = 35 +1 +SELECT COUNT(*) = 0 FROM (SELECT COUNT(*) AS c, f1 FROM t1 GROUP BY f1 HAVING c NOT IN (5, 10)) AS a1; +COUNT(*) = 0 +1 +DROP TABLE t1; +COMMIT; +SET AUTOCOMMIT=ON; +Performing State Transfer on a server that has been killed and restarted +while a DDL was in progress on it +CREATE TABLE t1 (f1 CHAR(255)) ENGINE=InnoDB; +SET AUTOCOMMIT=OFF; +START TRANSACTION; +INSERT INTO t1 VALUES ('node1_committed_before'); +INSERT INTO t1 VALUES ('node1_committed_before'); +INSERT INTO t1 VALUES ('node1_committed_before'); +INSERT INTO t1 VALUES ('node1_committed_before'); 
+INSERT INTO t1 VALUES ('node1_committed_before'); +START TRANSACTION; +INSERT INTO t1 VALUES ('node2_committed_before'); +INSERT INTO t1 VALUES ('node2_committed_before'); +INSERT INTO t1 VALUES ('node2_committed_before'); +INSERT INTO t1 VALUES ('node2_committed_before'); +INSERT INTO t1 VALUES ('node2_committed_before'); +COMMIT; +SET GLOBAL debug_dbug = 'd,sync.alter_opened_table'; +ALTER TABLE t1 ADD COLUMN f2 INTEGER; +SET wsrep_sync_wait = 0; +Killing server ... +SET AUTOCOMMIT=OFF; +START TRANSACTION; +INSERT INTO t1 (f1) VALUES ('node1_committed_during'); +INSERT INTO t1 (f1) VALUES ('node1_committed_during'); +INSERT INTO t1 (f1) VALUES ('node1_committed_during'); +INSERT INTO t1 (f1) VALUES ('node1_committed_during'); +INSERT INTO t1 (f1) VALUES ('node1_committed_during'); +COMMIT; +START TRANSACTION; +INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after'); +INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after'); +INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after'); +INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after'); +INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after'); +SET AUTOCOMMIT=OFF; +START TRANSACTION; +INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after'); +INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after'); +INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after'); +INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after'); +INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after'); +Performing --wsrep-recover ... +Starting server ... +Using --wsrep-start-position when starting mysqld ... 
+SET AUTOCOMMIT=OFF; +START TRANSACTION; +INSERT INTO t1 (f1) VALUES ('node2_committed_after'); +INSERT INTO t1 (f1) VALUES ('node2_committed_after'); +INSERT INTO t1 (f1) VALUES ('node2_committed_after'); +INSERT INTO t1 (f1) VALUES ('node2_committed_after'); +INSERT INTO t1 (f1) VALUES ('node2_committed_after'); +COMMIT; +INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after'); +INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after'); +INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after'); +INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after'); +INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after'); +COMMIT; +SET AUTOCOMMIT=OFF; +START TRANSACTION; +INSERT INTO t1 (f1) VALUES ('node1_committed_after'); +INSERT INTO t1 (f1) VALUES ('node1_committed_after'); +INSERT INTO t1 (f1) VALUES ('node1_committed_after'); +INSERT INTO t1 (f1) VALUES ('node1_committed_after'); +INSERT INTO t1 (f1) VALUES ('node1_committed_after'); +COMMIT; +INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after'); +INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after'); +INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after'); +INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after'); +INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after'); +ROLLBACK; +SELECT COUNT(*) = 2 FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 't1'; +COUNT(*) = 2 +1 +SELECT COUNT(*) = 35 FROM t1; +COUNT(*) = 35 +1 +SELECT COUNT(*) = 0 FROM (SELECT COUNT(*) AS c, f1 FROM t1 GROUP BY f1 HAVING c NOT IN (5, 10)) AS a1; +COUNT(*) = 0 +1 +COMMIT; +SET AUTOCOMMIT=ON; +SELECT COUNT(*) = 2 FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 't1'; +COUNT(*) = 2 +1 +SELECT COUNT(*) = 35 FROM t1; +COUNT(*) = 35 +1 +SELECT COUNT(*) = 0 FROM (SELECT COUNT(*) AS c, f1 FROM t1 GROUP BY f1 HAVING c NOT IN (5, 10)) AS a1; +COUNT(*) = 0 +1 +DROP TABLE t1; +COMMIT; +SET AUTOCOMMIT=ON; +SET GLOBAL debug_dbug = $debug_orig; diff --git a/mysql-test/suite/galera/suite.pm 
b/mysql-test/suite/galera/suite.pm index b7d157186d8..5cdb751a434 100644 --- a/mysql-test/suite/galera/suite.pm +++ b/mysql-test/suite/galera/suite.pm @@ -25,6 +25,8 @@ return "No scritps" unless $cpath; my ($epath) = grep { -f "$_/my_print_defaults"; } "$::bindir/extra", $::path_client_bindir; return "No my_print_defaults" unless $epath; +my ($bpath) = grep { -f "$_/mariabackup"; } "$::bindir/extra/mariabackup", $::path_client_bindir; + sub which($) { return `sh -c "command -v $_[0]"` } push @::global_suppressions, @@ -79,10 +81,10 @@ push @::global_suppressions, qr|WSREP: JOIN message from member .* in non-primary configuration. Ignored.|, ); - $ENV{PATH}="$epath:$ENV{PATH}"; $ENV{PATH}="$spath:$ENV{PATH}" unless $epath eq $spath; $ENV{PATH}="$cpath:$ENV{PATH}" unless $cpath eq $spath; +$ENV{PATH}="$bpath:$ENV{PATH}" unless $bpath eq $spath; if (which(socat)) { $ENV{MTR_GALERA_TFMT}='socat'; @@ -96,6 +98,12 @@ sub skip_combinations { unless which(innobackupex); $skip{'include/have_xtrabackup.inc'} = 'Need socat or nc' unless $ENV{MTR_GALERA_TFMT}; + $skip{'include/have_mariabackup.inc'} = 'Need mariabackup' + unless which(mariabackup); + $skip{'include/have_mariabackup.inc'} = 'Need ss' + unless which(ss); + $skip{'include/have_mariabackup.inc'} = 'Need socat or nc' + unless $ENV{MTR_GALERA_TFMT}; %skip; } diff --git a/mysql-test/suite/galera/t/galera_sst_mariabackup.cnf b/mysql-test/suite/galera/t/galera_sst_mariabackup.cnf new file mode 100644 index 00000000000..336296e9bfe --- /dev/null +++ b/mysql-test/suite/galera/t/galera_sst_mariabackup.cnf @@ -0,0 +1,16 @@ +!include ../galera_2nodes.cnf + +[mysqld] +wsrep_sst_method=mariabackup +wsrep_sst_auth="root:" +wsrep_debug=ON + +[mysqld.1] +wsrep_provider_options='base_port=@mysqld.1.#galera_port;gcache.size=1;pc.ignore_sb=true' + +[mysqld.2] +wsrep_provider_options='base_port=@mysqld.2.#galera_port;gcache.size=1;pc.ignore_sb=true' + +[sst] +transferfmt=@ENV.MTR_GALERA_TFMT +streamfmt=xbstream diff --git 
a/mysql-test/suite/galera/t/galera_sst_mariabackup.test b/mysql-test/suite/galera/t/galera_sst_mariabackup.test new file mode 100644 index 00000000000..0e7ac487700 --- /dev/null +++ b/mysql-test/suite/galera/t/galera_sst_mariabackup.test @@ -0,0 +1,19 @@ +--source include/galera_cluster.inc +--source include/have_innodb.inc +--source include/have_mariabackup.inc + +# Save original auto_increment_offset values. +--let $node_1=node_1 +--let $node_2=node_2 +--source include/auto_increment_offset_save.inc + +--source suite/galera/include/galera_st_shutdown_slave.inc +--source suite/galera/include/galera_st_clean_slave.inc + +--source suite/galera/include/galera_st_kill_slave.inc +--source suite/galera/include/galera_st_kill_slave_ddl.inc + +# Restore original auto_increment_offset values. +--source include/auto_increment_offset_restore.inc + +--source include/galera_end.inc From 4092f90655626aa36d3f9952d0287afdc1ceeef1 Mon Sep 17 00:00:00 2001 From: Sergei Golubchik Date: Thu, 22 Mar 2018 13:48:31 +0100 Subject: [PATCH 121/139] MDEV-15409 make sure every sst script is tested in buildbot galera SST tests have a debug part, but we don't want to limit them to fulltest2 builder. So, add support for test files that have a debug part: * add maybe_debug.inc and maybe_debug.combinations * 'debug' combination is run when debug is available * 'release' combination is run otherwise * test wraps debug parts in if($with_debug) { ... 
} * and creates ,debug.rdiff for debug results --- mysql-test/include/maybe_debug.combinations | 5 + mysql-test/include/maybe_debug.inc | 3 + mysql-test/suite.pm | 3 + .../include/galera_st_kill_slave_ddl.inc | 3 + .../r/galera_sst_mariabackup,debug.rdiff | 103 +++++++++++++++++ .../galera/r/galera_sst_mariabackup.result | 97 ---------------- .../galera/r/galera_sst_mysqldump,debug.rdiff | 106 ++++++++++++++++++ .../galera/r/galera_sst_mysqldump.result | 97 ---------------- .../galera/r/galera_sst_rsync,debug.rdiff | 103 +++++++++++++++++ .../suite/galera/r/galera_sst_rsync.result | 97 ---------------- .../r/galera_sst_xtrabackup-v2,debug.rdiff | 103 +++++++++++++++++ .../galera/r/galera_sst_xtrabackup-v2.result | 97 ---------------- 12 files changed, 429 insertions(+), 388 deletions(-) create mode 100644 mysql-test/include/maybe_debug.combinations create mode 100644 mysql-test/include/maybe_debug.inc create mode 100644 mysql-test/suite/galera/r/galera_sst_mariabackup,debug.rdiff create mode 100644 mysql-test/suite/galera/r/galera_sst_mysqldump,debug.rdiff create mode 100644 mysql-test/suite/galera/r/galera_sst_rsync,debug.rdiff create mode 100644 mysql-test/suite/galera/r/galera_sst_xtrabackup-v2,debug.rdiff diff --git a/mysql-test/include/maybe_debug.combinations b/mysql-test/include/maybe_debug.combinations new file mode 100644 index 00000000000..5ee57c0bfd8 --- /dev/null +++ b/mysql-test/include/maybe_debug.combinations @@ -0,0 +1,5 @@ +[debug] +--enable-gdb + +[release] +--disable-gdb diff --git a/mysql-test/include/maybe_debug.inc b/mysql-test/include/maybe_debug.inc new file mode 100644 index 00000000000..2f6c2848f9f --- /dev/null +++ b/mysql-test/include/maybe_debug.inc @@ -0,0 +1,3 @@ +# include file for test files that can be run with and without debug +# having debug and non-debug tests. 
+let $have_debug=`select version() like '%debug%'`; diff --git a/mysql-test/suite.pm b/mysql-test/suite.pm index 4d921d1b049..cb9ab0c375d 100644 --- a/mysql-test/suite.pm +++ b/mysql-test/suite.pm @@ -33,6 +33,9 @@ sub skip_combinations { die "unknown value max-binlog-stmt-cache-size=$longsysvar" unless $val_map{$longsysvar}; $skip{'include/word_size.combinations'} = [ $val_map{$longsysvar} ]; + $skip{'include/maybe_debug.combinations'} = + [ defined $::mysqld_variables{'debug-dbug'} ? 'release' : 'debug' ]; + # as a special case, disable certain include files as a whole $skip{'include/not_embedded.inc'} = 'Not run for embedded server' if $::opt_embedded_server; diff --git a/mysql-test/suite/galera/include/galera_st_kill_slave_ddl.inc b/mysql-test/suite/galera/include/galera_st_kill_slave_ddl.inc index b8dd0fda987..72e80505870 100644 --- a/mysql-test/suite/galera/include/galera_st_kill_slave_ddl.inc +++ b/mysql-test/suite/galera/include/galera_st_kill_slave_ddl.inc @@ -1,3 +1,5 @@ +source include/maybe_debug.inc; +if ($have_debug) { --echo Performing State Transfer on a server that has been killed and restarted --echo while a DDL was in progress on it @@ -121,3 +123,4 @@ COMMIT; SET AUTOCOMMIT=ON; SET GLOBAL debug_dbug = $debug_orig; +} diff --git a/mysql-test/suite/galera/r/galera_sst_mariabackup,debug.rdiff b/mysql-test/suite/galera/r/galera_sst_mariabackup,debug.rdiff new file mode 100644 index 00000000000..efedb1b469a --- /dev/null +++ b/mysql-test/suite/galera/r/galera_sst_mariabackup,debug.rdiff @@ -0,0 +1,103 @@ +--- galera_sst_mariabackup.reject ++++ galera_sst_mariabackup.result +@@ -260,3 +260,100 @@ + DROP TABLE t1; + COMMIT; + SET AUTOCOMMIT=ON; ++Performing State Transfer on a server that has been killed and restarted ++while a DDL was in progress on it ++CREATE TABLE t1 (f1 CHAR(255)) ENGINE=InnoDB; ++SET AUTOCOMMIT=OFF; ++START TRANSACTION; ++INSERT INTO t1 VALUES ('node1_committed_before'); ++INSERT INTO t1 VALUES ('node1_committed_before'); 
++INSERT INTO t1 VALUES ('node1_committed_before'); ++INSERT INTO t1 VALUES ('node1_committed_before'); ++INSERT INTO t1 VALUES ('node1_committed_before'); ++START TRANSACTION; ++INSERT INTO t1 VALUES ('node2_committed_before'); ++INSERT INTO t1 VALUES ('node2_committed_before'); ++INSERT INTO t1 VALUES ('node2_committed_before'); ++INSERT INTO t1 VALUES ('node2_committed_before'); ++INSERT INTO t1 VALUES ('node2_committed_before'); ++COMMIT; ++SET GLOBAL debug_dbug = 'd,sync.alter_opened_table'; ++ALTER TABLE t1 ADD COLUMN f2 INTEGER; ++SET wsrep_sync_wait = 0; ++Killing server ... ++SET AUTOCOMMIT=OFF; ++START TRANSACTION; ++INSERT INTO t1 (f1) VALUES ('node1_committed_during'); ++INSERT INTO t1 (f1) VALUES ('node1_committed_during'); ++INSERT INTO t1 (f1) VALUES ('node1_committed_during'); ++INSERT INTO t1 (f1) VALUES ('node1_committed_during'); ++INSERT INTO t1 (f1) VALUES ('node1_committed_during'); ++COMMIT; ++START TRANSACTION; ++INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after'); ++INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after'); ++INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after'); ++INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after'); ++INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after'); ++SET AUTOCOMMIT=OFF; ++START TRANSACTION; ++INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after'); ++INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after'); ++INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after'); ++INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after'); ++INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after'); ++Performing --wsrep-recover ... ++Starting server ... ++Using --wsrep-start-position when starting mysqld ... 
++SET AUTOCOMMIT=OFF; ++START TRANSACTION; ++INSERT INTO t1 (f1) VALUES ('node2_committed_after'); ++INSERT INTO t1 (f1) VALUES ('node2_committed_after'); ++INSERT INTO t1 (f1) VALUES ('node2_committed_after'); ++INSERT INTO t1 (f1) VALUES ('node2_committed_after'); ++INSERT INTO t1 (f1) VALUES ('node2_committed_after'); ++COMMIT; ++INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after'); ++INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after'); ++INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after'); ++INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after'); ++INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after'); ++COMMIT; ++SET AUTOCOMMIT=OFF; ++START TRANSACTION; ++INSERT INTO t1 (f1) VALUES ('node1_committed_after'); ++INSERT INTO t1 (f1) VALUES ('node1_committed_after'); ++INSERT INTO t1 (f1) VALUES ('node1_committed_after'); ++INSERT INTO t1 (f1) VALUES ('node1_committed_after'); ++INSERT INTO t1 (f1) VALUES ('node1_committed_after'); ++COMMIT; ++INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after'); ++INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after'); ++INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after'); ++INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after'); ++INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after'); ++ROLLBACK; ++SELECT COUNT(*) = 2 FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 't1'; ++COUNT(*) = 2 ++1 ++SELECT COUNT(*) = 35 FROM t1; ++COUNT(*) = 35 ++1 ++SELECT COUNT(*) = 0 FROM (SELECT COUNT(*) AS c, f1 FROM t1 GROUP BY f1 HAVING c NOT IN (5, 10)) AS a1; ++COUNT(*) = 0 ++1 ++COMMIT; ++SET AUTOCOMMIT=ON; ++SELECT COUNT(*) = 2 FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 't1'; ++COUNT(*) = 2 ++1 ++SELECT COUNT(*) = 35 FROM t1; ++COUNT(*) = 35 ++1 ++SELECT COUNT(*) = 0 FROM (SELECT COUNT(*) AS c, f1 FROM t1 GROUP BY f1 HAVING c NOT IN (5, 10)) AS a1; ++COUNT(*) = 0 ++1 ++DROP TABLE t1; ++COMMIT; ++SET AUTOCOMMIT=ON; ++SET GLOBAL debug_dbug = $debug_orig; diff --git 
a/mysql-test/suite/galera/r/galera_sst_mariabackup.result b/mysql-test/suite/galera/r/galera_sst_mariabackup.result index df2d9190a4b..cec0f21ee22 100644 --- a/mysql-test/suite/galera/r/galera_sst_mariabackup.result +++ b/mysql-test/suite/galera/r/galera_sst_mariabackup.result @@ -260,100 +260,3 @@ COUNT(*) = 0 DROP TABLE t1; COMMIT; SET AUTOCOMMIT=ON; -Performing State Transfer on a server that has been killed and restarted -while a DDL was in progress on it -CREATE TABLE t1 (f1 CHAR(255)) ENGINE=InnoDB; -SET AUTOCOMMIT=OFF; -START TRANSACTION; -INSERT INTO t1 VALUES ('node1_committed_before'); -INSERT INTO t1 VALUES ('node1_committed_before'); -INSERT INTO t1 VALUES ('node1_committed_before'); -INSERT INTO t1 VALUES ('node1_committed_before'); -INSERT INTO t1 VALUES ('node1_committed_before'); -START TRANSACTION; -INSERT INTO t1 VALUES ('node2_committed_before'); -INSERT INTO t1 VALUES ('node2_committed_before'); -INSERT INTO t1 VALUES ('node2_committed_before'); -INSERT INTO t1 VALUES ('node2_committed_before'); -INSERT INTO t1 VALUES ('node2_committed_before'); -COMMIT; -SET GLOBAL debug_dbug = 'd,sync.alter_opened_table'; -ALTER TABLE t1 ADD COLUMN f2 INTEGER; -SET wsrep_sync_wait = 0; -Killing server ... 
-SET AUTOCOMMIT=OFF; -START TRANSACTION; -INSERT INTO t1 (f1) VALUES ('node1_committed_during'); -INSERT INTO t1 (f1) VALUES ('node1_committed_during'); -INSERT INTO t1 (f1) VALUES ('node1_committed_during'); -INSERT INTO t1 (f1) VALUES ('node1_committed_during'); -INSERT INTO t1 (f1) VALUES ('node1_committed_during'); -COMMIT; -START TRANSACTION; -INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after'); -INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after'); -INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after'); -INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after'); -INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after'); -SET AUTOCOMMIT=OFF; -START TRANSACTION; -INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after'); -INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after'); -INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after'); -INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after'); -INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after'); -Performing --wsrep-recover ... -Starting server ... -Using --wsrep-start-position when starting mysqld ... 
-SET AUTOCOMMIT=OFF; -START TRANSACTION; -INSERT INTO t1 (f1) VALUES ('node2_committed_after'); -INSERT INTO t1 (f1) VALUES ('node2_committed_after'); -INSERT INTO t1 (f1) VALUES ('node2_committed_after'); -INSERT INTO t1 (f1) VALUES ('node2_committed_after'); -INSERT INTO t1 (f1) VALUES ('node2_committed_after'); -COMMIT; -INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after'); -INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after'); -INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after'); -INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after'); -INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after'); -COMMIT; -SET AUTOCOMMIT=OFF; -START TRANSACTION; -INSERT INTO t1 (f1) VALUES ('node1_committed_after'); -INSERT INTO t1 (f1) VALUES ('node1_committed_after'); -INSERT INTO t1 (f1) VALUES ('node1_committed_after'); -INSERT INTO t1 (f1) VALUES ('node1_committed_after'); -INSERT INTO t1 (f1) VALUES ('node1_committed_after'); -COMMIT; -INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after'); -INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after'); -INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after'); -INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after'); -INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after'); -ROLLBACK; -SELECT COUNT(*) = 2 FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 't1'; -COUNT(*) = 2 -1 -SELECT COUNT(*) = 35 FROM t1; -COUNT(*) = 35 -1 -SELECT COUNT(*) = 0 FROM (SELECT COUNT(*) AS c, f1 FROM t1 GROUP BY f1 HAVING c NOT IN (5, 10)) AS a1; -COUNT(*) = 0 -1 -COMMIT; -SET AUTOCOMMIT=ON; -SELECT COUNT(*) = 2 FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 't1'; -COUNT(*) = 2 -1 -SELECT COUNT(*) = 35 FROM t1; -COUNT(*) = 35 -1 -SELECT COUNT(*) = 0 FROM (SELECT COUNT(*) AS c, f1 FROM t1 GROUP BY f1 HAVING c NOT IN (5, 10)) AS a1; -COUNT(*) = 0 -1 -DROP TABLE t1; -COMMIT; -SET AUTOCOMMIT=ON; -SET GLOBAL debug_dbug = $debug_orig; diff --git a/mysql-test/suite/galera/r/galera_sst_mysqldump,debug.rdiff 
b/mysql-test/suite/galera/r/galera_sst_mysqldump,debug.rdiff new file mode 100644 index 00000000000..a7ed54af860 --- /dev/null +++ b/mysql-test/suite/galera/r/galera_sst_mysqldump,debug.rdiff @@ -0,0 +1,106 @@ +--- galera_sst_mysqldump.reject ++++ galera_sst_mysqldump.result +@@ -354,6 +354,103 @@ + DROP TABLE t1; + COMMIT; + SET AUTOCOMMIT=ON; ++Performing State Transfer on a server that has been killed and restarted ++while a DDL was in progress on it ++CREATE TABLE t1 (f1 CHAR(255)) ENGINE=InnoDB; ++SET AUTOCOMMIT=OFF; ++START TRANSACTION; ++INSERT INTO t1 VALUES ('node1_committed_before'); ++INSERT INTO t1 VALUES ('node1_committed_before'); ++INSERT INTO t1 VALUES ('node1_committed_before'); ++INSERT INTO t1 VALUES ('node1_committed_before'); ++INSERT INTO t1 VALUES ('node1_committed_before'); ++START TRANSACTION; ++INSERT INTO t1 VALUES ('node2_committed_before'); ++INSERT INTO t1 VALUES ('node2_committed_before'); ++INSERT INTO t1 VALUES ('node2_committed_before'); ++INSERT INTO t1 VALUES ('node2_committed_before'); ++INSERT INTO t1 VALUES ('node2_committed_before'); ++COMMIT; ++SET GLOBAL debug_dbug = 'd,sync.alter_opened_table'; ++ALTER TABLE t1 ADD COLUMN f2 INTEGER; ++SET wsrep_sync_wait = 0; ++Killing server ... 
++SET AUTOCOMMIT=OFF; ++START TRANSACTION; ++INSERT INTO t1 (f1) VALUES ('node1_committed_during'); ++INSERT INTO t1 (f1) VALUES ('node1_committed_during'); ++INSERT INTO t1 (f1) VALUES ('node1_committed_during'); ++INSERT INTO t1 (f1) VALUES ('node1_committed_during'); ++INSERT INTO t1 (f1) VALUES ('node1_committed_during'); ++COMMIT; ++START TRANSACTION; ++INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after'); ++INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after'); ++INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after'); ++INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after'); ++INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after'); ++SET AUTOCOMMIT=OFF; ++START TRANSACTION; ++INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after'); ++INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after'); ++INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after'); ++INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after'); ++INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after'); ++Performing --wsrep-recover ... ++Starting server ... ++Using --wsrep-start-position when starting mysqld ... 
++SET AUTOCOMMIT=OFF; ++START TRANSACTION; ++INSERT INTO t1 (f1) VALUES ('node2_committed_after'); ++INSERT INTO t1 (f1) VALUES ('node2_committed_after'); ++INSERT INTO t1 (f1) VALUES ('node2_committed_after'); ++INSERT INTO t1 (f1) VALUES ('node2_committed_after'); ++INSERT INTO t1 (f1) VALUES ('node2_committed_after'); ++COMMIT; ++INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after'); ++INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after'); ++INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after'); ++INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after'); ++INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after'); ++COMMIT; ++SET AUTOCOMMIT=OFF; ++START TRANSACTION; ++INSERT INTO t1 (f1) VALUES ('node1_committed_after'); ++INSERT INTO t1 (f1) VALUES ('node1_committed_after'); ++INSERT INTO t1 (f1) VALUES ('node1_committed_after'); ++INSERT INTO t1 (f1) VALUES ('node1_committed_after'); ++INSERT INTO t1 (f1) VALUES ('node1_committed_after'); ++COMMIT; ++INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after'); ++INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after'); ++INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after'); ++INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after'); ++INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after'); ++ROLLBACK; ++SELECT COUNT(*) = 2 FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 't1'; ++COUNT(*) = 2 ++1 ++SELECT COUNT(*) = 35 FROM t1; ++COUNT(*) = 35 ++1 ++SELECT COUNT(*) = 0 FROM (SELECT COUNT(*) AS c, f1 FROM t1 GROUP BY f1 HAVING c NOT IN (5, 10)) AS a1; ++COUNT(*) = 0 ++1 ++COMMIT; ++SET AUTOCOMMIT=ON; ++SELECT COUNT(*) = 2 FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 't1'; ++COUNT(*) = 2 ++1 ++SELECT COUNT(*) = 35 FROM t1; ++COUNT(*) = 35 ++1 ++SELECT COUNT(*) = 0 FROM (SELECT COUNT(*) AS c, f1 FROM t1 GROUP BY f1 HAVING c NOT IN (5, 10)) AS a1; ++COUNT(*) = 0 ++1 ++DROP TABLE t1; ++COMMIT; ++SET AUTOCOMMIT=ON; ++SET GLOBAL debug_dbug = $debug_orig; + CALL 
mtr.add_suppression("Slave SQL: Error 'The MySQL server is running with the --skip-grant-tables option so it cannot execute this statement' on query"); + DROP USER sst; + CALL mtr.add_suppression("Slave SQL: Error 'The MySQL server is running with the --skip-grant-tables option so it cannot execute this statement' on query"); diff --git a/mysql-test/suite/galera/r/galera_sst_mysqldump.result b/mysql-test/suite/galera/r/galera_sst_mysqldump.result index aeada721adc..2369c1d6d73 100644 --- a/mysql-test/suite/galera/r/galera_sst_mysqldump.result +++ b/mysql-test/suite/galera/r/galera_sst_mysqldump.result @@ -354,103 +354,6 @@ COUNT(*) = 0 DROP TABLE t1; COMMIT; SET AUTOCOMMIT=ON; -Performing State Transfer on a server that has been killed and restarted -while a DDL was in progress on it -CREATE TABLE t1 (f1 CHAR(255)) ENGINE=InnoDB; -SET AUTOCOMMIT=OFF; -START TRANSACTION; -INSERT INTO t1 VALUES ('node1_committed_before'); -INSERT INTO t1 VALUES ('node1_committed_before'); -INSERT INTO t1 VALUES ('node1_committed_before'); -INSERT INTO t1 VALUES ('node1_committed_before'); -INSERT INTO t1 VALUES ('node1_committed_before'); -START TRANSACTION; -INSERT INTO t1 VALUES ('node2_committed_before'); -INSERT INTO t1 VALUES ('node2_committed_before'); -INSERT INTO t1 VALUES ('node2_committed_before'); -INSERT INTO t1 VALUES ('node2_committed_before'); -INSERT INTO t1 VALUES ('node2_committed_before'); -COMMIT; -SET GLOBAL debug_dbug = 'd,sync.alter_opened_table'; -ALTER TABLE t1 ADD COLUMN f2 INTEGER; -SET wsrep_sync_wait = 0; -Killing server ... 
-SET AUTOCOMMIT=OFF; -START TRANSACTION; -INSERT INTO t1 (f1) VALUES ('node1_committed_during'); -INSERT INTO t1 (f1) VALUES ('node1_committed_during'); -INSERT INTO t1 (f1) VALUES ('node1_committed_during'); -INSERT INTO t1 (f1) VALUES ('node1_committed_during'); -INSERT INTO t1 (f1) VALUES ('node1_committed_during'); -COMMIT; -START TRANSACTION; -INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after'); -INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after'); -INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after'); -INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after'); -INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after'); -SET AUTOCOMMIT=OFF; -START TRANSACTION; -INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after'); -INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after'); -INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after'); -INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after'); -INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after'); -Performing --wsrep-recover ... -Starting server ... -Using --wsrep-start-position when starting mysqld ... 
-SET AUTOCOMMIT=OFF; -START TRANSACTION; -INSERT INTO t1 (f1) VALUES ('node2_committed_after'); -INSERT INTO t1 (f1) VALUES ('node2_committed_after'); -INSERT INTO t1 (f1) VALUES ('node2_committed_after'); -INSERT INTO t1 (f1) VALUES ('node2_committed_after'); -INSERT INTO t1 (f1) VALUES ('node2_committed_after'); -COMMIT; -INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after'); -INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after'); -INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after'); -INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after'); -INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after'); -COMMIT; -SET AUTOCOMMIT=OFF; -START TRANSACTION; -INSERT INTO t1 (f1) VALUES ('node1_committed_after'); -INSERT INTO t1 (f1) VALUES ('node1_committed_after'); -INSERT INTO t1 (f1) VALUES ('node1_committed_after'); -INSERT INTO t1 (f1) VALUES ('node1_committed_after'); -INSERT INTO t1 (f1) VALUES ('node1_committed_after'); -COMMIT; -INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after'); -INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after'); -INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after'); -INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after'); -INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after'); -ROLLBACK; -SELECT COUNT(*) = 2 FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 't1'; -COUNT(*) = 2 -1 -SELECT COUNT(*) = 35 FROM t1; -COUNT(*) = 35 -1 -SELECT COUNT(*) = 0 FROM (SELECT COUNT(*) AS c, f1 FROM t1 GROUP BY f1 HAVING c NOT IN (5, 10)) AS a1; -COUNT(*) = 0 -1 -COMMIT; -SET AUTOCOMMIT=ON; -SELECT COUNT(*) = 2 FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 't1'; -COUNT(*) = 2 -1 -SELECT COUNT(*) = 35 FROM t1; -COUNT(*) = 35 -1 -SELECT COUNT(*) = 0 FROM (SELECT COUNT(*) AS c, f1 FROM t1 GROUP BY f1 HAVING c NOT IN (5, 10)) AS a1; -COUNT(*) = 0 -1 -DROP TABLE t1; -COMMIT; -SET AUTOCOMMIT=ON; -SET GLOBAL debug_dbug = $debug_orig; CALL mtr.add_suppression("Slave SQL: Error 'The MySQL server is running 
with the --skip-grant-tables option so it cannot execute this statement' on query"); DROP USER sst; CALL mtr.add_suppression("Slave SQL: Error 'The MySQL server is running with the --skip-grant-tables option so it cannot execute this statement' on query"); diff --git a/mysql-test/suite/galera/r/galera_sst_rsync,debug.rdiff b/mysql-test/suite/galera/r/galera_sst_rsync,debug.rdiff new file mode 100644 index 00000000000..323a0a92a35 --- /dev/null +++ b/mysql-test/suite/galera/r/galera_sst_rsync,debug.rdiff @@ -0,0 +1,103 @@ +--- galera_sst_rsync.reject ++++ galera_sst_rsync.result +@@ -260,3 +260,100 @@ + DROP TABLE t1; + COMMIT; + SET AUTOCOMMIT=ON; ++Performing State Transfer on a server that has been killed and restarted ++while a DDL was in progress on it ++CREATE TABLE t1 (f1 CHAR(255)) ENGINE=InnoDB; ++SET AUTOCOMMIT=OFF; ++START TRANSACTION; ++INSERT INTO t1 VALUES ('node1_committed_before'); ++INSERT INTO t1 VALUES ('node1_committed_before'); ++INSERT INTO t1 VALUES ('node1_committed_before'); ++INSERT INTO t1 VALUES ('node1_committed_before'); ++INSERT INTO t1 VALUES ('node1_committed_before'); ++START TRANSACTION; ++INSERT INTO t1 VALUES ('node2_committed_before'); ++INSERT INTO t1 VALUES ('node2_committed_before'); ++INSERT INTO t1 VALUES ('node2_committed_before'); ++INSERT INTO t1 VALUES ('node2_committed_before'); ++INSERT INTO t1 VALUES ('node2_committed_before'); ++COMMIT; ++SET GLOBAL debug_dbug = 'd,sync.alter_opened_table'; ++ALTER TABLE t1 ADD COLUMN f2 INTEGER; ++SET wsrep_sync_wait = 0; ++Killing server ... 
++SET AUTOCOMMIT=OFF; ++START TRANSACTION; ++INSERT INTO t1 (f1) VALUES ('node1_committed_during'); ++INSERT INTO t1 (f1) VALUES ('node1_committed_during'); ++INSERT INTO t1 (f1) VALUES ('node1_committed_during'); ++INSERT INTO t1 (f1) VALUES ('node1_committed_during'); ++INSERT INTO t1 (f1) VALUES ('node1_committed_during'); ++COMMIT; ++START TRANSACTION; ++INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after'); ++INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after'); ++INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after'); ++INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after'); ++INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after'); ++SET AUTOCOMMIT=OFF; ++START TRANSACTION; ++INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after'); ++INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after'); ++INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after'); ++INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after'); ++INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after'); ++Performing --wsrep-recover ... ++Starting server ... ++Using --wsrep-start-position when starting mysqld ... 
++SET AUTOCOMMIT=OFF; ++START TRANSACTION; ++INSERT INTO t1 (f1) VALUES ('node2_committed_after'); ++INSERT INTO t1 (f1) VALUES ('node2_committed_after'); ++INSERT INTO t1 (f1) VALUES ('node2_committed_after'); ++INSERT INTO t1 (f1) VALUES ('node2_committed_after'); ++INSERT INTO t1 (f1) VALUES ('node2_committed_after'); ++COMMIT; ++INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after'); ++INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after'); ++INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after'); ++INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after'); ++INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after'); ++COMMIT; ++SET AUTOCOMMIT=OFF; ++START TRANSACTION; ++INSERT INTO t1 (f1) VALUES ('node1_committed_after'); ++INSERT INTO t1 (f1) VALUES ('node1_committed_after'); ++INSERT INTO t1 (f1) VALUES ('node1_committed_after'); ++INSERT INTO t1 (f1) VALUES ('node1_committed_after'); ++INSERT INTO t1 (f1) VALUES ('node1_committed_after'); ++COMMIT; ++INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after'); ++INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after'); ++INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after'); ++INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after'); ++INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after'); ++ROLLBACK; ++SELECT COUNT(*) = 2 FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 't1'; ++COUNT(*) = 2 ++1 ++SELECT COUNT(*) = 35 FROM t1; ++COUNT(*) = 35 ++1 ++SELECT COUNT(*) = 0 FROM (SELECT COUNT(*) AS c, f1 FROM t1 GROUP BY f1 HAVING c NOT IN (5, 10)) AS a1; ++COUNT(*) = 0 ++1 ++COMMIT; ++SET AUTOCOMMIT=ON; ++SELECT COUNT(*) = 2 FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 't1'; ++COUNT(*) = 2 ++1 ++SELECT COUNT(*) = 35 FROM t1; ++COUNT(*) = 35 ++1 ++SELECT COUNT(*) = 0 FROM (SELECT COUNT(*) AS c, f1 FROM t1 GROUP BY f1 HAVING c NOT IN (5, 10)) AS a1; ++COUNT(*) = 0 ++1 ++DROP TABLE t1; ++COMMIT; ++SET AUTOCOMMIT=ON; ++SET GLOBAL debug_dbug = $debug_orig; diff --git 
a/mysql-test/suite/galera/r/galera_sst_rsync.result b/mysql-test/suite/galera/r/galera_sst_rsync.result index df2d9190a4b..cec0f21ee22 100644 --- a/mysql-test/suite/galera/r/galera_sst_rsync.result +++ b/mysql-test/suite/galera/r/galera_sst_rsync.result @@ -260,100 +260,3 @@ COUNT(*) = 0 DROP TABLE t1; COMMIT; SET AUTOCOMMIT=ON; -Performing State Transfer on a server that has been killed and restarted -while a DDL was in progress on it -CREATE TABLE t1 (f1 CHAR(255)) ENGINE=InnoDB; -SET AUTOCOMMIT=OFF; -START TRANSACTION; -INSERT INTO t1 VALUES ('node1_committed_before'); -INSERT INTO t1 VALUES ('node1_committed_before'); -INSERT INTO t1 VALUES ('node1_committed_before'); -INSERT INTO t1 VALUES ('node1_committed_before'); -INSERT INTO t1 VALUES ('node1_committed_before'); -START TRANSACTION; -INSERT INTO t1 VALUES ('node2_committed_before'); -INSERT INTO t1 VALUES ('node2_committed_before'); -INSERT INTO t1 VALUES ('node2_committed_before'); -INSERT INTO t1 VALUES ('node2_committed_before'); -INSERT INTO t1 VALUES ('node2_committed_before'); -COMMIT; -SET GLOBAL debug_dbug = 'd,sync.alter_opened_table'; -ALTER TABLE t1 ADD COLUMN f2 INTEGER; -SET wsrep_sync_wait = 0; -Killing server ... 
-SET AUTOCOMMIT=OFF; -START TRANSACTION; -INSERT INTO t1 (f1) VALUES ('node1_committed_during'); -INSERT INTO t1 (f1) VALUES ('node1_committed_during'); -INSERT INTO t1 (f1) VALUES ('node1_committed_during'); -INSERT INTO t1 (f1) VALUES ('node1_committed_during'); -INSERT INTO t1 (f1) VALUES ('node1_committed_during'); -COMMIT; -START TRANSACTION; -INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after'); -INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after'); -INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after'); -INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after'); -INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after'); -SET AUTOCOMMIT=OFF; -START TRANSACTION; -INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after'); -INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after'); -INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after'); -INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after'); -INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after'); -Performing --wsrep-recover ... -Starting server ... -Using --wsrep-start-position when starting mysqld ... 
-SET AUTOCOMMIT=OFF; -START TRANSACTION; -INSERT INTO t1 (f1) VALUES ('node2_committed_after'); -INSERT INTO t1 (f1) VALUES ('node2_committed_after'); -INSERT INTO t1 (f1) VALUES ('node2_committed_after'); -INSERT INTO t1 (f1) VALUES ('node2_committed_after'); -INSERT INTO t1 (f1) VALUES ('node2_committed_after'); -COMMIT; -INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after'); -INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after'); -INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after'); -INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after'); -INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after'); -COMMIT; -SET AUTOCOMMIT=OFF; -START TRANSACTION; -INSERT INTO t1 (f1) VALUES ('node1_committed_after'); -INSERT INTO t1 (f1) VALUES ('node1_committed_after'); -INSERT INTO t1 (f1) VALUES ('node1_committed_after'); -INSERT INTO t1 (f1) VALUES ('node1_committed_after'); -INSERT INTO t1 (f1) VALUES ('node1_committed_after'); -COMMIT; -INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after'); -INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after'); -INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after'); -INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after'); -INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after'); -ROLLBACK; -SELECT COUNT(*) = 2 FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 't1'; -COUNT(*) = 2 -1 -SELECT COUNT(*) = 35 FROM t1; -COUNT(*) = 35 -1 -SELECT COUNT(*) = 0 FROM (SELECT COUNT(*) AS c, f1 FROM t1 GROUP BY f1 HAVING c NOT IN (5, 10)) AS a1; -COUNT(*) = 0 -1 -COMMIT; -SET AUTOCOMMIT=ON; -SELECT COUNT(*) = 2 FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 't1'; -COUNT(*) = 2 -1 -SELECT COUNT(*) = 35 FROM t1; -COUNT(*) = 35 -1 -SELECT COUNT(*) = 0 FROM (SELECT COUNT(*) AS c, f1 FROM t1 GROUP BY f1 HAVING c NOT IN (5, 10)) AS a1; -COUNT(*) = 0 -1 -DROP TABLE t1; -COMMIT; -SET AUTOCOMMIT=ON; -SET GLOBAL debug_dbug = $debug_orig; diff --git 
a/mysql-test/suite/galera/r/galera_sst_xtrabackup-v2,debug.rdiff b/mysql-test/suite/galera/r/galera_sst_xtrabackup-v2,debug.rdiff new file mode 100644 index 00000000000..2692482f448 --- /dev/null +++ b/mysql-test/suite/galera/r/galera_sst_xtrabackup-v2,debug.rdiff @@ -0,0 +1,103 @@ +--- galera_sst_xtrabackup-v2.reject ++++ galera_sst_xtrabackup-v2.result +@@ -260,3 +260,100 @@ + DROP TABLE t1; + COMMIT; + SET AUTOCOMMIT=ON; ++Performing State Transfer on a server that has been killed and restarted ++while a DDL was in progress on it ++CREATE TABLE t1 (f1 CHAR(255)) ENGINE=InnoDB; ++SET AUTOCOMMIT=OFF; ++START TRANSACTION; ++INSERT INTO t1 VALUES ('node1_committed_before'); ++INSERT INTO t1 VALUES ('node1_committed_before'); ++INSERT INTO t1 VALUES ('node1_committed_before'); ++INSERT INTO t1 VALUES ('node1_committed_before'); ++INSERT INTO t1 VALUES ('node1_committed_before'); ++START TRANSACTION; ++INSERT INTO t1 VALUES ('node2_committed_before'); ++INSERT INTO t1 VALUES ('node2_committed_before'); ++INSERT INTO t1 VALUES ('node2_committed_before'); ++INSERT INTO t1 VALUES ('node2_committed_before'); ++INSERT INTO t1 VALUES ('node2_committed_before'); ++COMMIT; ++SET GLOBAL debug_dbug = 'd,sync.alter_opened_table'; ++ALTER TABLE t1 ADD COLUMN f2 INTEGER; ++SET wsrep_sync_wait = 0; ++Killing server ... 
++SET AUTOCOMMIT=OFF; ++START TRANSACTION; ++INSERT INTO t1 (f1) VALUES ('node1_committed_during'); ++INSERT INTO t1 (f1) VALUES ('node1_committed_during'); ++INSERT INTO t1 (f1) VALUES ('node1_committed_during'); ++INSERT INTO t1 (f1) VALUES ('node1_committed_during'); ++INSERT INTO t1 (f1) VALUES ('node1_committed_during'); ++COMMIT; ++START TRANSACTION; ++INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after'); ++INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after'); ++INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after'); ++INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after'); ++INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after'); ++SET AUTOCOMMIT=OFF; ++START TRANSACTION; ++INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after'); ++INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after'); ++INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after'); ++INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after'); ++INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after'); ++Performing --wsrep-recover ... ++Starting server ... ++Using --wsrep-start-position when starting mysqld ... 
++SET AUTOCOMMIT=OFF; ++START TRANSACTION; ++INSERT INTO t1 (f1) VALUES ('node2_committed_after'); ++INSERT INTO t1 (f1) VALUES ('node2_committed_after'); ++INSERT INTO t1 (f1) VALUES ('node2_committed_after'); ++INSERT INTO t1 (f1) VALUES ('node2_committed_after'); ++INSERT INTO t1 (f1) VALUES ('node2_committed_after'); ++COMMIT; ++INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after'); ++INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after'); ++INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after'); ++INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after'); ++INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after'); ++COMMIT; ++SET AUTOCOMMIT=OFF; ++START TRANSACTION; ++INSERT INTO t1 (f1) VALUES ('node1_committed_after'); ++INSERT INTO t1 (f1) VALUES ('node1_committed_after'); ++INSERT INTO t1 (f1) VALUES ('node1_committed_after'); ++INSERT INTO t1 (f1) VALUES ('node1_committed_after'); ++INSERT INTO t1 (f1) VALUES ('node1_committed_after'); ++COMMIT; ++INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after'); ++INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after'); ++INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after'); ++INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after'); ++INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after'); ++ROLLBACK; ++SELECT COUNT(*) = 2 FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 't1'; ++COUNT(*) = 2 ++1 ++SELECT COUNT(*) = 35 FROM t1; ++COUNT(*) = 35 ++1 ++SELECT COUNT(*) = 0 FROM (SELECT COUNT(*) AS c, f1 FROM t1 GROUP BY f1 HAVING c NOT IN (5, 10)) AS a1; ++COUNT(*) = 0 ++1 ++COMMIT; ++SET AUTOCOMMIT=ON; ++SELECT COUNT(*) = 2 FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 't1'; ++COUNT(*) = 2 ++1 ++SELECT COUNT(*) = 35 FROM t1; ++COUNT(*) = 35 ++1 ++SELECT COUNT(*) = 0 FROM (SELECT COUNT(*) AS c, f1 FROM t1 GROUP BY f1 HAVING c NOT IN (5, 10)) AS a1; ++COUNT(*) = 0 ++1 ++DROP TABLE t1; ++COMMIT; ++SET AUTOCOMMIT=ON; ++SET GLOBAL debug_dbug = $debug_orig; diff --git 
a/mysql-test/suite/galera/r/galera_sst_xtrabackup-v2.result b/mysql-test/suite/galera/r/galera_sst_xtrabackup-v2.result index df2d9190a4b..cec0f21ee22 100644 --- a/mysql-test/suite/galera/r/galera_sst_xtrabackup-v2.result +++ b/mysql-test/suite/galera/r/galera_sst_xtrabackup-v2.result @@ -260,100 +260,3 @@ COUNT(*) = 0 DROP TABLE t1; COMMIT; SET AUTOCOMMIT=ON; -Performing State Transfer on a server that has been killed and restarted -while a DDL was in progress on it -CREATE TABLE t1 (f1 CHAR(255)) ENGINE=InnoDB; -SET AUTOCOMMIT=OFF; -START TRANSACTION; -INSERT INTO t1 VALUES ('node1_committed_before'); -INSERT INTO t1 VALUES ('node1_committed_before'); -INSERT INTO t1 VALUES ('node1_committed_before'); -INSERT INTO t1 VALUES ('node1_committed_before'); -INSERT INTO t1 VALUES ('node1_committed_before'); -START TRANSACTION; -INSERT INTO t1 VALUES ('node2_committed_before'); -INSERT INTO t1 VALUES ('node2_committed_before'); -INSERT INTO t1 VALUES ('node2_committed_before'); -INSERT INTO t1 VALUES ('node2_committed_before'); -INSERT INTO t1 VALUES ('node2_committed_before'); -COMMIT; -SET GLOBAL debug_dbug = 'd,sync.alter_opened_table'; -ALTER TABLE t1 ADD COLUMN f2 INTEGER; -SET wsrep_sync_wait = 0; -Killing server ... 
-SET AUTOCOMMIT=OFF; -START TRANSACTION; -INSERT INTO t1 (f1) VALUES ('node1_committed_during'); -INSERT INTO t1 (f1) VALUES ('node1_committed_during'); -INSERT INTO t1 (f1) VALUES ('node1_committed_during'); -INSERT INTO t1 (f1) VALUES ('node1_committed_during'); -INSERT INTO t1 (f1) VALUES ('node1_committed_during'); -COMMIT; -START TRANSACTION; -INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after'); -INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after'); -INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after'); -INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after'); -INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after'); -SET AUTOCOMMIT=OFF; -START TRANSACTION; -INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after'); -INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after'); -INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after'); -INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after'); -INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after'); -Performing --wsrep-recover ... -Starting server ... -Using --wsrep-start-position when starting mysqld ... 
-SET AUTOCOMMIT=OFF; -START TRANSACTION; -INSERT INTO t1 (f1) VALUES ('node2_committed_after'); -INSERT INTO t1 (f1) VALUES ('node2_committed_after'); -INSERT INTO t1 (f1) VALUES ('node2_committed_after'); -INSERT INTO t1 (f1) VALUES ('node2_committed_after'); -INSERT INTO t1 (f1) VALUES ('node2_committed_after'); -COMMIT; -INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after'); -INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after'); -INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after'); -INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after'); -INSERT INTO t1 (f1) VALUES ('node1_to_be_committed_after'); -COMMIT; -SET AUTOCOMMIT=OFF; -START TRANSACTION; -INSERT INTO t1 (f1) VALUES ('node1_committed_after'); -INSERT INTO t1 (f1) VALUES ('node1_committed_after'); -INSERT INTO t1 (f1) VALUES ('node1_committed_after'); -INSERT INTO t1 (f1) VALUES ('node1_committed_after'); -INSERT INTO t1 (f1) VALUES ('node1_committed_after'); -COMMIT; -INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after'); -INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after'); -INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after'); -INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after'); -INSERT INTO t1 (f1) VALUES ('node1_to_be_rollbacked_after'); -ROLLBACK; -SELECT COUNT(*) = 2 FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 't1'; -COUNT(*) = 2 -1 -SELECT COUNT(*) = 35 FROM t1; -COUNT(*) = 35 -1 -SELECT COUNT(*) = 0 FROM (SELECT COUNT(*) AS c, f1 FROM t1 GROUP BY f1 HAVING c NOT IN (5, 10)) AS a1; -COUNT(*) = 0 -1 -COMMIT; -SET AUTOCOMMIT=ON; -SELECT COUNT(*) = 2 FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = 't1'; -COUNT(*) = 2 -1 -SELECT COUNT(*) = 35 FROM t1; -COUNT(*) = 35 -1 -SELECT COUNT(*) = 0 FROM (SELECT COUNT(*) AS c, f1 FROM t1 GROUP BY f1 HAVING c NOT IN (5, 10)) AS a1; -COUNT(*) = 0 -1 -DROP TABLE t1; -COMMIT; -SET AUTOCOMMIT=ON; -SET GLOBAL debug_dbug = $debug_orig; From f249d8467ae7fadafc7c52234c403eec3bb769ca Mon Sep 17 00:00:00 2001 
From: Sergei Golubchik Date: Thu, 22 Mar 2018 15:07:33 +0100 Subject: [PATCH 122/139] MDEV-15570 Assertion `Item_cache_temporal::field_type() != MYSQL_TYPE_TIME' failed in Item_cache_temporal::val_datetime_packed remove an assert. TIME value can be used (and cached) in a datetime context --- mysql-test/r/type_temporal_innodb.result | 6 ++++++ mysql-test/t/type_temporal_innodb.test | 8 ++++++++ sql/item.cc | 1 - 3 files changed, 14 insertions(+), 1 deletion(-) diff --git a/mysql-test/r/type_temporal_innodb.result b/mysql-test/r/type_temporal_innodb.result index ce2b3a4e53f..b869822722d 100644 --- a/mysql-test/r/type_temporal_innodb.result +++ b/mysql-test/r/type_temporal_innodb.result @@ -154,3 +154,9 @@ SELECT 1 FROM t1 WHERE (SELECT a FROM t1 group by c) = b; Warnings: Warning 1292 Incorrect datetime value: '' DROP TABLE t1; +CREATE TABLE t1 (d DATE) ENGINE=InnoDB; +INSERT INTO t1 VALUES ('2012-12-21'); +SELECT * FROM t1 WHERE LEAST( UTC_TIME(), d ); +d +2012-12-21 +DROP TABLE t1; diff --git a/mysql-test/t/type_temporal_innodb.test b/mysql-test/t/type_temporal_innodb.test index 1ab68961eaa..81f2f586c51 100644 --- a/mysql-test/t/type_temporal_innodb.test +++ b/mysql-test/t/type_temporal_innodb.test @@ -58,3 +58,11 @@ SELECT * FROM t1 IGNORE KEY (b) WHERE b=''; SELECT * FROM t1 WHERE a=b; SELECT 1 FROM t1 WHERE (SELECT a FROM t1 group by c) = b; DROP TABLE t1; + +# +# MDEV-15570 Assertion `Item_cache_temporal::field_type() != MYSQL_TYPE_TIME' failed in Item_cache_temporal::val_datetime_packed +# +CREATE TABLE t1 (d DATE) ENGINE=InnoDB; +INSERT INTO t1 VALUES ('2012-12-21'); +SELECT * FROM t1 WHERE LEAST( UTC_TIME(), d ); +DROP TABLE t1; diff --git a/sql/item.cc b/sql/item.cc index dead75f548c..007b4f4bd54 100644 --- a/sql/item.cc +++ b/sql/item.cc @@ -9707,7 +9707,6 @@ Item_cache_temporal::Item_cache_temporal(THD *thd, longlong Item_cache_temporal::val_datetime_packed() { DBUG_ASSERT(fixed == 1); - DBUG_ASSERT(Item_cache_temporal::field_type() != MYSQL_TYPE_TIME); 
if ((!value_cached && !cache_value()) || null_value) { null_value= TRUE; From 7003067a09979429ce5b9456aa98283652714657 Mon Sep 17 00:00:00 2001 From: Thirunarayanan Balathandayuthapani Date: Fri, 23 Mar 2018 13:27:33 +0530 Subject: [PATCH 123/139] - Fixing innodb.purge_secondary test case. --- mysql-test/suite/innodb/r/purge_secondary.result | 11 +++++------ mysql-test/suite/innodb/t/purge_secondary.test | 9 ++++----- 2 files changed, 9 insertions(+), 11 deletions(-) diff --git a/mysql-test/suite/innodb/r/purge_secondary.result b/mysql-test/suite/innodb/r/purge_secondary.result index 67f06194119..a83e2363a20 100644 --- a/mysql-test/suite/innodb/r/purge_secondary.result +++ b/mysql-test/suite/innodb/r/purge_secondary.result @@ -146,12 +146,7 @@ INSERT INTO t1 (a) SELECT NULL FROM t1; INSERT INTO t1 (a) SELECT NULL FROM t1; INSERT INTO t1 (a) SELECT NULL FROM t1; INSERT INTO t1 (a) SELECT NULL FROM t1; -INSERT INTO t1 (a) SELECT NULL FROM t1; -INSERT INTO t1 (a) SELECT NULL FROM t1; -SELECT NAME, SUBSYSTEM FROM INFORMATION_SCHEMA.INNODB_METRICS -WHERE NAME="buffer_LRU_batch_evict_total_pages" AND COUNT > 0; -NAME SUBSYSTEM -buffer_LRU_batch_evict_total_pages buffer +UPDATE t1 SET c=true, l=ST_linefromtext('linestring(0 0,1 1,2 2)'); SELECT NAME, SUBSYSTEM FROM INFORMATION_SCHEMA.INNODB_METRICS WHERE NAME="buffer_LRU_batch_flush_total_pages" AND COUNT > 0; NAME SUBSYSTEM @@ -160,6 +155,10 @@ SELECT (variable_value > 0) FROM information_schema.global_status WHERE LOWER(variable_name) LIKE 'INNODB_BUFFER_POOL_PAGES_FLUSHED'; (variable_value > 0) 1 +SELECT NAME, SUBSYSTEM FROM INFORMATION_SCHEMA.INNODB_METRICS +WHERE NAME="buffer_LRU_batch_evict_total_pages" AND COUNT > 0; +NAME SUBSYSTEM +buffer_LRU_batch_evict_total_pages buffer # Note: The OTHER_INDEX_SIZE does not cover any SPATIAL INDEX. 
# To test that all indexes were emptied, replace DROP TABLE # with the following, and examine the root pages in t1.ibd: diff --git a/mysql-test/suite/innodb/t/purge_secondary.test b/mysql-test/suite/innodb/t/purge_secondary.test index ce2f48e0d40..973c7800162 100644 --- a/mysql-test/suite/innodb/t/purge_secondary.test +++ b/mysql-test/suite/innodb/t/purge_secondary.test @@ -129,11 +129,7 @@ INSERT INTO t1 (a) SELECT NULL FROM t1; INSERT INTO t1 (a) SELECT NULL FROM t1; INSERT INTO t1 (a) SELECT NULL FROM t1; INSERT INTO t1 (a) SELECT NULL FROM t1; -INSERT INTO t1 (a) SELECT NULL FROM t1; -INSERT INTO t1 (a) SELECT NULL FROM t1; - -SELECT NAME, SUBSYSTEM FROM INFORMATION_SCHEMA.INNODB_METRICS -WHERE NAME="buffer_LRU_batch_evict_total_pages" AND COUNT > 0; +UPDATE t1 SET c=true, l=ST_linefromtext('linestring(0 0,1 1,2 2)'); SELECT NAME, SUBSYSTEM FROM INFORMATION_SCHEMA.INNODB_METRICS WHERE NAME="buffer_LRU_batch_flush_total_pages" AND COUNT > 0; @@ -141,6 +137,9 @@ WHERE NAME="buffer_LRU_batch_flush_total_pages" AND COUNT > 0; SELECT (variable_value > 0) FROM information_schema.global_status WHERE LOWER(variable_name) LIKE 'INNODB_BUFFER_POOL_PAGES_FLUSHED'; +SELECT NAME, SUBSYSTEM FROM INFORMATION_SCHEMA.INNODB_METRICS +WHERE NAME="buffer_LRU_batch_evict_total_pages" AND COUNT > 0; + --echo # Note: The OTHER_INDEX_SIZE does not cover any SPATIAL INDEX. --echo # To test that all indexes were emptied, replace DROP TABLE --echo # with the following, and examine the root pages in t1.ibd: From ca291015bcb900bcfe3fb1418ba6a345d3cea2fd Mon Sep 17 00:00:00 2001 From: Sergey Vojtovich Date: Tue, 20 Mar 2018 17:25:49 +0400 Subject: [PATCH 124/139] MDEV-10269 - Killed queries from I_S stay in 'Killed' state for long time and don't let server shut down Queries from I_S in "Filling schema table" state didn't check killed flag. For large tables this phase may take a while to complete. Fixed by adding thd->killed flag check for each processed row. 
--- sql/sql_show.cc | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/sql/sql_show.cc b/sql/sql_show.cc index 5a78a27a907..c152efdc082 100644 --- a/sql/sql_show.cc +++ b/sql/sql_show.cc @@ -3483,6 +3483,13 @@ extern ST_SCHEMA_TABLE schema_tables[]; bool schema_table_store_record(THD *thd, TABLE *table) { int error; + + if (thd->killed) + { + thd->send_kill_message(); + return 1; + } + if ((error= table->file->ha_write_tmp_row(table->record[0]))) { TMP_TABLE_PARAM *param= table->pos_in_table_list->schema_table_param; From af86422f0881cbe6b21a62a3bb7bbd93f57599ed Mon Sep 17 00:00:00 2001 From: Vladislav Vaintroub Date: Wed, 21 Mar 2018 22:29:00 +0000 Subject: [PATCH 125/139] MDEV-13023 mariabackup does not preserve holes for page compressed tables. Changed "local" datasink logic to detect page compressed Innodb tables. Whenever such table is detected, holes in the copied files are created by skipping over binary zeros at the end of each compressed page. --- extra/mariabackup/CMakeLists.txt | 5 +- extra/mariabackup/datasink.c | 2 +- extra/mariabackup/datasink.h | 2 +- extra/mariabackup/ds_buffer.c | 4 +- extra/mariabackup/ds_compress.c | 4 +- extra/mariabackup/ds_local.c | 151 ------------------ extra/mariabackup/ds_local.cc | 259 +++++++++++++++++++++++++++++++ extra/mariabackup/ds_local.h | 8 +- extra/mariabackup/ds_stdout.c | 4 +- extra/mariabackup/ds_tmpfile.c | 4 +- extra/mariabackup/ds_xbstream.c | 4 +- 11 files changed, 281 insertions(+), 166 deletions(-) delete mode 100644 extra/mariabackup/ds_local.c create mode 100644 extra/mariabackup/ds_local.cc diff --git a/extra/mariabackup/CMakeLists.txt b/extra/mariabackup/CMakeLists.txt index ac15460660c..0e6336fe5e6 100644 --- a/extra/mariabackup/CMakeLists.txt +++ b/extra/mariabackup/CMakeLists.txt @@ -61,7 +61,7 @@ MYSQL_ADD_EXECUTABLE(mariabackup datasink.c ds_buffer.c ds_compress.c - ds_local.c + ds_local.cc ds_stdout.c ds_tmpfile.c ds_xbstream.c @@ -98,7 +98,7 @@ ENDIF() 
######################################################################## MYSQL_ADD_EXECUTABLE(mbstream ds_buffer.c - ds_local.c + ds_local.cc ds_stdout.c datasink.c xbstream.c @@ -112,6 +112,7 @@ TARGET_LINK_LIBRARIES(mbstream mysys crc ) +ADD_DEPENDENCIES(mbstream GenError) IF(MSVC) SET_TARGET_PROPERTIES(mbstream PROPERTIES LINK_FLAGS setargv.obj) diff --git a/extra/mariabackup/datasink.c b/extra/mariabackup/datasink.c index 460e0e8ca19..1459da2fb57 100644 --- a/extra/mariabackup/datasink.c +++ b/extra/mariabackup/datasink.c @@ -108,7 +108,7 @@ Write to a datasink file. int ds_write(ds_file_t *file, const void *buf, size_t len) { - return file->datasink->write(file, buf, len); + return file->datasink->write(file, (const uchar *)buf, len); } /************************************************************************ diff --git a/extra/mariabackup/datasink.h b/extra/mariabackup/datasink.h index 8bf1321aad1..5962e9ba4b7 100644 --- a/extra/mariabackup/datasink.h +++ b/extra/mariabackup/datasink.h @@ -48,7 +48,7 @@ typedef struct { struct datasink_struct { ds_ctxt_t *(*init)(const char *root); ds_file_t *(*open)(ds_ctxt_t *ctxt, const char *path, MY_STAT *stat); - int (*write)(ds_file_t *file, const void *buf, size_t len); + int (*write)(ds_file_t *file, const unsigned char *buf, size_t len); int (*close)(ds_file_t *file); void (*deinit)(ds_ctxt_t *ctxt); }; diff --git a/extra/mariabackup/ds_buffer.c b/extra/mariabackup/ds_buffer.c index 4bb314c0f50..13c05f38918 100644 --- a/extra/mariabackup/ds_buffer.c +++ b/extra/mariabackup/ds_buffer.c @@ -45,7 +45,7 @@ typedef struct { static ds_ctxt_t *buffer_init(const char *root); static ds_file_t *buffer_open(ds_ctxt_t *ctxt, const char *path, MY_STAT *mystat); -static int buffer_write(ds_file_t *file, const void *buf, size_t len); +static int buffer_write(ds_file_t *file, const uchar *buf, size_t len); static int buffer_close(ds_file_t *file); static void buffer_deinit(ds_ctxt_t *ctxt); @@ -119,7 +119,7 @@ buffer_open(ds_ctxt_t 
*ctxt, const char *path, MY_STAT *mystat) } static int -buffer_write(ds_file_t *file, const void *buf, size_t len) +buffer_write(ds_file_t *file, const uchar *buf, size_t len) { ds_buffer_file_t *buffer_file; diff --git a/extra/mariabackup/ds_compress.c b/extra/mariabackup/ds_compress.c index 15801c8abd4..88f50857362 100644 --- a/extra/mariabackup/ds_compress.c +++ b/extra/mariabackup/ds_compress.c @@ -65,7 +65,7 @@ extern ulonglong xtrabackup_compress_chunk_size; static ds_ctxt_t *compress_init(const char *root); static ds_file_t *compress_open(ds_ctxt_t *ctxt, const char *path, MY_STAT *mystat); -static int compress_write(ds_file_t *file, const void *buf, size_t len); +static int compress_write(ds_file_t *file, const uchar *buf, size_t len); static int compress_close(ds_file_t *file); static void compress_deinit(ds_ctxt_t *ctxt); @@ -178,7 +178,7 @@ err: static int -compress_write(ds_file_t *file, const void *buf, size_t len) +compress_write(ds_file_t *file, const uchar *buf, size_t len) { ds_compress_file_t *comp_file; ds_compress_ctxt_t *comp_ctxt; diff --git a/extra/mariabackup/ds_local.c b/extra/mariabackup/ds_local.c deleted file mode 100644 index 3e2b1e0129b..00000000000 --- a/extra/mariabackup/ds_local.c +++ /dev/null @@ -1,151 +0,0 @@ -/****************************************************** -Copyright (c) 2011-2013 Percona LLC and/or its affiliates. - -Local datasink implementation for XtraBackup. - -This program is free software; you can redistribute it and/or modify -it under the terms of the GNU General Public License as published by -the Free Software Foundation; version 2 of the License. - -This program is distributed in the hope that it will be useful, -but WITHOUT ANY WARRANTY; without even the implied warranty of -MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -GNU General Public License for more details. 
- -You should have received a copy of the GNU General Public License -along with this program; if not, write to the Free Software -Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA - -*******************************************************/ - -#include -#include -#include -#include "common.h" -#include "datasink.h" - -typedef struct { - File fd; -} ds_local_file_t; - -static ds_ctxt_t *local_init(const char *root); -static ds_file_t *local_open(ds_ctxt_t *ctxt, const char *path, - MY_STAT *mystat); -static int local_write(ds_file_t *file, const void *buf, size_t len); -static int local_close(ds_file_t *file); -static void local_deinit(ds_ctxt_t *ctxt); - -datasink_t datasink_local = { - &local_init, - &local_open, - &local_write, - &local_close, - &local_deinit -}; - -static -ds_ctxt_t * -local_init(const char *root) -{ - ds_ctxt_t *ctxt; - - if (my_mkdir(root, 0777, MYF(0)) < 0 - && my_errno != EEXIST && my_errno != EISDIR) - { - char errbuf[MYSYS_STRERROR_SIZE]; - my_strerror(errbuf, sizeof(errbuf),my_errno); - my_error(EE_CANT_MKDIR, MYF(ME_BELL | ME_WAITTANG), - root, my_errno,errbuf, my_errno); - return NULL; - } - - ctxt = my_malloc(sizeof(ds_ctxt_t), MYF(MY_FAE)); - - ctxt->root = my_strdup(root, MYF(MY_FAE)); - - return ctxt; -} - -static -ds_file_t * -local_open(ds_ctxt_t *ctxt, const char *path, - MY_STAT *mystat __attribute__((unused))) -{ - char fullpath[FN_REFLEN]; - char dirpath[FN_REFLEN]; - size_t dirpath_len; - size_t path_len; - ds_local_file_t *local_file; - ds_file_t *file; - File fd; - - fn_format(fullpath, path, ctxt->root, "", MYF(MY_RELATIVE_PATH)); - - /* Create the directory if needed */ - dirname_part(dirpath, fullpath, &dirpath_len); - if (my_mkdir(dirpath, 0777, MYF(0)) < 0 && my_errno != EEXIST) { - char errbuf[MYSYS_STRERROR_SIZE]; - my_strerror(errbuf, sizeof(errbuf), my_errno); - my_error(EE_CANT_MKDIR, MYF(ME_BELL | ME_WAITTANG), - dirpath, my_errno, errbuf); - return NULL; - } - - fd = 
my_create(fullpath, 0, O_WRONLY | O_BINARY | O_EXCL | O_NOFOLLOW, - MYF(MY_WME)); - if (fd < 0) { - return NULL; - } - - path_len = strlen(fullpath) + 1; /* terminating '\0' */ - - file = (ds_file_t *) my_malloc(sizeof(ds_file_t) + - sizeof(ds_local_file_t) + - path_len, - MYF(MY_FAE)); - local_file = (ds_local_file_t *) (file + 1); - - local_file->fd = fd; - - file->path = (char *) local_file + sizeof(ds_local_file_t); - memcpy(file->path, fullpath, path_len); - - file->ptr = local_file; - - return file; -} - -static -int -local_write(ds_file_t *file, const void *buf, size_t len) -{ - File fd = ((ds_local_file_t *) file->ptr)->fd; - - if (!my_write(fd, buf, len, MYF(MY_WME | MY_NABP))) { - posix_fadvise(fd, 0, 0, POSIX_FADV_DONTNEED); - return 0; - } - - return 1; -} - -static -int -local_close(ds_file_t *file) -{ - File fd = ((ds_local_file_t *) file->ptr)->fd; - - my_free(file); - - my_sync(fd, MYF(MY_WME)); - - return my_close(fd, MYF(MY_WME)); -} - -static -void -local_deinit(ds_ctxt_t *ctxt) -{ - my_free(ctxt->root); - my_free(ctxt); -} diff --git a/extra/mariabackup/ds_local.cc b/extra/mariabackup/ds_local.cc new file mode 100644 index 00000000000..f1068d251dc --- /dev/null +++ b/extra/mariabackup/ds_local.cc @@ -0,0 +1,259 @@ +/****************************************************** +Copyright (c) 2011-2013 Percona LLC and/or its affiliates. + +Local datasink implementation for XtraBackup. + +This program is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; version 2 of the License. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. 
+ +You should have received a copy of the GNU General Public License +along with this program; if not, write to the Free Software +Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA + +*******************************************************/ + +#include +#include +#include +#include +#include "common.h" +#include "datasink.h" +#include "univ.i" +#include "fsp0fsp.h" +#ifdef _WIN32 +#include +#endif + +typedef struct { + File fd; + my_bool init_ibd_done; + my_bool is_ibd; + my_bool compressed; + size_t pagesize; +} ds_local_file_t; + +static ds_ctxt_t *local_init(const char *root); +static ds_file_t *local_open(ds_ctxt_t *ctxt, const char *path, + MY_STAT *mystat); +static int local_write(ds_file_t *file, const uchar *buf, size_t len); +static int local_close(ds_file_t *file); +static void local_deinit(ds_ctxt_t *ctxt); + +extern "C" { +datasink_t datasink_local = { + &local_init, + &local_open, + &local_write, + &local_close, + &local_deinit +}; +} + +static +ds_ctxt_t * +local_init(const char *root) +{ + ds_ctxt_t *ctxt; + + if (my_mkdir(root, 0777, MYF(0)) < 0 + && my_errno != EEXIST && my_errno != EISDIR) + { + char errbuf[MYSYS_STRERROR_SIZE]; + my_strerror(errbuf, sizeof(errbuf),my_errno); + my_error(EE_CANT_MKDIR, MYF(ME_BELL | ME_WAITTANG), + root, my_errno,errbuf, my_errno); + return NULL; + } + + ctxt = (ds_ctxt_t *)my_malloc(sizeof(ds_ctxt_t), MYF(MY_FAE)); + + ctxt->root = my_strdup(root, MYF(MY_FAE)); + + return ctxt; +} + +static +ds_file_t * +local_open(ds_ctxt_t *ctxt, const char *path, + MY_STAT *mystat __attribute__((unused))) +{ + char fullpath[FN_REFLEN]; + char dirpath[FN_REFLEN]; + size_t dirpath_len; + size_t path_len; + ds_local_file_t *local_file; + ds_file_t *file; + File fd; + + fn_format(fullpath, path, ctxt->root, "", MYF(MY_RELATIVE_PATH)); + + /* Create the directory if needed */ + dirname_part(dirpath, fullpath, &dirpath_len); + if (my_mkdir(dirpath, 0777, MYF(0)) < 0 && my_errno != EEXIST) { + char 
errbuf[MYSYS_STRERROR_SIZE]; + my_strerror(errbuf, sizeof(errbuf), my_errno); + my_error(EE_CANT_MKDIR, MYF(ME_BELL | ME_WAITTANG), + dirpath, my_errno, errbuf); + return NULL; + } + + fd = my_create(fullpath, 0, O_WRONLY | O_BINARY | O_EXCL | O_NOFOLLOW, + MYF(MY_WME)); + if (fd < 0) { + return NULL; + } + + path_len = strlen(fullpath) + 1; /* terminating '\0' */ + + file = (ds_file_t *) my_malloc(sizeof(ds_file_t) + + sizeof(ds_local_file_t) + + path_len, + MYF(MY_FAE)); + local_file = (ds_local_file_t *) (file + 1); + + local_file->fd = fd; + local_file->init_ibd_done = 0; + local_file->is_ibd = (path_len > 5) && !strcmp(fullpath + path_len - 5, ".ibd"); + local_file->compressed = 0; + local_file->pagesize = 0; + file->path = (char *) local_file + sizeof(ds_local_file_t); + memcpy(file->path, fullpath, path_len); + + file->ptr = local_file; + + return file; +} + +/* Calculate size of data without trailing zero bytes. */ +static size_t trim_binary_zeros(uchar *buf, size_t pagesize) +{ + size_t i; + for (i = pagesize; (i > 0) && (buf[i - 1] == 0); i--) {}; + return i; +} + + +/* Write data to the output file, and punch "holes" if needed. */ +static int write_compressed(File fd, uchar *data, size_t len, size_t pagesize) +{ + uchar *ptr = data; + for (size_t written= 0; written < len;) + { + size_t n_bytes = MY_MIN(pagesize, len - written); + size_t datasize= trim_binary_zeros(ptr,n_bytes); + if (datasize > 0) { + if (!my_write(fd, ptr, datasize, MYF(MY_WME | MY_NABP))) + posix_fadvise(fd, 0, 0, POSIX_FADV_DONTNEED); + else + return 1; + } + if (datasize < n_bytes) { + /* This punches a "hole" in the file. */ + size_t hole_bytes = n_bytes - datasize; + if (my_seek(fd, hole_bytes, MY_SEEK_CUR, MYF(MY_WME | MY_NABP)) + == MY_FILEPOS_ERROR) + return 1; + } + written += n_bytes; + ptr += n_bytes; + } + return 0; +} + + +/* Calculate Innodb tablespace specific data, when first page is written. + We're interested in page compression and page size. 
+*/ +static void init_ibd_data(ds_local_file_t *local_file, const uchar *buf, size_t len) +{ + if (len < FIL_PAGE_DATA + FSP_SPACE_FLAGS) { + /* Weird, bail out.*/ + return; + } + + ulint flags = mach_read_from_4(&buf[FIL_PAGE_DATA + FSP_SPACE_FLAGS]); + ulint ssize = FSP_FLAGS_GET_PAGE_SSIZE(flags); + local_file->pagesize= ssize == 0 ? UNIV_PAGE_SIZE_ORIG : ((UNIV_ZIP_SIZE_MIN >> 1) << ssize); + local_file->compressed = (my_bool)FSP_FLAGS_HAS_PAGE_COMPRESSION(flags); + +#if defined(_WIN32) && (MYSQL_VERSION_ID > 100200) + /* Make compressed file sparse, on Windows. + In 10.1, we do not use sparse files. */ + if (local_file->compressed) { + HANDLE handle= my_get_osfhandle(local_file->fd); + if (!DeviceIoControl(handle, FSCTL_SET_SPARSE, NULL, 0, NULL, 0, NULL, 0)) { + fprintf(stderr, "Warning: cannot make file sparse"); + local_file->compressed = 0; + } + } +#endif +} + + +static +int +local_write(ds_file_t *file, const uchar *buf, size_t len) +{ + uchar *b = (uchar*)buf; + ds_local_file_t *local_file= (ds_local_file_t *)file->ptr; + File fd = local_file->fd; + + if (local_file->is_ibd && !local_file->init_ibd_done) { + init_ibd_data(local_file, b , len); + local_file->init_ibd_done= 1; + } + + if (local_file->compressed) { + return write_compressed(fd, b, len, local_file->pagesize); + } + + if (!my_write(fd, b , len, MYF(MY_WME | MY_NABP))) { + posix_fadvise(fd, 0, 0, POSIX_FADV_DONTNEED); + return 0; + } + return 1; +} + +/* Set EOF at file's current position.*/ +static int set_eof(File fd) +{ +#ifdef _WIN32 + return !SetEndOfFile(my_get_osfhandle(fd)); +#elif defined(HAVE_FTRUNCATE) + return ftruncate(fd, my_tell(fd, MYF(MY_WME))); +#else +#error no ftruncate +#endif +} + + +static +int +local_close(ds_file_t *file) +{ + ds_local_file_t *local_file= (ds_local_file_t *)file->ptr; + File fd = local_file->fd; + int ret= 0; + + if (local_file->compressed) { + ret = set_eof(fd); + } + + my_close(fd, MYF(MY_WME)); + my_free(file); + return ret; +} + +static +void 
+local_deinit(ds_ctxt_t *ctxt) +{ + my_free(ctxt->root); + my_free(ctxt); +} diff --git a/extra/mariabackup/ds_local.h b/extra/mariabackup/ds_local.h index b0f0f04030c..e30906b575d 100644 --- a/extra/mariabackup/ds_local.h +++ b/extra/mariabackup/ds_local.h @@ -23,6 +23,12 @@ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA #include "datasink.h" -extern datasink_t datasink_local; +#ifdef __cplusplus +extern "C" +#else +extern +#endif + +datasink_t datasink_local; #endif diff --git a/extra/mariabackup/ds_stdout.c b/extra/mariabackup/ds_stdout.c index 91a514ddf64..391a3455195 100644 --- a/extra/mariabackup/ds_stdout.c +++ b/extra/mariabackup/ds_stdout.c @@ -30,7 +30,7 @@ typedef struct { static ds_ctxt_t *stdout_init(const char *root); static ds_file_t *stdout_open(ds_ctxt_t *ctxt, const char *path, MY_STAT *mystat); -static int stdout_write(ds_file_t *file, const void *buf, size_t len); +static int stdout_write(ds_file_t *file, const uchar *buf, size_t len); static int stdout_close(ds_file_t *file); static void stdout_deinit(ds_ctxt_t *ctxt); @@ -91,7 +91,7 @@ stdout_open(ds_ctxt_t *ctxt __attribute__((unused)), static int -stdout_write(ds_file_t *file, const void *buf, size_t len) +stdout_write(ds_file_t *file, const uchar *buf, size_t len) { File fd = ((ds_stdout_file_t *) file->ptr)->fd; diff --git a/extra/mariabackup/ds_tmpfile.c b/extra/mariabackup/ds_tmpfile.c index b039d83ba03..27a8d9688f4 100644 --- a/extra/mariabackup/ds_tmpfile.c +++ b/extra/mariabackup/ds_tmpfile.c @@ -41,7 +41,7 @@ typedef struct { static ds_ctxt_t *tmpfile_init(const char *root); static ds_file_t *tmpfile_open(ds_ctxt_t *ctxt, const char *path, MY_STAT *mystat); -static int tmpfile_write(ds_file_t *file, const void *buf, size_t len); +static int tmpfile_write(ds_file_t *file, const uchar *buf, size_t len); static int tmpfile_close(ds_file_t *file); static void tmpfile_deinit(ds_ctxt_t *ctxt); @@ -143,7 +143,7 @@ tmpfile_open(ds_ctxt_t *ctxt, const char 
*path, } static int -tmpfile_write(ds_file_t *file, const void *buf, size_t len) +tmpfile_write(ds_file_t *file, const uchar *buf, size_t len) { File fd = ((ds_tmp_file_t *) file->ptr)->fd; diff --git a/extra/mariabackup/ds_xbstream.c b/extra/mariabackup/ds_xbstream.c index 42924a72d7f..544929fb24c 100644 --- a/extra/mariabackup/ds_xbstream.c +++ b/extra/mariabackup/ds_xbstream.c @@ -41,7 +41,7 @@ General streaming interface */ static ds_ctxt_t *xbstream_init(const char *root); static ds_file_t *xbstream_open(ds_ctxt_t *ctxt, const char *path, MY_STAT *mystat); -static int xbstream_write(ds_file_t *file, const void *buf, size_t len); +static int xbstream_write(ds_file_t *file, const uchar *buf, size_t len); static int xbstream_close(ds_file_t *file); static void xbstream_deinit(ds_ctxt_t *ctxt); @@ -166,7 +166,7 @@ err: static int -xbstream_write(ds_file_t *file, const void *buf, size_t len) +xbstream_write(ds_file_t *file, const uchar *buf, size_t len) { ds_stream_file_t *stream_file; xb_wstream_file_t *xbstream_file; From d3681c18f96ebf9b66c7541459b2eb34a4827734 Mon Sep 17 00:00:00 2001 From: Sergei Golubchik Date: Tue, 14 Nov 2017 07:22:25 +0800 Subject: [PATCH 126/139] followup for 89b0d5cb6e3, backport 8c422bf48d7 --- scripts/wsrep_sst_common.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/scripts/wsrep_sst_common.sh b/scripts/wsrep_sst_common.sh index db0543a4d0f..47d9021fe55 100644 --- a/scripts/wsrep_sst_common.sh +++ b/scripts/wsrep_sst_common.sh @@ -126,9 +126,9 @@ done readonly WSREP_SST_OPT_BYPASS readonly WSREP_SST_OPT_BINLOG -if [ -n "${WSREP_SST_OPT_ADDR_PORT:-}" ]; then +if [ -n "${WSREP_SST_OPT_ADDR:-}" ]; then if [ -n "${WSREP_SST_OPT_PORT:-}" ]; then - if [ "$WSREP_SST_OPT_PORT" != "$WSREP_SST_OPT_ADDR_PORT" ]; then + if [ -n "$WSREP_SST_OPT_ADDR_PORT" -a "$WSREP_SST_OPT_PORT" != "$WSREP_SST_OPT_ADDR_PORT" ]; then wsrep_log_error "port in --port=$WSREP_SST_OPT_PORT differs from port in --address=$WSREP_SST_OPT_ADDR" exit 
2 fi From 3b644ac1f71f2259f66f53a19c78eed90f7a692c Mon Sep 17 00:00:00 2001 From: Alexey Botchkov Date: Sat, 24 Mar 2018 00:30:28 +0400 Subject: [PATCH 127/139] MDEV-14533 Provide information_schema tables using which hardware information can be obtained. disks.test moved to plugin's directory. --- .../r => plugin/disks/mysql-test/disks}/disks.result | 0 .../t => plugin/disks/mysql-test/disks}/disks.test | 0 plugin/disks/mysql-test/disks/suite.opt | 1 + plugin/disks/mysql-test/disks/suite.pm | 10 ++++++++++ 4 files changed, 11 insertions(+) rename {mysql-test/suite/plugins/r => plugin/disks/mysql-test/disks}/disks.result (100%) rename {mysql-test/suite/plugins/t => plugin/disks/mysql-test/disks}/disks.test (100%) create mode 100644 plugin/disks/mysql-test/disks/suite.opt create mode 100644 plugin/disks/mysql-test/disks/suite.pm diff --git a/mysql-test/suite/plugins/r/disks.result b/plugin/disks/mysql-test/disks/disks.result similarity index 100% rename from mysql-test/suite/plugins/r/disks.result rename to plugin/disks/mysql-test/disks/disks.result diff --git a/mysql-test/suite/plugins/t/disks.test b/plugin/disks/mysql-test/disks/disks.test similarity index 100% rename from mysql-test/suite/plugins/t/disks.test rename to plugin/disks/mysql-test/disks/disks.test diff --git a/plugin/disks/mysql-test/disks/suite.opt b/plugin/disks/mysql-test/disks/suite.opt new file mode 100644 index 00000000000..afbbe2b0163 --- /dev/null +++ b/plugin/disks/mysql-test/disks/suite.opt @@ -0,0 +1 @@ +--plugin-load-add=$DISKS_SO diff --git a/plugin/disks/mysql-test/disks/suite.pm b/plugin/disks/mysql-test/disks/suite.pm new file mode 100644 index 00000000000..c64ef3b3133 --- /dev/null +++ b/plugin/disks/mysql-test/disks/suite.pm @@ -0,0 +1,10 @@ +package My::Suite::Disks; + +@ISA = qw(My::Suite); + +return "No Disks plugin" unless $ENV{DISKS_SO}; + +sub is_default { 1 } + +bless { }; + From 0b74a1fa64fdb21a43c8880274d1639fb9ef816e Mon Sep 17 00:00:00 2001 From: Alexey Botchkov Date: Sat, 
24 Mar 2018 00:37:38 +0400 Subject: [PATCH 128/139] MDEV-14533 Provide information_schema tables using which hardware information can be obtained. plugin only enabled for Linux, as it fails building on BSD/MacOSX. disks.test fixed. --- plugin/disks/CMakeLists.txt | 2 +- plugin/disks/mysql-test/disks/disks.result | 2 -- plugin/disks/mysql-test/disks/disks.test | 9 --------- 3 files changed, 1 insertion(+), 12 deletions(-) diff --git a/plugin/disks/CMakeLists.txt b/plugin/disks/CMakeLists.txt index a0ed929c62c..446c64d0fdd 100644 --- a/plugin/disks/CMakeLists.txt +++ b/plugin/disks/CMakeLists.txt @@ -1,4 +1,4 @@ -IF(NOT WIN32) +IF("${CMAKE_SYSTEM}" MATCHES "Linux") INCLUDE_DIRECTORIES(${CMAKE_SOURCE_DIR}/sql) MYSQL_ADD_PLUGIN(DISKS information_schema_disks.cc MODULE_ONLY RECOMPILE_FOR_EMBEDDED) ENDIF() diff --git a/plugin/disks/mysql-test/disks/disks.result b/plugin/disks/mysql-test/disks/disks.result index 53e73ec6f66..bd6befc5e11 100644 --- a/plugin/disks/mysql-test/disks/disks.result +++ b/plugin/disks/mysql-test/disks/disks.result @@ -1,4 +1,3 @@ -install plugin DISKS soname 'disks'; show create table information_schema.disks; Table Create Table DISKS CREATE TEMPORARY TABLE `DISKS` ( @@ -11,4 +10,3 @@ DISKS CREATE TEMPORARY TABLE `DISKS` ( select sum(Total) > sum(Available), sum(Total)>sum(Used) from information_schema.disks; sum(Total) > sum(Available) sum(Total)>sum(Used) 1 1 -uninstall plugin DISKS; diff --git a/plugin/disks/mysql-test/disks/disks.test b/plugin/disks/mysql-test/disks/disks.test index a2371b97584..13a0762ae01 100644 --- a/plugin/disks/mysql-test/disks/disks.test +++ b/plugin/disks/mysql-test/disks/disks.test @@ -1,11 +1,2 @@ ---source include/not_windows.inc - -if (!$DISKS_SO) { - skip No DISKS plugin; -} - -install plugin DISKS soname 'disks'; show create table information_schema.disks; select sum(Total) > sum(Available), sum(Total)>sum(Used) from information_schema.disks; - -uninstall plugin DISKS; From 
5fdbc3f66b68c049546afdec94aa469c6266beb2 Mon Sep 17 00:00:00 2001 From: Sergei Golubchik Date: Sat, 24 Mar 2018 13:50:52 +0100 Subject: [PATCH 129/139] compiler warning extra/mariabackup/ds_buffer.c:145:9: warning: pointer targets in assignment differ in signedness [-Wpointer-sign] --- extra/mariabackup/ds_buffer.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/extra/mariabackup/ds_buffer.c b/extra/mariabackup/ds_buffer.c index 13c05f38918..8e13e878953 100644 --- a/extra/mariabackup/ds_buffer.c +++ b/extra/mariabackup/ds_buffer.c @@ -142,7 +142,7 @@ buffer_write(ds_file_t *file, const uchar *buf, size_t len) buffer_file->pos = 0; - buf = (const char *) buf + bytes; + buf += bytes; len -= bytes; } else { /* We don't have any buffered bytes, just write From 7454d5f95276e84a42cdb08c1d2c23aa578f63bd Mon Sep 17 00:00:00 2001 From: Sergei Golubchik Date: Sat, 24 Mar 2018 12:34:14 +0100 Subject: [PATCH 130/139] save/restore auto_inc settings in galera_sst_mysqldump test and remove redundant have_innodb.inc, it's included in galera_cluster.inc anyway. 
--- mysql-test/suite/galera/t/galera_sst_mysqldump.test | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/mysql-test/suite/galera/t/galera_sst_mysqldump.test b/mysql-test/suite/galera/t/galera_sst_mysqldump.test index ce112c57745..390e9815b20 100644 --- a/mysql-test/suite/galera/t/galera_sst_mysqldump.test +++ b/mysql-test/suite/galera/t/galera_sst_mysqldump.test @@ -1,8 +1,11 @@ --source include/galera_cluster.inc ---source include/have_innodb.inc --source suite/galera/include/galera_sst_set_mysqldump.inc +--let $node_1=node_1 +--let $node_2=node_2 +--source include/auto_increment_offset_save.inc + --source suite/galera/include/galera_st_disconnect_slave.inc # We set the required mysqldump SST options here so that they are used every time the server is restarted during the test @@ -14,4 +17,5 @@ --source suite/galera/include/galera_st_kill_slave.inc --source suite/galera/include/galera_st_kill_slave_ddl.inc +--source include/auto_increment_offset_restore.inc --source suite/galera/include/galera_sst_restore.inc From 15795b9f9a850d9587f17ef18e1ac7b5af31ec1e Mon Sep 17 00:00:00 2001 From: Sergei Golubchik Date: Sat, 24 Mar 2018 14:24:20 +0100 Subject: [PATCH 131/139] save/restore auto_inc settings in galera_sst_rsync test and remove redundant have_innodb.inc, it's included in galera_cluster.inc anyway. 
--- mysql-test/suite/galera/t/galera_sst_rsync.test | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/mysql-test/suite/galera/t/galera_sst_rsync.test b/mysql-test/suite/galera/t/galera_sst_rsync.test index d8fa3efb5d7..f796356cac7 100644 --- a/mysql-test/suite/galera/t/galera_sst_rsync.test +++ b/mysql-test/suite/galera/t/galera_sst_rsync.test @@ -1,8 +1,12 @@ --source include/galera_cluster.inc ---source include/have_innodb.inc + +--let $node_1=node_1 +--let $node_2=node_2 +--source include/auto_increment_offset_save.inc --source suite/galera/include/galera_st_shutdown_slave.inc --source suite/galera/include/galera_st_clean_slave.inc --source suite/galera/include/galera_st_kill_slave.inc --source suite/galera/include/galera_st_kill_slave_ddl.inc +--source include/auto_increment_offset_restore.inc From d702e463902bca7d94ff8a1a49468a5b7bdb4ba0 Mon Sep 17 00:00:00 2001 From: Alexey Botchkov Date: Sun, 25 Mar 2018 00:15:11 +0400 Subject: [PATCH 132/139] MDEV-15561 json_extract returns NULL with numbers in scientific notation. Scientific notation handling fixed. 
--- mysql-test/r/func_json.result | 3 ++ mysql-test/suite/json/r/json_no_table.result | 40 +++++--------------- mysql-test/t/func_json.test | 6 +++ strings/json_lib.c | 4 +- 4 files changed, 21 insertions(+), 32 deletions(-) diff --git a/mysql-test/r/func_json.result b/mysql-test/r/func_json.result index b60f6e970e1..2ffeb83b0de 100644 --- a/mysql-test/r/func_json.result +++ b/mysql-test/r/func_json.result @@ -736,3 +736,6 @@ insert into t1 values (2),(1); select 1 from t1 where json_extract(a,'$','$[81]'); 1 drop table t1; +select json_extract('{"test":8.437e-5}','$.test'); +json_extract('{"test":8.437e-5}','$.test') +8.437e-5 diff --git a/mysql-test/suite/json/r/json_no_table.result b/mysql-test/suite/json/r/json_no_table.result index 034a4e6c0a6..41150032e51 100644 --- a/mysql-test/suite/json/r/json_no_table.result +++ b/mysql-test/suite/json/r/json_no_table.result @@ -1061,9 +1061,7 @@ json_type(json_compact(3.14)) DOUBLE select json_type(json_compact(3.14E30)); json_type(json_compact(3.14E30)) -NULL -Warnings: -Warning 4038 Syntax error in JSON text in argument 1 to function 'json_type' at position 7 +DOUBLE select json_type(json_compact(cast('10101abcde' as binary))); json_type(json_compact(cast('10101abcde' as binary))) INTEGER @@ -3445,52 +3443,34 @@ JSON_ARRAY(CASE WHEN 1 THEN NULL ELSE NULL END) # SELECT JSON_EXTRACT('-1E-36181012216111515851075235238', '$'); JSON_EXTRACT('-1E-36181012216111515851075235238', '$') -NULL -Warnings: -Warning 4038 Syntax error in JSON text in argument 1 to function 'json_extract' at position 33 +-1E-36181012216111515851075235238 SELECT JSON_EXTRACT('1E-36181012216111515851075235238', '$'); JSON_EXTRACT('1E-36181012216111515851075235238', '$') -NULL -Warnings: -Warning 4038 Syntax error in JSON text in argument 1 to function 'json_extract' at position 32 +1E-36181012216111515851075235238 SELECT JSON_EXTRACT('1E-325', '$'); JSON_EXTRACT('1E-325', '$') -NULL -Warnings: -Warning 4038 Syntax error in JSON text in argument 1 to 
function 'json_extract' at position 6 +1E-325 SELECT JSON_EXTRACT('1E-324', '$'); JSON_EXTRACT('1E-324', '$') -NULL -Warnings: -Warning 4038 Syntax error in JSON text in argument 1 to function 'json_extract' at position 6 +1E-324 SELECT JSON_EXTRACT('1E-323', '$'); JSON_EXTRACT('1E-323', '$') -NULL -Warnings: -Warning 4038 Syntax error in JSON text in argument 1 to function 'json_extract' at position 6 +1E-323 SELECT JSON_EXTRACT('1E+308', '$'); JSON_EXTRACT('1E+308', '$') -NULL -Warnings: -Warning 4038 Syntax error in JSON text in argument 1 to function 'json_extract' at position 6 +1E+308 error ER_INVALID_JSON_TEXT_IN_PARAM SELECT JSON_EXTRACT('1E+309', '$'); JSON_EXTRACT('1E+309', '$') -NULL -Warnings: -Warning 4038 Syntax error in JSON text in argument 1 to function 'json_extract' at position 6 +1E+309 error ER_INVALID_JSON_TEXT_IN_PARAM SELECT JSON_EXTRACT('1E+36181012216111515851075235238', '$'); JSON_EXTRACT('1E+36181012216111515851075235238', '$') -NULL -Warnings: -Warning 4038 Syntax error in JSON text in argument 1 to function 'json_extract' at position 32 +1E+36181012216111515851075235238 error ER_INVALID_JSON_TEXT_IN_PARAM SELECT JSON_EXTRACT('-1E+36181012216111515851075235238', '$'); JSON_EXTRACT('-1E+36181012216111515851075235238', '$') -NULL -Warnings: -Warning 4038 Syntax error in JSON text in argument 1 to function 'json_extract' at position 33 +-1E+36181012216111515851075235238 # # Bug#21383284: ASSERTION IN SELECT_LEX::SETUP_CONDS # diff --git a/mysql-test/t/func_json.test b/mysql-test/t/func_json.test index 0b3cb938098..a6ae934f7b4 100644 --- a/mysql-test/t/func_json.test +++ b/mysql-test/t/func_json.test @@ -392,3 +392,9 @@ insert into t1 values (2),(1); select 1 from t1 where json_extract(a,'$','$[81]'); drop table t1; +# +# MDEV-15561 json_extract returns NULL with numbers in scientific notation. 
+# + +select json_extract('{"test":8.437e-5}','$.test'); + diff --git a/strings/json_lib.c b/strings/json_lib.c index 413ce128149..cbf672f5887 100644 --- a/strings/json_lib.c +++ b/strings/json_lib.c @@ -475,8 +475,8 @@ static int json_num_states[NS_NUM_STATES][N_NUM_CLASSES]= /*ZE1*/ { JE_SYN, JE_SYN, JE_SYN, JE_SYN, NS_FRAC, JE_SYN, NS_OK, JE_BAD_CHR }, /*INT*/ { JE_SYN, JE_SYN, NS_INT, NS_INT, NS_FRAC, NS_EX, NS_OK, JE_BAD_CHR }, /*FRAC*/ { JE_SYN, JE_SYN, NS_FRAC, NS_FRAC,JE_SYN, NS_EX, NS_OK, JE_BAD_CHR }, -/*EX*/ { NS_EX1, NS_EX1, NS_EX1, NS_EX1, JE_SYN, JE_SYN, JE_SYN, JE_BAD_CHR }, -/*EX1*/ { JE_SYN, JE_SYN, NS_EX1, NS_EX1, JE_SYN, JE_SYN, JE_SYN, JE_BAD_CHR } +/*EX*/ { NS_EX, NS_EX, NS_EX1, NS_EX1, JE_SYN, JE_SYN, JE_SYN, JE_BAD_CHR }, +/*EX1*/ { JE_SYN, JE_SYN, NS_EX1, NS_EX1, JE_SYN, JE_SYN, NS_OK, JE_BAD_CHR } }; From e27535093d1670fde6034c92366e8128b683b903 Mon Sep 17 00:00:00 2001 From: Thirunarayanan Balathandayuthapani Date: Mon, 26 Mar 2018 15:48:27 +0530 Subject: [PATCH 133/139] - Follow-up fix to MDEV-15229 --- mysql-test/suite/encryption/r/innodb-discard-import.result | 1 + mysql-test/suite/encryption/t/innodb-discard-import.test | 3 +-- storage/innobase/buf/buf0buf.cc | 2 ++ 3 files changed, 4 insertions(+), 2 deletions(-) diff --git a/mysql-test/suite/encryption/r/innodb-discard-import.result b/mysql-test/suite/encryption/r/innodb-discard-import.result index 69641be92f1..3d07706ef09 100644 --- a/mysql-test/suite/encryption/r/innodb-discard-import.result +++ b/mysql-test/suite/encryption/r/innodb-discard-import.result @@ -130,5 +130,6 @@ NOT FOUND /barfoo/ in t2.ibd NOT FOUND /tmpres/ in t3.ibd # t4 yes on expecting NOT FOUND # MDEV-15527 FIXME: Enable this test! 
+NOT FOUND /mysql/ in t4.ibd DROP PROCEDURE innodb_insert_proc; DROP TABLE t1,t2,t3,t4; diff --git a/mysql-test/suite/encryption/t/innodb-discard-import.test b/mysql-test/suite/encryption/t/innodb-discard-import.test index 9e78df813f9..e105cf82b67 100644 --- a/mysql-test/suite/encryption/t/innodb-discard-import.test +++ b/mysql-test/suite/encryption/t/innodb-discard-import.test @@ -111,8 +111,7 @@ SELECT COUNT(*) FROM t4; --let SEARCH_PATTERN=mysql --echo # t4 yes on expecting NOT FOUND -- let SEARCH_FILE=$t4_IBD ---echo # MDEV-15527 FIXME: Enable this test! -#-- source include/search_pattern_in_file.inc +-- source include/search_pattern_in_file.inc DROP PROCEDURE innodb_insert_proc; DROP TABLE t1,t2,t3,t4; diff --git a/storage/innobase/buf/buf0buf.cc b/storage/innobase/buf/buf0buf.cc index 1326ec2ff71..88705d85f1a 100644 --- a/storage/innobase/buf/buf0buf.cc +++ b/storage/innobase/buf/buf0buf.cc @@ -7402,6 +7402,8 @@ buf_page_encrypt_before_write( bpage->real_size = out_len; + /* Workaround for MDEV-15527. */ + memset(tmp + out_len, 0 , srv_page_size - out_len); #ifdef UNIV_DEBUG fil_page_type_validate(tmp); #endif From c813d9485a3d0c95c4c3085335b5e7d1031f81f8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Mon, 26 Mar 2018 13:37:45 +0300 Subject: [PATCH 134/139] Fix result after commit e27535093d1670fde6034c92366e8128b683b903 --- mysql-test/suite/encryption/r/innodb-discard-import.result | 1 - 1 file changed, 1 deletion(-) diff --git a/mysql-test/suite/encryption/r/innodb-discard-import.result b/mysql-test/suite/encryption/r/innodb-discard-import.result index 3d07706ef09..91314a77177 100644 --- a/mysql-test/suite/encryption/r/innodb-discard-import.result +++ b/mysql-test/suite/encryption/r/innodb-discard-import.result @@ -129,7 +129,6 @@ NOT FOUND /barfoo/ in t2.ibd # t3 yes on expecting NOT FOUND NOT FOUND /tmpres/ in t3.ibd # t4 yes on expecting NOT FOUND -# MDEV-15527 FIXME: Enable this test! 
NOT FOUND /mysql/ in t4.ibd DROP PROCEDURE innodb_insert_proc; DROP TABLE t1,t2,t3,t4; From dcb59373d54d2889204b859a2e50dc286b1441a2 Mon Sep 17 00:00:00 2001 From: Thirunarayanan Balathandayuthapani Date: Mon, 26 Mar 2018 17:36:04 +0530 Subject: [PATCH 135/139] - Fixing innodb.purge_secondary test case failure --- mysql-test/suite/innodb/r/purge_secondary.result | 3 ++- mysql-test/suite/innodb/t/purge_secondary.test | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/mysql-test/suite/innodb/r/purge_secondary.result b/mysql-test/suite/innodb/r/purge_secondary.result index a83e2363a20..8f20f5baacb 100644 --- a/mysql-test/suite/innodb/r/purge_secondary.result +++ b/mysql-test/suite/innodb/r/purge_secondary.result @@ -146,7 +146,8 @@ INSERT INTO t1 (a) SELECT NULL FROM t1; INSERT INTO t1 (a) SELECT NULL FROM t1; INSERT INTO t1 (a) SELECT NULL FROM t1; INSERT INTO t1 (a) SELECT NULL FROM t1; -UPDATE t1 SET c=true, l=ST_linefromtext('linestring(0 0,1 1,2 2)'); +ALTER TABLE t1 FORCE, ALGORITHM=INPLACE; +ALTER TABLE t1 FORCE, ALGORITHM=INPLACE; SELECT NAME, SUBSYSTEM FROM INFORMATION_SCHEMA.INNODB_METRICS WHERE NAME="buffer_LRU_batch_flush_total_pages" AND COUNT > 0; NAME SUBSYSTEM diff --git a/mysql-test/suite/innodb/t/purge_secondary.test b/mysql-test/suite/innodb/t/purge_secondary.test index 973c7800162..bf702b6b737 100644 --- a/mysql-test/suite/innodb/t/purge_secondary.test +++ b/mysql-test/suite/innodb/t/purge_secondary.test @@ -129,7 +129,8 @@ INSERT INTO t1 (a) SELECT NULL FROM t1; INSERT INTO t1 (a) SELECT NULL FROM t1; INSERT INTO t1 (a) SELECT NULL FROM t1; INSERT INTO t1 (a) SELECT NULL FROM t1; -UPDATE t1 SET c=true, l=ST_linefromtext('linestring(0 0,1 1,2 2)'); +ALTER TABLE t1 FORCE, ALGORITHM=INPLACE; +ALTER TABLE t1 FORCE, ALGORITHM=INPLACE; SELECT NAME, SUBSYSTEM FROM INFORMATION_SCHEMA.INNODB_METRICS WHERE NAME="buffer_LRU_batch_flush_total_pages" AND COUNT > 0; From b3cdafcb93da2e57d49f26f0846dc957458ee72c Mon Sep 17 00:00:00 2001 From: 
Elena Stepanova Date: Mon, 26 Mar 2018 19:36:39 +0300 Subject: [PATCH 136/139] Updated list of unstable tests for 10.2.14 --- mysql-test/unstable-tests | 160 ++++++++++++++++++++------------------ 1 file changed, 86 insertions(+), 74 deletions(-) diff --git a/mysql-test/unstable-tests b/mysql-test/unstable-tests index 0c564f54678..0bae37d2540 100644 --- a/mysql-test/unstable-tests +++ b/mysql-test/unstable-tests @@ -23,45 +23,40 @@ # ############################################################################## -# Based on 10.2 49bcc82686c9c305d376183ba4f7bafcbab96bc3 +# Based on 10.2 dcb59373d54d2889204b859a2e50dc286b1441a2 main.alter_table : Modified in 10.2.13 main.analyze_stmt_slow_query_log : MDEV-12237 - Wrong result main.auth_named_pipe : MDEV-14724 - System error 2 +main.case : Modified in 10.2.14 +main.check_constraint : Modified in 10.2.14 main.connect2 : MDEV-13885 - Server crash main.create : Modified in 10.2.13 -main.create_or_replace : Modified in 10.2.12 -main.cte_nonrecursive : Modified in 10.2.13 +main.cte_nonrecursive : Modified in 10.2.14 main.cte_recursive : Modified in 10.2.13 -main.ctype_latin1 : Modified in 10.2.12 -main.ctype_like_range : Modified in 10.2.12 -main.ctype_ucs2_uca : Modified in 10.2.12 -main.ctype_utf16_uca : Modified in 10.2.12 -main.ctype_utf32_uca : Modified in 10.2.12 -main.ctype_utf8 : Modified in 10.2.12 -main.ctype_utf8_uca : Modified in 10.2.12 -main.ctype_utf8mb4 : Modified in 10.2.12 -main.ctype_utf8mb4_uca : Modified in 10.2.12 +main.ctype_latin1 : Modified in 10.2.14 +main.ctype_utf8 : Modified in 10.2.14 main.derived : Modified in 10.2.13 main.derived_cond_pushdown : Modified in 10.2.13 main.distinct : MDEV-14194 - Crash +main.drop_bad_db_type : MDEV-15676 - Wrong result main.dyncol : Modified in 10.2.13 main.events_2 : MDEV-13277 - Crash main.events_slowlog : MDEV-12821 - Wrong result +main.fast_prefix_index_fetch_innodb : Modified in 10.2.14 main.fulltext : Modified in 10.2.13 main.func_concat : Modified in 
10.2.13 +main.func_date_add : Modified in 10.2.14 main.func_isnull : Modified in 10.2.13 -main.func_json : Modified in 10.2.12 -main.func_misc : Modified in 10.2.12 -main.func_set : Modified in 10.2.12 -main.func_str : Modified in 10.2.12 +main.func_json : Modified in 10.2.14 +main.func_str : Modified in 10.2.13 +main.func_time : Modified in 10.2.14 main.gis-rtree : Modified in 10.2.13 -main.group_by : Modified in 10.2.12 -main.having : Modified in 10.2.12 +main.having : Modified in 10.2.14 main.index_merge_innodb : MDEV-7142 - Plan mismatch main.innodb_mysql_lock : MDEV-7861 - Wrong result main.join_cache : Modified in 10.2.13 -main.join_outer : Modified in 10.2.12 +main.join_outer : Modified in 10.2.14 main.kill-2 : MDEV-13257 - Wrong result main.kill_processlist-6619 : MDEV-10793 - Wrong result main.log_slow : MDEV-13263 - Wrong result @@ -81,15 +76,22 @@ main.openssl_1 : MDEV-13492 - Unknown SSL error main.order_by : Modified in 10.2.13 main.order_by_optimizer_innodb : MDEV-10683 - Wrong result main.partition : Modified in 10.2.13 +main.partition_debug_sync : MDEV-15669 - Deadlock found when trying to get lock main.partition_innodb : Modified in 10.2.13 main.ps : MDEV-11017 - Wrong result; modified in 10.2.13 +main.ps_qc_innodb : Added in 10.2.14 +main.query_cache : Modified in 10.2.14 main.query_cache_debug : MDEV-15281 - Query cache is disabled; modified in 10.2.13 main.range_vs_index_merge_innodb : MDEV-15283 - Server has gone away main.repair : Modified in 10.2.13 +main.select : MDEV-15430 - Wrong result with clang-4 +main.select_jcl6 : MDEV-15430 - Wrong result with clang-4 +main.select_pkeycache : MDEV-15430 - Wrong result with clang-4 main.set_statement : MDEV-13183 - Wrong result +main.shutdown : Modified in 10.2.14 main.shm : MDEV-12727 - Mismatch, ERROR 2013 main.show_explain : MDEV-10674 - Wrong result code -main.sp : MDEV-7866 - Mismatch; modified in 10.2.13 +main.sp : MDEV-7866 - Mismatch; modified in 10.2.14 main.ssl_ca : MDEV-10895 - SSL 
connection error on Power main.ssl_cert_verify : MDEV-13735 - Server crash main.ssl_connect : MDEV-13492 - Unknown SSL error @@ -97,18 +99,28 @@ main.ssl_timeout : MDEV-11244 - Crash main.stat_tables_par : MDEV-13266 - Wrong result main.status : MDEV-13255 - Wrong result main.subselect : Modified in 10.2.13 +main.subselect4 : Modified in 10.2.14 main.subselect_innodb : MDEV-10614 - Wrong result +main.subselect_mat : Modified in 10.2.14 main.symlink-myisam-11902 : MDEV-15098 - Error 40 from storage engine main.tc_heuristic_recover : MDEV-14189 - Wrong result main.thread_id_overflow : Added in 10.2.13 main.type_blob : MDEV-15195 - Wrong result +main.type_datetime_hires : MDEV-15430 - Wrong result with clang-4 +main.type_decimal : Modified in 10.2.14 +main.type_float : MDEV-15430 - Wrong result with clang-4 +main.type_temporal_innodb : Modified in 10.2.14 +main.type_time : Modified in 10.2.14 main.type_time_6065 : Modified in 10.2.13 +main.type_time_hires : MDEV-15430 - Wrong result with clang-4 +main.type_timestamp_hires : MDEV-15430 - Wrong result with clang-4 main.union : Modified in 10.2.13 main.update_innodb : Modified in 10.2.13 main.userstat : MDEV-12904 - SSL errors -main.view : Modified in 10.2.12 +main.view : Modified in 10.2.14 +main.warnings : Modified in 10.2.14 main.win : Modified in 10.2.13 -main.xa : Modified in 10.2.13 +main.xa : Modified in 10.2.14 main.xml : Modified in 10.2.13 #---------------------------------------------------------------- @@ -120,6 +132,7 @@ archive.mysqlhotcopy_archive : MDEV-10995 - Hang on debug binlog.binlog_commit_wait : MDEV-10150 - Mismatch binlog.binlog_flush_binlogs_delete_domain : MDEV-14431 - Wrong exit code +binlog.binlog_stm_datetime_ranges_mdev15289 : Added in 10.2.14 binlog.binlog_xa_recover : MDEV-8517 - Extra checkpoint #---------------------------------------------------------------- @@ -139,34 +152,45 @@ binlog_encryption.rpl_sync : MDEV-13830 - Assertion failure 
#---------------------------------------------------------------- +connect.jdbc_postgresql : Modified in 10.2.14 +connect.json_udf : Modified in 10.2.14 connect.pivot : MDEV-14803 - Failed to discover table -connect.vcol : MDEV-12374 - Fails on Windows +connect.tbl_thread : Modified in 10.2.14 +connect.vcol : MDEV-12374 - Fails on Windows; modified in 10.2.14 + +#---------------------------------------------------------------- + +disks.disks : Added in 10.2.14 #---------------------------------------------------------------- encryption.create_or_replace : MDEV-9359, MDEV-13516 - Assertion failure, MDEV-12694 - Timeout encryption.debug_key_management : MDEV-13841 - Timeout; modified in 10.2.13 -encryption.encrypt_and_grep : MDEV-13765 - Wrong result; modified in 10.2.13 +encryption.encrypt_and_grep : MDEV-13765 - Wrong result; modified in 10.2.14 encryption.innochecksum : MDEV-13644 - Assertion failure -encryption.innodb-bad-key-change : Modified in 10.2.13 +encryption.innodb-bad-key-change : Modified in 10.2.14 encryption.innodb-compressed-blob : MDEV-14728 - Unable to get certificate encryption.innodb-discard-import-change : MDEV-12632 - Valgrind encryption.innodb_encrypt_log : MDEV-13725 - Wrong result -encryption.innodb_encryption : Modified in 10.2.13 +encryption.innodb_encryption : MDEV-15675 - Timeout; modified in 10.2.14 encryption.innodb-encryption-alter : MDEV-13566 - Lock wait timeout encryption.innodb_encryption_discard_import : MDEV-12903 - Wrong result, MDEV-14045 - Error 192 -encryption.innodb_encryption_filekeys : MDEV-9962 - Timeout -encryption.innodb_encryption-page-compression : MDEV-14814 - Timeout in wait condition +encryption.innodb_encryption_filekeys : MDEV-15673 - Timeout; modified in 10.2.14 +encryption.innodb_encryption-page-compression : MDEV-12630 - crash or assertion failure; modified in 10.2.14 encryption.innodb_encryption_tables : MDEV-9359 - Assertion failure encryption.innodb-first-page-read : MDEV-14356 - Timeout in wait condition 
encryption.innodb_lotoftables : MDEV-11531 - Operation on a dropped tablespace encryption.innodb-missing-key : MDEV-9359 - assertion failure encryption.innodb-redo-badkey : MDEV-13893 - Page cannot be decrypted -encryption.innodb-spatial-index : MDEV-13746 - Wrong result +encryption.innodb-redo-nokeys : Modified in 10.2.14 +encryption.innodb_scrub_background : Modified in 10.2.14 +encryption.innodb-spatial-index : MDEV-13746 - Wrong result; modified in 10.2.14 encryption.tempfiles : Modified in 10.2.13 #---------------------------------------------------------------- +engines/iuds.update_time : Modified in 10.2.14 + engines/rr_trx.* : MDEV-10998 - Not maintained #---------------------------------------------------------------- @@ -188,8 +212,6 @@ galera_3nodes.* : Suite is not stable yet #---------------------------------------------------------------- gcol.innodb_virtual_debug_purge : Modified in 10.2.13 -gcol.innodb_virtual_index : Modified in 10.2.12 -gcol.innodb_virtual_stats : Added in 10.2.12 #---------------------------------------------------------------- @@ -197,13 +219,16 @@ innodb.101_compatibility : MDEV-13891 - Wrong result innodb.alter_copy : Added in 10.2.13 innodb.autoinc_persist : MDEV-15282 - Assertion failure innodb.deadlock_detect : Modified in 10.2.13 +innodb.default_row_format_alter : Added in 10.2.14 +innodb.default_row_format_compatibility : Added in 10.2.14 +innodb.default_row_format_create : Added in 10.2.14 innodb.doublewrite : MDEV-12905 - Server crash +innodb.file_format_defaults : Added in 10.2.14 innodb.foreign_key : Modified in 10.2.13 innodb.group_commit_crash : MDEV-14191 - InnoDB registration failed innodb.group_commit_crash_no_optimize_thread : MDEV-13830 - Assertion failure innodb.innodb : Modified in 10.2.13 innodb.innodb-alter-tempfile : MDEV-15285 - Table already exists -innodb.innodb-autoinc : Modified in 10.2.12 innodb.innodb_bug14147491 : MDEV-11808 - Index is corrupt; modified in 10.2.13 innodb.innodb_bug30423 : MDEV-7311 
- Wrong result innodb.innodb_bug48024 : MDEV-14352 - Assertion failure @@ -214,12 +239,11 @@ innodb.innodb_bulk_create_index_replication : MDEV-15273 - Slave failed to start innodb.innodb_corrupt_bit : Modified in 10.2.13 innodb.innodb_defrag_stats_many_tables : MDEV-14198 - Table is full innodb.innodb-get-fk : MDEV-13276 - Server crash -innodb.innodb-index-debug : Modified in 10.2.12 innodb.innodb-index-online : MDEV-14809 - Cannot save statistics; modified in 10.2.13 innodb.innodb_information_schema : MDEV-8851 - Wrong result innodb.innodb-lru-force-no-free-page : Added in 10.2.13 -innodb.innodb_max_recordsize_32k : MDEV-14801 - Operation failed; modified in 10.2.12 -innodb.innodb_max_recordsize_64k : MDEV-15203 - Wrong result; modified in 10.2.12 +innodb.innodb_max_recordsize_32k : MDEV-14801 - Operation failed +innodb.innodb_max_recordsize_64k : MDEV-15203 - Wrong result innodb.innodb-page_compression_default : MDEV-13644 - Assertion failure innodb.innodb-page_compression_lzma : MDEV-14353 - Wrong result innodb.innodb-replace-debug : Modified in 10.2.13 @@ -230,24 +254,24 @@ innodb.innodb_sys_semaphore_waits : MDEV-10331 - Semaphore wait innodb.innodb-wl5522-debug : MDEV-14200 - Wrong errno innodb.innodb_zip_innochecksum2 : MDEV-13882 - Extra warnings innodb.innodb_zip_innochecksum3 : MDEV-14486 - Resource temporarily unavailable -innodb.lock_deleted : Added in 10.2.12 innodb.log_corruption : MDEV-13251 - Wrong result; modified in 10.2.13 innodb.log_data_file_size : MDEV-14204 - Server failed to start -innodb.log_file_name : MDEV-14193 - Exception -innodb.log_file_size : MDEV-15202 - Can't initiate database recovery; modified in 10.2.13 +innodb.log_file_name : MDEV-14193 - Exception; MDEV-15325 - Assertion failure +innodb.log_file_size : MDEV-15668 - Not found pattern; modified in 10.2.13 innodb.mvcc : Added in 10.2.13 -innodb.purge_secondary : Added in 10.2.12 +innodb.purge_secondary : MDEV-15681 - Wrong result; modified in 10.2.14 innodb.purge_thread_shutdown 
: MDEV-13792 - Wrong result innodb.read_only_recovery : MDEV-13886 - Server crash; modified in 10.2.13 -innodb.recovery_shutdown : Modified in 10.2.13 +innodb.read_only_recover_committed : Added in 10.2.14 +innodb.recovery_shutdown : MDEV-15671 - Checksum mismatch in datafile; modified in 10.2.13 +innodb.restart : Added in 10.2.14 innodb.row_format_redundant : MDEV-15192 - Trying to access missing tablespace innodb.table_definition_cache_debug : MDEV-14206 - Extra warning; opt file modified in 10.2.13 innodb.table_flags : MDEV-13572 - Wrong result innodb.temporary_table : MDEV-13265 - Wrong result innodb.truncate_inject : Added in 10.2.13 -innodb.truncate_restart : Modified in 10.2.12 innodb.update-cascade : Added in 10.2.13 -innodb.update_time : MDEV-14804 - Wrong result; modified in 10.2.12 +innodb.update_time : MDEV-14804 - Wrong result innodb.xa_recovery : MDEV-15279 - mysqld got exception innodb_fts.fulltext2 : MDEV-14727 - Long semaphore wait @@ -260,7 +284,6 @@ innodb_gis.bug17057168 : Re-enabled in 10.2.13 innodb_gis.geometry : Modified in 10.2.13 innodb_gis.gis_split_inf : Modified in 10.2.13 innodb_gis.innodb_gis_rtree : Added in 10.2.13 -innodb_gis.kill_server : Modified in 10.2.12 innodb_gis.rtree_compress : Modified in 10.2.13 innodb_gis.rtree_compress2 : Modified in 10.2.13 innodb_gis.rtree_concurrent_srch : MDEV-15284 - Wrong result with embedded; modified in 10.2.13 @@ -273,7 +296,7 @@ innodb_gis.rtree_search : Modified in 10.2.13 innodb_gis.rtree_split : MDEV-14208 - Too many arguments; modified in 10.2.13 innodb_gis.rtree_undo : MDEV-14456 - Timeout in include file; modified in 10.2.13 innodb_gis.tree_search : Re-enabled in 10.2.13 -innodb_gis.types : Modified in 10.2.13 +innodb_gis.types : MDEV-15679 - Table is marked as crashed; modified in 10.2.13 innodb_zip.cmp_per_index : MDEV-14490 - Table is marked as crashed innodb_zip.innochecksum_3 : MDEV-13279 - Extra warnings @@ -285,6 +308,7 @@ innodb_zip.wl6501_scale_1 : MDEV-13254 - Timeout, 
MDEV-14104 - Error #---------------------------------------------------------------- +maria.dynamic : Added in 10.2.14 maria.insert_select : MDEV-12757 - Timeout maria.lock : Modified in 10.2.13 maria.maria : MDEV-14430 - Extra warning; modified in 10.2.13 @@ -296,19 +320,19 @@ maria.repair : Added in 10.2.13 mariabackup.apply-log-only : MDEV-14192 - Assertion failure; modified in 10.2.13 mariabackup.apply-log-only-incr : MDEV-14192 - Assertion failure; modified in 10.2.13 mariabackup.data_directory : MDEV-15270 - Error on exec -mariabackup.full_backup : MDEV-13889 - Timeout +mariabackup.extra_lsndir : Added in 10.2.14 mariabackup.huge_lsn : Modified in 10.2.13 mariabackup.incremental_backup : MDEV-14192 - Assertion failure -mariabackup.incremental_encrypted : MDEV-14188 - Wrong result -mariabackup.log_checksum_mismatch : Added in 10.2.12 +mariabackup.incremental_encrypted : MDEV-14188 - Wrong result, MDEV-15667 - timeout mariabackup.mdev-14447 : MDEV-15201 - Timeout mariabackup.missing_ibd : Added in 10.2.13 mariabackup.partial_exclude : MDEV-15270 - Error on exec +mariabackup.undo_space_id : Added in 10.2.14 +mariabackup.unsupported_redo : MDEV-15682 - Wrong result code; added in 10.2.14 mariabackup.xbstream : MDEV-14192 - Crash -mariabackup.xb_aws_key_management : MDEV-15276 - Wrong result; modified in 10.2.13 +mariabackup.xb_aws_key_management : MDEV-15680 - Error: xtrabackup_copy_logfile() failed; modified in 10.2.13 mariabackup.xb_page_compress : MDEV-14810 - status: 1, errno: 11 mariabackup.xb_compressed_encrypted : MDEV-14812 - Segmentation fault -mariabackup.xb_file_key_management : MDEV-15277 - Assertion failure #---------------------------------------------------------------- @@ -323,9 +347,8 @@ multi_source.simple : MDEV-4633 - Wrong result #---------------------------------------------------------------- -parts.partition_alter_innodb : Added in 10.2.12 -parts.partition_alter_maria : Modified in 10.2.12 -parts.partition_alter_myisam : Added in 10.2.12 
+parts.partition_alter_maria : Modified in 10.2.14 +parts.partition_alter_myisam : Modified in 10.2.14 parts.partition_auto_increment_maria : MDEV-14430 - Extra warning parts.partition_basic_symlink_innodb : Modified in 10.2.13 parts.partition_debug_innodb : MDEV-10891 - Can't create UNIX socket; MDEV-15095 - Table doesn't exist @@ -344,8 +367,8 @@ perfschema.hostcache_ipv4_addrinfo_again_allow : MDEV-12759 - Crash perfschema.hostcache_ipv6_addrinfo_again_allow : MDEV-12752 - Crash perfschema.hostcache_ipv6_addrinfo_bad_allow : MDEV-13260 - Crash perfschema.hostcache_ipv6_ssl : MDEV-10696 - Crash -perfschema.misc : Modified in 10.2.12 perfschema.setup_actors : MDEV-10679 - Crash +perfschema.socket_connect : MDEV-15677 - Wrong result perfschema.socket_summary_by_event_name_func : MDEV-10622 - Wrong result perfschema.stage_mdl_procedure : MDEV-11545 - Missing row perfschema.threads_mysql : MDEV-10677 - Wrong result @@ -356,25 +379,20 @@ perfschema_stress.* : MDEV-10996 - Not maintained #---------------------------------------------------------------- -plugins.binlog-simple_plugin_check : Added in 10.2.12 plugins.feedback_plugin_send : MDEV-7932, MDEV-11118 - Connection problems and such +plugins.server_audit : Modified in 10.2.14 plugins.thread_pool_server_audit : MDEV-14295 - Wrong result #---------------------------------------------------------------- -rocksdb.* : MyRocks is beta-quality and tests are unstable -rocksdb_sys_vars.* : MyRocks is beta-quality and tests are unstable - -#---------------------------------------------------------------- - -roles.flush_roles-12366 : Added in 10.2.12 -roles.set_role-13655 : Added in 10.2.12 +rocksdb.* : Tests are unstable +rocksdb_sys_vars.* : Tests are unstable #---------------------------------------------------------------- rpl.rpl_binlog_errors : MDEV-12742 - Crash rpl.rpl_binlog_index : MDEV-9501 - Failed registering on master -rpl.rpl_ctype_latin1 : MDEV-14813 - Wrong result on Mac; added in 10.2.12 
+rpl.rpl_ctype_latin1 : MDEV-14813 - Wrong result on Mac rpl.rpl_domain_id_filter_io_crash : MDEV-12729 - Timeout in include file, MDEV-13677 - Server crash rpl.rpl_domain_id_filter_restart : MDEV-10684 - Wrong result rpl.rpl_extra_col_master_myisam : MDEV-14203 - Extra warning @@ -390,18 +408,15 @@ rpl.rpl_mixed_mixing_engines : MDEV-14489 - Sync slave with master faile rpl.rpl_non_direct_mixed_mixing_engines : MDEV-14489 - Sync slave with master failed rpl.rpl_non_direct_row_mixing_engines : MDEV-14491 - Long semaphore wait rpl.rpl_non_direct_stm_mixing_engines : MDEV-14489 - Failed sync_slave_with_master -rpl.rpl_parallel : MDEV-12730 - Assertion failure rpl.rpl_parallel_conflicts : MDEV-15272 - Server crash rpl.rpl_parallel_mdev6589 : MDEV-12979 - Assertion failure rpl.rpl_parallel_optimistic : MDEV-15278 - Failed to sync with master -rpl.rpl_parallel_optimistic_nobinlog : MDEV-12746 - Timeouts, mismatch -rpl.rpl_parallel_retry : MDEV-11119 - Crash +rpl.rpl_parallel_optimistic_nobinlog : MDEV-15278 - Failed to sync with master +rpl.rpl_parallel_retry : MDEV-11119 - Crash; modified in 10.2.14 rpl.rpl_parallel_temptable : MDEV-10356 - Crash rpl.rpl_row_drop_create_temp_table : MDEV-14487 - Wrong result rpl.rpl_row_img_eng_min : MDEV-13875 - diff_files failed rpl.rpl_row_index_choice : MDEV-15196 - Slave crash -rpl.rpl_row_log : Included test modified in 10.2.12 -rpl.rpl_row_log_innodb : Included test modified in 10.2.12 rpl.rpl_row_mixing_engines : MDEV-14491 - Long semaphore wait rpl.rpl_semi_sync : MDEV-11220 - Wrong result rpl.rpl_semi_sync_after_sync : MDEV-14366 - Wrong result @@ -416,7 +431,6 @@ rpl.rpl_slave_load_tmpdir_not_exist : MDEV-14203 - Extra warning rpl.rpl_slow_query_log : MDEV-13250 - Test abort rpl.rpl_sp_effects : MDEV-13249 - Crash rpl.rpl_start_stop_slave : MDEV-13567 - Sync slave timeout -rpl.rpl_stm_log : Included test modified in 10.2.12 rpl.rpl_stm_mixing_engines : MDEV-14489 - Sync slave with master failed rpl.rpl_stm_multi_query : 
MDEV-9501 - Failed registering on master rpl.rpl_stm_relay_ign_space : MDEV-14360 - Test assertion @@ -454,7 +468,7 @@ sys_vars.innodb_print_lock_wait_timeout_info_basic : Added in 10.2.13 sys_vars.rpl_init_slave_func : MDEV-10149 - Test assertion sys_vars.slow_query_log_func : MDEV-14273 - Wrong result sys_vars.thread_cache_size_func : MDEV-11775 - Wrong result -sys_vars.wsrep_on_basic : Opt file added in 10.2.12 +sys_vars.wsrep_sst_receive_address_basic : Modified in 10.2.14 #---------------------------------------------------------------- @@ -480,15 +494,12 @@ tokudb_alter_table.hcad_all_add2 : MDEV-15269 - Timeout tokudb_bugs.xa : MDEV-11804 - Lock wait timeout -tokudb_mariadb.mdev6657 : MDEV-12737 - Mismatch or valgrind - -tokudb-rpl.rpl_tokudb_row_log : Included test modified in 10.2.12 -tokudb-rpl.rpl_tokudb_stm_log : Included test modified in 10.2.12 - tokudb_backup.* : MDEV-11001 - Missing include file tokudb_sys_vars.* : MDEV-11001 - Missing include file tokudb_rpl.* : MDEV-11001 - Missing include file +tokudb_mariadb.mdev6657 : Modified in 10.2.14 + tokudb_parts.nonflushing_analyze_debug : Added in 10.2.13 tokudb_parts.partition_alter4_tokudb : MDEV-12640 - Lost connection @@ -501,6 +512,7 @@ unit.conc_basic-t : MDEV-15286 - not ok 7 - test_reconnect_maxp unit.conc_misc : MDEV-14811 - not ok 12 - test_conc49 unit.conc_ps_bugs : MDEV-13252 - not ok 44 test_bug4236 unit.lf : MDEV-12897 - Signal 11 thrown +unit.my_atomic : MDEV-15670 - Signal 11 thrown #---------------------------------------------------------------- From 60438451c3aaa14d7979e2d2b6976da240b4949e Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Mon, 26 Mar 2018 21:22:40 +0300 Subject: [PATCH 137/139] MDEV-14843: Assertion `s_tx_list.size() == 0' failed in myrocks::Rdb_transaction::term_mutex When the plugin is unloaded, walk the s_trx_list and delete the left over Rdb_transaction objects. 
It is responsibility of the SQL layer to make sure that the storage engine has no open tables when the plugin is being unloaded. --- storage/rocksdb/ha_rocksdb.cc | 43 +++++++++++++++++++ .../rocksdb/r/mariadb_plugin.result | 12 ++++++ .../rocksdb/t/mariadb_plugin-master.opt | 1 + .../mysql-test/rocksdb/t/mariadb_plugin.test | 21 +++++++++ 4 files changed, 77 insertions(+) create mode 100644 storage/rocksdb/mysql-test/rocksdb/r/mariadb_plugin.result create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/mariadb_plugin-master.opt create mode 100644 storage/rocksdb/mysql-test/rocksdb/t/mariadb_plugin.test diff --git a/storage/rocksdb/ha_rocksdb.cc b/storage/rocksdb/ha_rocksdb.cc index 5e4152f7dcf..e77de87cf2c 100644 --- a/storage/rocksdb/ha_rocksdb.cc +++ b/storage/rocksdb/ha_rocksdb.cc @@ -4416,6 +4416,49 @@ static int rocksdb_done_func(void *const p) { error = 1; } + /* + MariaDB: When the plugin is unloaded with UNINSTALL SONAME command, some + connections may still have Rdb_transaction objects. + + These objects are not genuine transactions (as SQL layer makes sure that + a plugin that is being unloaded has no open tables), they are empty + Rdb_transaction objects that were left there to save on object + creation/deletion. + + Go through the list and delete them. + */ + { + class Rdb_trx_deleter: public Rdb_tx_list_walker { + public: + std::set rdb_trxs; + + void process_tran(const Rdb_transaction *const tx) override { + /* + Check if the transaction is really empty. We only check + non-WriteBatch-based transactions, because there is no easy way to + check WriteBatch-based transactions. 
+ */ + if (!tx->is_writebatch_trx()) { + const auto tx_impl = static_cast(tx); + DBUG_ASSERT(tx_impl); + if (tx_impl->get_rdb_trx()) + DBUG_ASSERT(0); + } + rdb_trxs.insert((Rdb_transaction*)tx); + }; + } deleter; + + Rdb_transaction::walk_tx_list(&deleter); + + for (std::set::iterator it= deleter.rdb_trxs.begin(); + it != deleter.rdb_trxs.end(); + ++it) + { + // When a transaction is deleted, it removes itself from s_tx_list. + delete *it; + } + } + /* destructors for static objects can be called at _exit(), but we want to free the memory at dlclose() diff --git a/storage/rocksdb/mysql-test/rocksdb/r/mariadb_plugin.result b/storage/rocksdb/mysql-test/rocksdb/r/mariadb_plugin.result new file mode 100644 index 00000000000..bb06c4be2e5 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/r/mariadb_plugin.result @@ -0,0 +1,12 @@ +# +# MDEV-14843: Assertion `s_tx_list.size() == 0' failed in myrocks::Rdb_transaction::term_mutex +# +INSTALL SONAME 'ha_rocksdb'; +CREATE TABLE t1 (i INT) ENGINE=RocksDB; +insert into t1 values (1); +connect con1,localhost,root,,; +connection con1; +insert into test.t1 values (1); +connection default; +DROP TABLE t1; +UNINSTALL SONAME 'ha_rocksdb'; diff --git a/storage/rocksdb/mysql-test/rocksdb/t/mariadb_plugin-master.opt b/storage/rocksdb/mysql-test/rocksdb/t/mariadb_plugin-master.opt new file mode 100644 index 00000000000..0f0a3ef33e5 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/mariadb_plugin-master.opt @@ -0,0 +1 @@ +--default-storage-engine=myisam --plugin-load='' --ignore-db-dirs=#rocksdb diff --git a/storage/rocksdb/mysql-test/rocksdb/t/mariadb_plugin.test b/storage/rocksdb/mysql-test/rocksdb/t/mariadb_plugin.test new file mode 100644 index 00000000000..303c4a5e0f9 --- /dev/null +++ b/storage/rocksdb/mysql-test/rocksdb/t/mariadb_plugin.test @@ -0,0 +1,21 @@ +--source include/have_log_bin.inc +--source include/have_binlog_format_row.inc + +--echo # +--echo # MDEV-14843: Assertion `s_tx_list.size() == 0' failed in 
myrocks::Rdb_transaction::term_mutex +--echo # + +INSTALL SONAME 'ha_rocksdb'; + +CREATE TABLE t1 (i INT) ENGINE=RocksDB; +insert into t1 values (1); + +connect (con1,localhost,root,,); +connection con1; +insert into test.t1 values (1); + +connection default; + +# Cleanup +DROP TABLE t1; +UNINSTALL SONAME 'ha_rocksdb'; From 73af8af094d65d1d8b8dfcdabf72e825e7cb7de5 Mon Sep 17 00:00:00 2001 From: Thirunarayanan Balathandayuthapani Date: Tue, 27 Mar 2018 13:47:56 +0530 Subject: [PATCH 138/139] MDEV-15325 Incomplete validation of missing tablespace during recovery Problem: ======= During validation of missing tablespace, missing tablespace id is being compared with hash table of redo logs (recv_sys->addr_hash). But if the hash table ran out of memory then there is a possibility that it will not contain the redo logs of all tablespace. In that case, Server will load the InnoDB even though there is a missing tablespace. Solution: ======== If the recv_sys->addr_hash hash table ran out of memory then InnoDB needs to scan the remaining redo log again to validate the missing tablespace. 
--- mysql-test/suite/innodb/r/innodb-index.result | 17 ++ mysql-test/suite/innodb/t/innodb-index.test | 48 ++++ storage/innobase/include/log0recv.h | 3 + storage/innobase/log/log0recv.cc | 245 ++++++++++++------ 4 files changed, 230 insertions(+), 83 deletions(-) diff --git a/mysql-test/suite/innodb/r/innodb-index.result b/mysql-test/suite/innodb/r/innodb-index.result index 54ad4e8a927..df27769b810 100644 --- a/mysql-test/suite/innodb/r/innodb-index.result +++ b/mysql-test/suite/innodb/r/innodb-index.result @@ -1848,3 +1848,20 @@ create table t1(o1 int, o2 int, o3 int, primary key(o1,o2,o3)) engine = innodb; insert into t1 values(1,1,2),(2,2,1); alter table t1 drop primary key, add primary key(o1), lock=none; drop table t1; +# +# MDEV-15325 Incomplete validation of missing tablespace during recovery +# +CREATE TABLE t1(f1 INT PRIMARY KEY)ENGINE=InnoDB; +CREATE TABLE t2(f1 INT PRIMARY KEY)ENGINE=InnoDB; +# Kill the server +# Wrong space_id in a dirty file and a missing file +SELECT * FROM INFORMATION_SCHEMA.ENGINES +WHERE engine = 'innodb' +AND support IN ('YES', 'DEFAULT', 'ENABLED'); +ENGINE SUPPORT COMMENT TRANSACTIONS XA SAVEPOINTS +# Restore t1 and t2 +SELECT * FROM t1; +f1 +SELECT * FROM t2; +f1 +DROP TABLE t1, t2; diff --git a/mysql-test/suite/innodb/t/innodb-index.test b/mysql-test/suite/innodb/t/innodb-index.test index 721808c038c..8b9d2068499 100644 --- a/mysql-test/suite/innodb/t/innodb-index.test +++ b/mysql-test/suite/innodb/t/innodb-index.test @@ -1076,3 +1076,51 @@ drop table t1; # no skip sort cases --source suite/innodb/include/alter_table_pk_no_sort.inc + +--echo # +--echo # MDEV-15325 Incomplete validation of missing tablespace during recovery +--echo # + +--source include/no_checkpoint_start.inc +CREATE TABLE t1(f1 INT PRIMARY KEY)ENGINE=InnoDB; + +CREATE TABLE t2(f1 INT PRIMARY KEY)ENGINE=InnoDB; + +--let CLEANUP_IF_CHECKPOINT=DROP TABLE t1, t2; +--source include/no_checkpoint_end.inc + +let SEARCH_FILE= $MYSQLTEST_VARDIR/log/mysqld.1.err; +let 
$check_no_innodb=SELECT * FROM INFORMATION_SCHEMA.ENGINES +WHERE engine = 'innodb' +AND support IN ('YES', 'DEFAULT', 'ENABLED'); + +--echo # Wrong space_id in a dirty file and a missing file + +--copy_file $MYSQLD_DATADIR/test/t1.ibd $MYSQLD_DATADIR/test/t0.ibd +--move_file $MYSQLD_DATADIR/test/t2.ibd $MYSQLD_DATADIR/test/t1.ibd + +--source include/start_mysqld.inc +--eval $check_no_innodb +--source include/shutdown_mysqld.inc + +--echo # Restore t1 and t2 + +--move_file $MYSQLD_DATADIR/test/t1.ibd $MYSQLD_DATADIR/test/t2.ibd +--move_file $MYSQLD_DATADIR/test/t0.ibd $MYSQLD_DATADIR/test/t1.ibd + +--source include/start_mysqld.inc + +SELECT * FROM t1; +SELECT * FROM t2; + +DROP TABLE t1, t2; + +--disable_query_log + +call mtr.add_suppression("InnoDB: Tablespace .* was not found at .*t[12].ibd."); +call mtr.add_suppression("InnoDB: Set innodb_force_recovery=1 to ignore this and to permanently lose all changes to the tablespace"); +call mtr.add_suppression("InnoDB: Plugin initialization aborted"); +call mtr.add_suppression("Plugin 'InnoDB' init function returned error"); +call mtr.add_suppression("Plugin 'InnoDB' registration as a STORAGE ENGINE failed"); + +--enable_query_log diff --git a/storage/innobase/include/log0recv.h b/storage/innobase/include/log0recv.h index 065326ead88..6dcaaedd765 100644 --- a/storage/innobase/include/log0recv.h +++ b/storage/innobase/include/log0recv.h @@ -291,6 +291,9 @@ struct recv_sys_t{ recv_dblwr_t dblwr; + /** Lastly added LSN to the hash table of log records. */ + lsn_t last_stored_lsn; + /** Determine whether redo log recovery progress should be reported. 
@param[in] time the current time @return whether progress should be reported diff --git a/storage/innobase/log/log0recv.cc b/storage/innobase/log/log0recv.cc index b41ea4dabee..80cf9f1ac4b 100644 --- a/storage/innobase/log/log0recv.cc +++ b/storage/innobase/log/log0recv.cc @@ -141,12 +141,23 @@ struct file_name_t { std::string name; /** Tablespace object (NULL if not valid or not found) */ fil_space_t* space; - /** Whether the tablespace has been deleted */ - bool deleted; + + /** Tablespace status. */ + enum fil_status { + /** Normal tablespace */ + NORMAL, + /** Deleted tablespace */ + DELETED, + /** Missing tablespace */ + MISSING + }; + + /** Status of the tablespace */ + fil_status status; /** Constructor */ - file_name_t(std::string name_, bool deleted_) : - name(name_), space(NULL), deleted (deleted_) {} + file_name_t(std::string name_, bool deleted) : + name(name_), space(NULL), status(deleted ? DELETED: NORMAL) {} }; /** Map of dirty tablespaces during recovery */ @@ -202,8 +213,8 @@ fil_name_process( if (deleted) { /* Got MLOG_FILE_DELETE */ - if (!p.second && !f.deleted) { - f.deleted = true; + if (!p.second && f.status != file_name_t::DELETED) { + f.status = file_name_t::DELETED; if (f.space != NULL) { fil_space_free(space_id, false); f.space = NULL; @@ -226,7 +237,7 @@ fil_name_process( if (f.space == NULL || f.space == space) { f.name = fname.name; f.space = space; - f.deleted = false; + f.status = file_name_t::NORMAL; } else { ib::error() << "Tablespace " << space_id << " has been found in two places: '" @@ -2905,6 +2916,12 @@ recv_scan_log_recs( if (*store_to_hash != STORE_NO && mem_heap_get_size(recv_sys->heap) > available_memory) { + + DBUG_PRINT("ib_log", ("Ran out of memory and last " + "stored lsn " LSN_PF, + recv_sys->recovered_lsn)); + + recv_sys->last_stored_lsn = recv_sys->recovered_lsn; *store_to_hash = STORE_NO; } @@ -3037,15 +3054,99 @@ recv_init_missing_space(dberr_t err, const recv_spaces_t::const_iterator& i) return(err); } +/** 
Report the missing tablespace and discard the redo logs for the deleted +tablespace. +@param[in] rescan rescan of redo logs is needed + if hash table ran out of memory +@param[out] missing_tablespace missing tablespace exists or not +@return error code or DB_SUCCESS. */ +static MY_ATTRIBUTE((warn_unused_result)) +dberr_t +recv_validate_tablespace(bool rescan, bool& missing_tablespace) +{ + dberr_t err = DB_SUCCESS; + + for (ulint h = 0; h < hash_get_n_cells(recv_sys->addr_hash); h++) { + + for (recv_addr_t* recv_addr = static_cast( + HASH_GET_FIRST(recv_sys->addr_hash, h)); + recv_addr != 0; + recv_addr = static_cast( + HASH_GET_NEXT(addr_hash, recv_addr))) { + + const ulint space = recv_addr->space; + + if (is_predefined_tablespace(space)) { + continue; + } + + recv_spaces_t::iterator i + = recv_spaces.find(space); + ut_ad(i != recv_spaces.end()); + + switch(i->second.status) { + case file_name_t::MISSING: + err = recv_init_missing_space(err, i); + i->second.status = file_name_t::DELETED; + case file_name_t::DELETED: + recv_addr->state = RECV_DISCARDED; + case file_name_t::NORMAL: + break; + default: + ut_ad(0); + } + } + } + + if (err != DB_SUCCESS) { + return(err); + } + + /* When rescan is not needed then recv_sys->addr_hash will have + all space id belongs to redo log. If rescan is needed and + innodb_force_recovery > 0 then InnoDB can ignore missing tablespace. */ + for (recv_spaces_t::iterator i = recv_spaces.begin(); + i != recv_spaces.end(); i++) { + + if (i->second.status != file_name_t::MISSING) { + continue; + } + + missing_tablespace = true; + + if (srv_force_recovery > 0) { + ib::warn() << "Tablespace " << i->first + <<" was not found at " << i->second.name + <<", and innodb_force_recovery was set." 
+ <<" All redo log for this tablespace" + <<" will be ignored!"; + continue; + } + + if (!rescan) { + ib::info() << "Tablespace " << i->first + << " was not found at '" + << i->second.name << "', but there" + <<" were no modifications either."; + } + } + + if (!rescan || srv_force_recovery > 0) { + missing_tablespace = false; + } + + return DB_SUCCESS; +} + /** Check if all tablespaces were found for crash recovery. +@param[in] rescan rescan of redo logs is needed +@param[out] missing_tablespace missing table exists @return error code or DB_SUCCESS */ static MY_ATTRIBUTE((warn_unused_result)) dberr_t -recv_init_crash_recovery_spaces() +recv_init_crash_recovery_spaces(bool rescan, bool& missing_tablespace) { - typedef std::set space_set_t; bool flag_deleted = false; - space_set_t missing_spaces; ut_ad(!srv_read_only_mode); ut_ad(recv_needed_recovery); @@ -3053,9 +3154,9 @@ recv_init_crash_recovery_spaces() for (recv_spaces_t::iterator i = recv_spaces.begin(); i != recv_spaces.end(); i++) { ut_ad(!is_predefined_tablespace(i->first)); - ut_ad(!i->second.deleted || !i->second.space); + ut_ad(i->second.status != file_name_t::DELETED || !i->second.space); - if (i->second.deleted) { + if (i->second.status == file_name_t::DELETED) { /* The tablespace was deleted, so we can ignore any redo log for it. 
*/ flag_deleted = true; @@ -3071,84 +3172,18 @@ recv_init_crash_recovery_spaces() recv_sys->found_corrupt_log = true; return(DB_CORRUPTION); } else { - missing_spaces.insert(i->first); + i->second.status = file_name_t::MISSING; flag_deleted = true; } - ut_ad(i->second.deleted || i->second.name != ""); + ut_ad(i->second.status == file_name_t::DELETED || i->second.name != ""); } if (flag_deleted) { - dberr_t err = DB_SUCCESS; - - for (ulint h = 0; - h < hash_get_n_cells(recv_sys->addr_hash); - h++) { - for (recv_addr_t* recv_addr - = static_cast( - HASH_GET_FIRST( - recv_sys->addr_hash, h)); - recv_addr != 0; - recv_addr = static_cast( - HASH_GET_NEXT(addr_hash, recv_addr))) { - const ulint space = recv_addr->space; - - if (is_predefined_tablespace(space)) { - continue; - } - - recv_spaces_t::iterator i - = recv_spaces.find(space); - ut_ad(i != recv_spaces.end()); - - if (i->second.deleted) { - ut_ad(missing_spaces.find(space) - == missing_spaces.end()); - recv_addr->state = RECV_DISCARDED; - continue; - } - - space_set_t::iterator m = missing_spaces.find( - space); - - if (m != missing_spaces.end()) { - missing_spaces.erase(m); - err = recv_init_missing_space(err, i); - recv_addr->state = RECV_DISCARDED; - /* All further redo log for this - tablespace should be removed. */ - i->second.deleted = true; - } - } - } - - if (err != DB_SUCCESS) { - return(err); - } + return recv_validate_tablespace(rescan, missing_tablespace); } - for (space_set_t::const_iterator m = missing_spaces.begin(); - m != missing_spaces.end(); m++) { - recv_spaces_t::iterator i = recv_spaces.find(*m); - ut_ad(i != recv_spaces.end()); - - ib::info() << "Tablespace " << i->first - << " was not found at '" << i->second.name - << "', but there were no modifications either."; - } - - if (srv_operation == SRV_OPERATION_NORMAL) { - buf_dblwr_process(); - } - - if (srv_force_recovery < SRV_FORCE_NO_LOG_REDO) { - /* Spawn the background thread to flush dirty pages - from the buffer pools. 
*/ - recv_writer_thread_active = true; - os_thread_create(recv_writer_thread, 0, 0); - } - - return(DB_SUCCESS); + return DB_SUCCESS; } /** Start recovering from a redo log checkpoint. @@ -3324,13 +3359,57 @@ recv_recovery_from_checkpoint_start(lsn_t flush_lsn) log_sys->lsn = recv_sys->recovered_lsn; if (recv_needed_recovery) { - err = recv_init_crash_recovery_spaces(); + bool missing_tablespace = false; + + err = recv_init_crash_recovery_spaces( + rescan, missing_tablespace); if (err != DB_SUCCESS) { log_mutex_exit(); return(err); } + /* If there is any missing tablespace and rescan is needed + then there is a possiblity that hash table will not contain + all space ids redo logs. Rescan the remaining unstored + redo logs for the validation of missing tablespace. */ + while (missing_tablespace) { + DBUG_PRINT("ib_log", ("Rescan of redo log to validate " + "the missing tablespace. Scan " + "from last stored LSN " LSN_PF, + recv_sys->last_stored_lsn)); + + lsn_t recent_stored_lsn = recv_sys->last_stored_lsn; + rescan = recv_group_scan_log_recs( + group, checkpoint_lsn, + &recent_stored_lsn, false); + + ut_ad(!recv_sys->found_corrupt_fs); + + missing_tablespace = false; + + err = recv_sys->found_corrupt_log + ? DB_ERROR + : recv_validate_tablespace( + rescan, missing_tablespace); + + if (err != DB_SUCCESS) { + log_mutex_exit(); + return err; + } + } + + if (srv_operation == SRV_OPERATION_NORMAL) { + buf_dblwr_process(); + } + + ut_ad(srv_force_recovery <= SRV_FORCE_NO_UNDO_LOG_SCAN); + + /* Spawn the background thread to flush dirty pages + from the buffer pools. 
*/ + recv_writer_thread_active = true; + os_thread_create(recv_writer_thread, 0, 0); + if (rescan) { contiguous_lsn = checkpoint_lsn; From aafb9d44d65e42df72af28c940e5b23b4bc3bd43 Mon Sep 17 00:00:00 2001 From: Daniel Bartholomew Date: Tue, 27 Mar 2018 13:31:07 -0400 Subject: [PATCH 139/139] bump the VERSION --- VERSION | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/VERSION b/VERSION index 23b01fccd03..f9cc24a17b8 100644 --- a/VERSION +++ b/VERSION @@ -1,3 +1,3 @@ MYSQL_VERSION_MAJOR=10 MYSQL_VERSION_MINOR=2 -MYSQL_VERSION_PATCH=14 +MYSQL_VERSION_PATCH=15