From 28af4212b6e2afe1d42729c9c36215ed8a8d38cb Mon Sep 17 00:00:00 2001
From: Olivier Bertrand
Date: Tue, 3 Nov 2020 18:40:28 +0100
Subject: [PATCH 001/150] - Implementation of the Json BJSON representation.

VAL structures replace VALUE classes in the parsed binary trees.
These binary trees are swapped and saved to file. Swapping replaces
pointers with offsets so that the saved image is portable. When
restoring, class function pointers are re-allocated in place.
BJSON files are made by the new UDF function jfile_bjson.
  modified: storage/connect/block.h
  modified: storage/connect/filamtxt.cpp
  modified: storage/connect/filamtxt.h
  modified: storage/connect/global.h
  modified: storage/connect/json.cpp
  modified: storage/connect/json.h
  modified: storage/connect/jsonudf.cpp
  modified: storage/connect/jsonudf.h
  modified: storage/connect/tabjson.cpp
  modified: storage/connect/tabjson.h
  modified: storage/connect/value.h

- Make file (record) length and mapped memory possibly larger than
  MAX_INT.
  modified: storage/connect/filamap.cpp
  modified: storage/connect/filamvct.cpp
  modified: storage/connect/maputil.cpp
  modified: storage/connect/maputil.h
  modified: storage/connect/tabdos.cpp
  modified: storage/connect/xindex.cpp

- Make the column length a number of bytes (not characters) when
  making column definitions.
  modified: storage/connect/ha_connect.cc

- Change the message issued when making an index fails.
  modified: storage/connect/ha_connect.cc

- Update tests and results to reflect recent changes.
  modified: storage/connect/mysql-test/connect/r/alter_xml.result
  modified: storage/connect/mysql-test/connect/r/alter_xml2.result
  modified: storage/connect/mysql-test/connect/r/jdbc_oracle.result
  modified: storage/connect/mysql-test/connect/r/json.result
  modified: storage/connect/mysql-test/connect/r/json_java_2.result
  modified: storage/connect/mysql-test/connect/r/json_java_3.result
  modified: storage/connect/mysql-test/connect/r/json_mongo_c.result
  modified: storage/connect/mysql-test/connect/r/mongo_c.result
  modified: storage/connect/mysql-test/connect/r/mongo_java_2.result
  modified: storage/connect/mysql-test/connect/r/mongo_java_3.result
  modified: storage/connect/mysql-test/connect/r/odbc_oracle.result
  modified: storage/connect/mysql-test/connect/r/xml.result
  modified: storage/connect/mysql-test/connect/r/xml2.result
  modified: storage/connect/mysql-test/connect/r/xml2_html.result
  modified: storage/connect/mysql-test/connect/r/xml2_mult.result
  modified: storage/connect/mysql-test/connect/r/xml2_zip.result
  modified: storage/connect/mysql-test/connect/r/xml_html.result
  modified: storage/connect/mysql-test/connect/r/xml_mult.result
  modified: storage/connect/mysql-test/connect/r/xml_zip.result
  modified: storage/connect/mysql-test/connect/t/alter_xml.test
  modified: storage/connect/mysql-test/connect/t/alter_xml2.test
  modified: storage/connect/mysql-test/connect/t/jdbc_oracle.test
  modified: storage/connect/mysql-test/connect/t/json.test
  modified: storage/connect/mysql-test/connect/t/mongo_test.inc
  modified: storage/connect/mysql-test/connect/t/odbc_oracle.test
  modified: storage/connect/mysql-test/connect/t/xml.test
  modified: storage/connect/mysql-test/connect/t/xml2.test
  modified: storage/connect/mysql-test/connect/t/xml2_html.test
  modified: storage/connect/mysql-test/connect/t/xml2_mult.test
  modified: storage/connect/mysql-test/connect/t/xml2_zip.test
  modified: storage/connect/mysql-test/connect/t/xml_html.test
  modified: storage/connect/mysql-test/connect/t/xml_mult.test
  modified: storage/connect/mysql-test/connect/t/xml_zip.test

- Typo
  modified:
storage/connect/value.cpp --- storage/connect/block.h | 33 +- storage/connect/filamap.cpp | 6 +- storage/connect/filamtxt.cpp | 254 ++- storage/connect/filamtxt.h | 46 +- storage/connect/filamvct.cpp | 10 +- storage/connect/global.h | 8 +- storage/connect/ha_connect.cc | 24 +- storage/connect/json.cpp | 1690 ++++++++++------- storage/connect/json.h | 393 ++-- storage/connect/jsonudf.cpp | 304 ++- storage/connect/jsonudf.h | 31 +- storage/connect/maputil.cpp | 4 +- storage/connect/maputil.h | 3 +- .../mysql-test/connect/r/alter_xml.result | 4 +- .../mysql-test/connect/r/alter_xml2.result | 4 +- .../mysql-test/connect/r/jdbc_oracle.result | 8 +- .../connect/mysql-test/connect/r/json.result | 146 +- .../mysql-test/connect/r/json_java_2.result | 40 +- .../mysql-test/connect/r/json_java_3.result | 40 +- .../mysql-test/connect/r/json_mongo_c.result | 40 +- .../mysql-test/connect/r/mongo_c.result | 34 +- .../mysql-test/connect/r/mongo_java_2.result | 34 +- .../mysql-test/connect/r/mongo_java_3.result | 34 +- .../mysql-test/connect/r/odbc_oracle.result | 38 +- .../connect/mysql-test/connect/r/xml.result | 40 +- .../connect/mysql-test/connect/r/xml2.result | 38 +- .../mysql-test/connect/r/xml2_html.result | 6 +- .../mysql-test/connect/r/xml2_mult.result | 4 +- .../mysql-test/connect/r/xml2_zip.result | 20 +- .../mysql-test/connect/r/xml_html.result | 6 +- .../mysql-test/connect/r/xml_mult.result | 4 +- .../mysql-test/connect/r/xml_zip.result | 20 +- .../mysql-test/connect/t/alter_xml.test | 2 +- .../mysql-test/connect/t/alter_xml2.test | 2 +- .../mysql-test/connect/t/jdbc_oracle.test | 8 +- .../connect/mysql-test/connect/t/json.test | 146 +- .../mysql-test/connect/t/mongo_test.inc | 32 +- .../mysql-test/connect/t/odbc_oracle.test | 30 +- storage/connect/mysql-test/connect/t/xml.test | 46 +- .../connect/mysql-test/connect/t/xml2.test | 46 +- .../mysql-test/connect/t/xml2_html.test | 6 +- .../mysql-test/connect/t/xml2_mult.test | 4 +- .../mysql-test/connect/t/xml2_zip.test | 20 +- .../mysql-test/connect/t/xml_html.test | 6 +- .../mysql-test/connect/t/xml_mult.test | 4 +- .../connect/mysql-test/connect/t/xml_zip.test | 20 +- storage/connect/tabdos.cpp | 4 +- storage/connect/tabjson.cpp | 460 +++-- storage/connect/tabjson.h | 12 +- storage/connect/value.cpp | 4 +- storage/connect/value.h | 9 +- storage/connect/xindex.cpp | 4 +- 52 files changed, 2652 insertions(+), 1579 deletions(-) diff --git a/storage/connect/block.h b/storage/connect/block.h index 2ca9586ee3f..479bee373fa 100644 --- a/storage/connect/block.h +++ b/storage/connect/block.h @@ -1,19 +1,19 @@ /**************** Block H Declares Source Code File (.H) ***************/ -/* Name: BLOCK.H Version 2.0 */ +/* Name: BLOCK.H Version 2.1 */ /* */ -/* (C) Copyright to the author Olivier BERTRAND 1998 */ +/* (C) Copyright to the author Olivier BERTRAND 1998 - 2020 */ /* */ /* This file contains the BLOCK pure virtual class definition. */ /*---------------------------------------------------------------------*/ /* Note: one of the main purpose of this base class is to take care */ -/* of the very specific way Plug handles memory allocation. */ +/* of the very specific way Connect handles memory allocation. */ /* Instead of allocating small chunks of storage via new or malloc */ -/* Plug works in its private memory pool in which it does the sub- */ +/* Connect works in its private memory pool in which it does the sub- */ /* allocation using the function PlugSubAlloc. 
These are never freed */ /* separately but when a transaction is terminated, the entire pool */ /* is set to empty, resulting in a very fast and efficient allocate */ /* process, no garbage collection problem, and an automatic recovery */ -/* procedure (via LongJump) when the memory is exhausted. */ +/* procedure (via throw) when the memory is exhausted. */ /* For this to work new must be given two parameters, first the */ /* global pointer of the Plug application, and an optional pointer to */ /* the memory pool to use, defaulting to NULL meaning using the Plug */ @@ -30,6 +30,8 @@ #define DllExport #endif // !__WIN__ +typedef class JSON *PJSON; + /***********************************************************************/ /* Definition of class BLOCK with its method function new. */ /***********************************************************************/ @@ -37,21 +39,26 @@ typedef class BLOCK *PBLOCK; class DllExport BLOCK { public: - void * operator new(size_t size, PGLOBAL g, void *p = NULL) { - xtrc(256, "New BLOCK: size=%d g=%p p=%p\n", size, g, p); - return (PlugSubAlloc(g, p, size)); - } // end of new + void *operator new(size_t size, PGLOBAL g, void *mp = NULL) { + xtrc(256, "New BLOCK: size=%d g=%p p=%p\n", size, g, mp); + return PlugSubAlloc(g, mp, size); + } // end of new - virtual void Printf(PGLOBAL, FILE *, uint) {} // Produce file desc + void* operator new(size_t size, size_t mp) { + xtrc(256, "Realloc at: mp=%zd\n", mp); + return (void*)mp; + } // end of new + + virtual void Printf(PGLOBAL, FILE *, uint) {} // Produce file desc virtual void Prints(PGLOBAL, char *, uint) {} // Produce string desc #if !defined(__BORLANDC__) // Avoid warning C4291 by defining a matching dummy delete operator - void operator delete(void *, PGLOBAL, void *) {} - void operator delete(void *, size_t) {} + void operator delete(void*, PGLOBAL, void *) {} + //void operator delete(void*, size_t) {} #endif virtual ~BLOCK() {} - }; // end of class BLOCK +}; // end of class BLOCK #endif // !BLOCK_DEFINED diff --git a/storage/connect/filamap.cpp b/storage/connect/filamap.cpp index 6e71e1bf2cd..66cf081e5af 100644 --- a/storage/connect/filamap.cpp +++ b/storage/connect/filamap.cpp @@ -102,7 +102,7 @@ int MAPFAM::GetFileLength(PGLOBAL g) bool MAPFAM::OpenTableFile(PGLOBAL g) { char filename[_MAX_PATH]; - int len; + size_t len; MODE mode = Tdbp->GetMode(); PFBLOCK fp; PDBUSER dbuserp = (PDBUSER)g->Activityp->Aptr; @@ -174,9 +174,9 @@ bool MAPFAM::OpenTableFile(PGLOBAL g) } // endif hFile /*******************************************************************/ - /* Get the file size (assuming file is smaller than 4 GB) */ + /* Get the file size. 
*/ /*******************************************************************/ - len = mm.lenL; + len = (size_t)mm.sz.QuadPart; Memory = (char *)mm.memory; if (!len) { // Empty or deleted file diff --git a/storage/connect/filamtxt.cpp b/storage/connect/filamtxt.cpp index ca48fc765a1..28a6894325b 100644 --- a/storage/connect/filamtxt.cpp +++ b/storage/connect/filamtxt.cpp @@ -1,11 +1,11 @@ /*********** File AM Txt C++ Program Source Code File (.CPP) ***********/ /* PROGRAM NAME: FILAMTXT */ /* ------------- */ -/* Version 1.7 */ +/* Version 1.8 */ /* */ /* COPYRIGHT: */ /* ---------- */ -/* (C) Copyright to the author Olivier BERTRAND 2005-2017 */ +/* (C) Copyright to the author Olivier BERTRAND 2005-2020 */ /* */ /* WHAT THIS PROGRAM DOES: */ /* ----------------------- */ @@ -48,6 +48,7 @@ #include "plgdbsem.h" #include "filamtxt.h" #include "tabdos.h" +#include "tabjson.h" #if defined(UNIX) || defined(UNIV_LINUX) #include "osutil.h" @@ -1662,3 +1663,252 @@ void BLKFAM::Rewind(void) //Rbuf = 0; commented out in case we reuse last read block } // end of Rewind +/* --------------------------- Class BINFAM -------------------------- */ + +/***********************************************************************/ +/* BIN GetFileLength: returns file size in number of bytes. */ +/***********************************************************************/ +int BINFAM::GetFileLength(PGLOBAL g) { + int len; + + if (!BStream) + len = TXTFAM::GetFileLength(g); + else + if ((len = _filelength(_fileno(BStream))) < 0) + sprintf(g->Message, MSG(FILELEN_ERROR), "_filelength", To_File); + + xtrc(1, "File length=%d\n", len); + return len; +} // end of GetFileLength + +/***********************************************************************/ +/* Cardinality: returns table cardinality in number of rows. */ +/* This function can be called with a null argument to test the */ +/* availability of Cardinality implementation (1 yes, 0 no). */ +/***********************************************************************/ +int BINFAM::Cardinality(PGLOBAL g) { + return (g) ? -1 : 0; +} // end of Cardinality + +/***********************************************************************/ +/* OpenTableFile: Open a DOS/UNIX table file using C standard I/Os. */ +/***********************************************************************/ +bool BINFAM::OpenTableFile(PGLOBAL g) { + char opmode[4], filename[_MAX_PATH]; + MODE mode = Tdbp->GetMode(); + PDBUSER dbuserp = PlgGetUser(g); + + switch (mode) { + case MODE_READ: + strcpy(opmode, "rb"); + break; + case MODE_WRITE: + strcpy(opmode, "wb"); + break; + default: + sprintf(g->Message, MSG(BAD_OPEN_MODE), mode); + return true; + } // endswitch Mode + + // Now open the file stream + PlugSetPath(filename, To_File, Tdbp->GetPath()); + + if (!(BStream = PlugOpenFile(g, filename, opmode))) { + if (trace(1)) + htrc("%s\n", g->Message); + + return (mode == MODE_READ && errno == ENOENT) + ? PushWarning(g, Tdbp) : true; + } // endif BStream + + if (trace(1)) + htrc("File %s open BStream=%p mode=%s\n", filename, BStream, opmode); + + To_Fb = dbuserp->Openlist; // Keep track of File block + + /*********************************************************************/ + /* Allocate the line buffer. */ + /*********************************************************************/ + return AllocateBuffer(g); +} // end of OpenTableFile + +/***********************************************************************/ +/* Allocate the line buffer. 
For mode Delete a bigger buffer has to */ +/* be allocated because is it also used to move lines into the file. */ +/***********************************************************************/ +bool BINFAM::AllocateBuffer(PGLOBAL g) { + MODE mode = Tdbp->GetMode(); + + // Lrecl is Ok + Buflen = Lrecl; + + if (trace(1)) + htrc("SubAllocating a buffer of %d bytes\n", Buflen); + + To_Buf = (char*)PlugSubAlloc(g, NULL, Buflen); + return false; +} // end of AllocateBuffer + +/***********************************************************************/ +/* GetRowID: return the RowID of last read record. */ +/***********************************************************************/ +int BINFAM::GetRowID(void) { + return Rows; +} // end of GetRowID + +/***********************************************************************/ +/* GetPos: return the position of last read record. */ +/***********************************************************************/ +int BINFAM::GetPos(void) { + return Fpos; +} // end of GetPos + +/***********************************************************************/ +/* GetNextPos: return the position of next record. */ +/***********************************************************************/ +int BINFAM::GetNextPos(void) { + return ftell(BStream); +} // end of GetNextPos + +/***********************************************************************/ +/* SetPos: Replace the table at the specified position. */ +/***********************************************************************/ +bool BINFAM::SetPos(PGLOBAL g, int pos) { + Fpos = pos; + + if (fseek(BStream, Fpos, SEEK_SET)) { + sprintf(g->Message, MSG(FSETPOS_ERROR), Fpos); + return true; + } // endif + + Placed = true; + return false; +} // end of SetPos + +/***********************************************************************/ +/* Record file position in case of UPDATE or DELETE. */ +/***********************************************************************/ +bool BINFAM::RecordPos(PGLOBAL g) { + if ((Fpos = ftell(BStream)) < 0) { + sprintf(g->Message, MSG(FTELL_ERROR), 0, strerror(errno)); + // strcat(g->Message, " (possible wrong ENDING option value)"); + return true; + } // endif Fpos + + return false; +} // end of RecordPos + +/***********************************************************************/ +/* ReadBuffer: Read one line for a text file. */ +/***********************************************************************/ +int BINFAM::ReadBuffer(PGLOBAL g) { + int rc; + + if (!BStream) + return RC_EF; + + xtrc(2, "ReadBuffer: Tdbp=%p To_Line=%p Placed=%d\n", + Tdbp, Tdbp->GetLine(), Placed); + + if (!Placed) { + /*******************************************************************/ + /* Record file position in case of UPDATE or DELETE. 
*/ + /*******************************************************************/ + if (RecordPos(g)) + return RC_FX; + + CurBlk = (int)Rows++; + xtrc(2, "ReadBuffer: CurBlk=%d\n", CurBlk); + } else + Placed = false; + + xtrc(2, " About to read: bstream=%p To_Buf=%p Buflen=%d\n", + BStream, To_Buf, Buflen); + + // Read the prefix giving the row length + if (!fread(&Recsize, sizeof(size_t), 1, BStream)) { + if (!feof(BStream)) { + strcpy(g->Message, "Error reading line prefix\n"); + return RC_FX; + } else + return RC_EF; + + } else if (Recsize > Buflen) { + sprintf(g->Message, "Record too big (Recsize=%zd Buflen=%d)\n", Recsize, Buflen); + return RC_FX; + } // endif Recsize + + if (fread(To_Buf, Recsize, 1, BStream)) { + xtrc(2, " Read: To_Buf=%p Recsize=%zd\n", To_Buf, Recsize); + // memcpy(Tdbp->GetLine(), To_Buf, Recsize); + num_read++; + rc = RC_OK; + } else if (feof(BStream)) { + rc = RC_EF; + } else { +#if defined(__WIN__) + sprintf(g->Message, MSG(READ_ERROR), To_File, _strerror(NULL)); +#else + sprintf(g->Message, MSG(READ_ERROR), To_File, strerror(0)); +#endif + xtrc(2, "%s\n", g->Message); + rc = RC_FX; + } // endif's fread + + xtrc(2, "ReadBuffer: rc=%d\n", rc); + IsRead = true; + return rc; +} // end of ReadBuffer + +/***********************************************************************/ +/* WriteBuffer: File write routine for BIN access method. */ +/***********************************************************************/ +int BINFAM::WriteBuffer(PGLOBAL g) { + int curpos = 0; + bool moved = true; + + /*********************************************************************/ + /* Prepare writing the line. */ + /*********************************************************************/ + memcpy(To_Buf, Tdbp->GetLine(), Recsize); + + /*********************************************************************/ + /* Now start the writing process. */ + /*********************************************************************/ + if (fwrite(&Recsize, sizeof(size_t), 1, BStream) != 1) { + sprintf(g->Message, "Error %d writing prefix to %s", + errno, To_File); + return RC_FX; + } else if (fwrite(To_Buf, Recsize, 1, BStream) != 1) { + sprintf(g->Message, "Error %d writing %zd bytes to %s", + errno, Recsize, To_File); + return RC_FX; + } // endif fwrite + + xtrc(1, "write done\n"); + return RC_OK; +} // end of WriteBuffer + +/***********************************************************************/ +/* Table file close routine for DOS access method. */ +/***********************************************************************/ +void BINFAM::CloseTableFile(PGLOBAL g, bool abort) { + int rc; + + Abort = abort; + rc = PlugCloseFile(g, To_Fb); + xtrc(1, "BIN Close: closing %s rc=%d\n", To_File, rc); + BStream = NULL; // So we can know whether table is open +} // end of CloseTableFile + +/***********************************************************************/ +/* Rewind routine for BIN access method. 
*/ +/***********************************************************************/ +void BINFAM::Rewind(void) { + if (BStream) // Can be NULL when making index on void table + rewind(BStream); + + Rows = 0; + OldBlk = CurBlk = -1; +} // end of Rewind + diff --git a/storage/connect/filamtxt.h b/storage/connect/filamtxt.h index 1fdae8fcd37..8c1fe5e7dbc 100644 --- a/storage/connect/filamtxt.h +++ b/storage/connect/filamtxt.h @@ -1,7 +1,7 @@ /************** FilAMTxt H Declares Source Code File (.H) **************/ -/* Name: FILAMTXT.H Version 1.3 */ +/* Name: FILAMTXT.H Version 1.4 */ /* */ -/* (C) Copyright to the author Olivier BERTRAND 2005-2014 */ +/* (C) Copyright to the author Olivier BERTRAND 2005-2020 */ /* */ /* This file contains the file access method classes declares. */ /***********************************************************************/ @@ -15,6 +15,7 @@ typedef class TXTFAM *PTXF; typedef class DOSFAM *PDOSFAM; typedef class BLKFAM *PBLKFAM; +typedef class BINFAM *PBINFAM; typedef class DOSDEF *PDOSDEF; typedef class TDBDOS *PTDBDOS; @@ -210,4 +211,45 @@ class DllExport BLKFAM : public DOSFAM { bool Closing; // True when closing on Update }; // end of class BLKFAM +/***********************************************************************/ +/* This is the DOS/UNIX Access Method class declaration for binary */ +/* files with variable record format (BJSON) */ +/***********************************************************************/ +class DllExport BINFAM : public TXTFAM { +public: + // Constructor + BINFAM(PDOSDEF tdp) : TXTFAM(tdp) {BStream = NULL; Recsize = 0;} + BINFAM(PBINFAM txfp) : TXTFAM(txfp) {BStream = txfp->BStream;} + + // Implementation + virtual AMT GetAmType(void) {return TYPE_AM_BIN;} + virtual int GetPos(void); + virtual int GetNextPos(void); + virtual PTXF Duplicate(PGLOBAL g) { return (PTXF)new(g) BINFAM(this); } + + // Methods +//virtual void Reset(void) {TXTFAM::Reset();} + virtual int GetFileLength(PGLOBAL g); + virtual int Cardinality(PGLOBAL g); + virtual int MaxBlkSize(PGLOBAL g, int s) {return s;} + virtual bool AllocateBuffer(PGLOBAL g); + virtual int GetRowID(void); + virtual bool RecordPos(PGLOBAL g); + virtual bool SetPos(PGLOBAL g, int recpos); + virtual int SkipRecord(PGLOBAL g, bool header) {return 0;} + virtual bool OpenTableFile(PGLOBAL g); + virtual int ReadBuffer(PGLOBAL g); + virtual int WriteBuffer(PGLOBAL g); + virtual int DeleteRecords(PGLOBAL g, int irc) {return RC_FX;} + virtual void CloseTableFile(PGLOBAL g, bool abort); + virtual void Rewind(void); + +protected: +//virtual int InitDelete(PGLOBAL g, int fpos, int spos); + + // Members + FILE *BStream; // Points to Bin file structure + size_t Recsize; // Length of last read record +}; // end of class BINFAM + #endif // __FILAMTXT_H diff --git a/storage/connect/filamvct.cpp b/storage/connect/filamvct.cpp index 6d0779b150a..bf4ef8557ad 100644 --- a/storage/connect/filamvct.cpp +++ b/storage/connect/filamvct.cpp @@ -1327,7 +1327,7 @@ VCMFAM::VCMFAM(PVCMFAM txfp) : VCTFAM(txfp) bool VCMFAM::OpenTableFile(PGLOBAL g) { char filename[_MAX_PATH]; - int len; + size_t len; MODE mode = Tdbp->GetMode(); PFBLOCK fp = NULL; PDBUSER dbuserp = (PDBUSER)g->Activityp->Aptr; @@ -1421,9 +1421,9 @@ bool VCMFAM::OpenTableFile(PGLOBAL g) } // endif hFile /*******************************************************************/ - /* Get the file size (assuming file is smaller than 4 GB) */ + /* Get the file size. 
*/ /*******************************************************************/ - len = mm.lenL; + len = (size_t)mm.sz.QuadPart; Memory = (char *)mm.memory; if (!len) { // Empty or deleted file @@ -2762,7 +2762,7 @@ bool VMPFAM::OpenTableFile(PGLOBAL g) bool VMPFAM::MapColumnFile(PGLOBAL g, MODE mode, int i) { char filename[_MAX_PATH]; - int len; + size_t len; HANDLE hFile; MEMMAP mm; PFBLOCK fp; @@ -2816,7 +2816,7 @@ bool VMPFAM::MapColumnFile(PGLOBAL g, MODE mode, int i) /*****************************************************************/ /* Get the file size (assuming file is smaller than 4 GB) */ /*****************************************************************/ - len = mm.lenL; + len = (size_t)mm.sz.QuadPart; Memcol[i] = (char *)mm.memory; if (!len) { // Empty or deleted file diff --git a/storage/connect/global.h b/storage/connect/global.h index d17620861fa..294ad0e1d7b 100644 --- a/storage/connect/global.h +++ b/storage/connect/global.h @@ -240,10 +240,14 @@ inline void* MakePtr(void* memp, size_t offset) { /* This routine makes an offset from a pointer new format. */ /***********************************************************************/ inline size_t MakeOff(void* memp, void* ptr) { + if (ptr) { #if defined(_DEBUG) - assert(ptr > memp); + assert(ptr > memp); #endif // _DEBUG - return ((!ptr) ? 0 : (size_t)((char*)ptr - (size_t)memp)); + return (size_t)((char*)ptr - (size_t)memp); + } else + return 0; + } /* end of MakeOff */ /*-------------------------- End of Global.H --------------------------*/ diff --git a/storage/connect/ha_connect.cc b/storage/connect/ha_connect.cc index a3dfc50562d..859d50b9a2c 100644 --- a/storage/connect/ha_connect.cc +++ b/storage/connect/ha_connect.cc @@ -1574,6 +1574,7 @@ void *ha_connect::GetColumnOption(PGLOBAL g, void *field, PCOLINFO pcf) // Now get column information pcf->Name= (char*)fp->field_name; + chset = (char*)fp->charset()->name; if (fop && fop->special) { pcf->Fieldfmt= (char*)fop->special; @@ -1584,8 +1585,15 @@ void *ha_connect::GetColumnOption(PGLOBAL g, void *field, PCOLINFO pcf) pcf->Scale= 0; pcf->Opt= (fop) ? (int)fop->opt : 0; - if ((pcf->Length= fp->field_length) < 0) - pcf->Length= 256; // BLOB? + if (fp->field_length >= 0) { + pcf->Length = fp->field_length; + + // length is bytes for Connect, not characters + if (!strnicmp(chset, "utf8", 4)) + pcf->Length /= 3; + + } else + pcf->Length= 256; // BLOB? 
pcf->Precision= pcf->Length; @@ -1602,8 +1610,6 @@ void *ha_connect::GetColumnOption(PGLOBAL g, void *field, PCOLINFO pcf) pcf->Fieldfmt= NULL; } // endif fop - chset= (char *)fp->charset()->name; - if (!strcmp(chset, "binary")) v = 'B'; // Binary string @@ -4940,11 +4946,11 @@ int ha_connect::external_lock(THD *thd, int lock_type) // Here we do make the new indexes if (tdp->MakeIndex(g, adp, true) == RC_FX) { // Make it a warning to avoid crash - push_warning(thd, Sql_condition::WARN_LEVEL_WARN, - 0, g->Message); - rc= 0; - //my_message(ER_UNKNOWN_ERROR, g->Message, MYF(0)); - //rc= HA_ERR_INTERNAL_ERROR; + //push_warning(thd, Sql_condition::WARN_LEVEL_WARN, + // 0, g->Message); + //rc= 0; + my_message(ER_TOO_MANY_KEYS, g->Message, MYF(0)); + rc= HA_ERR_INDEX_CORRUPT; } // endif MakeIndex } else if (tdbp->GetDef()->Indexable() == 3) { diff --git a/storage/connect/json.cpp b/storage/connect/json.cpp index ea3ea18da0b..ce3ddd865a5 100644 --- a/storage/connect/json.cpp +++ b/storage/connect/json.cpp @@ -1,7 +1,7 @@ /*************** json CPP Declares Source Code File (.H) ***************/ -/* Name: json.cpp Version 1.4 */ +/* Name: json.cpp Version 1.5 */ /* */ -/* (C) Copyright to the author Olivier BERTRAND 2014 - 2017 */ +/* (C) Copyright to the author Olivier BERTRAND 2014 - 2020 */ /* */ /* This file contains the JSON classes functions. */ /***********************************************************************/ @@ -21,7 +21,7 @@ #include "plgdbsem.h" #include "json.h" -#define ARGS MY_MIN(24,len-i),s+MY_MAX(i-3,0) +#define ARGS MY_MIN(24,(int)len-i),s+MY_MAX(i-3,0) #if defined(__WIN__) #define EL "\r\n" @@ -38,16 +38,16 @@ class SE_Exception { public: - SE_Exception(unsigned int n, PEXCEPTION_RECORD p) : nSE(n), eRec(p) {} - ~SE_Exception() {} + SE_Exception(unsigned int n, PEXCEPTION_RECORD p) : nSE(n), eRec(p) {} + ~SE_Exception() {} - unsigned int nSE; - PEXCEPTION_RECORD eRec; + unsigned int nSE; + PEXCEPTION_RECORD eRec; }; // end of class SE_Exception void trans_func(unsigned int u, _EXCEPTION_POINTERS* pExp) { - throw SE_Exception(u, pExp->ExceptionRecord); + throw SE_Exception(u, pExp->ExceptionRecord); } // end of trans_func char *GetExceptionDesc(PGLOBAL g, unsigned int e); @@ -58,46 +58,58 @@ char *GetJsonNull(void); /***********************************************************************/ /* IsNum: check whether this string is all digits. */ /***********************************************************************/ -bool IsNum(PSZ s) -{ - for (char *p = s; *p; p++) - if (*p == ']') - break; - else if (!isdigit(*p) || *p == '-') - return false; +bool IsNum(PSZ s) { + for (char* p = s; *p; p++) + if (*p == ']') + break; + else if (!isdigit(*p) || *p == '-') + return false; - return true; -} // end of IsNum + return true; +} // end of IsNum /***********************************************************************/ /* NextChr: return the first found '[' or Sep pointer. */ /***********************************************************************/ -char *NextChr(PSZ s, char sep) +char* NextChr(PSZ s, char sep) { - char *p1 = strchr(s, '['); - char *p2 = strchr(s, sep); + char* p1 = strchr(s, '['); + char* p2 = strchr(s, sep); - if (!p2) - return p1; - else if (p1) - return MY_MIN(p1, p2); + if (!p2) + return p1; + else if (p1) + return MY_MIN(p1, p2); - return p2; -} // end of NextChr + return p2; +} // end of NextChr +/***********************************************************************/ +/* Allocate a VAL structure, make sure common field and Nd are zeroed. 
*/ +/***********************************************************************/ +PVL AllocVal(PGLOBAL g, JTYP type) +{ + PVL vlp = (PVL)PlugSubAlloc(g, NULL, sizeof(VAL)); + + vlp->LLn = 0; + vlp->Nd = 0; + vlp->Type = type; + return vlp; +} // end of AllocVal /***********************************************************************/ /* Parse a json string. */ /* Note: when pretty is not known, the caller set pretty to 3. */ /***********************************************************************/ -PJSON ParseJson(PGLOBAL g, char *s, int len, int *ptyp, bool *comma) +PJSON ParseJson(PGLOBAL g, char* s, size_t len, int* ptyp, bool* comma) { - int i, pretty = (ptyp) ? *ptyp : 3; - bool b = false, pty[3] = {true,true,true}; - PJSON jsp = NULL, jp = NULL; + int i, pretty = (ptyp) ? *ptyp : 3; + bool b = false, pty[3] = { true,true,true }; + PJSON jsp = NULL; + PJDOC jdp = NULL; - if (trace(1)) - htrc("ParseJson: s=%.10s len=%d\n", s, len); + if (trace(1)) + htrc("ParseJson: s=%.10s len=%zd\n", s, len); if (!s || !len) { strcpy(g->Message, "Void JSON object"); @@ -105,116 +117,388 @@ PJSON ParseJson(PGLOBAL g, char *s, int len, int *ptyp, bool *comma) } else if (comma) *comma = false; - // Trying to guess the pretty format - if (s[0] == '[' && (s[1] == '\n' || (s[1] == '\r' && s[2] == '\n'))) - pty[0] = false; + // Trying to guess the pretty format + if (s[0] == '[' && (s[1] == '\n' || (s[1] == '\r' && s[2] == '\n'))) + pty[0] = false; - try { - jp = new(g) JSON(); - jp->s = s; - jp->len = len; - jp->pty = pty; + try { + jdp = new(g) JDOC; + jdp->s = s; + jdp->len = len; + jdp->pty = pty; - for (i = 0; i < jp->len; i++) - switch (s[i]) { - case '[': - if (jsp) - jsp = jp->ParseAsArray(g, i, pretty, ptyp); - else - jsp = jp->ParseArray(g, ++i); + for (i = 0; i < jdp->len; i++) + switch (s[i]) { + case '[': + if (jsp) + jsp = jdp->ParseAsArray(g, i, pretty, ptyp); + else + jsp = jdp->ParseArray(g, ++i); - break; - case '{': - if (jsp) - jsp = jp->ParseAsArray(g, i, pretty, ptyp); - else if (!(jsp = jp->ParseObject(g, ++i))) - throw 2; + break; + case '{': + if (jsp) + jsp = jdp->ParseAsArray(g, i, pretty, ptyp); + else if (!(jsp = jdp->ParseObject(g, ++i))) + throw 2; - break; - case ' ': - case '\t': - case '\n': - case '\r': - break; - case ',': - if (jsp && (pretty == 1 || pretty == 3)) { - if (comma) - *comma = true; + break; + case ' ': + case '\t': + case '\n': + case '\r': + break; + case ',': + if (jsp && (pretty == 1 || pretty == 3)) { + if (comma) + *comma = true; - pty[0] = pty[2] = false; - break; - } // endif pretty + pty[0] = pty[2] = false; + break; + } // endif pretty - sprintf(g->Message, "Unexpected ',' (pretty=%d)", pretty); - throw 3; - case '(': - b = true; - break; - case ')': - if (b) { - b = false; - break; - } // endif b + sprintf(g->Message, "Unexpected ',' (pretty=%d)", pretty); + throw 3; + case '(': + b = true; + break; + case ')': + if (b) { + b = false; + break; + } // endif b - default: - if (jsp) - jsp = jp->ParseAsArray(g, i, pretty, ptyp); - else if (!(jsp = jp->ParseValue(g, i))) - throw 4; + default: + if (jsp) + jsp = jdp->ParseAsArray(g, i, pretty, ptyp); + else if (!(jsp = jdp->ParseValue(g, i))) + throw 4; - break; - }; // endswitch s[i] + break; + }; // endswitch s[i] - if (!jsp) - sprintf(g->Message, "Invalid Json string '%.*s'", MY_MIN(len, 50), s); - else if (ptyp && pretty == 3) { - *ptyp = 3; // Not recognized pretty + if (!jsp) + sprintf(g->Message, "Invalid Json string '%.*s'", MY_MIN((int)len, 50), s); + else if (ptyp && pretty == 3) { + *ptyp = 3; 
// Not recognized pretty - for (i = 0; i < 3; i++) - if (pty[i]) { - *ptyp = i; - break; - } // endif pty + for (i = 0; i < 3; i++) + if (pty[i]) { + *ptyp = i; + break; + } // endif pty - } // endif ptyp + } // endif ptyp - } catch (int n) { - if (trace(1)) - htrc("Exception %d: %s\n", n, g->Message); - jsp = NULL; - } catch (const char *msg) { - strcpy(g->Message, msg); - jsp = NULL; - } // end catch + } catch (int n) { + if (trace(1)) + htrc("Exception %d: %s\n", n, g->Message); + jsp = NULL; + } catch (const char* msg) { + strcpy(g->Message, msg); + jsp = NULL; + } // end catch - return jsp; + return jsp; } // end of ParseJson +/***********************************************************************/ +/* Serialize a JSON document tree: */ +/***********************************************************************/ +PSZ Serialize(PGLOBAL g, PJSON jsp, char* fn, int pretty) { + PSZ str = NULL; + bool b = false, err = true; + JOUT* jp; + FILE* fs = NULL; + PJDOC jdp = NULL; + + g->Message[0] = 0; + + try { + jdp = new(g) JDOC; // MUST BE ALLOCATED BEFORE jp !!!!! + + if (!jsp) { + strcpy(g->Message, "Null json tree"); + throw 1; + } else if (!fn) { + // Serialize to a string + jp = new(g) JOUTSTR(g); + b = pretty == 1; + } else { + if (!(fs = fopen(fn, "wb"))) { + sprintf(g->Message, MSG(OPEN_MODE_ERROR), + "w", (int)errno, fn); + strcat(strcat(g->Message, ": "), strerror(errno)); + throw 2; + } else if (pretty >= 2) { + // Serialize to a pretty file + jp = new(g)JOUTPRT(g, fs); + } else { + // Serialize to a flat file + b = true; + jp = new(g)JOUTFILE(g, fs, pretty); + } // endif's + + } // endif's + + jdp->SetJp(jp); + + switch (jsp->GetType()) { + case TYPE_JAR: + err = jdp->SerializeArray((PJAR)jsp, b); + break; + case TYPE_JOB: + err = ((b && jp->Prty()) && jp->WriteChr('\t')); + err |= jdp->SerializeObject((PJOB)jsp); + break; + case TYPE_JVAL: + err = jdp->SerializeValue((PJVAL)jsp); + break; + default: + strcpy(g->Message, "Invalid json tree"); + } // endswitch Type + + if (fs) { + fputs(EL, fs); + fclose(fs); + str = (err) ? NULL : strcpy(g->Message, "Ok"); + } else if (!err) { + str = ((JOUTSTR*)jp)->Strp; + jp->WriteChr('\0'); + PlugSubAlloc(g, NULL, ((JOUTSTR*)jp)->N); + } else { + if (!g->Message[0]) + strcpy(g->Message, "Error in Serialize"); + + } // endif's + + } catch (int n) { + if (trace(1)) + htrc("Exception %d: %s\n", n, g->Message); + str = NULL; + } catch (const char* msg) { + strcpy(g->Message, msg); + str = NULL; + } // end catch + + return str; +} // end of Serialize + + +/* -------------------------- Class JOUTSTR -------------------------- */ + +/***********************************************************************/ +/* JOUTSTR constructor. */ +/***********************************************************************/ +JOUTSTR::JOUTSTR(PGLOBAL g) : JOUT(g) { + PPOOLHEADER pph = (PPOOLHEADER)g->Sarea; + + N = 0; + Max = pph->FreeBlk; + Max = (Max > 32) ? Max - 32 : Max; + Strp = (char*)PlugSubAlloc(g, NULL, 0); // Size not know yet +} // end of JOUTSTR constructor + +/***********************************************************************/ +/* Concatenate a string to the Serialize string. 
*/ +/***********************************************************************/ +bool JOUTSTR::WriteStr(const char* s) { + if (s) { + size_t len = strlen(s); + + if (N + len > Max) + return true; + + memcpy(Strp + N, s, len); + N += len; + return false; + } else + return true; + +} // end of WriteStr + +/***********************************************************************/ +/* Concatenate a character to the Serialize string. */ +/***********************************************************************/ +bool JOUTSTR::WriteChr(const char c) { + if (N + 1 > Max) + return true; + + Strp[N++] = c; + return false; +} // end of WriteChr + +/***********************************************************************/ +/* Escape and Concatenate a string to the Serialize string. */ +/***********************************************************************/ +bool JOUTSTR::Escape(const char* s) { + WriteChr('"'); + + for (unsigned int i = 0; s[i]; i++) + switch (s[i]) { + case '"': + case '\\': + case '\t': + case '\n': + case '\r': + case '\b': + case '\f': WriteChr('\\'); + // fall through + default: + WriteChr(s[i]); + break; + } // endswitch s[i] + + WriteChr('"'); + return false; +} // end of Escape + +/* ------------------------- Class JOUTFILE -------------------------- */ + +/***********************************************************************/ +/* Write a string to the Serialize file. */ +/***********************************************************************/ +bool JOUTFILE::WriteStr(const char* s) { + // This is temporary + fputs(s, Stream); + return false; +} // end of WriteStr + +/***********************************************************************/ +/* Write a character to the Serialize file. */ +/***********************************************************************/ +bool JOUTFILE::WriteChr(const char c) { + // This is temporary + fputc(c, Stream); + return false; +} // end of WriteChr + +/***********************************************************************/ +/* Escape and Concatenate a string to the Serialize string. */ +/***********************************************************************/ +bool JOUTFILE::Escape(const char* s) { + // This is temporary + fputc('"', Stream); + + for (unsigned int i = 0; s[i]; i++) + switch (s[i]) { + case '"': fputs("\\\"", Stream); break; + case '\\': fputs("\\\\", Stream); break; + case '\t': fputs("\\t", Stream); break; + case '\n': fputs("\\n", Stream); break; + case '\r': fputs("\\r", Stream); break; + case '\b': fputs("\\b", Stream); break; + case '\f': fputs("\\f", Stream); break; + default: + fputc(s[i], Stream); + break; + } // endswitch s[i] + + fputc('"', Stream); + return false; +} // end of Escape + +/* ------------------------- Class JOUTPRT --------------------------- */ + +/***********************************************************************/ +/* Write a string to the Serialize pretty file. */ +/***********************************************************************/ +bool JOUTPRT::WriteStr(const char* s) { + // This is temporary + if (B) { + fputs(EL, Stream); + M--; + + for (int i = 0; i < M; i++) + fputc('\t', Stream); + + B = false; + } // endif B + + fputs(s, Stream); + return false; +} // end of WriteStr + +/***********************************************************************/ +/* Write a character to the Serialize pretty file. 
*/ +/***********************************************************************/ +bool JOUTPRT::WriteChr(const char c) { + switch (c) { + case ':': + fputs(": ", Stream); + break; + case '{': + case '[': +#if 0 + if (M) + fputs(EL, Stream); + + for (int i = 0; i < M; i++) + fputc('\t', Stream); +#endif // 0 + + fputc(c, Stream); + fputs(EL, Stream); + M++; + + for (int i = 0; i < M; i++) + fputc('\t', Stream); + + break; + case '}': + case ']': + M--; + fputs(EL, Stream); + + for (int i = 0; i < M; i++) + fputc('\t', Stream); + + fputc(c, Stream); + B = true; + break; + case ',': + fputc(c, Stream); + fputs(EL, Stream); + + for (int i = 0; i < M; i++) + fputc('\t', Stream); + + B = false; + break; + default: + fputc(c, Stream); + } // endswitch c + + return false; +} // end of WriteChr + +/* --------------------------- Class JDOC ---------------------------- */ + /***********************************************************************/ /* Parse several items as being in an array. */ /***********************************************************************/ -PJAR JSON::ParseAsArray(PGLOBAL g, int& i, int pretty, int *ptyp) +PJAR JDOC::ParseAsArray(PGLOBAL g, int& i, int pretty, int *ptyp) { - if (pty[0] && (!pretty || pretty > 2)) { - PJAR jsp; + if (pty[0] && (!pretty || pretty > 2)) { + PJAR jsp; - if ((jsp = ParseArray(g, (i = 0))) && ptyp && pretty == 3) - *ptyp = (pty[0]) ? 0 : 3; + if ((jsp = ParseArray(g, (i = 0))) && ptyp && pretty == 3) + *ptyp = (pty[0]) ? 0 : 3; - return jsp; - } else - strcpy(g->Message, "More than one item in file"); + return jsp; + } else + strcpy(g->Message, "More than one item in file"); - return NULL; + return NULL; } // end of ParseAsArray /***********************************************************************/ /* Parse a JSON Array. */ /***********************************************************************/ -PJAR JSON::ParseArray(PGLOBAL g, int& i) +PJAR JDOC::ParseArray(PGLOBAL g, int& i) { - int level = 0; - bool b = (!i); + int level = 0; + bool b = (!i); PJAR jarp = new(g) JARRAY; for (; i < len; i++) @@ -235,11 +519,11 @@ PJAR JSON::ParseArray(PGLOBAL g, int& i) jarp->InitArray(g); return jarp; - case '\n': - if (!b) - pty[0] = pty[1] = false; - case '\r': - case ' ': + case '\n': + if (!b) + pty[0] = pty[1] = false; + case '\r': + case ' ': case '\t': break; default: @@ -253,11 +537,11 @@ PJAR JSON::ParseArray(PGLOBAL g, int& i) break; }; // endswitch s[i] - if (b) { - // Case of Pretty == 0 - jarp->InitArray(g); - return jarp; - } // endif b + if (b) { + // Case of Pretty == 0 + jarp->InitArray(g); + return jarp; + } // endif b throw ("Unexpected EOF in array"); } // end of ParseArray @@ -265,7 +549,7 @@ PJAR JSON::ParseArray(PGLOBAL g, int& i) /***********************************************************************/ /* Parse a JSON Object. 
*/ /***********************************************************************/ -PJOB JSON::ParseObject(PGLOBAL g, int& i) +PJOB JDOC::ParseObject(PGLOBAL g, int& i) { PSZ key; int level = 0; @@ -276,7 +560,7 @@ PJOB JSON::ParseObject(PGLOBAL g, int& i) switch (s[i]) { case '"': if (level < 2) { - key = ParseString(g, ++i); + key = ParseString(g, ++i); jpp = jobp->AddPair(g, key); level = 1; } else { @@ -287,7 +571,7 @@ PJOB JSON::ParseObject(PGLOBAL g, int& i) break; case ':': if (level == 1) { - jpp->Val = ParseValue(g, ++i); + jpp->Val = ParseValue(g, ++i); level = 2; } else { sprintf(g->Message, "Unexpected ':' near %.*s", ARGS); @@ -310,10 +594,10 @@ PJOB JSON::ParseObject(PGLOBAL g, int& i) } // endif level return jobp; - case '\n': - pty[0] = pty[1] = false; - case '\r': - case ' ': + case '\n': + pty[0] = pty[1] = false; + case '\r': + case ' ': case '\t': break; default: @@ -329,38 +613,38 @@ PJOB JSON::ParseObject(PGLOBAL g, int& i) /***********************************************************************/ /* Parse a JSON Value. */ /***********************************************************************/ -PJVAL JSON::ParseValue(PGLOBAL g, int& i) +PJVAL JDOC::ParseValue(PGLOBAL g, int& i) { - int n; PJVAL jvp = new(g) JVALUE; for (; i < len; i++) - switch (s[i]) { - case '\n': - pty[0] = pty[1] = false; - case '\r': - case ' ': - case '\t': - break; - default: - goto suite; - } // endswitch + switch (s[i]) { + case '\n': + pty[0] = pty[1] = false; + case '\r': + case ' ': + case '\t': + break; + default: + goto suite; + } // endswitch suite: switch (s[i]) { case '[': - jvp->Jsp = ParseArray(g, ++i); + jvp->Jsp = ParseArray(g, ++i); break; case '{': - jvp->Jsp = ParseObject(g, ++i); + jvp->Jsp = ParseObject(g, ++i); break; case '"': - jvp->Value = AllocateValue(g, ParseString(g, ++i), TYPE_STRING); + jvp->Val = AllocVal(g, TYPE_STRG); + jvp->Val->Strp = ParseString(g, ++i); break; case 't': if (!strncmp(s + i, "true", 4)) { - n = 1; - jvp->Value = AllocateValue(g, &n, TYPE_TINY); + jvp->Val = AllocVal(g, TYPE_BOOL); + jvp->Val->B = true; i += 3; } else goto err; @@ -368,8 +652,8 @@ PJVAL JSON::ParseValue(PGLOBAL g, int& i) break; case 'f': if (!strncmp(s + i, "false", 5)) { - n = 0; - jvp->Value = AllocateValue(g, &n, TYPE_TINY); + jvp->Val = AllocVal(g, TYPE_BOOL); + jvp->Val->B = false; i += 4; } else goto err; @@ -385,7 +669,7 @@ PJVAL JSON::ParseValue(PGLOBAL g, int& i) case '-': default: if (s[i] == '-' || isdigit(s[i])) - jvp->Value = ParseNumeric(g, i); + jvp->Val = ParseNumeric(g, i); else goto err; @@ -401,7 +685,7 @@ err: /***********************************************************************/ /* Unescape and parse a JSON string. */ /***********************************************************************/ -char *JSON::ParseString(PGLOBAL g, int& i) +char *JDOC::ParseString(PGLOBAL g, int& i) { uchar *p; int n = 0; @@ -488,15 +772,15 @@ char *JSON::ParseString(PGLOBAL g, int& i) /***********************************************************************/ /* Parse a JSON numeric value. 
*/ /***********************************************************************/ -PVAL JSON::ParseNumeric(PGLOBAL g, int& i) +PVL JDOC::ParseNumeric(PGLOBAL g, int& i) { char buf[50]; int n = 0; short nd = 0; - bool has_dot = false; - bool has_e = false; - bool found_digit = false; - PVAL valp = NULL; + bool has_dot = false; + bool has_e = false; + bool found_digit = false; + PVL vlp = NULL; for (; i < len; i++) { switch (s[i]) { @@ -545,15 +829,24 @@ PVAL JSON::ParseNumeric(PGLOBAL g, int& i) if (has_dot || has_e) { double dv = strtod(buf, NULL); - valp = AllocateValue(g, &dv, TYPE_DOUBLE, nd); + vlp = AllocVal(g, TYPE_DBL); + vlp->F = dv; + vlp->Nd = nd; } else { long long iv = strtoll(buf, NULL, 10); - valp = AllocateValue(g, &iv, TYPE_BIGINT); + if (iv > INT_MAX32 || iv < INT_MIN32) { + vlp = AllocVal(g, TYPE_BINT); + vlp->LLn = iv; + } else { + vlp = AllocVal(g, TYPE_INTG); + vlp->N = (int)iv; + } // endif iv + } // endif has i--; // Unstack following character - return valp; + return vlp; } else throw("No digit found"); @@ -561,138 +854,60 @@ PVAL JSON::ParseNumeric(PGLOBAL g, int& i) throw("Unexpected EOF in number"); } // end of ParseNumeric -/***********************************************************************/ -/* Serialize a JSON tree: */ -/***********************************************************************/ -PSZ Serialize(PGLOBAL g, PJSON jsp, char *fn, int pretty) -{ - PSZ str = NULL; - bool b = false, err = true; - JOUT *jp; - FILE *fs = NULL; - - g->Message[0] = 0; - - try { - if (!jsp) { - strcpy(g->Message, "Null json tree"); - throw 1; - } else if (!fn) { - // Serialize to a string - jp = new(g) JOUTSTR(g); - b = pretty == 1; - } else { - if (!(fs = fopen(fn, "wb"))) { - sprintf(g->Message, MSG(OPEN_MODE_ERROR), - "w", (int)errno, fn); - strcat(strcat(g->Message, ": "), strerror(errno)); - throw 2; - } else if (pretty >= 2) { - // Serialize to a pretty file - jp = new(g)JOUTPRT(g, fs); - } else { - // Serialize to a flat file - b = true; - jp = new(g)JOUTFILE(g, fs, pretty); - } // endif's - - } // endif's - - switch (jsp->GetType()) { - case TYPE_JAR: - err = SerializeArray(jp, (PJAR)jsp, b); - break; - case TYPE_JOB: - err = ((b && jp->Prty()) && jp->WriteChr('\t')); - err |= SerializeObject(jp, (PJOB)jsp); - break; - case TYPE_JVAL: - err = SerializeValue(jp, (PJVAL)jsp); - break; - default: - strcpy(g->Message, "Invalid json tree"); - } // endswitch Type - - if (fs) { - fputs(EL, fs); - fclose(fs); - str = (err) ? NULL : strcpy(g->Message, "Ok"); - } else if (!err) { - str = ((JOUTSTR*)jp)->Strp; - jp->WriteChr('\0'); - PlugSubAlloc(g, NULL, ((JOUTSTR*)jp)->N); - } else { - if (!g->Message[0]) - strcpy(g->Message, "Error in Serialize"); - - } // endif's - - } catch (int n) { - if (trace(1)) - htrc("Exception %d: %s\n", n, g->Message); - str = NULL; - } catch (const char *msg) { - strcpy(g->Message, msg); - str = NULL; - } // end catch - - return str; -} // end of Serialize - /***********************************************************************/ /* Serialize a JSON Array. 
*/ /***********************************************************************/ -bool SerializeArray(JOUT *js, PJAR jarp, bool b) +bool JDOC::SerializeArray(PJAR jarp, bool b) { bool first = true; - if (b) { - if (js->Prty()) { - if (js->WriteChr('[')) - return true; - else if (js->Prty() == 1 && (js->WriteStr(EL) || js->WriteChr('\t'))) - return true; + if (b) { + if (js->Prty()) { + if (js->WriteChr('[')) + return true; + else if (js->Prty() == 1 && (js->WriteStr(EL) || js->WriteChr('\t'))) + return true; - } // endif Prty + } // endif Prty - } else if (js->WriteChr('[')) - return true; + } else if (js->WriteChr('[')) + return true; for (int i = 0; i < jarp->size(); i++) { if (first) first = false; - else if ((!b || js->Prty()) && js->WriteChr(',')) + else if ((!b || js->Prty()) && js->WriteChr(',')) return true; - else if (b) { - if (js->Prty() < 2 && js->WriteStr(EL)) - return true; - else if (js->Prty() == 1 && js->WriteChr('\t')) - return true; + else if (b) { + if (js->Prty() < 2 && js->WriteStr(EL)) + return true; + else if (js->Prty() == 1 && js->WriteChr('\t')) + return true; - } // endif b + } // endif b - if (SerializeValue(js, jarp->GetValue(i))) + if (SerializeValue(jarp->GetValue(i))) return true; } // endfor i - if (b && js->Prty() == 1 && js->WriteStr(EL)) + if (b && js->Prty() == 1 && js->WriteStr(EL)) return true; - return ((!b || js->Prty()) && js->WriteChr(']')); + return ((!b || js->Prty()) && js->WriteChr(']')); } // end of SerializeArray /***********************************************************************/ /* Serialize a JSON Object. */ /***********************************************************************/ -bool SerializeObject(JOUT *js, PJOB jobp) +bool JDOC::SerializeObject(PJOB jobp) { bool first = true; if (js->WriteChr('{')) return true; - for (PJPR pair = jobp->First; pair; pair = pair->Next) { + for (PJPR pair = jobp->GetFirst(); pair; pair = pair->Next) { if (first) first = false; else if (js->WriteChr(',')) @@ -702,7 +917,7 @@ bool SerializeObject(JOUT *js, PJOB jobp) js->WriteStr(pair->Key) || js->WriteChr('"') || js->WriteChr(':') || - SerializeValue(js, pair->Val)) + SerializeValue(pair->Val)) return true; } // endfor i @@ -713,259 +928,70 @@ bool SerializeObject(JOUT *js, PJOB jobp) /***********************************************************************/ /* Serialize a JSON Value. */ /***********************************************************************/ -bool SerializeValue(JOUT *js, PJVAL jvp) +bool JDOC::SerializeValue(PJVAL jvp) { + char buf[64]; PJAR jap; PJOB jop; - PVAL valp; + PVL vlp; if ((jap = jvp->GetArray())) - return SerializeArray(js, jap, false); + return SerializeArray(jap, false); else if ((jop = jvp->GetObject())) - return SerializeObject(js, jop); - else if (!(valp = jvp->Value) || valp->IsNull()) + return SerializeObject(jop); + else if (!(vlp = jvp->Val)) return js->WriteStr("null"); - else switch (valp->GetType()) { - case TYPE_TINY: - return js->WriteStr(valp->GetTinyValue() ? "true" : "false"); - case TYPE_STRING: - return js->Escape(valp->GetCharValue()); + else switch (vlp->Type) { + case TYPE_BOOL: + return js->WriteStr(vlp->B ? 
"true" : "false"); + case TYPE_STRG: + case TYPE_DTM: + return js->Escape(vlp->Strp); + case TYPE_INTG: + sprintf(buf, "%d", vlp->N); + return js->WriteStr(buf); + case TYPE_BINT: + sprintf(buf, "%lld", vlp->LLn); + return js->WriteStr(buf); + case TYPE_DBL: + sprintf(buf, "%.*lf", vlp->Nd, vlp->F); + return js->WriteStr(buf); + case TYPE_NULL: + return js->WriteStr("null"); default: - if (valp->IsTypeNum()) { - char buf[32]; + return js->WriteStr("???"); // TODO + } // endswitch Type - return js->WriteStr(valp->GetCharString(buf)); - } // endif valp - - } // endswitch Type - - strcpy(js->g->Message, "Unrecognized value"); - return true; + strcpy(js->g->Message, "Unrecognized value"); + return true; } // end of SerializeValue -/* -------------------------- Class JOUTSTR -------------------------- */ - -/***********************************************************************/ -/* JOUTSTR constructor. */ -/***********************************************************************/ -JOUTSTR::JOUTSTR(PGLOBAL g) : JOUT(g) -{ - PPOOLHEADER pph = (PPOOLHEADER)g->Sarea; - - N = 0; - Max = pph->FreeBlk; - Max = (Max > 32) ? Max - 32 : Max; - Strp = (char*)PlugSubAlloc(g, NULL, 0); // Size not know yet -} // end of JOUTSTR constructor - -/***********************************************************************/ -/* Concatenate a string to the Serialize string. */ -/***********************************************************************/ -bool JOUTSTR::WriteStr(const char *s) -{ - if (s) { - size_t len = strlen(s); - - if (N + len > Max) - return true; - - memcpy(Strp + N, s, len); - N += len; - return false; - } else - return true; - -} // end of WriteStr - -/***********************************************************************/ -/* Concatenate a character to the Serialize string. */ -/***********************************************************************/ -bool JOUTSTR::WriteChr(const char c) -{ - if (N + 1 > Max) - return true; - - Strp[N++] = c; - return false; -} // end of WriteChr - -/***********************************************************************/ -/* Escape and Concatenate a string to the Serialize string. */ -/***********************************************************************/ -bool JOUTSTR::Escape(const char *s) -{ - WriteChr('"'); - - for (unsigned int i = 0; s[i]; i++) - switch (s[i]) { - case '"': - case '\\': - case '\t': - case '\n': - case '\r': - case '\b': - case '\f': WriteChr('\\'); - // fall through - default: - WriteChr(s[i]); - break; - } // endswitch s[i] - - WriteChr('"'); - return false; -} // end of Escape - -/* ------------------------- Class JOUTFILE -------------------------- */ - -/***********************************************************************/ -/* Write a string to the Serialize file. */ -/***********************************************************************/ -bool JOUTFILE::WriteStr(const char *s) -{ - // This is temporary - fputs(s, Stream); - return false; -} // end of WriteStr - -/***********************************************************************/ -/* Write a character to the Serialize file. */ -/***********************************************************************/ -bool JOUTFILE::WriteChr(const char c) -{ - // This is temporary - fputc(c, Stream); - return false; -} // end of WriteChr - -/***********************************************************************/ -/* Escape and Concatenate a string to the Serialize string. 
*/ -/***********************************************************************/ -bool JOUTFILE::Escape(const char *s) -{ - // This is temporary - fputc('"', Stream); - - for (unsigned int i = 0; s[i]; i++) - switch (s[i]) { - case '"': fputs("\\\"", Stream); break; - case '\\': fputs("\\\\", Stream); break; - case '\t': fputs("\\t", Stream); break; - case '\n': fputs("\\n", Stream); break; - case '\r': fputs("\\r", Stream); break; - case '\b': fputs("\\b", Stream); break; - case '\f': fputs("\\f", Stream); break; - default: - fputc(s[i], Stream); - break; - } // endswitch s[i] - - fputc('"', Stream); - return false; -} // end of Escape - -/* ------------------------- Class JOUTPRT --------------------------- */ - -/***********************************************************************/ -/* Write a string to the Serialize pretty file. */ -/***********************************************************************/ -bool JOUTPRT::WriteStr(const char *s) -{ - // This is temporary - if (B) { - fputs(EL, Stream); - M--; - - for (int i = 0; i < M; i++) - fputc('\t', Stream); - - B = false; - } // endif B - - fputs(s, Stream); - return false; -} // end of WriteStr - -/***********************************************************************/ -/* Write a character to the Serialize pretty file. */ -/***********************************************************************/ -bool JOUTPRT::WriteChr(const char c) -{ - switch (c) { - case ':': - fputs(": ", Stream); - break; - case '{': - case '[': -#if 0 - if (M) - fputs(EL, Stream); - - for (int i = 0; i < M; i++) - fputc('\t', Stream); -#endif // 0 - - fputc(c, Stream); - fputs(EL, Stream); - M++; - - for (int i = 0; i < M; i++) - fputc('\t', Stream); - - break; - case '}': - case ']': - M--; - fputs(EL, Stream); - - for (int i = 0; i < M; i++) - fputc('\t', Stream); - - fputc(c, Stream); - B = true; - break; - case ',': - fputc(c, Stream); - fputs(EL, Stream); - - for (int i = 0; i < M; i++) - fputc('\t', Stream); - - B = false; - break; - default: - fputc(c, Stream); - } // endswitch c - -return false; -} // end of WriteChr - /* -------------------------- Class JOBJECT -------------------------- */ /***********************************************************************/ /* Return the number of pairs in this object. */ /***********************************************************************/ -int JOBJECT::GetSize(bool b) -{ - if (b) { - // Return only non null pairs - int n = 0; +int JOBJECT::GetSize(bool b) { + int n = 0; - for (PJPR jpp = First; jpp; jpp = jpp->Next) - if (jpp->Val && !jpp->Val->IsNull()) - n++; + for (PJPR jpp = First; jpp; jpp = jpp->Next) + // If b return only non null pairs + if (!b || jpp->Val && !jpp->Val->IsNull()) + n++; - return n; - } else - return Size; - -} // end of GetSize + return n; +} // end of GetSize /***********************************************************************/ /* Add a new pair to an Object. 
*/ /***********************************************************************/ PJPR JOBJECT::AddPair(PGLOBAL g, PCSZ key) { - PJPR jpp = new(g) JPAIR(key); + PJPR jpp = (PJPR)PlugSubAlloc(g, NULL, sizeof(JPAIR)); + + jpp->Key = key; + jpp->Next = NULL; + jpp->Val = NULL; if (Last) Last->Next = jpp; @@ -973,7 +999,6 @@ PJPR JOBJECT::AddPair(PGLOBAL g, PCSZ key) First = jpp; Last = jpp; - Size++; return jpp; } // end of AddPair @@ -982,13 +1007,13 @@ PJPR JOBJECT::AddPair(PGLOBAL g, PCSZ key) /***********************************************************************/ PJAR JOBJECT::GetKeyList(PGLOBAL g) { - PJAR jarp = new(g) JARRAY(); + PJAR jarp = new(g) JARRAY(); - for (PJPR jpp = First; jpp; jpp = jpp->Next) - jarp->AddValue(g, new(g) JVALUE(g, jpp->GetKey())); + for (PJPR jpp = First; jpp; jpp = jpp->Next) + jarp->AddValue(g, new(g) JVALUE(g, jpp->Key)); - jarp->InitArray(g); - return jarp; + jarp->InitArray(g); + return jarp; } // end of GetKeyList /***********************************************************************/ @@ -996,13 +1021,13 @@ PJAR JOBJECT::GetKeyList(PGLOBAL g) /***********************************************************************/ PJAR JOBJECT::GetValList(PGLOBAL g) { - PJAR jarp = new(g) JARRAY(); + PJAR jarp = new(g) JARRAY(); - for (PJPR jpp = First; jpp; jpp = jpp->Next) - jarp->AddValue(g, jpp->GetVal()); + for (PJPR jpp = First; jpp; jpp = jpp->Next) + jarp->AddValue(g, jpp->Val); - jarp->InitArray(g); - return jarp; + jarp->InitArray(g); + return jarp; } // end of GetValList /***********************************************************************/ @@ -1024,6 +1049,9 @@ PSZ JOBJECT::GetText(PGLOBAL g, PSZ text) { int n; + if (!First) + return text; + if (!text) { text = (char*)PlugSubAlloc(g, NULL, 0); text[0] = 0; @@ -1031,26 +1059,24 @@ PSZ JOBJECT::GetText(PGLOBAL g, PSZ text) } else n = 0; - if (!First && n) - return NULL; - else if (n == 1 && Size == 1 && !strcmp(First->GetKey(), "$date")) { - int i; + if (n == 1 && !First->Next && !strcmp(First->Key, "$date")) { + int i; - First->Val->GetText(g, text); - i = (text[1] == '-' ? 2 : 1); + First->Val->GetText(g, text); + i = (text[1] == '-' ? 
2 : 1); - if (IsNum(text + i)) { - // Date is in milliseconds - int j = (int)strlen(text); + if (IsNum(text + i)) { + // Date is in milliseconds + int j = (int)strlen(text); - if (j >= 4 + i) - text[j - 3] = 0; // Change it to seconds - else - strcpy(text, " 0"); + if (j >= 4 + i) + text[j - 3] = 0; // Change it to seconds + else + strcpy(text, " 0"); - } // endif text + } // endif text - } else for (PJPR jp = First; jp; jp = jp->Next) + } else for (PJPR jp = First; jp; jp = jp->Next) jp->Val->GetText(g, text); if (n) @@ -1064,17 +1090,17 @@ PSZ JOBJECT::GetText(PGLOBAL g, PSZ text) /***********************************************************************/ bool JOBJECT::Merge(PGLOBAL g, PJSON jsp) { - if (jsp->GetType() != TYPE_JOB) { - strcpy(g->Message, "Second argument is not an object"); - return true; - } // endif Type + if (jsp->GetType() != TYPE_JOB) { + strcpy(g->Message, "Second argument is not an object"); + return true; + } // endif Type - PJOB jobp = (PJOB)jsp; + PJOB jobp = (PJOB)jsp; - for (PJPR jpp = jobp->First; jpp; jpp = jpp->Next) - SetValue(g, jpp->GetVal(), jpp->GetKey()); + for (PJPR jpp = jobp->First; jpp; jpp = jpp->Next) + SetValue(g, jpp->Val, jpp->Key); - return false; + return false; } // end of Marge; /***********************************************************************/ @@ -1082,7 +1108,7 @@ bool JOBJECT::Merge(PGLOBAL g, PJSON jsp) /***********************************************************************/ void JOBJECT::SetValue(PGLOBAL g, PJVAL jvp, PCSZ key) { - PJPR jp; + PJPR jp; for (jp = First; jp; jp = jp->Next) if (!strcmp(jp->Key, key)) { @@ -1102,15 +1128,14 @@ void JOBJECT::SetValue(PGLOBAL g, PJVAL jvp, PCSZ key) /***********************************************************************/ void JOBJECT::DeleteKey(PCSZ key) { - PJPR jp, *pjp = &First; + PJPR jp, *pjp = &First; - for (jp = First; jp; jp = jp->Next) - if (!strcmp(jp->Key, key)) { - *pjp = jp->Next; - Size--; - break; - } else - pjp = &jp->Next; + for (jp = First; jp; jp = jp->Next) + if (!strcmp(jp->Key, key)) { + *pjp = jp->Next; + break; + } else + pjp = &jp->Next; } // end of DeleteKey @@ -1133,19 +1158,19 @@ bool JOBJECT::IsNull(void) /***********************************************************************/ int JARRAY::GetSize(bool b) { - if (b) { - // Return only non null values - int n = 0; + if (b) { + // Return only non null values + int n = 0; - for (PJVAL jvp = First; jvp; jvp = jvp->Next) - if (!jvp->IsNull()) - n++; + for (PJVAL jvp = First; jvp; jvp = jvp->Next) + if (!jvp->IsNull()) + n++; - return n; - } else - return Size; + return n; + } else + return Size; -} // end of GetSize +} // end of GetSize /***********************************************************************/ /* Make the array of values from the values list. 
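DeleteKey above now simply unlinks the matching pair from the object's singly linked list, with no size counter left to maintain. The same unlink can be written with a pointer-to-pointer cursor, as in this standalone sketch (hypothetical names, not the CONNECT structures):

#include <string.h>

struct Pair { const char *key; Pair *next; };

// Remove the first pair whose key matches; first is updated when the
// head of the list is itself the match.
static void RemoveKey(Pair *&first, const char *key)
{
  for (Pair **pp = &first; *pp; pp = &(*pp)->next)
    if (!strcmp((*pp)->key, key)) {
      *pp = (*pp)->next;       // bypass the matching pair
      break;
    } // endif key

} // end of RemoveKey
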
*/ @@ -1166,12 +1191,12 @@ void JARRAY::InitArray(PGLOBAL g) } // endif Size for (i = 0, jvp = First; jvp; jvp = jvp->Next) - if (!jvp->Del) { - Mvals[i++] = jvp; - pjvp = &jvp->Next; - Last = jvp; - } else - *pjvp = jvp->Next; + if (!jvp->Del) { + Mvals[i++] = jvp; + pjvp = &jvp->Next; + Last = jvp; + } else + *pjvp = jvp->Next; } // end of InitArray @@ -1194,28 +1219,28 @@ PJVAL JARRAY::AddValue(PGLOBAL g, PJVAL jvp, int *x) if (!jvp) jvp = new(g) JVALUE; - if (x) { - int i = 0, n = *x; - PJVAL jp, *jpp = &First; + if (x) { + int i = 0, n = *x; + PJVAL jp, *jpp = &First; - for (jp = First; jp && i < n; i++, jp = *(jpp = &jp->Next)); + for (jp = First; jp && i < n; i++, jp = *(jpp = &jp->Next)); - (*jpp) = jvp; + (*jpp) = jvp; - if (!(jvp->Next = jp)) - Last = jvp; + if (!(jvp->Next = jp)) + Last = jvp; - } else { - if (!First) - First = jvp; - else if (Last == First) - First->Next = Last = jvp; - else - Last->Next = jvp; + } else { + if (!First) + First = jvp; + else if (Last == First) + First->Next = Last = jvp; + else + Last->Next = jvp; - Last = jvp; - Last->Next = NULL; - } // endif x + Last = jvp; + Last->Next = NULL; + } // endif x return jvp; } // end of AddValue @@ -1225,18 +1250,18 @@ PJVAL JARRAY::AddValue(PGLOBAL g, PJVAL jvp, int *x) /***********************************************************************/ bool JARRAY::Merge(PGLOBAL g, PJSON jsp) { - if (jsp->GetType() != TYPE_JAR) { - strcpy(g->Message, "Second argument is not an array"); - return true; - } // endif Type + if (jsp->GetType() != TYPE_JAR) { + strcpy(g->Message, "Second argument is not an array"); + return true; + } // endif Type - PJAR arp = (PJAR)jsp; + PJAR arp = (PJAR)jsp; - for (int i = 0; i < jsp->size(); i++) - AddValue(g, arp->GetValue(i)); + for (int i = 0; i < arp->size(); i++) + AddValue(g, arp->GetValue(i)); - InitArray(g); - return false; + InitArray(g); + return false; } // end of Merge /***********************************************************************/ @@ -1261,23 +1286,23 @@ bool JARRAY::SetValue(PGLOBAL g, PJVAL jvp, int n) /***********************************************************************/ PSZ JARRAY::GetText(PGLOBAL g, PSZ text) { - int n; - PJVAL jp; + int n; + PJVAL jp; - if (!text) { - text = (char*)PlugSubAlloc(g, NULL, 0); - text[0] = 0; - n = 1; - } else - n = 0; + if (!text) { + text = (char*)PlugSubAlloc(g, NULL, 0); + text[0] = 0; + n = 1; + } else + n = 0; - for (jp = First; jp; jp = jp->Next) - jp->GetText(g, text); + for (jp = First; jp; jp = jp->Next) + jp->GetText(g, text); - if (n) - PlugSubAlloc(g, NULL, strlen(text) + 1); + if (n) + PlugSubAlloc(g, NULL, strlen(text) + 1); - return text + n; + return text + n; } // end of GetText; /***********************************************************************/ @@ -1285,13 +1310,13 @@ PSZ JARRAY::GetText(PGLOBAL g, PSZ text) /***********************************************************************/ bool JARRAY::DeleteValue(int n) { - PJVAL jvp = GetValue(n); + PJVAL jvp = GetValue(n); - if (jvp) { - jvp->Del = true; - return false; - } else - return true; + if (jvp) { + jvp->Del = true; + return false; + } else + return true; } // end of DeleteValue @@ -1314,28 +1339,41 @@ bool JARRAY::IsNull(void) /***********************************************************************/ JVALUE::JVALUE(PJSON jsp) : JSON() { - if (jsp->GetType() == TYPE_JVAL) { - Jsp = jsp->GetJsp(); - Value = jsp->GetValue(); - } else { - Jsp = jsp; - Value = NULL; - } // endif Type + if (jsp->GetType() == TYPE_JVAL) { + Jsp = jsp->GetJsp(); + Val = 
((PJVAL)jsp)->GetVal(); + } else { + Jsp = jsp; + Val = NULL; + } // endif Type - Next = NULL; - Del = false; - Size = 1; -} // end of JVALUE constructor + Next = NULL; + Del = false; + Type = TYPE_JVAL; +} // end of JVALUE constructor + +/***********************************************************************/ +/* Constructor for a Val with a given string or numeric value. */ +/***********************************************************************/ +JVALUE::JVALUE(PGLOBAL g, PVL vlp) : JSON() +{ + Jsp = NULL; + Val = vlp; + Next = NULL; + Del = false; + Type = TYPE_JVAL; +} // end of JVALUE constructor /***********************************************************************/ /* Constructor for a Value with a given string or numeric value. */ /***********************************************************************/ -JVALUE::JVALUE(PGLOBAL g, PVAL valp) : JSON() -{ +JVALUE::JVALUE(PGLOBAL g, PVAL valp) : JSON() { Jsp = NULL; - Value = AllocateValue(g, valp); + Val = NULL; + SetValue(g, valp); Next = NULL; Del = false; + Type = TYPE_JVAL; } // end of JVALUE constructor /***********************************************************************/ @@ -1343,10 +1381,12 @@ JVALUE::JVALUE(PGLOBAL g, PVAL valp) : JSON() /***********************************************************************/ JVALUE::JVALUE(PGLOBAL g, PCSZ strp) : JSON() { - Jsp = NULL; - Value = AllocateValue(g, (void*)strp, TYPE_STRING); - Next = NULL; - Del = false; + Jsp = NULL; + Val = AllocVal(g, TYPE_STRG); + Val->Strp = (char*)strp; + Next = NULL; + Del = false; + Type = TYPE_JVAL; } // end of JVALUE constructor /***********************************************************************/ @@ -1356,8 +1396,8 @@ JTYP JVALUE::GetValType(void) { if (Jsp) return Jsp->GetType(); - else if (Value) - return (JTYP)Value->GetType(); + else if (Val) + return Val->Type; else return TYPE_NULL; @@ -1385,12 +1425,42 @@ PJAR JVALUE::GetArray(void) return NULL; } // end of GetArray +/***********************************************************************/ +/* Return the Value's as a Value class. */ +/***********************************************************************/ +PVAL JVALUE::GetValue(PGLOBAL g) +{ + PVAL valp = NULL; + + if (Val) + if (Val->Type == TYPE_STRG) + valp = AllocateValue(g, Val->Strp, Val->Type, Val->Nd); + else + valp = AllocateValue(g, Val, Val->Type, Val->Nd); + + return valp; +} // end of GetValue + /***********************************************************************/ /* Return the Value's Integer value. */ /***********************************************************************/ -int JVALUE::GetInteger(void) -{ - return (Value) ? Value->GetIntValue() : 0; +int JVALUE::GetInteger(void) { + int n; + + if (!Val) + n = 0; + else switch (Val->Type) { + case TYPE_INTG: n = Val->N; break; + case TYPE_DBL: n = (int)Val->F; break; + case TYPE_DTM: + case TYPE_STRG: n = atoi(Val->Strp); break; + case TYPE_BOOL: n = (Val->B) ? 1 : 0; break; + case TYPE_BINT: n = (int)Val->LLn; break; + default: + n = 0; + } // endswitch Type + + return n; } // end of GetInteger /***********************************************************************/ @@ -1398,7 +1468,22 @@ int JVALUE::GetInteger(void) /***********************************************************************/ long long JVALUE::GetBigint(void) { - return (Value) ? 
Value->GetBigintValue() : 0; + long long lln; + + if (!Val) + lln = 0; + else switch (Val->Type) { + case TYPE_BINT: lln = Val->LLn; break; + case TYPE_INTG: lln = (long long)Val->N; break; + case TYPE_DBL: lln = (long long)Val->F; break; + case TYPE_DTM: + case TYPE_STRG: lln = atoll(Val->Strp); break; + case TYPE_BOOL: lln = (Val->B) ? 1 : 0; break; + default: + lln = 0; + } // endswitch Type + + return lln; } // end of GetBigint /***********************************************************************/ @@ -1406,7 +1491,22 @@ long long JVALUE::GetBigint(void) /***********************************************************************/ double JVALUE::GetFloat(void) { - return (Value) ? Value->GetFloatValue() : 0.0; + double d; + + if (!Val) + d = 0.0; + else switch (Val->Type) { + case TYPE_DBL: d = Val->F; break; + case TYPE_BINT: d = (double)Val->LLn; break; + case TYPE_INTG: d = (double)Val->N; break; + case TYPE_DTM: + case TYPE_STRG: d = atof(Val->Strp); break; + case TYPE_BOOL: d = (Val->B) ? 1.0 : 0.0; break; + default: + d = 0.0; + } // endswitch Type + + return d; } // end of GetFloat /***********************************************************************/ @@ -1414,18 +1514,38 @@ double JVALUE::GetFloat(void) /***********************************************************************/ PSZ JVALUE::GetString(PGLOBAL g) { - char *p; + char buf[32]; + char *p = buf; - if (Value) { - char buf[32]; + if (Val) { + switch (Val->Type) { + case TYPE_DTM: + case TYPE_STRG: + p = Val->Strp; + break; + case TYPE_INTG: + sprintf(buf, "%d", Val->N); + break; + case TYPE_BINT: + sprintf(buf, "%lld", Val->LLn); + break; + case TYPE_DBL: + sprintf(buf, "%.*lf", Val->Nd, Val->F); + break; + case TYPE_BOOL: + p = (Val->B) ? "true" : "false"; + break; + case TYPE_NULL: + p = "null"; + break; + default: + p = NULL; + } // endswitch Type - if ((p = Value->GetCharString(buf)) == buf) - p = PlugDup(g, buf); + } else + p = NULL; - } else - p = NULL; - - return p; + return p; } // end of GetString /***********************************************************************/ @@ -1436,8 +1556,7 @@ PSZ JVALUE::GetText(PGLOBAL g, PSZ text) if (Jsp) return Jsp->GetText(g, text); - char buf[32]; - PSZ s = (Value) ? Value->GetCharString(buf) : NULL; + PSZ s = (Val) ? 
GetString(g) : NULL; if (s) strcat(strcat(text, " "), s); @@ -1449,32 +1568,74 @@ PSZ JVALUE::GetText(PGLOBAL g, PSZ text) void JVALUE::SetValue(PJSON jsp) { - if (jsp && jsp->GetType() == TYPE_JVAL) { - Jsp = jsp->GetJsp(); - Value = jsp->GetValue(); - } else { - Jsp = jsp; - Value = NULL; - } // endif Type + if (jsp && jsp->GetType() == TYPE_JVAL) { + Jsp = jsp->GetJsp(); + Val = ((PJVAL)jsp)->GetVal(); + } else { + Jsp = jsp; + Val = NULL; + } // endif Type -} // end of SetValue; +} // end of SetValue; + +void JVALUE::SetValue(PGLOBAL g, PVAL valp) +{ + if (!Val) + Val = AllocVal(g, TYPE_VAL); + + if (!valp || valp->IsNull()) { + Val->Type = TYPE_NULL; + } else switch (valp->GetType()) { + case TYPE_STRING: + case TYPE_DATE: + Val->Strp = valp->GetCharValue(); + Val->Type = TYPE_STRG; + break; + case TYPE_DOUBLE: + case TYPE_DECIM: + Val->F = valp->GetFloatValue(); + + if (IsTypeNum(valp->GetType())) + Val->Nd = valp->GetValPrec(); + + Val->Type = TYPE_DBL; + break; + case TYPE_TINY: + Val->B = valp->GetTinyValue() != 0; + Val->Type = TYPE_BOOL; + case TYPE_INT: + Val->N = valp->GetIntValue(); + Val->Type = TYPE_INTG; + break; + case TYPE_BIGINT: + Val->LLn = valp->GetBigintValue(); + Val->Type = TYPE_BINT; + break; + default: + sprintf(g->Message, "Unsupported typ %d\n", valp->GetType()); + throw(777); + } // endswitch Type + +} // end of SetValue /***********************************************************************/ /* Set the Value's value as the given integer. */ /***********************************************************************/ void JVALUE::SetInteger(PGLOBAL g, int n) { - Value = AllocateValue(g, &n, TYPE_INT); - Jsp = NULL; + Val = AllocVal(g, TYPE_INTG); + Val->N = n; + Jsp = NULL; } // end of SetInteger /***********************************************************************/ /* Set the Value's Boolean value as a tiny integer. */ /***********************************************************************/ -void JVALUE::SetTiny(PGLOBAL g, char n) +void JVALUE::SetBool(PGLOBAL g, bool b) { - Value = AllocateValue(g, &n, TYPE_TINY); - Jsp = NULL; + Val = AllocVal(g, TYPE_BOOL); + Val->B = b; + Jsp = NULL; } // end of SetTiny /***********************************************************************/ @@ -1482,8 +1643,9 @@ void JVALUE::SetTiny(PGLOBAL g, char n) /***********************************************************************/ void JVALUE::SetBigint(PGLOBAL g, long long ll) { - Value = AllocateValue(g, &ll, TYPE_BIGINT); - Jsp = NULL; + Val = AllocVal(g, TYPE_BINT); + Val->LLn = ll; + Jsp = NULL; } // end of SetBigint /***********************************************************************/ @@ -1491,17 +1653,21 @@ void JVALUE::SetBigint(PGLOBAL g, long long ll) /***********************************************************************/ void JVALUE::SetFloat(PGLOBAL g, double f) { - Value = AllocateValue(g, &f, TYPE_DOUBLE, 6); - Jsp = NULL; + Val = AllocVal(g, TYPE_DBL); + Val->F = f; + Val->Nd = 6; + Jsp = NULL; } // end of SetFloat /***********************************************************************/ /* Set the Value's value as the given string. 
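The setters above now write directly into the VAL union and tag it with the matching JTYP, instead of allocating a VALUE object for every value. Below is a standalone sketch of that tagged-union discipline; the enum and member names are illustrative stand-ins, not the definitions from json.h.

enum VTyp { V_NULL, V_STRG, V_INTG, V_BINT, V_DBL, V_BOOL };

struct Val {
  union {
    const char *Strp;         // string
    int         N;            // integer
    long long   LLn;          // big integer
    double      F;            // float
    bool        B;            // boolean
  };
  int  Nd;                    // decimals (or the "ci" flag for strings)
  VTyp Type;                  // says which union member is valid
}; // end of struct Val

// Each setter must leave Type consistent with the member it wrote.
static void SetFloat(Val &v, double f, int nd) { v.F = f; v.Nd = nd; v.Type = V_DBL; }
static void SetBool(Val &v, bool b) { v.B = b; v.Type = V_BOOL; }
static void SetNull(Val &v) { v.Type = V_NULL; }
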
*/ /***********************************************************************/ -void JVALUE::SetString(PGLOBAL g, PSZ s, short c) +void JVALUE::SetString(PGLOBAL g, PSZ s, int ci) { - Value = AllocateValue(g, s, TYPE_STRING, c); - Jsp = NULL; + Val = AllocVal(g, TYPE_STRG); + Val->Strp = s; + Val->Nd = ci; + Jsp = NULL; } // end of SetString /***********************************************************************/ @@ -1509,6 +1675,228 @@ void JVALUE::SetString(PGLOBAL g, PSZ s, short c) /***********************************************************************/ bool JVALUE::IsNull(void) { - return (Jsp) ? Jsp->IsNull() : (Value) ? Value->IsNull() : true; + return (Jsp) ? Jsp->IsNull() : (Val) ? Val->Type == TYPE_NULL : true; } // end of IsNull + +/* ---------------------------- Class SWAP --------------------------- */ + +/***********************************************************************/ +/* Replace all pointers by offsets or the opposite. */ +/***********************************************************************/ +void SWAP::SwapJson(PJSON jsp, bool move) +{ + if (move) + MoffJson(jsp); + else + MptrJson((PJSON)MakeOff(Base, jsp)); + + return; +} // end of SwapJson + +/***********************************************************************/ +/* Replace all pointers by offsets. */ +/***********************************************************************/ +size_t SWAP::MoffJson(PJSON jsp) { + size_t res; + + if (jsp) + switch (jsp->Type) { + case TYPE_JAR: + res = MoffArray((PJAR)jsp); + break; + case TYPE_JOB: + res = MoffObject((PJOB)jsp); + break; + case TYPE_JVAL: + res = MoffJValue((PJVAL)jsp); + break; + default: + throw "Invalid json tree"; + } // endswitch Type + + return res; +} // end of MoffJson + +/***********************************************************************/ +/* Replace all array pointers by offsets. */ +/***********************************************************************/ +size_t SWAP::MoffArray(PJAR jarp) +{ + if (jarp->First) { + for (int i = 0; i < jarp->Size; i++) + jarp->Mvals[i] = (PJVAL)MakeOff(Base, jarp->Mvals[i]); + + jarp->First = (PJVAL)MoffJValue(jarp->First); + jarp->Last = (PJVAL)MakeOff(Base, jarp->Last); + } // endif First + + return MakeOff(Base, jarp); +} // end of MoffArray + +/***********************************************************************/ +/* Replace all object pointers by offsets. */ +/***********************************************************************/ +size_t SWAP::MoffObject(PJOB jobp) { + if (jobp->First) { + jobp->First = (PJPR)MoffPair(jobp->First); + jobp->Last = (PJPR)MakeOff(Base, jobp->Last); + } // endif First + + return MakeOff(Base, jobp); +} // end of MoffObject + +/***********************************************************************/ +/* Replace all pair pointers by offsets. */ +/***********************************************************************/ +size_t SWAP::MoffPair(PJPR jpp) { + jpp->Key = (PCSZ)MakeOff(Base, (void*)jpp->Key); + + if (jpp->Val) + jpp->Val = (PJVAL)MoffJValue(jpp->Val); + + if (jpp->Next) + jpp->Next = (PJPR)MoffPair(jpp->Next); + + return MakeOff(Base, jpp); +} // end of MoffPair + +/***********************************************************************/ +/* Replace all jason value pointers by offsets. 
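The Moff* methods above turn every pointer of the parsed tree into an offset from Base, which is what makes the sub-allocated block portable: it can be written to a file and mapped back at a different address. The helpers they depend on can be pictured as below; this is only a sketch of the idea, the engine's own MakeOff/MakePtr are defined outside this excerpt.

#include <stddef.h>

static inline size_t MakeOff(void *base, void *ptr)
{
  return (ptr) ? (size_t)((char*)ptr - (char*)base) : 0;
} // end of MakeOff

static inline void *MakePtr(void *base, size_t off)
{
  return (off) ? (void*)((char*)base + off) : NULL;
} // end of MakePtr

Because the SWAP constructor (see json.h below) sets Base a few bytes below the tree root, no node ever lands at offset 0, so the value 0 can safely stand for a null pointer.
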
*/ +/***********************************************************************/ +size_t SWAP::MoffJValue(PJVAL jvp) { + if (!jvp->Del) { + if (jvp->Jsp) + jvp->Jsp = (PJSON)MoffJson(jvp->Jsp); + + if (jvp->Val) + jvp->Val = (PVL)MoffVal(jvp->Val); + + } // endif Del + + if (jvp->Next) + jvp->Next = (PJVAL)MoffJValue(jvp->Next); + + return MakeOff(Base, jvp); +} // end of MoffJValue + +/***********************************************************************/ +/* Replace string pointers by offset. */ +/***********************************************************************/ +size_t SWAP::MoffVal(PVL vlp) { + if (vlp->Type == TYPE_STRG) + vlp->Strp = (PSZ)MakeOff(Base, (vlp->Strp)); + + return MakeOff(Base, vlp); +} // end of MoffVal + +/***********************************************************************/ +/* Replace all offsets by pointers. */ +/***********************************************************************/ +PJSON SWAP::MptrJson(PJSON ojp) { // ojp is an offset + PJSON jsp = (PJSON)MakePtr(Base, (size_t)ojp); + + if (ojp) + switch (jsp->Type) { + case TYPE_JAR: + jsp = MptrArray((PJAR)ojp); + break; + case TYPE_JOB: + jsp = MptrObject((PJOB)ojp); + break; + case TYPE_JVAL: + jsp = MptrJValue((PJVAL)ojp); + break; + default: + throw "Invalid json tree"; + } // endswitch Type + + return jsp; +} // end of MptrJson + +/***********************************************************************/ +/* Replace all array offsets by pointers. */ +/***********************************************************************/ +PJAR SWAP::MptrArray(PJAR ojar) { + PJAR jarp = (PJAR)MakePtr(Base, (size_t)ojar); + + jarp = (PJAR)new((size_t)jarp) JARRAY(NULL); + + if (jarp->First) { + for (int i = 0; i < jarp->Size; i++) + jarp->Mvals[i] = (PJVAL)MakePtr(Base, (size_t)jarp->Mvals[i]); + + jarp->First = (PJVAL)MptrJValue(jarp->First); + jarp->Last = (PJVAL)MakePtr(Base, (size_t)jarp->Last); + } // endif First + + return jarp; +} // end of MptrArray + +/***********************************************************************/ +/* Replace all object offsets by pointers. */ +/***********************************************************************/ +PJOB SWAP::MptrObject(PJOB ojob) { + PJOB jobp = (PJOB)MakePtr(Base, (size_t)ojob); + + jobp = (PJOB)new((size_t)jobp) JOBJECT(NULL); + + if (jobp->First) { + jobp->First = (PJPR)MptrPair(jobp->First); + jobp->Last = (PJPR)MakePtr(Base, (size_t)jobp->Last); + } // endif First + + return jobp; +} // end of MptrObject + +/***********************************************************************/ +/* Replace all pair offsets by pointers. */ +/***********************************************************************/ +PJPR SWAP::MptrPair(PJPR ojp) { + PJPR jpp = (PJPR)MakePtr(Base, (size_t)ojp); + + jpp->Key = (PCSZ)MakePtr(Base, (size_t)jpp->Key); + + if (jpp->Val) + jpp->Val = (PJVAL)MptrJValue(jpp->Val); + + if (jpp->Next) + jpp->Next = (PJPR)MptrPair(jpp->Next); + + return jpp; +} // end of MptrPair + +/***********************************************************************/ +/* Replace all value offsets by pointers. 
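Going the other way, the Mptr* methods rebuild real pointers and then re-run each class constructor in place (new((size_t)jobp) JOBJECT(NULL) and the like) so that the restored objects get working vtable pointers again. A self-contained sketch of that placement-new revival trick, using an illustrative Node type:

#include <new>

struct Node {
  int Type;                    // plain data: keeps its on-disk value
  Node(int) {}                 // "revival" constructor: writes nothing
  virtual ~Node() {}
  virtual int GetKind(void) { return Type; }
}; // end of struct Node

// Re-running the constructor on the mapped storage rebuilds the vptr
// without disturbing the data members the constructor leaves alone.
static Node *Revive(void *raw)
{
  return new(raw) Node(0);
} // end of Revive

This appears to be the role of the JARRAY(int), JOBJECT(int) and JVALUE(int) constructors declared further down in json.h: they initialize nothing, so the data read back from the file is preserved while the vptr is refreshed.
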
*/ +/***********************************************************************/ +PJVAL SWAP::MptrJValue(PJVAL ojv) { + PJVAL jvp = (PJVAL)MakePtr(Base, (size_t)ojv); + + jvp = (PJVAL)new((size_t)jvp) JVALUE(0); + + if (!jvp->Del) { + if (jvp->Jsp) + jvp->Jsp = (PJSON)MptrJson(jvp->Jsp); + + if (jvp->Val) + jvp->Val = (PVL)MptrVal(jvp->Val); + + } // endif Del + + if (jvp->Next) + jvp->Next = (PJVAL)MptrJValue(jvp->Next); + + return jvp; +} // end of MptrJValue + +/***********************************************************************/ +/* Replace string offsets by a pointer. */ +/***********************************************************************/ +PVL SWAP::MptrVal(PVL ovl) { + PVL vlp = (PVL)MakePtr(Base, (size_t)ovl); + + if (vlp->Type == TYPE_STRG) + vlp->Strp = (PSZ)MakePtr(Base, (size_t)vlp->Strp); + + return vlp; +} // end of MptrValue diff --git a/storage/connect/json.h b/storage/connect/json.h index bc94b372133..c457a3fec45 100644 --- a/storage/connect/json.h +++ b/storage/connect/json.h @@ -27,27 +27,48 @@ enum JTYP {TYPE_NULL = TYPE_VOID, TYPE_JOB, TYPE_JVAL}; +class JDOC; class JOUT; class JSON; -class JMAP; class JVALUE; class JOBJECT; class JARRAY; -typedef class JPAIR *PJPR; +typedef class JDOC *PJDOC; typedef class JSON *PJSON; typedef class JVALUE *PJVAL; typedef class JOBJECT *PJOB; typedef class JARRAY *PJAR; -typedef struct { - char *str; - int len; - } STRG, *PSG; - // BSON size should be equal on Linux and Windows #define BMX 255 -typedef struct BSON* PBSON; +typedef struct BSON *PBSON; +typedef struct JPAIR *PJPR; +typedef struct VAL *PVL; + +/***********************************************************************/ +/* Structure JPAIR. The pairs of a json Object. */ +/***********************************************************************/ +struct JPAIR { + PCSZ Key; // This pair key name + PJVAL Val; // To the value of the pair + PJPR Next; // To the next pair +}; // end of struct JPAIR + +/***********************************************************************/ +/* Structure VAL (string, int, float, bool or null) */ +/***********************************************************************/ +struct VAL { + union { + char *Strp; // Ptr to a string + int N; // An integer value + long long LLn; // A big integer value + double F; // A float value + bool B; // True or false + }; + int Nd; // Decimal number + JTYP Type; // The value type +}; // end of struct VAL /***********************************************************************/ /* Structure used to return binary json to Json UDF functions. */ @@ -65,176 +86,111 @@ struct BSON { }; // end of struct BSON PBSON JbinAlloc(PGLOBAL g, UDF_ARGS* args, ulong len, PJSON jsp); - +PVL AllocVal(PGLOBAL g, JTYP type); char *NextChr(PSZ s, char sep); char *GetJsonNull(void); +const char* GetFmt(int type, bool un); -PJSON ParseJson(PGLOBAL g, char* s, int n, int* prty = NULL, bool* b = NULL); +PJSON ParseJson(PGLOBAL g, char* s, size_t n, int* prty = NULL, bool* b = NULL); PSZ Serialize(PGLOBAL g, PJSON jsp, char *fn, int pretty); -bool SerializeArray(JOUT *js, PJAR jarp, bool b); -bool SerializeObject(JOUT *js, PJOB jobp); -bool SerializeValue(JOUT *js, PJVAL jvp); -char *NextChr(PSZ s, char sep); DllExport bool IsNum(PSZ s); /***********************************************************************/ -/* Class JOUT. Used by Serialize. */ +/* Class JDOC. The class for parsing and serializing json documents. 
*/ /***********************************************************************/ -class JOUT : public BLOCK { - public: - JOUT(PGLOBAL gp) : BLOCK() {g = gp; Pretty = 3;} +class JDOC: public BLOCK { + friend PJSON ParseJson(PGLOBAL, char*, size_t, int*, bool*); + friend PSZ Serialize(PGLOBAL, PJSON, char*, int); +public: + JDOC(void) : js(NULL), s(NULL), len(0), pty(NULL) {} - virtual bool WriteStr(const char *s) = 0; - virtual bool WriteChr(const char c) = 0; - virtual bool Escape(const char *s) = 0; - int Prty(void) {return Pretty;} - - // Member - PGLOBAL g; - int Pretty; -}; // end of class JOUT - -/***********************************************************************/ -/* Class JOUTSTR. Used to Serialize to a string. */ -/***********************************************************************/ -class JOUTSTR : public JOUT { - public: - JOUTSTR(PGLOBAL g); - - virtual bool WriteStr(const char *s); - virtual bool WriteChr(const char c); - virtual bool Escape(const char *s); - - // Member - char *Strp; // The serialized string - size_t N; // Position of next char - size_t Max; // String max size -}; // end of class JOUTSTR - -/***********************************************************************/ -/* Class JOUTFILE. Used to Serialize to a file. */ -/***********************************************************************/ -class JOUTFILE : public JOUT { - public: - JOUTFILE(PGLOBAL g, FILE *str, int pty) : JOUT(g) {Stream = str; Pretty = pty;} - - virtual bool WriteStr(const char *s); - virtual bool WriteChr(const char c); - virtual bool Escape(const char *s); - - // Member - FILE *Stream; -}; // end of class JOUTFILE - -/***********************************************************************/ -/* Class JOUTPRT. Used to Serialize to a pretty file. */ -/***********************************************************************/ -class JOUTPRT : public JOUTFILE { - public: - JOUTPRT(PGLOBAL g, FILE *str) : JOUTFILE(g, str, 2) {M = 0; B = false;} - - virtual bool WriteStr(const char *s); - virtual bool WriteChr(const char c); - - // Member - int M; - bool B; -}; // end of class JOUTPRT - -/***********************************************************************/ -/* Class PAIR. The pairs of a json Object. */ -/***********************************************************************/ -class JPAIR : public BLOCK { - friend class JOBJECT; - friend class JSNX; - friend class JSON; - friend bool SerializeObject(JOUT *, PJOB); - public: - JPAIR(PCSZ key) : BLOCK() {Key = key; Val = NULL; Next = NULL;} - - inline PCSZ GetKey(void) {return Key;} - inline PJVAL GetVal(void) {return Val;} - inline PJPR GetNext(void) {return Next;} - - protected: - PCSZ Key; // This pair key name - PJVAL Val; // To the value of the pair - PJPR Next; // To the next pair -}; // end of class JPAIR - -/***********************************************************************/ -/* Class JSON. The base class for all other json classes. 
*/ -/***********************************************************************/ -class JSON : public BLOCK { - friend PJSON ParseJson(PGLOBAL, char*, int, int*, bool*); - public: - JSON(void) : s(NULL), len(0), pty(NULL) {Size = 0;} - - int size(void) {return Size;} - virtual int GetSize(bool b) {return Size;} - virtual void Clear(void) {Size = 0;} - virtual JTYP GetType(void) {return TYPE_JSON;} - virtual JTYP GetValType(void) {X return TYPE_JSON;} - virtual void InitArray(PGLOBAL g) {X} -//virtual PJVAL AddValue(PGLOBAL g, PJVAL jvp = NULL, int *x = NULL) {X return NULL;} - virtual PJPR AddPair(PGLOBAL g, PCSZ key) {X return NULL;} - virtual PJAR GetKeyList(PGLOBAL g) {X return NULL;} - virtual PJAR GetValList(PGLOBAL g) {X return NULL;} - virtual PJVAL GetValue(const char *key) {X return NULL;} - virtual PJOB GetObject(void) {return NULL;} - virtual PJAR GetArray(void) {return NULL;} - virtual PJVAL GetValue(int i) {X return NULL;} - virtual PVAL GetValue(void) {X return NULL;} - virtual PJSON GetJsp(void) { X return NULL; } - virtual PJSON GetJson(void) { X return NULL; } - virtual PJPR GetFirst(void) {X return NULL;} - virtual int GetInteger(void) {X return 0;} - virtual double GetFloat() {X return 0.0;} - virtual PSZ GetString(PGLOBAL g) {X return NULL;} - virtual PSZ GetText(PGLOBAL g, PSZ text) {X return NULL;} - virtual bool Merge(PGLOBAL g, PJSON jsp) { X return true; } - virtual bool SetValue(PGLOBAL g, PJVAL jvp, int i) { X return true; } - virtual void SetValue(PGLOBAL g, PJVAL jvp, PCSZ key) {X} - virtual void SetValue(PVAL valp) {X} - virtual void SetValue(PJSON jsp) {X} - virtual void SetString(PGLOBAL g, PSZ s, short c) {X} - virtual void SetInteger(PGLOBAL g, int n) {X} - virtual void SetFloat(PGLOBAL g, double f) {X} - virtual void DeleteKey(PCSZ k) {X} - virtual bool DeleteValue(int i) {X return true;} - virtual bool IsNull(void) {X return true;} + void SetJp(JOUT* jp) { js = jp; } protected: PJAR ParseArray(PGLOBAL g, int& i); PJOB ParseObject(PGLOBAL g, int& i); PJVAL ParseValue(PGLOBAL g, int& i); char *ParseString(PGLOBAL g, int& i); - PVAL ParseNumeric(PGLOBAL g, int& i); + PVL ParseNumeric(PGLOBAL g, int& i); PJAR ParseAsArray(PGLOBAL g, int& i, int pretty, int *ptyp); + bool SerializeArray(PJAR jarp, bool b); + bool SerializeObject(PJOB jobp); + bool SerializeValue(PJVAL jvp); - // Members - int Size; - - // Only used when parsing + // Members used when parsing and serializing private: + JOUT* js; char *s; int len; bool *pty; +}; // end of class JDOC + +/***********************************************************************/ +/* Class JSON. The base class for all other json classes. 
*/ +/***********************************************************************/ +class JSON : public BLOCK { +public: + // Constructor + JSON(void) { Type = TYPE_JSON; } + JSON(int) {} + + // Implementation + inline JTYP GetType(void) { return Type; } + + // Methods + virtual int size(void) { return 1; } + virtual JTYP GetValType(void) { X return TYPE_JSON; } + virtual void InitArray(PGLOBAL g) { X } + //virtual PJVAL AddValue(PGLOBAL g, PJVAL jvp = NULL, int *x = NULL) {X return NULL;} + virtual PJPR AddPair(PGLOBAL g, PCSZ key) { X return NULL; } + virtual void Clear(void) { X } + virtual PJAR GetKeyList(PGLOBAL g) { X return NULL; } + virtual PJAR GetValList(PGLOBAL g) { X return NULL; } + virtual PJVAL GetValue(const char* key) { X return NULL; } + virtual PJOB GetObject(void) { return NULL; } + virtual PJAR GetArray(void) { return NULL; } + virtual PJVAL GetValue(int i) { X return NULL; } + virtual int GetSize(bool b) { X return 0; } + //virtual PVL GetVal(void) { X return NULL; } + virtual PJSON GetJsp(void) { X return NULL; } + virtual PJSON GetJson(void) { X return NULL; } + virtual PJPR GetFirst(void) { X return NULL; } + virtual int GetInteger(void) { X return 0; } + virtual double GetFloat() { X return 0.0; } + virtual PSZ GetString(PGLOBAL g) { X return NULL; } + virtual PSZ GetText(PGLOBAL g, PSZ text) { X return NULL; } + virtual bool Merge(PGLOBAL g, PJSON jsp) { X return true; } + virtual bool SetValue(PGLOBAL g, PJVAL jvp, int i) { X return true; } + virtual void SetValue(PGLOBAL g, PJVAL jvp, PCSZ key) { X } + virtual void SetVal(PVL vlp) { X } + virtual void SetValue(PGLOBAL g, PVAL valp) { X } + virtual void SetValue(PJSON jsp) { X } + virtual void SetString(PGLOBAL g, PSZ s, short c) { X } + virtual void SetInteger(PGLOBAL g, int n) { X } + virtual void SetFloat(PGLOBAL g, double f) { X } + virtual void DeleteKey(PCSZ k) { X } + virtual bool DeleteValue(int i) { X return true; } + virtual bool IsNull(void) { X return true; } + + // Members + JTYP Type; }; // end of class JSON /***********************************************************************/ /* Class JOBJECT: contains a list of value pairs. */ /***********************************************************************/ class JOBJECT : public JSON { - friend bool SerializeObject(JOUT *, PJOB); + friend class JDOC; friend class JSNX; - public: - JOBJECT(void) : JSON() {First = Last = NULL;} + friend class SWAP; +public: + JOBJECT(void) : JSON() { Type = TYPE_JOB; First = Last = NULL; } + JOBJECT(int i) : JSON(i) {} - using JSON::GetValue; - using JSON::SetValue; - virtual void Clear(void) {First = Last = NULL; Size = 0;} - virtual JTYP GetType(void) {return TYPE_JOB;} + //using JSON::GetVal; + //using JSON::SetVal; + virtual void Clear(void) {First = Last = NULL;} +//virtual JTYP GetValType(void) {return TYPE_JOB;} virtual PJPR GetFirst(void) {return First;} virtual int GetSize(bool b); virtual PJPR AddPair(PGLOBAL g, PCSZ key); @@ -257,13 +213,16 @@ class JOBJECT : public JSON { /* Class JARRAY. 
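One detail worth noting in the new base class: the type tag is now a plain JTYP member set by each constructor, so GetType() is an inline read that needs no vtable, and the swap code can read Type from a freshly mapped node before that node has been revived. A short illustrative sketch of the layout (names are not the CONNECT ones):

enum NodeType { NT_OBJECT, NT_ARRAY, NT_VALUE };

class Node {
public:
  explicit Node(NodeType t) : Type(t) {}
  Node(int) {}                              // revival constructor (see above)
  NodeType GetType(void) { return Type; }   // plain member read, no vtable needed
  virtual ~Node() {}
  virtual int Size(void) { return 1; }
protected:
  NodeType Type;
}; // end of class Node

class ArrayNode : public Node {
public:
  ArrayNode(void) : Node(NT_ARRAY), Count(0) {}
  virtual int Size(void) { return Count; }
protected:
  int Count;                                // number of elements
}; // end of class ArrayNode
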
*/ /***********************************************************************/ class JARRAY : public JSON { - friend PJAR ParseArray(PGLOBAL, int&, STRG&, bool*); + friend class SWAP; public: - JARRAY(void) : JSON() {Alloc = 0; First = Last = NULL; Mvals = NULL;} + JARRAY(void) : JSON() + { Type = TYPE_JAR; Alloc = 0; First = Last = NULL; Mvals = NULL; } + JARRAY(int i) : JSON(i) {} - using JSON::GetValue; - using JSON::SetValue; + //using JSON::GetVal; + //using JSON::SetVal; virtual void Clear(void) {First = Last = NULL; Size = 0;} + virtual int size(void) { return Size; } virtual JTYP GetType(void) {return TYPE_JAR;} virtual PJAR GetArray(void) {return this;} virtual int GetSize(bool b); @@ -278,6 +237,7 @@ class JARRAY : public JSON { protected: // Members + int Size; // The number of items in the array int Alloc; // The Mvals allocated size PJVAL First; // Used when constructing PJVAL Last; // Last constructed value @@ -292,22 +252,26 @@ class JVALUE : public JSON { friend class JSNX; friend class JSONCOL; friend class JSON; - friend bool SerializeValue(JOUT*, PJVAL); - public: - JVALUE(void) : JSON() {Clear();} + friend class JDOC; + friend class SWAP; +public: + JVALUE(void) : JSON() { Type = TYPE_JVAL; Clear(); } JVALUE(PJSON jsp); + JVALUE(PGLOBAL g, PVL vlp); JVALUE(PGLOBAL g, PVAL valp); JVALUE(PGLOBAL g, PCSZ strp); + JVALUE(int i) : JSON(i) {} - using JSON::GetValue; - using JSON::SetValue; + //using JSON::GetVal; + //using JSON::SetVal; virtual void Clear(void) - {Jsp = NULL; Value = NULL; Next = NULL; Del = false; Size = 1;} + {Jsp = NULL; Val = NULL; Next = NULL; Del = false;} virtual JTYP GetType(void) {return TYPE_JVAL;} virtual JTYP GetValType(void); virtual PJOB GetObject(void); virtual PJAR GetArray(void); - virtual PVAL GetValue(void) {return Value;} + inline PVL GetVal(void) {return Val;} + PVAL GetValue(PGLOBAL g); virtual PJSON GetJsp(void) {return Jsp;} virtual PJSON GetJson(void) { return (Jsp ? Jsp : this); } virtual int GetInteger(void); @@ -316,17 +280,118 @@ class JVALUE : public JSON { virtual PSZ GetString(PGLOBAL g); virtual PSZ GetText(PGLOBAL g, PSZ text); virtual void SetValue(PJSON jsp); - virtual void SetValue(PVAL valp) { Value = valp; Jsp = NULL; } - virtual void SetString(PGLOBAL g, PSZ s, short c = 0); + virtual void SetValue(PGLOBAL g, PVAL valp); + inline void SetVal(PVL vlp) { Val = vlp; } + virtual void SetString(PGLOBAL g, PSZ s, int ci = 0); virtual void SetInteger(PGLOBAL g, int n); virtual void SetBigint(PGLOBAL g, longlong ll); virtual void SetFloat(PGLOBAL g, double f); - virtual void SetTiny(PGLOBAL g, char f); + virtual void SetBool(PGLOBAL g, bool b); virtual bool IsNull(void); protected: PJSON Jsp; // To the json value - PVAL Value; // The numeric value + PVL Val; // To the string or numeric value PJVAL Next; // Next value in array bool Del; // True when deleted }; // end of class JVALUE + + +/***********************************************************************/ +/* Class JOUT. Used by Serialize. */ +/***********************************************************************/ +class JOUT : public BLOCK { +public: + JOUT(PGLOBAL gp) : BLOCK() { g = gp; Pretty = 3; } + + virtual bool WriteStr(const char* s) = 0; + virtual bool WriteChr(const char c) = 0; + virtual bool Escape(const char* s) = 0; + int Prty(void) { return Pretty; } + + // Member + PGLOBAL g; + int Pretty; +}; // end of class JOUT + +/***********************************************************************/ +/* Class JOUTSTR. Used to Serialize to a string. 
*/ +/***********************************************************************/ +class JOUTSTR : public JOUT { +public: + JOUTSTR(PGLOBAL g); + + virtual bool WriteStr(const char* s); + virtual bool WriteChr(const char c); + virtual bool Escape(const char* s); + + // Member + char* Strp; // The serialized string + size_t N; // Position of next char + size_t Max; // String max size +}; // end of class JOUTSTR + +/***********************************************************************/ +/* Class JOUTFILE. Used to Serialize to a file. */ +/***********************************************************************/ +class JOUTFILE : public JOUT { +public: + JOUTFILE(PGLOBAL g, FILE* str, int pty) : JOUT(g) { Stream = str; Pretty = pty; } + + virtual bool WriteStr(const char* s); + virtual bool WriteChr(const char c); + virtual bool Escape(const char* s); + + // Member + FILE* Stream; +}; // end of class JOUTFILE + +/***********************************************************************/ +/* Class JOUTPRT. Used to Serialize to a pretty file. */ +/***********************************************************************/ +class JOUTPRT : public JOUTFILE { +public: + JOUTPRT(PGLOBAL g, FILE* str) : JOUTFILE(g, str, 2) { M = 0; B = false; } + + virtual bool WriteStr(const char* s); + virtual bool WriteChr(const char c); + + // Member + int M; + bool B; +}; // end of class JOUTPRT + + +/***********************************************************************/ +/* Class SWAP. Used to make or unmake a JSON tree movable. */ +/* This is done by making all pointers to offsets. */ +/***********************************************************************/ +class SWAP : public BLOCK { +public: + // Constructor + SWAP(PGLOBAL g, PJSON jsp) + { + G = g, Base = (char*)jsp - 8; + } + + // Methods + void SwapJson(PJSON jsp, bool move); + +protected: + size_t MoffJson(PJSON jnp); + size_t MoffArray(PJAR jarp); + size_t MoffObject(PJOB jobp); + size_t MoffJValue(PJVAL jvp); + size_t MoffPair(PJPR jpp); + size_t MoffVal(PVL vlp); + PJSON MptrJson(PJSON jnp); + PJAR MptrArray(PJAR jarp); + PJOB MptrObject(PJOB jobp); + PJVAL MptrJValue(PJVAL jvp); + PJPR MptrPair(PJPR jpp); + PVL MptrVal(PVL vlp); + + // Member + PGLOBAL G, NG; + void *Base; +}; // end of class SWAP diff --git a/storage/connect/jsonudf.cpp b/storage/connect/jsonudf.cpp index 06164f4ed78..6354b92107a 100644 --- a/storage/connect/jsonudf.cpp +++ b/storage/connect/jsonudf.cpp @@ -355,11 +355,20 @@ void JSNX::SetJsonValue(PGLOBAL g, PVAL vp, PJVAL val, int n) if (Jb) { vp->SetValue_psz(Serialize(g, val->GetJsp(), NULL, 0)); } else switch (val->GetValType()) { + case TYPE_DTM: case TYPE_STRG: + vp->SetValue_psz(val->GetString(g)); + break; case TYPE_INTG: case TYPE_BINT: + vp->SetValue(val->GetInteger()); + break; case TYPE_DBL: - vp->SetValue_pval(val->GetValue()); + if (vp->IsTypeNum()) + vp->SetValue(val->GetFloat()); + else // Get the proper number of decimals + vp->SetValue_psz(val->GetString(g)); + break; case TYPE_BOOL: if (vp->IsTypeNum()) @@ -430,7 +439,7 @@ PJVAL JSNX::GetRowValue(PGLOBAL g, PJSON row, int i, my_bool b) for (; i < Nod && row; i++) { if (Nodes[i].Op == OP_NUM) { - Value->SetValue(row->GetType() == TYPE_JAR ? row->size() : 1); + Value->SetValue(row->GetType() == TYPE_JAR ? 
((PJAR)row)->size() : 1); val = new(g) JVALUE(g, Value); return val; } else if (Nodes[i].Op == OP_XX) { @@ -526,10 +535,10 @@ PVAL JSNX::CalculateArray(PGLOBAL g, PJAR arp, int n) if (!jvrp->IsNull() || (op == OP_CNC && GetJsonNull())) { if (jvrp->IsNull()) { - jvrp->Value = AllocateValue(g, GetJsonNull(), TYPE_STRING); + jvrp->SetString(g, GetJsonNull(), 0); jvp = jvrp; } else if (n < Nod - 1 && jvrp->GetJson()) { - jval.SetValue(GetColumnValue(g, jvrp->GetJson(), n + 1)); + jval.SetValue(g, GetColumnValue(g, jvrp->GetJson(), n + 1)); jvp = &jval; } else jvp = jvrp; @@ -781,13 +790,13 @@ PSZ JSNX::Locate(PGLOBAL g, PJSON jsp, PJVAL jvp, int k) switch (jsp->GetType()) { case TYPE_JAR: - err = LocateArray((PJAR)jsp); + err = LocateArray(g, (PJAR)jsp); break; case TYPE_JOB: - err = LocateObject((PJOB)jsp); + err = LocateObject(g, (PJOB)jsp); break; case TYPE_JVAL: - err = LocateValue((PJVAL)jsp); + err = LocateValue(g, (PJVAL)jsp); break; default: err = true; @@ -818,7 +827,7 @@ PSZ JSNX::Locate(PGLOBAL g, PJSON jsp, PJVAL jvp, int k) /*********************************************************************************/ /* Locate in a JSON Array. */ /*********************************************************************************/ -my_bool JSNX::LocateArray(PJAR jarp) +my_bool JSNX::LocateArray(PGLOBAL g, PJAR jarp) { char s[16]; size_t m = Jp->N; @@ -830,7 +839,7 @@ my_bool JSNX::LocateArray(PJAR jarp) if (Jp->WriteStr(s)) return true; - if (LocateValue(jarp->GetValue(i))) + if (LocateValue(g, jarp->GetValue(i))) return true; } // endfor i @@ -841,7 +850,7 @@ my_bool JSNX::LocateArray(PJAR jarp) /*********************************************************************************/ /* Locate in a JSON Object. */ /*********************************************************************************/ -my_bool JSNX::LocateObject(PJOB jobp) +my_bool JSNX::LocateObject(PGLOBAL g, PJOB jobp) { size_t m; @@ -856,7 +865,7 @@ my_bool JSNX::LocateObject(PJOB jobp) if (Jp->WriteStr(pair->Key)) return true; - if (LocateValue(pair->Val)) + if (LocateValue(g, pair->Val)) return true; } // endfor i @@ -867,14 +876,14 @@ my_bool JSNX::LocateObject(PJOB jobp) /*********************************************************************************/ /* Locate a JSON Value. */ /*********************************************************************************/ -my_bool JSNX::LocateValue(PJVAL jvp) +my_bool JSNX::LocateValue(PGLOBAL g, PJVAL jvp) { - if (CompareTree(Jvalp, jvp)) + if (CompareTree(g, Jvalp, jvp)) Found = (--K == 0); else if (jvp->GetArray()) - return LocateArray(jvp->GetArray()); + return LocateArray(g, jvp->GetArray()); else if (jvp->GetObject()) - return LocateObject(jvp->GetObject()); + return LocateObject(g, jvp->GetObject()); return false; } // end of LocateValue @@ -907,13 +916,13 @@ PSZ JSNX::LocateAll(PGLOBAL g, PJSON jsp, PJVAL jvp, int mx) switch (jsp->GetType()) { case TYPE_JAR: - err = LocateArrayAll((PJAR)jsp); + err = LocateArrayAll(g, (PJAR)jsp); break; case TYPE_JOB: - err = LocateObjectAll((PJOB)jsp); + err = LocateObjectAll(g, (PJOB)jsp); break; case TYPE_JVAL: - err = LocateValueAll((PJVAL)jsp); + err = LocateValueAll(g, (PJVAL)jsp); break; default: err = true; @@ -945,7 +954,7 @@ PSZ JSNX::LocateAll(PGLOBAL g, PJSON jsp, PJVAL jvp, int mx) /*********************************************************************************/ /* Locate in a JSON Array. 
*/ /*********************************************************************************/ -my_bool JSNX::LocateArrayAll(PJAR jarp) +my_bool JSNX::LocateArrayAll(PGLOBAL g, PJAR jarp) { if (I < Imax) { Jpnp[++I].Type = TYPE_JAR; @@ -953,7 +962,7 @@ my_bool JSNX::LocateArrayAll(PJAR jarp) for (int i = 0; i < jarp->size(); i++) { Jpnp[I].N = i; - if (LocateValueAll(jarp->GetValue(i))) + if (LocateValueAll(g, jarp->GetValue(i))) return true; } // endfor i @@ -967,7 +976,7 @@ my_bool JSNX::LocateArrayAll(PJAR jarp) /*********************************************************************************/ /* Locate in a JSON Object. */ /*********************************************************************************/ -my_bool JSNX::LocateObjectAll(PJOB jobp) +my_bool JSNX::LocateObjectAll(PGLOBAL g, PJOB jobp) { if (I < Imax) { Jpnp[++I].Type = TYPE_JOB; @@ -975,7 +984,7 @@ my_bool JSNX::LocateObjectAll(PJOB jobp) for (PJPR pair = jobp->First; pair; pair = pair->Next) { Jpnp[I].Key = pair->Key; - if (LocateValueAll(pair->Val)) + if (LocateValueAll(g, pair->Val)) return true; } // endfor i @@ -989,14 +998,14 @@ my_bool JSNX::LocateObjectAll(PJOB jobp) /*********************************************************************************/ /* Locate a JSON Value. */ /*********************************************************************************/ -my_bool JSNX::LocateValueAll(PJVAL jvp) +my_bool JSNX::LocateValueAll(PGLOBAL g, PJVAL jvp) { - if (CompareTree(Jvalp, jvp)) + if (CompareTree(g, Jvalp, jvp)) return AddPath(); else if (jvp->GetArray()) - return LocateArrayAll(jvp->GetArray()); + return LocateArrayAll(g, jvp->GetArray()); else if (jvp->GetObject()) - return LocateObjectAll(jvp->GetObject()); + return LocateObjectAll(g, jvp->GetObject()); return false; } // end of LocateValueAll @@ -1004,7 +1013,7 @@ my_bool JSNX::LocateValueAll(PJVAL jvp) /*********************************************************************************/ /* Compare two JSON trees. */ /*********************************************************************************/ -my_bool JSNX::CompareTree(PJSON jp1, PJSON jp2) +my_bool JSNX::CompareTree(PGLOBAL g, PJSON jp1, PJSON jp2) { if (!jp1 || !jp2 || jp1->GetType() != jp2->GetType() || jp1->size() != jp2->size()) @@ -1013,26 +1022,22 @@ my_bool JSNX::CompareTree(PJSON jp1, PJSON jp2) my_bool found = true; if (jp1->GetType() == TYPE_JVAL) { - PVAL v1 = jp1->GetValue(), v2 = jp2->GetValue(); + PVL v1 = ((PJVAL)jp1)->GetVal(), v2 = ((PJVAL)jp2)->GetVal(); - if (v1 && v2) { - if (v1->GetType() == v2->GetType()) - found = !v1->CompareValue(v2); - else - found = false; - - } else - found = CompareTree(jp1->GetJsp(), jp2->GetJsp()); + if (v1 && v2) + found = CompareValues(v1, v2); + else + found = CompareTree(g, jp1->GetJsp(), jp2->GetJsp()); } else if (jp1->GetType() == TYPE_JAR) { for (int i = 0; found && i < jp1->size(); i++) - found = (CompareTree(jp1->GetValue(i), jp2->GetValue(i))); + found = (CompareTree(g, jp1->GetValue(i), jp2->GetValue(i))); } else if (jp1->GetType() == TYPE_JOB) { PJPR p1 = jp1->GetFirst(), p2 = jp2->GetFirst(); for (; found && p1 && p2; p1 = p1->Next, p2 = p2->Next) - found = CompareTree(p1->Val, p2->Val); + found = CompareTree(g, p1->Val, p2->Val); } else found = false; @@ -1040,11 +1045,61 @@ my_bool JSNX::CompareTree(PJSON jp1, PJSON jp2) return found; } // end of CompareTree +/*********************************************************************************/ +/* Compare two VAL values and return true if they are equal. 
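CompareTree above compares the two trees structurally: the node types and sizes must match, then the children are compared recursively in list order, with leaf comparison delegated to the new CompareValues shown in the next hunk. A standalone sketch of that shape of recursion, using an illustrative JNode type with a numeric-only leaf payload:

#include <stddef.h>
#include <vector>

struct JNode {
  enum Kind { LEAF, ARRAY, OBJECT } Knd;
  long long Num;                    // leaf payload (numeric only here)
  std::vector<JNode> Children;      // array elements or object pair values
}; // end of struct JNode

static bool SameTree(const JNode &a, const JNode &b)
{
  if (a.Knd != b.Knd || a.Children.size() != b.Children.size())
    return false;

  if (a.Knd == JNode::LEAF)
    return a.Num == b.Num;

  for (size_t i = 0; i < a.Children.size(); i++)
    if (!SameTree(a.Children[i], b.Children[i]))
      return false;

  return true;
} // end of SameTree

The real CompareValues additionally handles mixed representations, such as an INTG compared against a BINT, and the case-insensitive string flag carried in Nd.
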
*/ +/*********************************************************************************/ +my_bool JSNX::CompareValues(PVL v1, PVL v2) +{ + my_bool b = false; + + switch (v1->Type) { + case TYPE_STRG: + if (v2->Type == TYPE_STRG) { + if (v1->Nd || v2->Nd) // Case insensitive + b = (!stricmp(v1->Strp, v2->Strp)); + else + b = (!strcmp(v1->Strp, v2->Strp)); + + } // endif Type + + break; + case TYPE_DTM: + b = (!strcmp(v1->Strp, v2->Strp)); + break; + case TYPE_INTG: + if (v2->Type == TYPE_INTG) + b = (v1->N == v2->N); + else if (v2->Type == TYPE_BINT) + b = (v1->N == v2->LLn); + + break; + case TYPE_BINT: + if (v2->Type == TYPE_INTG) + b = (v1->LLn == v2->N); + else if (v2->Type == TYPE_BINT) + b = (v1->LLn == v2->LLn); + + break; + case TYPE_DBL: + if (v2->Type == TYPE_DBL) + b = (v1->F == v2->F); + + break; + case TYPE_BOOL: + if (v2->Type == TYPE_BOOL) + b = (v1->B == v2->B); + + break; + default: b = true; // both nulls + } // endswitch Type + + return b; +} // end of CompareValues + /*********************************************************************************/ /* Add the found path to the list. */ /*********************************************************************************/ -my_bool JSNX::AddPath(void) -{ +my_bool JSNX::AddPath(void) { char s[16]; if (Jp->WriteStr("\"$")) @@ -1196,6 +1251,7 @@ static PJVAL JvalNew(PGLOBAL g, JTYP type, void *vp) case TYPE_VAL: jvp = new(g) JVALUE(g, (PVAL)vp); break; + case TYPE_DTM: case TYPE_STRG: jvp = new(g) JVALUE(g, (PCSZ)vp); break; @@ -1221,7 +1277,7 @@ static my_bool JsonInit(UDF_INIT *initid, UDF_ARGS *args, unsigned long reslen, unsigned long memlen, unsigned long more = 0) { - PGLOBAL g = PlugInit(NULL, memlen + more + 500); // +500 to avoid CheckMem + PGLOBAL g = PlugInit(NULL, (size_t)memlen + more + 500); // +500 to avoid CheckMem if (!g) { strcpy(message, "Allocation error"); @@ -1690,7 +1746,7 @@ static PCSZ MakeKey(PGLOBAL g, UDF_ARGS *args, int i) /*********************************************************************************/ /* Parse a json file. */ /*********************************************************************************/ -static PJSON ParseJsonFile(PGLOBAL g, char *fn, int *pretty, int& len) +static PJSON ParseJsonFile(PGLOBAL g, char *fn, int *pretty, size_t& len) { char *memory; HANDLE hFile; @@ -1714,7 +1770,7 @@ static PJSON ParseJsonFile(PGLOBAL g, char *fn, int *pretty, int& len) /*******************************************************************************/ /* Get the file size (assuming file is smaller than 4 GB) */ /*******************************************************************************/ - len = mm.lenL; + len = (size_t)mm.sz.QuadPart; memory = (char *)mm.memory; if (!len) { // Empty or deleted file @@ -1784,7 +1840,7 @@ static PJVAL MakeValue(PGLOBAL g, UDF_ARGS *args, uint i, PJSON *top = NULL) { char *sap = (args->arg_count > i) ? args->args[i] : NULL; int n, len; - short c; + int ci; long long bigint; PJSON jsp; PJVAL jvp = new(g) JVALUE; @@ -1827,8 +1883,8 @@ static PJVAL MakeValue(PGLOBAL g, UDF_ARGS *args, uint i, PJSON *top = NULL) jvp->SetValue(jsp); } else { - c = (strnicmp(args->attributes[i], "ci", 2)) ? 0 : 1; - jvp->SetString(g, sap, c); + ci = (strnicmp(args->attributes[i], "ci", 2)) ? 
0 : 1; + jvp->SetString(g, sap, ci); } // endif n } // endif len @@ -1839,7 +1895,7 @@ static PJVAL MakeValue(PGLOBAL g, UDF_ARGS *args, uint i, PJSON *top = NULL) if ((bigint == 0LL && !strcmp(args->attributes[i], "FALSE")) || (bigint == 1LL && !strcmp(args->attributes[i], "TRUE"))) - jvp->SetTiny(g, (char)bigint); + jvp->SetBool(g, (char)bigint); else jvp->SetBigint(g, bigint); @@ -4404,7 +4460,8 @@ char *json_file(UDF_INIT *initid, UDF_ARGS *args, char *result, fn = MakePSZ(g, args, 0); if (args->arg_count > 1) { - int len, pretty = 3, pty = 3; + int pretty = 3, pty = 3; + size_t len; PJSON jsp; PJVAL jvp = NULL; @@ -5311,7 +5368,7 @@ char *jbin_get_item(UDF_INIT *initid, UDF_ARGS *args, char *result, // Get the json tree if ((jvp = jsx->GetRowValue(g, jsp, 0, false))) { - jsp = (jvp->GetJsp()) ? jvp->GetJsp() : JvalNew(g, TYPE_VAL, jvp->GetValue()); + jsp = (jvp->GetJsp()) ? jvp->GetJsp() : JvalNew(g, TYPE_VAL, jvp->GetValue(g)); if ((bsp = JbinAlloc(g, args, initid->max_length, jsp))) strcat(bsp->Msg, " item"); @@ -5637,7 +5694,8 @@ char *jbin_file(UDF_INIT *initid, UDF_ARGS *args, char *result, unsigned long *res_length, char *is_null, char *error) { char *fn; - int pretty = 3, len = 0, pty = 3; + int pretty = 3, pty = 3; + size_t len = 0; PJSON jsp; PJVAL jvp = NULL; PGLOBAL g = (PGLOBAL)initid->ptr; @@ -5796,16 +5854,16 @@ char *jfile_convert(UDF_INIT* initid, UDF_ARGS* args, char* result, if (!g->Xchk) { JUP* jup = new(g) JUP(g); - str = jup->UnprettyJsonFile(g, fn, ofn, lrecl); + str = strcpy(result, jup->UnprettyJsonFile(g, fn, ofn, lrecl)); g->Xchk = str; } else str = (char*)g->Xchk; if (!str) { if (g->Message) - str = PlugDup(g, g->Message); + str = strcpy(result, g->Message); else - str = PlugDup(g, "Unexpected error"); + str = strcpy(result, "Unexpected error"); } // endif str @@ -5817,9 +5875,136 @@ void jfile_convert_deinit(UDF_INIT* initid) { JsonFreeMem((PGLOBAL)initid->ptr); } // end of jfile_convert_deinit +/*********************************************************************************/ +/* Convert a prettiest Json file to Pretty=0. */ +/*********************************************************************************/ +my_bool jfile_bjson_init(UDF_INIT* initid, UDF_ARGS* args, char* message) { + unsigned long reslen, memlen; + + if (args->arg_count != 2 && args->arg_count != 3) { + strcpy(message, "This function must have 2 or 3 arguments"); + return true; + } else if (args->arg_count == 3 && args->arg_type[2] != INT_RESULT) { + strcpy(message, "Third Argument must be an integer (LRECL)"); + return true; + } else for (int i = 0; i < 2; i++) + if (args->arg_type[i] != STRING_RESULT) { + sprintf(message, "Arguments %d must be a string (file name)", i + 1); + return true; + } // endif args + + CalcLen(args, false, reslen, memlen); + memlen = memlen * M; + memlen += (args->arg_count == 3) ? 
(ulong)*(longlong*)args->args[2] : 1024; + return JsonInit(initid, args, message, false, reslen, memlen); +} // end of jfile_bjson_init + +char *jfile_bjson(UDF_INIT *initid, UDF_ARGS *args, char *result, + unsigned long *res_length, char*, char *error) { + char *fn, *ofn, *buf, *str = NULL; + bool loop; + ssize_t len, newloc; + size_t lrecl, *binszp; + PJSON jsp; + SWAP *swp; + PGLOBAL g = (PGLOBAL)initid->ptr; + + PlugSubSet(g->Sarea, g->Sarea_Size); + fn = MakePSZ(g, args, 0); + ofn = MakePSZ(g, args, 1); + + if (args->arg_count == 3) + lrecl = (size_t)*(longlong*)args->args[2]; + else + lrecl = 1024; + + if (!g->Xchk) { + int msgid = MSGID_OPEN_MODE_STRERROR; + FILE *fout; + FILE *fin; + + if (!(fin = global_fopen(g, msgid, fn, "rt"))) + str = strcpy(result, g->Message); + else if (!(fout = global_fopen(g, msgid, ofn, "wb"))) + str = strcpy(result, g->Message); + else if ((buf = (char*)PlgDBSubAlloc(g, NULL, lrecl)) && + (binszp = (size_t*)PlgDBSubAlloc(g, NULL, sizeof(size_t)))) { + JsonMemSave(g); + + try { + do { + loop = false; + JsonSubSet(g); + + if (!fgets(buf, lrecl, fin)) { + if (!feof(fin)) { + sprintf(g->Message, "Error %d reading %zd bytes from %s", errno, lrecl, fn); + str = strcpy(result, g->Message); + } else + str = strcpy(result, ofn); + + } else if ((len = strlen(buf))) { + if ((jsp = ParseJson(g, buf, len))) { + newloc = (size_t)PlugSubAlloc(g, NULL, 0); + *binszp = newloc - (size_t)jsp; + + swp = new(g) SWAP(g, jsp); + swp->SwapJson(jsp, true); + + if (fwrite(binszp, sizeof(binszp), 1, fout) != 1) { + sprintf(g->Message, "Error %d writing %zd bytes to %s", + errno, sizeof(binszp), ofn); + str = strcpy(result, g->Message); + } else if (fwrite(jsp, *binszp, 1, fout) != 1) { + sprintf(g->Message, "Error %d writing %zd bytes to %s", + errno, *binszp, ofn); + str = strcpy(result, g->Message); + } else + loop = true; + + } else { + str = strcpy(result, g->Message); + } // endif jsp + + } else + loop = true; + + } while (loop); + + } catch (int) { + str = strcpy(result, g->Message); + } catch (const char* msg) { + str = strcpy(result, msg); + } // end catch + + } else + str = strcpy(result, g->Message); + + if (fin) fclose(fin); + if (fout) fclose(fout); + g->Xchk = str; + } else + str = (char*)g->Xchk; + + if (!str) { + if (g->Message) + str = strcpy(result, g->Message); + else + str = strcpy(result, "Unexpected error"); + + } // endif str + + *res_length = strlen(str); + return str; +} // end of jfile_bjson + +void jfile_bjson_deinit(UDF_INIT* initid) { + JsonFreeMem((PGLOBAL)initid->ptr); +} // end of jfile_bjson_deinit + /* --------------------------------- Class JUP --------------------------------- */ -#define ARGS MY_MIN(24,len-i),s+MY_MAX(i-3,0) +#define ARGS MY_MIN(24,(int)len-i),s+MY_MAX(i-3,0) /*********************************************************************************/ /* JUP public constructor. 
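Each iteration of jfile_bjson above writes one record to the output file: a byte count (the amount sub-allocated for the parsed tree, written as sizeof(binszp) bytes, which the sketch assumes equals sizeof(size_t)) followed by the swapped, offset-based tree itself. Reading such a record back would follow the reverse pattern; the sketch below is a hedged illustration under those assumptions, not code from the patch.

#include <stdio.h>
#include <stdlib.h>

// Read one length-prefixed BJSON record: a size_t byte count followed
// by that many bytes of swapped tree.
static void *ReadBjsonRecord(FILE *fin, size_t *lenp)
{
  size_t len;
  void  *blob;

  if (fread(&len, sizeof(len), 1, fin) != 1)
    return NULL;                  // end of file or read error

  if (!(blob = malloc(len)))
    return NULL;

  if (fread(blob, len, 1, fin) != 1) {
    free(blob);
    return NULL;
  } // endif fread

  *lenp = len;
  return blob;                    // still offset-based, not yet usable
} // end of ReadBjsonRecord

The returned blob still contains offsets; the Mptr* side of SWAP (the SwapJson(..., false) path) would have to turn them back into pointers, with the proper base, before the nodes can be used.
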
*/ @@ -5827,7 +6012,8 @@ void jfile_convert_deinit(UDF_INIT* initid) { JUP::JUP(PGLOBAL g) { fs = NULL; s = buff = NULL; - i = k = len = recl = 0; + len = 0; + i = k = recl = 0; } // end of JUP constructor /*********************************************************************************/ @@ -5855,11 +6041,11 @@ char* JUP::UnprettyJsonFile(PGLOBAL g, char *fn, char *outfn, int lrecl) { /*******************************************************************************/ /* Get the file size (assuming file is smaller than 4 GB) */ /*******************************************************************************/ - if (!mm.lenL) { // Empty or deleted file + if (!mm.sz.QuadPart) { // Empty or deleted file CloseFileHandle(hFile); return NULL; } else - len = (int)mm.lenL; + len = (size_t)mm.sz.QuadPart; if (!mm.memory) { CloseFileHandle(hFile); @@ -5877,7 +6063,7 @@ char* JUP::UnprettyJsonFile(PGLOBAL g, char *fn, char *outfn, int lrecl) { sprintf(g->Message, MSG(OPEN_MODE_ERROR), "w", (int)errno, outfn); strcat(strcat(g->Message, ": "), strerror(errno)); - CloseMemMap(mm.memory, (size_t)mm.lenL); + CloseMemMap(mm.memory, len); return NULL; } // endif fs @@ -5886,7 +6072,7 @@ char* JUP::UnprettyJsonFile(PGLOBAL g, char *fn, char *outfn, int lrecl) { if (!unPretty(g, lrecl)) ret = outfn; - CloseMemMap(mm.memory, (size_t)mm.lenL); + CloseMemMap(mm.memory, len); fclose(fs); return ret; } // end of UnprettyJsonFile diff --git a/storage/connect/jsonudf.h b/storage/connect/jsonudf.h index 897b0fe9919..f5b2bf75654 100644 --- a/storage/connect/jsonudf.h +++ b/storage/connect/jsonudf.h @@ -239,6 +239,10 @@ extern "C" { DllExport char* jfile_convert(UDF_EXEC_ARGS); DllExport void jfile_convert_deinit(UDF_INIT*); + DllExport my_bool jfile_bjson_init(UDF_INIT*, UDF_ARGS*, char*); + DllExport char* jfile_bjson(UDF_EXEC_ARGS); + DllExport void jfile_bjson_deinit(UDF_INIT*); + DllExport my_bool envar_init(UDF_INIT*, UDF_ARGS*, char*); DllExport char *envar(UDF_EXEC_ARGS); @@ -292,13 +296,14 @@ protected: PVAL MakeJson(PGLOBAL g, PJSON jsp); void SetJsonValue(PGLOBAL g, PVAL vp, PJVAL val, int n); PJSON GetRow(PGLOBAL g); - my_bool LocateArray(PJAR jarp); - my_bool LocateObject(PJOB jobp); - my_bool LocateValue(PJVAL jvp); - my_bool LocateArrayAll(PJAR jarp); - my_bool LocateObjectAll(PJOB jobp); - my_bool LocateValueAll(PJVAL jvp); - my_bool CompareTree(PJSON jp1, PJSON jp2); + my_bool CompareValues(PVL v1, PVL v2); + my_bool LocateArray(PGLOBAL g, PJAR jarp); + my_bool LocateObject(PGLOBAL g, PJOB jobp); + my_bool LocateValue(PGLOBAL g, PJVAL jvp); + my_bool LocateArrayAll(PGLOBAL g, PJAR jarp); + my_bool LocateObjectAll(PGLOBAL g, PJOB jobp); + my_bool LocateValueAll(PGLOBAL g, PJVAL jvp); + my_bool CompareTree(PGLOBAL g, PJSON jp1, PJSON jp2); my_bool AddPath(void); // Default constructor not to be used @@ -355,11 +360,11 @@ public: void CopyNumeric(PGLOBAL g); // Members - FILE* fs; - char* s; - char* buff; - int len; - int recl; - int i, k; + FILE *fs; + char *s; + char *buff; + size_t len; + int recl; + int i, k; }; // end of class JUP diff --git a/storage/connect/maputil.cpp b/storage/connect/maputil.cpp index 87263b3adf6..86300f17200 100644 --- a/storage/connect/maputil.cpp +++ b/storage/connect/maputil.cpp @@ -90,8 +90,8 @@ HANDLE CreateFileMap(PGLOBAL g, LPCSTR filename, return INVALID_HANDLE_VALUE; } // endif memory - // lenH is the high-order word of the file size - mm->lenL = GetFileSize(hFile, &mm->lenH); + // HighPart is the high-order word of the file size + mm->sz.LowPart = GetFileSize(hFile, 
(LPDWORD)&mm->sz.HighPart); CloseHandle(hFileMap); // Not used anymore } else // MODE_INSERT /*****************************************************************/ diff --git a/storage/connect/maputil.h b/storage/connect/maputil.h index e310488eb5d..fd62fbcfeae 100644 --- a/storage/connect/maputil.h +++ b/storage/connect/maputil.h @@ -7,8 +7,7 @@ extern "C" { typedef struct { void *memory; - DWORD lenL; - DWORD lenH; + LARGE_INTEGER sz; } MEMMAP; DllExport HANDLE CreateFileMap(PGLOBAL, LPCSTR, MEMMAP *, MODE, bool); diff --git a/storage/connect/mysql-test/connect/r/alter_xml.result b/storage/connect/mysql-test/connect/r/alter_xml.result index 7cdb1e5d21c..d2f882f1287 100644 --- a/storage/connect/mysql-test/connect/r/alter_xml.result +++ b/storage/connect/mysql-test/connect/r/alter_xml.result @@ -54,7 +54,7 @@ line # NOTE: The first (ignored) row is due to the remaining HEADER=1 option. # Testing field option modification -ALTER TABLE t1 MODIFY d CHAR(10) NOT NULL FIELD_FORMAT='@', HEADER=0; +ALTER TABLE t1 MODIFY d CHAR(10) NOT NULL XPATH='@', HEADER=0; SELECT * FROM t1; c d 1 One @@ -64,7 +64,7 @@ SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `c` int(11) NOT NULL, - `d` char(10) NOT NULL `FIELD_FORMAT`='@' + `d` char(10) NOT NULL `XPATH`='@' ) ENGINE=CONNECT DEFAULT CHARSET=latin1 `QUOTED`=1 `TABLE_TYPE`=XML `TABNAME`=t1 `OPTION_LIST`='xmlsup=domdoc,rownode=row' `HEADER`=0 SELECT * FROM t2; line diff --git a/storage/connect/mysql-test/connect/r/alter_xml2.result b/storage/connect/mysql-test/connect/r/alter_xml2.result index 8eb56e3dcc3..a15be966aa8 100644 --- a/storage/connect/mysql-test/connect/r/alter_xml2.result +++ b/storage/connect/mysql-test/connect/r/alter_xml2.result @@ -56,7 +56,7 @@ line # NOTE: The first (ignored) row is due to the remaining HEADER=1 option. 
# Testing field option modification -ALTER TABLE t1 MODIFY d CHAR(10) NOT NULL FIELD_FORMAT='@', HEADER=0; +ALTER TABLE t1 MODIFY d CHAR(10) NOT NULL XPATH='@', HEADER=0; SELECT * FROM t1; c d 1 One @@ -66,7 +66,7 @@ SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `c` int(11) NOT NULL, - `d` char(10) NOT NULL `FIELD_FORMAT`='@' + `d` char(10) NOT NULL `XPATH`='@' ) ENGINE=CONNECT DEFAULT CHARSET=latin1 `QUOTED`=1 `TABLE_TYPE`=XML `TABNAME`=t1 `OPTION_LIST`='xmlsup=libxml2,rownode=row' `HEADER`=0 SELECT * FROM t2; line diff --git a/storage/connect/mysql-test/connect/r/jdbc_oracle.result b/storage/connect/mysql-test/connect/r/jdbc_oracle.result index ec314c5f072..d895a9aed87 100644 --- a/storage/connect/mysql-test/connect/r/jdbc_oracle.result +++ b/storage/connect/mysql-test/connect/r/jdbc_oracle.result @@ -3,7 +3,7 @@ command varchar(128) not null, number int(5) not null flag=1, message varchar(255) flag=2) ENGINE=CONNECT TABLE_TYPE=JDBC CONNECTION='jdbc:oracle:thin:@localhost:1521:xe' -OPTION_LIST='User=system,Password=manager,Execsrc=1'; +OPTION_LIST='User=system,Password=Choupy01,Execsrc=1'; SELECT * FROM t2 WHERE command = 'drop table employee'; command number message drop table employee 0 Execute: java.sql.SQLSyntaxErrorException: ORA-00942: table or view does not exist @@ -23,14 +23,14 @@ Warnings: Warning 1105 Affected rows CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=JDBC CATFUNC=tables CONNECTION='jdbc:oracle:thin:@localhost:1521:xe' -OPTION_LIST='User=system,Password=manager'; +OPTION_LIST='User=system,Password=Choupy01'; SELECT * FROM t1 WHERE table_name='employee'; Table_Cat Table_Schema Table_Name Table_Type Remark NULL SYSTEM EMPLOYEE TABLE NULL DROP TABLE t1; CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=JDBC TABNAME='EMPLOYEE' CATFUNC=columns CONNECTION='jdbc:oracle:thin:@localhost:1521:xe' -OPTION_LIST='User=system,Password=manager'; +OPTION_LIST='User=system,Password=Choupy01'; SELECT * FROM t1; Table_Cat Table_Schema Table_Name Column_Name Data_Type Type_Name Column_Size Buffer_Length Decimal_Digits Radix Nullable Remarks NULL SYSTEM EMPLOYEE ID 3 NUMBER 38 0 0 10 0 NULL @@ -42,7 +42,7 @@ CREATE SERVER 'oracle' FOREIGN DATA WRAPPER 'oracle.jdbc.driver.OracleDriver' OP HOST 'jdbc:oracle:thin:@localhost:1521:xe', DATABASE 'SYSTEM', USER 'system', -PASSWORD 'manager', +PASSWORD 'Choupy01', PORT 0, SOCKET '', OWNER 'SYSTEM'); diff --git a/storage/connect/mysql-test/connect/r/json.result b/storage/connect/mysql-test/connect/r/json.result index 6b6f40d2c47..affaea604a8 100644 --- a/storage/connect/mysql-test/connect/r/json.result +++ b/storage/connect/mysql-test/connect/r/json.result @@ -24,15 +24,15 @@ DROP TABLE t1; CREATE TABLE t1 ( ISBN CHAR(15), -Language CHAR(2) FIELD_FORMAT='$.LANG', -Subject CHAR(32) FIELD_FORMAT='$.SUBJECT', -Authors INT(2) FIELD_FORMAT='$.AUTHOR[#]', -Title CHAR(32) FIELD_FORMAT='$.TITLE', -Translation CHAR(32) FIELD_FORMAT='$.TRANSLATION', -Translator CHAR(80) FIELD_FORMAT='$.TRANSLATOR', -Publisher CHAR(20) FIELD_FORMAT='$.PUBLISHER.NAME', -Location CHAR(16) FIELD_FORMAT='$.PUBLISHER.PLACE', -Year int(4) FIELD_FORMAT='$.DATEPUB' +Language CHAR(2) JPATH='$.LANG', +Subject CHAR(32) JPATH='$.SUBJECT', +Authors INT(2) JPATH='$.AUTHOR[#]', +Title CHAR(32) JPATH='$.TITLE', +Translation CHAR(32) JPATH='$.TRANSLATION', +Translator CHAR(80) JPATH='$.TRANSLATOR', +Publisher CHAR(20) JPATH='$.PUBLISHER.NAME', +Location CHAR(16) JPATH='$.PUBLISHER.PLACE', +Year int(4) JPATH='$.DATEPUB' ) ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='biblio.json'; SELECT * 
FROM t1; @@ -46,16 +46,16 @@ DROP TABLE t1; CREATE TABLE t1 ( ISBN CHAR(15), -Language CHAR(2) FIELD_FORMAT='$.LANG', -Subject CHAR(32) FIELD_FORMAT='$.SUBJECT', -AuthorFN CHAR(128) FIELD_FORMAT='$.AUTHOR[" and "].FIRSTNAME', -AuthorLN CHAR(128) FIELD_FORMAT='$.AUTHOR[" and "].LASTNAME', -Title CHAR(32) FIELD_FORMAT='$.TITLE', -Translation CHAR(32) FIELD_FORMAT='$.TRANSLATION', -Translator CHAR(80) FIELD_FORMAT='$.TRANSLATOR', -Publisher CHAR(20) FIELD_FORMAT='$.PUBLISHER.NAME', -Location CHAR(16) FIELD_FORMAT='$.PUBLISHER.PLACE', -Year int(4) FIELD_FORMAT='$.DATEPUB' +Language CHAR(2) JPATH='$.LANG', +Subject CHAR(32) JPATH='$.SUBJECT', +AuthorFN CHAR(128) JPATH='$.AUTHOR[" and "].FIRSTNAME', +AuthorLN CHAR(128) JPATH='$.AUTHOR[" and "].LASTNAME', +Title CHAR(32) JPATH='$.TITLE', +Translation CHAR(32) JPATH='$.TRANSLATION', +Translator CHAR(80) JPATH='$.TRANSLATOR', +Publisher CHAR(20) JPATH='$.PUBLISHER.NAME', +Location CHAR(16) JPATH='$.PUBLISHER.PLACE', +Year int(4) JPATH='$.DATEPUB' ) ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='biblio.json'; SELECT * FROM t1; @@ -69,16 +69,16 @@ DROP TABLE t1; CREATE TABLE t1 ( ISBN CHAR(15), -Language CHAR(2) FIELD_FORMAT='$.LANG', -Subject CHAR(32) FIELD_FORMAT='$.SUBJECT', -AuthorFN CHAR(128) FIELD_FORMAT='$.AUTHOR[*].FIRSTNAME', -AuthorLN CHAR(128) FIELD_FORMAT='$.AUTHOR[*].LASTNAME', -Title CHAR(32) FIELD_FORMAT='$.TITLE', -Translation CHAR(32) FIELD_FORMAT='$.TRANSLATION', -Translator CHAR(80) FIELD_FORMAT='$.TRANSLATOR', -Publisher CHAR(20) FIELD_FORMAT='$.PUBLISHER.NAME', -Location CHAR(16) FIELD_FORMAT='$.PUBLISHER.PLACE', -Year int(4) FIELD_FORMAT='$.DATEPUB' +Language CHAR(2) JPATH='$.LANG', +Subject CHAR(32) JPATH='$.SUBJECT', +AuthorFN CHAR(128) JPATH='$.AUTHOR[*].FIRSTNAME', +AuthorLN CHAR(128) JPATH='$.AUTHOR[*].LASTNAME', +Title CHAR(32) JPATH='$.TITLE', +Translation CHAR(32) JPATH='$.TRANSLATION', +Translator CHAR(80) JPATH='$.TRANSLATOR', +Publisher CHAR(20) JPATH='$.PUBLISHER.NAME', +Location CHAR(16) JPATH='$.PUBLISHER.PLACE', +Year int(4) JPATH='$.DATEPUB' ) ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='biblio.json'; SELECT * FROM t1; @@ -176,17 +176,17 @@ DROP TABLE t1; CREATE TABLE t1 ( ISBN CHAR(15) NOT NULL, -Language CHAR(2) FIELD_FORMAT='$.LANG', -Subject CHAR(32) FIELD_FORMAT='$.SUBJECT', -AuthorFN CHAR(128) FIELD_FORMAT='$.AUTHOR[*].FIRSTNAME', -AuthorLN CHAR(128) FIELD_FORMAT='$.AUTHOR[*].LASTNAME', -Title CHAR(32) FIELD_FORMAT='$.TITLE', -Translation CHAR(32) FIELD_FORMAT='$.TRANSLATED.PREFIX', -TranslatorFN CHAR(80) FIELD_FORMAT='$.TRANSLATED.TRANSLATOR.FIRSTNAME', -TranslatorLN CHAR(80) FIELD_FORMAT='$.TRANSLATED.TRANSLATOR.LASTNAME', -Publisher CHAR(20) FIELD_FORMAT='$.PUBLISHER.NAME', -Location CHAR(16) FIELD_FORMAT='$.PUBLISHER.PLACE', -Year int(4) FIELD_FORMAT='$.DATEPUB', +Language CHAR(2) JPATH='$.LANG', +Subject CHAR(32) JPATH='$.SUBJECT', +AuthorFN CHAR(128) JPATH='$.AUTHOR[*].FIRSTNAME', +AuthorLN CHAR(128) JPATH='$.AUTHOR[*].LASTNAME', +Title CHAR(32) JPATH='$.TITLE', +Translation CHAR(32) JPATH='$.TRANSLATED.PREFIX', +TranslatorFN CHAR(80) JPATH='$.TRANSLATED.TRANSLATOR.FIRSTNAME', +TranslatorLN CHAR(80) JPATH='$.TRANSLATED.TRANSLATOR.LASTNAME', +Publisher CHAR(20) JPATH='$.PUBLISHER.NAME', +Location CHAR(16) JPATH='$.PUBLISHER.PLACE', +Year int(4) JPATH='$.DATEPUB', INDEX IX(ISBN) ) ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='bib0.json' LRECL=320 OPTION_LIST='Pretty=0'; @@ -209,9 +209,9 @@ DROP TABLE t1; # CREATE TABLE t1 ( WHO CHAR(12), -WEEK INT(2) FIELD_FORMAT='$.WEEK[*].NUMBER', -WHAT CHAR(32) 
FIELD_FORMAT='$.WEEK[].EXPENSE["+"].WHAT', -AMOUNT DOUBLE(8,2) FIELD_FORMAT='$.WEEK[].EXPENSE[+].AMOUNT') +WEEK INT(2) JPATH='$.WEEK[*].NUMBER', +WHAT CHAR(32) JPATH='$.WEEK[].EXPENSE["+"].WHAT', +AMOUNT DOUBLE(8,2) JPATH='$.WEEK[].EXPENSE[+].AMOUNT') ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='expense.json'; SELECT * FROM t1; WHO WEEK WHAT AMOUNT @@ -230,9 +230,9 @@ DROP TABLE t1; # CREATE TABLE t1 ( WHO CHAR(12), -WEEK INT(2) FIELD_FORMAT='$.WEEK[*].NUMBER', -WHAT CHAR(32) FIELD_FORMAT='$.WEEK[*].EXPENSE[*].WHAT', -AMOUNT DOUBLE(8,2) FIELD_FORMAT='$.WEEK[*].EXPENSE[*].AMOUNT') +WEEK INT(2) JPATH='$.WEEK[*].NUMBER', +WHAT CHAR(32) JPATH='$.WEEK[*].EXPENSE[*].WHAT', +AMOUNT DOUBLE(8,2) JPATH='$.WEEK[*].EXPENSE[*].AMOUNT') ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='expense.json'; SELECT * FROM t1; WHO WEEK WHAT AMOUNT @@ -266,14 +266,14 @@ DROP TABLE t1; # CREATE TABLE t1 ( WHO CHAR(12) NOT NULL, -WEEKS CHAR(12) NOT NULL FIELD_FORMAT='$.WEEK[", "].NUMBER', -SUMS CHAR(64) NOT NULL FIELD_FORMAT='$.WEEK["+"].EXPENSE[+].AMOUNT', -SUM DOUBLE(8,2) NOT NULL FIELD_FORMAT='$.WEEK[+].EXPENSE[+].AMOUNT', -AVGS CHAR(64) NOT NULL FIELD_FORMAT='$.WEEK["+"].EXPENSE[!].AMOUNT', -SUMAVG DOUBLE(8,2) NOT NULL FIELD_FORMAT='$.WEEK[+].EXPENSE[!].AMOUNT', -AVGSUM DOUBLE(8,2) NOT NULL FIELD_FORMAT='$.WEEK[!].EXPENSE[+].AMOUNT', -AVGAVG DOUBLE(8,2) NOT NULL FIELD_FORMAT='$.WEEK[!].EXPENSE[!].AMOUNT', -AVERAGE DOUBLE(8,2) NOT NULL FIELD_FORMAT='$.WEEK[!].EXPENSE[*].AMOUNT') +WEEKS CHAR(12) NOT NULL JPATH='$.WEEK[", "].NUMBER', +SUMS CHAR(64) NOT NULL JPATH='$.WEEK["+"].EXPENSE[+].AMOUNT', +SUM DOUBLE(8,2) NOT NULL JPATH='$.WEEK[+].EXPENSE[+].AMOUNT', +AVGS CHAR(64) NOT NULL JPATH='$.WEEK["+"].EXPENSE[!].AMOUNT', +SUMAVG DOUBLE(8,2) NOT NULL JPATH='$.WEEK[+].EXPENSE[!].AMOUNT', +AVGSUM DOUBLE(8,2) NOT NULL JPATH='$.WEEK[!].EXPENSE[+].AMOUNT', +AVGAVG DOUBLE(8,2) NOT NULL JPATH='$.WEEK[!].EXPENSE[!].AMOUNT', +AVERAGE DOUBLE(8,2) NOT NULL JPATH='$.WEEK[!].EXPENSE[*].AMOUNT') ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='expense.json'; SELECT * FROM t1; WHO WEEKS SUMS SUM AVGS SUMAVG AVGSUM AVGAVG AVERAGE @@ -286,9 +286,9 @@ DROP TABLE t1; # CREATE TABLE t2 ( WHO CHAR(12), -WEEK INT(2) FIELD_FORMAT='$.WEEK[0].NUMBER', -WHAT CHAR(32) FIELD_FORMAT='$.WEEK[0].EXPENSE[*].WHAT', -AMOUNT DOUBLE(8,2) FIELD_FORMAT='$.WEEK[0].EXPENSE[*].AMOUNT') +WEEK INT(2) JPATH='$.WEEK[0].NUMBER', +WHAT CHAR(32) JPATH='$.WEEK[0].EXPENSE[*].WHAT', +AMOUNT DOUBLE(8,2) JPATH='$.WEEK[0].EXPENSE[*].AMOUNT') ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='expense.json'; SELECT * FROM t2; WHO WEEK WHAT AMOUNT @@ -302,9 +302,9 @@ Janet 3 Food 18.00 Janet 3 Beer 18.00 CREATE TABLE t3 ( WHO CHAR(12), -WEEK INT(2) FIELD_FORMAT='$.WEEK[1].NUMBER', -WHAT CHAR(32) FIELD_FORMAT='$.WEEK[1].EXPENSE[*].WHAT', -AMOUNT DOUBLE(8,2) FIELD_FORMAT='$.WEEK[1].EXPENSE[*].AMOUNT') +WEEK INT(2) JPATH='$.WEEK[1].NUMBER', +WHAT CHAR(32) JPATH='$.WEEK[1].EXPENSE[*].WHAT', +AMOUNT DOUBLE(8,2) JPATH='$.WEEK[1].EXPENSE[*].AMOUNT') ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='expense.json'; SELECT * FROM t3; WHO WEEK WHAT AMOUNT @@ -318,9 +318,9 @@ Beth 4 Beer 15.00 Janet 4 Car 17.00 CREATE TABLE t4 ( WHO CHAR(12), -WEEK INT(2) FIELD_FORMAT='$.WEEK[2].NUMBER', -WHAT CHAR(32) FIELD_FORMAT='$.WEEK[2].EXPENSE[*].WHAT', -AMOUNT DOUBLE(8,2) FIELD_FORMAT='$.WEEK[2].EXPENSE[*].AMOUNT') +WEEK INT(2) JPATH='$.WEEK[2].NUMBER', +WHAT CHAR(32) JPATH='$.WEEK[2].EXPENSE[*].WHAT', +AMOUNT DOUBLE(8,2) JPATH='$.WEEK[2].EXPENSE[*].AMOUNT') ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='expense.json'; SELECT * FROM 
t4; WHO WEEK WHAT AMOUNT @@ -374,8 +374,8 @@ DROP TABLE t1, t2, t3, t4; CREATE TABLE t2 ( WHO CHAR(12), WEEK INT(2), -WHAT CHAR(32) FIELD_FORMAT='$.EXPENSE[*].WHAT', -AMOUNT DOUBLE(8,2) FIELD_FORMAT='$.EXPENSE.[*].AMOUNT') +WHAT CHAR(32) JPATH='$.EXPENSE[*].WHAT', +AMOUNT DOUBLE(8,2) JPATH='$.EXPENSE.[*].AMOUNT') ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='mulexp3.json'; SELECT * FROM t2; WHO WEEK WHAT AMOUNT @@ -390,8 +390,8 @@ Janet 3 Beer 18.00 CREATE TABLE t3 ( WHO CHAR(12), WEEK INT(2), -WHAT CHAR(32) FIELD_FORMAT='$.EXPENSE[*].WHAT', -AMOUNT DOUBLE(8,2) FIELD_FORMAT='$.EXPENSE.[*].AMOUNT') +WHAT CHAR(32) JPATH='$.EXPENSE[*].WHAT', +AMOUNT DOUBLE(8,2) JPATH='$.EXPENSE.[*].AMOUNT') ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='mulexp4.json'; SELECT * FROM t3; WHO WEEK WHAT AMOUNT @@ -406,8 +406,8 @@ Janet 4 Car 17.00 CREATE TABLE t4 ( WHO CHAR(12), WEEK INT(2), -WHAT CHAR(32) FIELD_FORMAT='$.EXPENSE[*].WHAT', -AMOUNT DOUBLE(8,2) FIELD_FORMAT='$.EXPENSE.[*].AMOUNT') +WHAT CHAR(32) JPATH='$.EXPENSE[*].WHAT', +AMOUNT DOUBLE(8,2) JPATH='$.EXPENSE.[*].AMOUNT') ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='mulexp5.json'; SELECT * FROM t4; WHO WEEK WHAT AMOUNT @@ -425,8 +425,8 @@ Janet 5 Food 12.00 CREATE TABLE t1 ( WHO CHAR(12), WEEK INT(2), -WHAT CHAR(32) FIELD_FORMAT='$.EXPENSE[*].WHAT', -AMOUNT DOUBLE(8,2) FIELD_FORMAT='$.EXPENSE.[*].AMOUNT') +WHAT CHAR(32) JPATH='$.EXPENSE[*].WHAT', +AMOUNT DOUBLE(8,2) JPATH='$.EXPENSE.[*].AMOUNT') ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='mulexp*.json' MULTIPLE=1; SELECT * FROM t1 ORDER BY WHO, WEEK, WHAT, AMOUNT; WHO WEEK WHAT AMOUNT @@ -461,8 +461,8 @@ DROP TABLE t1; CREATE TABLE t1 ( WHO CHAR(12), WEEK INT(2), -WHAT CHAR(32) FIELD_FORMAT='$.EXPENSE[*].WHAT', -AMOUNT DOUBLE(8,2) FIELD_FORMAT='$.EXPENSE.[*].AMOUNT') +WHAT CHAR(32) JPATH='$.EXPENSE[*].WHAT', +AMOUNT DOUBLE(8,2) JPATH='$.EXPENSE.[*].AMOUNT') ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='mulexp%s.json'; ALTER TABLE t1 PARTITION BY LIST COLUMNS(WEEK) ( diff --git a/storage/connect/mysql-test/connect/r/json_java_2.result b/storage/connect/mysql-test/connect/r/json_java_2.result index 47fc4abbd28..2ce89f971b2 100644 --- a/storage/connect/mysql-test/connect/r/json_java_2.result +++ b/storage/connect/mysql-test/connect/r/json_java_2.result @@ -2,7 +2,7 @@ set connect_enable_mongo=1; # # Test the MONGO table type # -CREATE TABLE t1 (Document varchar(1024) field_format='*') +CREATE TABLE t1 (Document varchar(1024) JPATH='*') ENGINE=CONNECT TABLE_TYPE=JSON TABNAME=restaurants CONNECTION='mongodb://localhost:27017' LRECL=4096 OPTION_LIST='Driver=Java,Version=2' DATA_CHARSET=utf8; SELECT * from t1 limit 3; @@ -15,7 +15,7 @@ DROP TABLE t1; # Test catfunc # CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=JSON TABNAME=restaurants CATFUNC=columns -OPTION_LIST='Level=1,Driver=Java,Version=2' DATA_CHARSET=utf8 CONNECTION='mongodb://localhost:27017' LRECL=4096; +OPTION_LIST='Depth=1,Driver=Java,Version=2' DATA_CHARSET=utf8 CONNECTION='mongodb://localhost:27017' LRECL=4096; SELECT * from t1; Column_Name Data_Type Type_Name Column_Size Buffer_Length Decimal_Digits Nullable Jpath _id 1 CHAR 24 24 0 0 _id @@ -27,7 +27,7 @@ borough 1 CHAR 13 13 0 0 cuisine 1 CHAR 64 64 0 0 grades_date 1 CHAR 1024 1024 0 1 grades.0.date grades_grade 1 CHAR 14 14 0 1 grades.0.grade -grades_score 5 BIGINT 2 2 0 1 grades.0.score +grades_score 7 INTEGER 2 2 0 1 grades.0.score name 1 CHAR 98 98 0 0 restaurant_id 1 CHAR 8 8 0 0 DROP TABLE t1; @@ -60,7 +60,7 @@ DROP TABLE t1; # Test discovery # CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=JSON 
TABNAME=restaurants -OPTION_LIST='Level=1,Driver=Java,Version=2' CONNECTION='mongodb://localhost:27017' LRECL=4096 DATA_CHARSET=utf8; +OPTION_LIST='Depth=1,Driver=Java,Version=2' CONNECTION='mongodb://localhost:27017' LRECL=4096 DATA_CHARSET=utf8; SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( @@ -73,10 +73,10 @@ t1 CREATE TABLE `t1` ( `cuisine` char(64) NOT NULL, `grades_date` varchar(1024) DEFAULT NULL `JPATH`='grades.0.date', `grades_grade` char(14) DEFAULT NULL `JPATH`='grades.0.grade', - `grades_score` bigint(2) DEFAULT NULL `JPATH`='grades.0.score', + `grades_score` int(2) DEFAULT NULL `JPATH`='grades.0.score', `name` char(98) NOT NULL, `restaurant_id` char(8) NOT NULL -) ENGINE=CONNECT DEFAULT CHARSET=latin1 CONNECTION='mongodb://localhost:27017' `TABLE_TYPE`='JSON' `TABNAME`='restaurants' `OPTION_LIST`='Level=1,Driver=Java,Version=2' `DATA_CHARSET`='utf8' `LRECL`=4096 +) ENGINE=CONNECT DEFAULT CHARSET=latin1 CONNECTION='mongodb://localhost:27017' `TABLE_TYPE`='JSON' `TABNAME`='restaurants' `OPTION_LIST`='Depth=1,Driver=Java,Version=2' `DATA_CHARSET`='utf8' `LRECL`=4096 SELECT * FROM t1 LIMIT 5; _id address_building address_coord address_street address_zipcode borough cuisine grades_date grades_grade grades_score name restaurant_id 58ada47de5a51ddfcd5ed51c 1007 -73.856077 Morris Park Ave 10462 Bronx Bakery 2014-03-03T00:00:00.000Z A 2 Morris Park Bake Shop 30075445 @@ -111,12 +111,12 @@ _id VARCHAR(24) NOT NULL, name VARCHAR(64) NOT NULL, cuisine CHAR(200) NOT NULL, borough CHAR(16) NOT NULL, -street VARCHAR(65) FIELD_FORMAT='address.street', -building CHAR(16) FIELD_FORMAT='address.building', -zipcode CHAR(5) FIELD_FORMAT='address.zipcode', -grade CHAR(1) FIELD_FORMAT='grades.0.grade', -score INT(4) NOT NULL FIELD_FORMAT='grades.0.score', -`date` DATE FIELD_FORMAT='grades.0.date', +street VARCHAR(65) JPATH='address.street', +building CHAR(16) JPATH='address.building', +zipcode CHAR(5) JPATH='address.zipcode', +grade CHAR(1) JPATH='grades.0.grade', +score INT(4) NOT NULL JPATH='grades.0.score', +`date` DATE JPATH='grades.0.date', restaurant_id VARCHAR(255) NOT NULL) ENGINE=CONNECT TABLE_TYPE=JSON TABNAME='restaurants' DATA_CHARSET=utf8 OPTION_LIST='Driver=Java,Version=2' CONNECTION='mongodb://localhost:27017' LRECL=4096; @@ -259,7 +259,7 @@ t1 CREATE TABLE `t1` ( `borough` char(13) NOT NULL, `grades_date` char(24) DEFAULT NULL `JPATH`='grades.0.date', `grades_grade` char(14) DEFAULT NULL `JPATH`='grades.0.grade', - `grades_score` bigint(2) DEFAULT NULL `JPATH`='grades.0.score', + `grades_score` int(2) DEFAULT NULL `JPATH`='grades.0.score', `name` char(98) NOT NULL, `restaurant_id` char(8) NOT NULL ) ENGINE=CONNECT DEFAULT CHARSET=latin1 CONNECTION='mongodb://localhost:27017' `TABLE_TYPE`='JSON' `TABNAME`='restaurants' `COLIST`='{"cuisine":0}' `FILTER`='{"cuisine":"French","borough":{"$ne":"Manhattan"}}' `OPTION_LIST`='Driver=Java,level=2,version=2' `LRECL`=4096 @@ -305,8 +305,8 @@ false CREATE TABLE t1 ( _id char(5) NOT NULL, city char(16) NOT NULL, -loc_0 double(12,6) NOT NULL `FIELD_FORMAT`='loc.0', -loc_1 char(12) NOT NULL `FIELD_FORMAT`='loc.1', +loc_0 double(12,6) NOT NULL `JPATH`='loc.0', +loc_1 char(12) NOT NULL `JPATH`='loc.1', pop int(11) NOT NULL, state char(2) NOT NULL) ENGINE=CONNECT CONNECTION='mongodb://localhost:27017' TABLE_TYPE=JSON TABNAME='cities' @@ -344,11 +344,11 @@ true CREATE TABLE t1 ( _id int(4) NOT NULL, item CHAR(8) NOT NULL, -prices_0 INT(6) FIELD_FORMAT='prices.0', -prices_1 INT(6) FIELD_FORMAT='prices.1', -prices_2 INT(6) 
FIELD_FORMAT='prices.2', -prices_3 INT(6) FIELD_FORMAT='prices.3', -prices_4 INT(6) FIELD_FORMAT='prices.4') +prices_0 INT(6) JPATH='prices.0', +prices_1 INT(6) JPATH='prices.1', +prices_2 INT(6) JPATH='prices.2', +prices_3 INT(6) JPATH='prices.3', +prices_4 INT(6) JPATH='prices.4') ENGINE=CONNECT TABLE_TYPE=JSON TABNAME='testcoll' DATA_CHARSET=utf8 OPTION_LIST='Driver=Java,Version=2' CONNECTION='mongodb://localhost:27017' LRECL=4096; INSERT INTO t1 VALUES diff --git a/storage/connect/mysql-test/connect/r/json_java_3.result b/storage/connect/mysql-test/connect/r/json_java_3.result index 720c82cd7f9..d914b507f52 100644 --- a/storage/connect/mysql-test/connect/r/json_java_3.result +++ b/storage/connect/mysql-test/connect/r/json_java_3.result @@ -2,7 +2,7 @@ set connect_enable_mongo=1; # # Test the MONGO table type # -CREATE TABLE t1 (Document varchar(1024) field_format='*') +CREATE TABLE t1 (Document varchar(1024) JPATH='*') ENGINE=CONNECT TABLE_TYPE=JSON TABNAME=restaurants CONNECTION='mongodb://localhost:27017' LRECL=4096 OPTION_LIST='Driver=Java,Version=3' DATA_CHARSET=utf8; SELECT * from t1 limit 3; @@ -15,7 +15,7 @@ DROP TABLE t1; # Test catfunc # CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=JSON TABNAME=restaurants CATFUNC=columns -OPTION_LIST='Level=1,Driver=Java,Version=3' DATA_CHARSET=utf8 CONNECTION='mongodb://localhost:27017' LRECL=4096; +OPTION_LIST='Depth=1,Driver=Java,Version=3' DATA_CHARSET=utf8 CONNECTION='mongodb://localhost:27017' LRECL=4096; SELECT * from t1; Column_Name Data_Type Type_Name Column_Size Buffer_Length Decimal_Digits Nullable Jpath _id 1 CHAR 24 24 0 0 _id @@ -27,7 +27,7 @@ borough 1 CHAR 13 13 0 0 cuisine 1 CHAR 64 64 0 0 grades_date 1 CHAR 1024 1024 0 1 grades.0.date grades_grade 1 CHAR 14 14 0 1 grades.0.grade -grades_score 5 BIGINT 2 2 0 1 grades.0.score +grades_score 7 INTEGER 2 2 0 1 grades.0.score name 1 CHAR 98 98 0 0 restaurant_id 1 CHAR 8 8 0 0 DROP TABLE t1; @@ -60,7 +60,7 @@ DROP TABLE t1; # Test discovery # CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=JSON TABNAME=restaurants -OPTION_LIST='Level=1,Driver=Java,Version=3' CONNECTION='mongodb://localhost:27017' LRECL=4096 DATA_CHARSET=utf8; +OPTION_LIST='Depth=1,Driver=Java,Version=3' CONNECTION='mongodb://localhost:27017' LRECL=4096 DATA_CHARSET=utf8; SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( @@ -73,10 +73,10 @@ t1 CREATE TABLE `t1` ( `cuisine` char(64) NOT NULL, `grades_date` varchar(1024) DEFAULT NULL `JPATH`='grades.0.date', `grades_grade` char(14) DEFAULT NULL `JPATH`='grades.0.grade', - `grades_score` bigint(2) DEFAULT NULL `JPATH`='grades.0.score', + `grades_score` int(2) DEFAULT NULL `JPATH`='grades.0.score', `name` char(98) NOT NULL, `restaurant_id` char(8) NOT NULL -) ENGINE=CONNECT DEFAULT CHARSET=latin1 CONNECTION='mongodb://localhost:27017' `TABLE_TYPE`='JSON' `TABNAME`='restaurants' `OPTION_LIST`='Level=1,Driver=Java,Version=3' `DATA_CHARSET`='utf8' `LRECL`=4096 +) ENGINE=CONNECT DEFAULT CHARSET=latin1 CONNECTION='mongodb://localhost:27017' `TABLE_TYPE`='JSON' `TABNAME`='restaurants' `OPTION_LIST`='Depth=1,Driver=Java,Version=3' `DATA_CHARSET`='utf8' `LRECL`=4096 SELECT * FROM t1 LIMIT 5; _id address_building address_coord address_street address_zipcode borough cuisine grades_date grades_grade grades_score name restaurant_id 58ada47de5a51ddfcd5ed51c 1007 -73.856077 Morris Park Ave 10462 Bronx Bakery 1393804800 A 2 Morris Park Bake Shop 30075445 @@ -111,12 +111,12 @@ _id VARCHAR(24) NOT NULL, name VARCHAR(64) NOT NULL, cuisine CHAR(200) NOT NULL, borough CHAR(16) NOT 
NULL, -street VARCHAR(65) FIELD_FORMAT='address.street', -building CHAR(16) FIELD_FORMAT='address.building', -zipcode CHAR(5) FIELD_FORMAT='address.zipcode', -grade CHAR(1) FIELD_FORMAT='grades.0.grade', -score INT(4) NOT NULL FIELD_FORMAT='grades.0.score', -`date` DATE FIELD_FORMAT='grades.0.date', +street VARCHAR(65) JPATH='address.street', +building CHAR(16) JPATH='address.building', +zipcode CHAR(5) JPATH='address.zipcode', +grade CHAR(1) JPATH='grades.0.grade', +score INT(4) NOT NULL JPATH='grades.0.score', +`date` DATE JPATH='grades.0.date', restaurant_id VARCHAR(255) NOT NULL) ENGINE=CONNECT TABLE_TYPE=JSON TABNAME='restaurants' DATA_CHARSET=utf8 OPTION_LIST='Driver=Java,Version=3' CONNECTION='mongodb://localhost:27017' LRECL=4096; @@ -259,7 +259,7 @@ t1 CREATE TABLE `t1` ( `borough` char(13) NOT NULL, `grades_date` bigint(13) DEFAULT NULL `JPATH`='grades.0.date', `grades_grade` char(14) DEFAULT NULL `JPATH`='grades.0.grade', - `grades_score` bigint(2) DEFAULT NULL `JPATH`='grades.0.score', + `grades_score` int(2) DEFAULT NULL `JPATH`='grades.0.score', `name` char(98) NOT NULL, `restaurant_id` char(8) NOT NULL ) ENGINE=CONNECT DEFAULT CHARSET=latin1 CONNECTION='mongodb://localhost:27017' `TABLE_TYPE`='JSON' `TABNAME`='restaurants' `COLIST`='{"cuisine":0}' `FILTER`='{"cuisine":"French","borough":{"$ne":"Manhattan"}}' `OPTION_LIST`='Driver=Java,level=2,version=3' `LRECL`=4096 @@ -305,8 +305,8 @@ false CREATE TABLE t1 ( _id char(5) NOT NULL, city char(16) NOT NULL, -loc_0 double(12,6) NOT NULL `FIELD_FORMAT`='loc.0', -loc_1 char(12) NOT NULL `FIELD_FORMAT`='loc.1', +loc_0 double(12,6) NOT NULL `JPATH`='loc.0', +loc_1 char(12) NOT NULL `JPATH`='loc.1', pop int(11) NOT NULL, state char(2) NOT NULL) ENGINE=CONNECT CONNECTION='mongodb://localhost:27017' TABLE_TYPE=JSON TABNAME='cities' @@ -344,11 +344,11 @@ true CREATE TABLE t1 ( _id int(4) NOT NULL, item CHAR(8) NOT NULL, -prices_0 INT(6) FIELD_FORMAT='prices.0', -prices_1 INT(6) FIELD_FORMAT='prices.1', -prices_2 INT(6) FIELD_FORMAT='prices.2', -prices_3 INT(6) FIELD_FORMAT='prices.3', -prices_4 INT(6) FIELD_FORMAT='prices.4') +prices_0 INT(6) JPATH='prices.0', +prices_1 INT(6) JPATH='prices.1', +prices_2 INT(6) JPATH='prices.2', +prices_3 INT(6) JPATH='prices.3', +prices_4 INT(6) JPATH='prices.4') ENGINE=CONNECT TABLE_TYPE=JSON TABNAME='testcoll' DATA_CHARSET=utf8 OPTION_LIST='Driver=Java,Version=3' CONNECTION='mongodb://localhost:27017' LRECL=4096; INSERT INTO t1 VALUES diff --git a/storage/connect/mysql-test/connect/r/json_mongo_c.result b/storage/connect/mysql-test/connect/r/json_mongo_c.result index f9bfc01763e..454743e679b 100644 --- a/storage/connect/mysql-test/connect/r/json_mongo_c.result +++ b/storage/connect/mysql-test/connect/r/json_mongo_c.result @@ -2,7 +2,7 @@ set connect_enable_mongo=1; # # Test the MONGO table type # -CREATE TABLE t1 (Document varchar(1024) field_format='*') +CREATE TABLE t1 (Document varchar(1024) JPATH='*') ENGINE=CONNECT TABLE_TYPE=JSON TABNAME=restaurants CONNECTION='mongodb://localhost:27017' LRECL=1024 OPTION_LIST='Driver=C,Version=0' DATA_CHARSET=utf8; SELECT * from t1 limit 3; @@ -15,7 +15,7 @@ DROP TABLE t1; # Test catfunc # CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=JSON TABNAME=restaurants CATFUNC=columns -OPTION_LIST='Level=1,Driver=C,Version=0' DATA_CHARSET=utf8 CONNECTION='mongodb://localhost:27017' LRECL=1024; +OPTION_LIST='Depth=1,Driver=C,Version=0' DATA_CHARSET=utf8 CONNECTION='mongodb://localhost:27017' LRECL=1024; SELECT * from t1; Column_Name Data_Type Type_Name Column_Size 
Buffer_Length Decimal_Digits Nullable Jpath _id 1 CHAR 24 24 0 0 _id @@ -27,7 +27,7 @@ borough 1 CHAR 13 13 0 0 cuisine 1 CHAR 64 64 0 0 grades_date 1 CHAR 1024 1024 0 1 grades.0.date grades_grade 1 CHAR 14 14 0 1 grades.0.grade -grades_score 5 BIGINT 2 2 0 1 grades.0.score +grades_score 7 INTEGER 2 2 0 1 grades.0.score name 1 CHAR 98 98 0 0 restaurant_id 1 CHAR 8 8 0 0 DROP TABLE t1; @@ -60,7 +60,7 @@ DROP TABLE t1; # Test discovery # CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=JSON TABNAME=restaurants -OPTION_LIST='Level=1,Driver=C,Version=0' CONNECTION='mongodb://localhost:27017' LRECL=1024 DATA_CHARSET=utf8; +OPTION_LIST='Depth=1,Driver=C,Version=0' CONNECTION='mongodb://localhost:27017' LRECL=1024 DATA_CHARSET=utf8; SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( @@ -73,10 +73,10 @@ t1 CREATE TABLE `t1` ( `cuisine` char(64) NOT NULL, `grades_date` varchar(1024) DEFAULT NULL `JPATH`='grades.0.date', `grades_grade` char(14) DEFAULT NULL `JPATH`='grades.0.grade', - `grades_score` bigint(2) DEFAULT NULL `JPATH`='grades.0.score', + `grades_score` int(2) DEFAULT NULL `JPATH`='grades.0.score', `name` char(98) NOT NULL, `restaurant_id` char(8) NOT NULL -) ENGINE=CONNECT DEFAULT CHARSET=latin1 CONNECTION='mongodb://localhost:27017' `TABLE_TYPE`='JSON' `TABNAME`='restaurants' `OPTION_LIST`='Level=1,Driver=C,Version=0' `DATA_CHARSET`='utf8' `LRECL`=1024 +) ENGINE=CONNECT DEFAULT CHARSET=latin1 CONNECTION='mongodb://localhost:27017' `TABLE_TYPE`='JSON' `TABNAME`='restaurants' `OPTION_LIST`='Depth=1,Driver=C,Version=0' `DATA_CHARSET`='utf8' `LRECL`=1024 SELECT * FROM t1 LIMIT 5; _id address_building address_coord address_street address_zipcode borough cuisine grades_date grades_grade grades_score name restaurant_id 58ada47de5a51ddfcd5ed51c 1007 -73.856076999999999089 Morris Park Ave 10462 Bronx Bakery 1393804800 A 2 Morris Park Bake Shop 30075445 @@ -111,12 +111,12 @@ _id VARCHAR(24) NOT NULL, name VARCHAR(64) NOT NULL, cuisine CHAR(200) NOT NULL, borough CHAR(16) NOT NULL, -street VARCHAR(65) FIELD_FORMAT='address.street', -building CHAR(16) FIELD_FORMAT='address.building', -zipcode CHAR(5) FIELD_FORMAT='address.zipcode', -grade CHAR(1) FIELD_FORMAT='grades.0.grade', -score INT(4) NOT NULL FIELD_FORMAT='grades.0.score', -`date` DATE FIELD_FORMAT='grades.0.date', +street VARCHAR(65) JPATH='address.street', +building CHAR(16) JPATH='address.building', +zipcode CHAR(5) JPATH='address.zipcode', +grade CHAR(1) JPATH='grades.0.grade', +score INT(4) NOT NULL JPATH='grades.0.score', +`date` DATE JPATH='grades.0.date', restaurant_id VARCHAR(255) NOT NULL) ENGINE=CONNECT TABLE_TYPE=JSON TABNAME='restaurants' DATA_CHARSET=utf8 OPTION_LIST='Driver=C,Version=0' CONNECTION='mongodb://localhost:27017' LRECL=1024; @@ -259,7 +259,7 @@ t1 CREATE TABLE `t1` ( `borough` char(13) NOT NULL, `grades_date` bigint(13) DEFAULT NULL `JPATH`='grades.0.date', `grades_grade` char(14) DEFAULT NULL `JPATH`='grades.0.grade', - `grades_score` bigint(2) DEFAULT NULL `JPATH`='grades.0.score', + `grades_score` int(2) DEFAULT NULL `JPATH`='grades.0.score', `name` char(98) NOT NULL, `restaurant_id` char(8) NOT NULL ) ENGINE=CONNECT DEFAULT CHARSET=latin1 CONNECTION='mongodb://localhost:27017' `TABLE_TYPE`='JSON' `TABNAME`='restaurants' `COLIST`='{"projection":{"cuisine":0}}' `FILTER`='{"cuisine":"French","borough":{"$ne":"Manhattan"}}' `OPTION_LIST`='Driver=C,level=2,version=0' `LRECL`=1024 @@ -305,8 +305,8 @@ false CREATE TABLE t1 ( _id char(5) NOT NULL, city char(16) NOT NULL, -loc_0 double(12,6) NOT NULL 
`FIELD_FORMAT`='loc.0', -loc_1 char(12) NOT NULL `FIELD_FORMAT`='loc.1', +loc_0 double(12,6) NOT NULL `JPATH`='loc.0', +loc_1 char(12) NOT NULL `JPATH`='loc.1', pop int(11) NOT NULL, state char(2) NOT NULL) ENGINE=CONNECT CONNECTION='mongodb://localhost:27017' TABLE_TYPE=JSON TABNAME='cities' @@ -344,11 +344,11 @@ true CREATE TABLE t1 ( _id int(4) NOT NULL, item CHAR(8) NOT NULL, -prices_0 INT(6) FIELD_FORMAT='prices.0', -prices_1 INT(6) FIELD_FORMAT='prices.1', -prices_2 INT(6) FIELD_FORMAT='prices.2', -prices_3 INT(6) FIELD_FORMAT='prices.3', -prices_4 INT(6) FIELD_FORMAT='prices.4') +prices_0 INT(6) JPATH='prices.0', +prices_1 INT(6) JPATH='prices.1', +prices_2 INT(6) JPATH='prices.2', +prices_3 INT(6) JPATH='prices.3', +prices_4 INT(6) JPATH='prices.4') ENGINE=CONNECT TABLE_TYPE=JSON TABNAME='testcoll' DATA_CHARSET=utf8 OPTION_LIST='Driver=C,Version=0' CONNECTION='mongodb://localhost:27017' LRECL=1024; INSERT INTO t1 VALUES diff --git a/storage/connect/mysql-test/connect/r/mongo_c.result b/storage/connect/mysql-test/connect/r/mongo_c.result index 132bb34ce64..cabdf713d16 100644 --- a/storage/connect/mysql-test/connect/r/mongo_c.result +++ b/storage/connect/mysql-test/connect/r/mongo_c.result @@ -2,7 +2,7 @@ set connect_enable_mongo=1; # # Test the MONGO table type # -CREATE TABLE t1 (Document varchar(1024) field_format='*') +CREATE TABLE t1 (Document varchar(1024) JPATH='*') ENGINE=CONNECT TABLE_TYPE=MONGO TABNAME=restaurants OPTION_LIST='Driver=C,Version=0' DATA_CHARSET=utf8; SELECT * from t1 limit 3; @@ -15,7 +15,7 @@ DROP TABLE t1; # Test catfunc # CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=MONGO TABNAME=restaurants CATFUNC=columns -OPTION_LIST='Level=1,Driver=C,Version=0' DATA_CHARSET=utf8 ; +OPTION_LIST='Depth=1,Driver=C,Version=0' DATA_CHARSET=utf8 ; SELECT * from t1; Column_Name Data_Type Type_Name Column_Size Buffer_Length Decimal_Digits Nullable Bpath _id 1 CHAR 24 24 0 0 @@ -58,7 +58,7 @@ DROP TABLE t1; # Test discovery # CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=MONGO TABNAME=restaurants -OPTION_LIST='Level=1,Driver=C,Version=0' DATA_CHARSET=utf8; +OPTION_LIST='Depth=1,Driver=C,Version=0' DATA_CHARSET=utf8; SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( @@ -72,7 +72,7 @@ t1 CREATE TABLE `t1` ( `grades_0` varchar(512) DEFAULT NULL `FIELD_FORMAT`='grades.0', `name` char(98) NOT NULL, `restaurant_id` char(8) NOT NULL -) ENGINE=CONNECT DEFAULT CHARSET=latin1 `TABLE_TYPE`='MONGO' `TABNAME`='restaurants' `OPTION_LIST`='Level=1,Driver=C,Version=0' `DATA_CHARSET`='utf8' +) ENGINE=CONNECT DEFAULT CHARSET=latin1 `TABLE_TYPE`='MONGO' `TABNAME`='restaurants' `OPTION_LIST`='Depth=1,Driver=C,Version=0' `DATA_CHARSET`='utf8' SELECT * FROM t1 LIMIT 5; _id address_building address_coord address_street address_zipcode borough cuisine grades_0 name restaurant_id 58ada47de5a51ddfcd5ed51c 1007 Morris Park Ave 10462 Bronx Bakery {"date":{"$date":1393804800000},"grade":"A","score":2} Morris Park Bake Shop 30075445 @@ -107,12 +107,12 @@ _id VARCHAR(24) NOT NULL, name VARCHAR(64) NOT NULL, cuisine CHAR(200) NOT NULL, borough CHAR(16) NOT NULL, -street VARCHAR(65) FIELD_FORMAT='address.street', -building CHAR(16) FIELD_FORMAT='address.building', -zipcode CHAR(5) FIELD_FORMAT='address.zipcode', -grade CHAR(1) FIELD_FORMAT='grades.0.grade', -score INT(4) NOT NULL FIELD_FORMAT='grades.0.score', -`date` DATE FIELD_FORMAT='grades.0.date', +street VARCHAR(65) JPATH='address.street', +building CHAR(16) JPATH='address.building', +zipcode CHAR(5) JPATH='address.zipcode', +grade CHAR(1) 
JPATH='grades.0.grade', +score INT(4) NOT NULL JPATH='grades.0.score', +`date` DATE JPATH='grades.0.date', restaurant_id VARCHAR(255) NOT NULL) ENGINE=CONNECT TABLE_TYPE=MONGO TABNAME='restaurants' DATA_CHARSET=utf8 OPTION_LIST='Driver=C,Version=0' ; @@ -301,8 +301,8 @@ false CREATE TABLE t1 ( _id char(5) NOT NULL, city char(16) NOT NULL, -loc_0 double(12,6) NOT NULL `FIELD_FORMAT`='loc.0', -loc_1 char(12) NOT NULL `FIELD_FORMAT`='loc.1', +loc_0 double(12,6) NOT NULL `JPATH`='loc.0', +loc_1 char(12) NOT NULL `JPATH`='loc.1', pop int(11) NOT NULL, state char(2) NOT NULL) ENGINE=CONNECT CONNECTION='mongodb://localhost:27017' TABLE_TYPE=MONGO TABNAME='cities' @@ -340,11 +340,11 @@ true CREATE TABLE t1 ( _id int(4) NOT NULL, item CHAR(8) NOT NULL, -prices_0 INT(6) FIELD_FORMAT='prices.0', -prices_1 INT(6) FIELD_FORMAT='prices.1', -prices_2 INT(6) FIELD_FORMAT='prices.2', -prices_3 INT(6) FIELD_FORMAT='prices.3', -prices_4 INT(6) FIELD_FORMAT='prices.4') +prices_0 INT(6) JPATH='prices.0', +prices_1 INT(6) JPATH='prices.1', +prices_2 INT(6) JPATH='prices.2', +prices_3 INT(6) JPATH='prices.3', +prices_4 INT(6) JPATH='prices.4') ENGINE=CONNECT TABLE_TYPE=MONGO TABNAME='testcoll' DATA_CHARSET=utf8 OPTION_LIST='Driver=C,Version=0' ; INSERT INTO t1 VALUES diff --git a/storage/connect/mysql-test/connect/r/mongo_java_2.result b/storage/connect/mysql-test/connect/r/mongo_java_2.result index bc186d7137e..890b88324bb 100644 --- a/storage/connect/mysql-test/connect/r/mongo_java_2.result +++ b/storage/connect/mysql-test/connect/r/mongo_java_2.result @@ -2,7 +2,7 @@ set connect_enable_mongo=1; # # Test the MONGO table type # -CREATE TABLE t1 (Document varchar(1024) field_format='*') +CREATE TABLE t1 (Document varchar(1024) JPATH='*') ENGINE=CONNECT TABLE_TYPE=MONGO TABNAME=restaurants OPTION_LIST='Driver=Java,Version=2' DATA_CHARSET=utf8; SELECT * from t1 limit 3; @@ -15,7 +15,7 @@ DROP TABLE t1; # Test catfunc # CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=MONGO TABNAME=restaurants CATFUNC=columns -OPTION_LIST='Level=1,Driver=Java,Version=2' DATA_CHARSET=utf8 ; +OPTION_LIST='Depth=1,Driver=Java,Version=2' DATA_CHARSET=utf8 ; SELECT * from t1; Column_Name Data_Type Type_Name Column_Size Buffer_Length Decimal_Digits Nullable Bpath _id 1 CHAR 24 24 0 0 @@ -58,7 +58,7 @@ DROP TABLE t1; # Test discovery # CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=MONGO TABNAME=restaurants -OPTION_LIST='Level=1,Driver=Java,Version=2' DATA_CHARSET=utf8; +OPTION_LIST='Depth=1,Driver=Java,Version=2' DATA_CHARSET=utf8; SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( @@ -72,7 +72,7 @@ t1 CREATE TABLE `t1` ( `grades_0` char(99) DEFAULT NULL `FIELD_FORMAT`='grades.0', `name` char(98) NOT NULL, `restaurant_id` char(8) NOT NULL -) ENGINE=CONNECT DEFAULT CHARSET=latin1 `TABLE_TYPE`='MONGO' `TABNAME`='restaurants' `OPTION_LIST`='Level=1,Driver=Java,Version=2' `DATA_CHARSET`='utf8' +) ENGINE=CONNECT DEFAULT CHARSET=latin1 `TABLE_TYPE`='MONGO' `TABNAME`='restaurants' `OPTION_LIST`='Depth=1,Driver=Java,Version=2' `DATA_CHARSET`='utf8' SELECT * FROM t1 LIMIT 5; _id address_building address_coord address_street address_zipcode borough cuisine grades_0 name restaurant_id 58ada47de5a51ddfcd5ed51c 1007 [ -73.856077 , 40.848447] Morris Park Ave 10462 Bronx Bakery { "date" : { "$date" : "2014-03-03T00:00:00.000Z"} , "grade" : "A" , "score" : 2} Morris Park Bake Shop 30075445 @@ -107,12 +107,12 @@ _id VARCHAR(24) NOT NULL, name VARCHAR(64) NOT NULL, cuisine CHAR(200) NOT NULL, borough CHAR(16) NOT NULL, -street VARCHAR(65) 
FIELD_FORMAT='address.street', -building CHAR(16) FIELD_FORMAT='address.building', -zipcode CHAR(5) FIELD_FORMAT='address.zipcode', -grade CHAR(1) FIELD_FORMAT='grades.0.grade', -score INT(4) NOT NULL FIELD_FORMAT='grades.0.score', -`date` DATE FIELD_FORMAT='grades.0.date', +street VARCHAR(65) JPATH='address.street', +building CHAR(16) JPATH='address.building', +zipcode CHAR(5) JPATH='address.zipcode', +grade CHAR(1) JPATH='grades.0.grade', +score INT(4) NOT NULL JPATH='grades.0.score', +`date` DATE JPATH='grades.0.date', restaurant_id VARCHAR(255) NOT NULL) ENGINE=CONNECT TABLE_TYPE=MONGO TABNAME='restaurants' DATA_CHARSET=utf8 OPTION_LIST='Driver=Java,Version=2' ; @@ -301,8 +301,8 @@ false CREATE TABLE t1 ( _id char(5) NOT NULL, city char(16) NOT NULL, -loc_0 double(12,6) NOT NULL `FIELD_FORMAT`='loc.0', -loc_1 char(12) NOT NULL `FIELD_FORMAT`='loc.1', +loc_0 double(12,6) NOT NULL `JPATH`='loc.0', +loc_1 char(12) NOT NULL `JPATH`='loc.1', pop int(11) NOT NULL, state char(2) NOT NULL) ENGINE=CONNECT CONNECTION='mongodb://localhost:27017' TABLE_TYPE=MONGO TABNAME='cities' @@ -340,11 +340,11 @@ true CREATE TABLE t1 ( _id int(4) NOT NULL, item CHAR(8) NOT NULL, -prices_0 INT(6) FIELD_FORMAT='prices.0', -prices_1 INT(6) FIELD_FORMAT='prices.1', -prices_2 INT(6) FIELD_FORMAT='prices.2', -prices_3 INT(6) FIELD_FORMAT='prices.3', -prices_4 INT(6) FIELD_FORMAT='prices.4') +prices_0 INT(6) JPATH='prices.0', +prices_1 INT(6) JPATH='prices.1', +prices_2 INT(6) JPATH='prices.2', +prices_3 INT(6) JPATH='prices.3', +prices_4 INT(6) JPATH='prices.4') ENGINE=CONNECT TABLE_TYPE=MONGO TABNAME='testcoll' DATA_CHARSET=utf8 OPTION_LIST='Driver=Java,Version=2' ; INSERT INTO t1 VALUES diff --git a/storage/connect/mysql-test/connect/r/mongo_java_3.result b/storage/connect/mysql-test/connect/r/mongo_java_3.result index 30c696fc9eb..f6f9895a29e 100644 --- a/storage/connect/mysql-test/connect/r/mongo_java_3.result +++ b/storage/connect/mysql-test/connect/r/mongo_java_3.result @@ -2,7 +2,7 @@ set connect_enable_mongo=1; # # Test the MONGO table type # -CREATE TABLE t1 (Document varchar(1024) field_format='*') +CREATE TABLE t1 (Document varchar(1024) JPATH='*') ENGINE=CONNECT TABLE_TYPE=MONGO TABNAME=restaurants OPTION_LIST='Driver=Java,Version=3' DATA_CHARSET=utf8; SELECT * from t1 limit 3; @@ -15,7 +15,7 @@ DROP TABLE t1; # Test catfunc # CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=MONGO TABNAME=restaurants CATFUNC=columns -OPTION_LIST='Level=1,Driver=Java,Version=3' DATA_CHARSET=utf8 ; +OPTION_LIST='Depth=1,Driver=Java,Version=3' DATA_CHARSET=utf8 ; SELECT * from t1; Column_Name Data_Type Type_Name Column_Size Buffer_Length Decimal_Digits Nullable Bpath _id 1 CHAR 24 24 0 0 @@ -58,7 +58,7 @@ DROP TABLE t1; # Test discovery # CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=MONGO TABNAME=restaurants -OPTION_LIST='Level=1,Driver=Java,Version=3' DATA_CHARSET=utf8; +OPTION_LIST='Depth=1,Driver=Java,Version=3' DATA_CHARSET=utf8; SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( @@ -72,7 +72,7 @@ t1 CREATE TABLE `t1` ( `grades_0` char(84) DEFAULT NULL `FIELD_FORMAT`='grades.0', `name` char(98) NOT NULL, `restaurant_id` char(8) NOT NULL -) ENGINE=CONNECT DEFAULT CHARSET=latin1 `TABLE_TYPE`='MONGO' `TABNAME`='restaurants' `OPTION_LIST`='Level=1,Driver=Java,Version=3' `DATA_CHARSET`='utf8' +) ENGINE=CONNECT DEFAULT CHARSET=latin1 `TABLE_TYPE`='MONGO' `TABNAME`='restaurants' `OPTION_LIST`='Depth=1,Driver=Java,Version=3' `DATA_CHARSET`='utf8' SELECT * FROM t1 LIMIT 5; _id address_building address_coord address_street 
address_zipcode borough cuisine grades_0 name restaurant_id 58ada47de5a51ddfcd5ed51c 1007 [-73.856077, 40.848447] Morris Park Ave 10462 Bronx Bakery { "date" : { "$date" : 1393804800000 }, "grade" : "A", "score" : 2 } Morris Park Bake Shop 30075445 @@ -107,12 +107,12 @@ _id VARCHAR(24) NOT NULL, name VARCHAR(64) NOT NULL, cuisine CHAR(200) NOT NULL, borough CHAR(16) NOT NULL, -street VARCHAR(65) FIELD_FORMAT='address.street', -building CHAR(16) FIELD_FORMAT='address.building', -zipcode CHAR(5) FIELD_FORMAT='address.zipcode', -grade CHAR(1) FIELD_FORMAT='grades.0.grade', -score INT(4) NOT NULL FIELD_FORMAT='grades.0.score', -`date` DATE FIELD_FORMAT='grades.0.date', +street VARCHAR(65) JPATH='address.street', +building CHAR(16) JPATH='address.building', +zipcode CHAR(5) JPATH='address.zipcode', +grade CHAR(1) JPATH='grades.0.grade', +score INT(4) NOT NULL JPATH='grades.0.score', +`date` DATE JPATH='grades.0.date', restaurant_id VARCHAR(255) NOT NULL) ENGINE=CONNECT TABLE_TYPE=MONGO TABNAME='restaurants' DATA_CHARSET=utf8 OPTION_LIST='Driver=Java,Version=3' ; @@ -301,8 +301,8 @@ false CREATE TABLE t1 ( _id char(5) NOT NULL, city char(16) NOT NULL, -loc_0 double(12,6) NOT NULL `FIELD_FORMAT`='loc.0', -loc_1 char(12) NOT NULL `FIELD_FORMAT`='loc.1', +loc_0 double(12,6) NOT NULL `JPATH`='loc.0', +loc_1 char(12) NOT NULL `JPATH`='loc.1', pop int(11) NOT NULL, state char(2) NOT NULL) ENGINE=CONNECT CONNECTION='mongodb://localhost:27017' TABLE_TYPE=MONGO TABNAME='cities' @@ -340,11 +340,11 @@ true CREATE TABLE t1 ( _id int(4) NOT NULL, item CHAR(8) NOT NULL, -prices_0 INT(6) FIELD_FORMAT='prices.0', -prices_1 INT(6) FIELD_FORMAT='prices.1', -prices_2 INT(6) FIELD_FORMAT='prices.2', -prices_3 INT(6) FIELD_FORMAT='prices.3', -prices_4 INT(6) FIELD_FORMAT='prices.4') +prices_0 INT(6) JPATH='prices.0', +prices_1 INT(6) JPATH='prices.1', +prices_2 INT(6) JPATH='prices.2', +prices_3 INT(6) JPATH='prices.3', +prices_4 INT(6) JPATH='prices.4') ENGINE=CONNECT TABLE_TYPE=MONGO TABNAME='testcoll' DATA_CHARSET=utf8 OPTION_LIST='Driver=Java,Version=3' ; INSERT INTO t1 VALUES diff --git a/storage/connect/mysql-test/connect/r/odbc_oracle.result b/storage/connect/mysql-test/connect/r/odbc_oracle.result index 8dc7dc07bb1..acb7d9a74c9 100644 --- a/storage/connect/mysql-test/connect/r/odbc_oracle.result +++ b/storage/connect/mysql-test/connect/r/odbc_oracle.result @@ -10,7 +10,7 @@ SET NAMES utf8; # All tables in all schemas (filtered with WHERE) CREATE TABLE t1 ENGINE=CONNECT -TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=mtr' +TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=newmtr' CATFUNC=Tables; SELECT * FROM t1 WHERE Table_Schema='MTR' ORDER BY Table_Schema, Table_Name; Table_Cat Table_Schema Table_Name Table_Type Remark @@ -20,7 +20,7 @@ NULL MTR V1 VIEW NULL DROP TABLE t1; # All tables in all schemas (filtered with WHERE) CREATE TABLE t1 ENGINE=CONNECT -TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=mtr' +TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=newmtr' CATFUNC=Tables TABNAME='%.%'; SELECT * FROM t1 WHERE Table_Schema='MTR' ORDER BY Table_Schema, Table_Name; Table_Cat Table_Schema Table_Name Table_Type Remark @@ -30,7 +30,7 @@ NULL MTR V1 VIEW NULL DROP TABLE t1; # All tables "T1" in all schemas (filtered with WHERE) CREATE TABLE t1 ENGINE=CONNECT -TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=mtr' +TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=newmtr' CATFUNC=Tables TABNAME='%.T1'; SELECT * FROM t1 
WHERE Table_Schema='MTR' ORDER BY Table_Schema, Table_Name; Table_Cat Table_Schema Table_Name Table_Type Remark @@ -38,7 +38,7 @@ NULL MTR T1 TABLE NULL DROP TABLE t1; # All tables "T1" in all schemas (filtered with WHERE) CREATE TABLE t1 ENGINE=CONNECT -TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=mtr' +TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=newmtr' CATFUNC=Tables TABNAME='T1'; SELECT * FROM t1 WHERE Table_Schema='MTR' ORDER BY Table_Schema, Table_Name; Table_Cat Table_Schema Table_Name Table_Type Remark @@ -46,7 +46,7 @@ NULL MTR T1 TABLE NULL DROP TABLE t1; # Table "T1" in the schema "MTR" CREATE TABLE t1 ENGINE=CONNECT -TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=mtr' +TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=newmtr' CATFUNC=Tables TABNAME='MTR.T1'; SELECT * FROM t1 ORDER BY Table_Schema, Table_Name; Table_Cat Table_Schema Table_Name Table_Type Remark @@ -54,7 +54,7 @@ NULL MTR T1 TABLE NULL DROP TABLE t1; # All tables in the schema "MTR" CREATE TABLE t1 ENGINE=CONNECT -TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=mtr' +TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=newmtr' CATFUNC=Tables TABNAME='MTR.%'; SELECT * FROM t1 ORDER BY Table_Schema, Table_Name; Table_Cat Table_Schema Table_Name Table_Type Remark @@ -68,7 +68,7 @@ DROP TABLE t1; # All columns in all schemas (limited with WHERE) CREATE TABLE t1 ENGINE=CONNECT -TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=mtr' +TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=newmtr' CATFUNC=Columns; SELECT * FROM t1 WHERE Table_Schema='MTR' ORDER BY Table_Schema, Table_Name; Table_Cat Table_Schema Table_Name Column_Name Data_Type Type_Name Column_Size Buffer_Length Decimal_Digits Radix Nullable Remarks @@ -80,7 +80,7 @@ Table_Cat Table_Schema Table_Name Column_Name Data_Type Type_Name Column_Size Bu DROP TABLE t1; # All columns in all schemas (limited with WHERE) CREATE TABLE t1 ENGINE=CONNECT -TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=mtr' +TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=newmtr' CATFUNC=Columns TABNAME='%.%'; SELECT * FROM t1 WHERE Table_Schema='MTR' ORDER BY Table_Schema, Table_Name; Table_Cat Table_Schema Table_Name Column_Name Data_Type Type_Name Column_Size Buffer_Length Decimal_Digits Radix Nullable Remarks @@ -91,7 +91,7 @@ Table_Cat Table_Schema Table_Name Column_Name Data_Type Type_Name Column_Size Bu MTR V1 B 6 NUMBER 38 40 NULL NULL 1 DROP TABLE t1; # All tables "T1" in all schemas (limited with WHERE) -CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=mtr' CATFUNC=Columns TABNAME='%.T1'; +CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=newmtr' CATFUNC=Columns TABNAME='%.T1'; SELECT * FROM t1 WHERE Table_Schema='MTR' ORDER BY Table_Schema, Table_Name; Table_Cat Table_Schema Table_Name Column_Name Data_Type Type_Name Column_Size Buffer_Length Decimal_Digits Radix Nullable Remarks MTR T1 A 3 DECIMAL 38 40 0 10 1 @@ -99,7 +99,7 @@ Table_Cat Table_Schema Table_Name Column_Name Data_Type Type_Name Column_Size Bu DROP TABLE t1; # Table "T1" in the schema "MTR" CREATE TABLE t1 ENGINE=CONNECT -TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=mtr' +TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=newmtr' CATFUNC=Columns TABNAME='MTR.T1'; SELECT * FROM t1 ORDER BY Table_Schema, Table_Name; Table_Cat Table_Schema 
Table_Name Column_Name Data_Type Type_Name Column_Size Buffer_Length Decimal_Digits Radix Nullable Remarks @@ -108,7 +108,7 @@ Table_Cat Table_Schema Table_Name Column_Name Data_Type Type_Name Column_Size Bu DROP TABLE t1; # All tables "T1" in all schemas (filtered with WHERE) CREATE TABLE t1 ENGINE=CONNECT -TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=mtr' +TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=newmtr' CATFUNC=Columns TABNAME='%.T1'; SELECT * FROM t1 WHERE Table_Schema='MTR' ORDER BY Table_Schema, Table_Name; Table_Cat Table_Schema Table_Name Column_Name Data_Type Type_Name Column_Size Buffer_Length Decimal_Digits Radix Nullable Remarks @@ -121,14 +121,14 @@ DROP TABLE t1; # Table "T1" in the default schema ("MTR") CREATE TABLE t1 ENGINE=CONNECT -TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=mtr' +TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=newmtr' TABNAME='T1'; SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `A` decimal(40,0) DEFAULT NULL, `B` double DEFAULT NULL -) ENGINE=CONNECT DEFAULT CHARSET=latin1 CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=mtr' `TABLE_TYPE`='ODBC' `TABNAME`='T1' +) ENGINE=CONNECT DEFAULT CHARSET=latin1 CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=newmtr' `TABLE_TYPE`='ODBC' `TABNAME`='T1' SELECT * FROM t1 ORDER BY A; A B 10 1000000000 @@ -157,14 +157,14 @@ DROP VIEW v1; DROP TABLE t1; # Table "T1" in the schema "MTR" CREATE TABLE t1 ENGINE=CONNECT -TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=mtr' +TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=newmtr' TABNAME='MTR.T1'; SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `A` decimal(40,0) DEFAULT NULL, `B` double DEFAULT NULL -) ENGINE=CONNECT DEFAULT CHARSET=latin1 CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=mtr' `TABLE_TYPE`='ODBC' `TABNAME`='MTR.T1' +) ENGINE=CONNECT DEFAULT CHARSET=latin1 CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=newmtr' `TABLE_TYPE`='ODBC' `TABNAME`='MTR.T1' SELECT * FROM t1; A B 10 1000000000 @@ -173,14 +173,14 @@ A B DROP TABLE t1; # View "V1" in the schema "MTR" CREATE TABLE t1 ENGINE=CONNECT -TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=mtr' +TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=newmtr' TABNAME='MTR.V1'; SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `A` decimal(40,0) DEFAULT NULL, `B` double DEFAULT NULL -) ENGINE=CONNECT DEFAULT CHARSET=latin1 CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=mtr' `TABLE_TYPE`='ODBC' `TABNAME`='MTR.V1' +) ENGINE=CONNECT DEFAULT CHARSET=latin1 CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=newmtr' `TABLE_TYPE`='ODBC' `TABNAME`='MTR.V1' SELECT * FROM t1; A B 10 1000000000 @@ -209,13 +209,13 @@ DROP VIEW v1; DROP TABLE t1; # Table "T2" in the schema "MTR" CREATE TABLE t1 ENGINE=CONNECT -TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=mtr' +TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=newmtr' TABNAME='MTR.T2'; SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `A` varchar(64) DEFAULT NULL -) ENGINE=CONNECT DEFAULT CHARSET=latin1 CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=mtr' `TABLE_TYPE`='ODBC' `TABNAME`='MTR.T2' +) ENGINE=CONNECT DEFAULT CHARSET=latin1 CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=newmtr' `TABLE_TYPE`='ODBC' `TABNAME`='MTR.T2' SELECT * FROM t1; A test diff --git a/storage/connect/mysql-test/connect/r/xml.result b/storage/connect/mysql-test/connect/r/xml.result 
index 6a0c9db27b3..09dac32a2a4 100644 --- a/storage/connect/mysql-test/connect/r/xml.result +++ b/storage/connect/mysql-test/connect/r/xml.result @@ -85,9 +85,9 @@ DROP TABLE t1; # Testing mixed tag and attribute values # CREATE TABLE t1 ( -ISBN CHAR(15) FIELD_FORMAT='@', -LANG CHAR(2) FIELD_FORMAT='@', -SUBJECT CHAR(32) FIELD_FORMAT='@', +ISBN CHAR(15) XPATH='@', +LANG CHAR(2) XPATH='@', +SUBJECT CHAR(32) XPATH='@', AUTHOR CHAR(50), TITLE CHAR(32), TRANSLATOR CHAR(40), @@ -118,9 +118,9 @@ DROP TABLE t1; # Testing INSERT on mixed tag and attribute values # CREATE TABLE t1 ( -ISBN CHAR(15) FIELD_FORMAT='@', -LANG CHAR(2) FIELD_FORMAT='@', -SUBJECT CHAR(32) FIELD_FORMAT='@', +ISBN CHAR(15) XPATH='@', +LANG CHAR(2) XPATH='@', +SUBJECT CHAR(32) XPATH='@', AUTHOR CHAR(50), TITLE CHAR(32), TRANSLATOR CHAR(40), @@ -205,18 +205,18 @@ DROP TABLE t1; # Testing XPath # CREATE TABLE t1 ( -isbn CHAR(15) FIELD_FORMAT='@ISBN', -language CHAR(2) FIELD_FORMAT='@LANG', -subject CHAR(32) FIELD_FORMAT='@SUBJECT', -authorfn CHAR(20) FIELD_FORMAT='AUTHOR/FIRSTNAME', -authorln CHAR(20) FIELD_FORMAT='AUTHOR/LASTNAME', -title CHAR(32) FIELD_FORMAT='TITLE', -translated CHAR(32) FIELD_FORMAT='TRANSLATOR/@PREFIX', -tranfn CHAR(20) FIELD_FORMAT='TRANSLATOR/FIRSTNAME', -tranln CHAR(20) FIELD_FORMAT='TRANSLATOR/LASTNAME', -publisher CHAR(20) FIELD_FORMAT='PUBLISHER/NAME', -location CHAR(20) FIELD_FORMAT='PUBLISHER/PLACE', -year INT(4) FIELD_FORMAT='DATEPUB' +isbn CHAR(15) XPATH='@ISBN', +language CHAR(2) XPATH='@LANG', +subject CHAR(32) XPATH='@SUBJECT', +authorfn CHAR(20) XPATH='AUTHOR/FIRSTNAME', +authorln CHAR(20) XPATH='AUTHOR/LASTNAME', +title CHAR(32) XPATH='TITLE', +translated CHAR(32) XPATH='TRANSLATOR/@PREFIX', +tranfn CHAR(20) XPATH='TRANSLATOR/FIRSTNAME', +tranln CHAR(20) XPATH='TRANSLATOR/LASTNAME', +publisher CHAR(20) XPATH='PUBLISHER/NAME', +location CHAR(20) XPATH='PUBLISHER/PLACE', +year INT(4) XPATH='DATEPUB' ) ENGINE=CONNECT TABLE_TYPE=XML FILE_NAME='xsample.xml' TABNAME='BIBLIO' OPTION_LIST='rownode=BOOK,skipnull=1,xmlsup=domdoc'; SELECT * FROM t1; @@ -258,7 +258,7 @@ DROP TABLE t1; # CREATE TABLE t1 ( -isbn CHAR(15) FIELD_FORMAT='@isbn' +isbn CHAR(15) XPATH='@isbn' ) ENGINE=CONNECT TABLE_TYPE=XML FILE_NAME='xsample.xml' TABNAME='BIBLIO' OPTION_LIST='rownode=BOOK,skipnull=1,xmlsup=domdoc'; SELECT * FROM t1; @@ -374,7 +374,7 @@ INSERT INTO t1 VALUES (_cp1251 0xC0C1C2C3); Warnings: Level Warning Code 1105 -Message Com error: Unable to save character to 'iso-8859-1' encoding. +Message Com error: Impossible d'enregistrer le caractère dans le codage iso-8859-1.
INSERT INTO t1 VALUES ('&<>"\''); SELECT node, hex(node) FROM t1; diff --git a/storage/connect/mysql-test/connect/r/xml2.result b/storage/connect/mysql-test/connect/r/xml2.result index f7bbc17c8a0..891c6e6f8dd 100644 --- a/storage/connect/mysql-test/connect/r/xml2.result +++ b/storage/connect/mysql-test/connect/r/xml2.result @@ -87,9 +87,9 @@ DROP TABLE t1; # Testing mixed tag and attribute values # CREATE TABLE t1 ( -ISBN CHAR(15) FIELD_FORMAT='@', -LANG CHAR(2) FIELD_FORMAT='@', -SUBJECT CHAR(32) FIELD_FORMAT='@', +ISBN CHAR(15) XPATH='@', +LANG CHAR(2) XPATH='@', +SUBJECT CHAR(32) XPATH='@', AUTHOR CHAR(50), TITLE CHAR(32), TRANSLATOR CHAR(40), @@ -120,9 +120,9 @@ DROP TABLE t1; # Testing INSERT on mixed tag and attribute values # CREATE TABLE t1 ( -ISBN CHAR(15) FIELD_FORMAT='@', -LANG CHAR(2) FIELD_FORMAT='@', -SUBJECT CHAR(32) FIELD_FORMAT='@', +ISBN CHAR(15) XPATH='@', +LANG CHAR(2) XPATH='@', +SUBJECT CHAR(32) XPATH='@', AUTHOR CHAR(50), TITLE CHAR(32), TRANSLATOR CHAR(40), @@ -207,18 +207,18 @@ DROP TABLE t1; # Testing XPath # CREATE TABLE t1 ( -isbn CHAR(15) FIELD_FORMAT='@ISBN', -language CHAR(2) FIELD_FORMAT='@LANG', -subject CHAR(32) FIELD_FORMAT='@SUBJECT', -authorfn CHAR(20) FIELD_FORMAT='AUTHOR/FIRSTNAME', -authorln CHAR(20) FIELD_FORMAT='AUTHOR/LASTNAME', -title CHAR(32) FIELD_FORMAT='TITLE', -translated CHAR(32) FIELD_FORMAT='TRANSLATOR/@PREFIX', -tranfn CHAR(20) FIELD_FORMAT='TRANSLATOR/FIRSTNAME', -tranln CHAR(20) FIELD_FORMAT='TRANSLATOR/LASTNAME', -publisher CHAR(20) FIELD_FORMAT='PUBLISHER/NAME', -location CHAR(20) FIELD_FORMAT='PUBLISHER/PLACE', -year INT(4) FIELD_FORMAT='DATEPUB' +isbn CHAR(15) XPATH='@ISBN', +language CHAR(2) XPATH='@LANG', +subject CHAR(32) XPATH='@SUBJECT', +authorfn CHAR(20) XPATH='AUTHOR/FIRSTNAME', +authorln CHAR(20) XPATH='AUTHOR/LASTNAME', +title CHAR(32) XPATH='TITLE', +translated CHAR(32) XPATH='TRANSLATOR/@PREFIX', +tranfn CHAR(20) XPATH='TRANSLATOR/FIRSTNAME', +tranln CHAR(20) XPATH='TRANSLATOR/LASTNAME', +publisher CHAR(20) XPATH='PUBLISHER/NAME', +location CHAR(20) XPATH='PUBLISHER/PLACE', +year INT(4) XPATH='DATEPUB' ) ENGINE=CONNECT TABLE_TYPE=XML FILE_NAME='xsample.xml' TABNAME='BIBLIO' OPTION_LIST='rownode=BOOK,skipnull=1,xmlsup=libxml2'; SELECT * FROM t1; @@ -260,7 +260,7 @@ DROP TABLE t1; # CREATE TABLE t1 ( -isbn CHAR(15) FIELD_FORMAT='@isbn' +isbn CHAR(15) XPATH='@isbn' ) ENGINE=CONNECT TABLE_TYPE=XML FILE_NAME='xsample.xml' TABNAME='BIBLIO' OPTION_LIST='rownode=BOOK,skipnull=1,xmlsup=libxml2'; SELECT * FROM t1; diff --git a/storage/connect/mysql-test/connect/r/xml2_html.result b/storage/connect/mysql-test/connect/r/xml2_html.result index 143f46529f6..499108b724d 100644 --- a/storage/connect/mysql-test/connect/r/xml2_html.result +++ b/storage/connect/mysql-test/connect/r/xml2_html.result @@ -5,9 +5,9 @@ SET NAMES utf8; # Testing HTML like XML file # CREATE TABLE beers ( -`Name` CHAR(16) FIELD_FORMAT='brandName', -`Origin` CHAR(16) FIELD_FORMAT='origin', -`Description` CHAR(32) FIELD_FORMAT='details') +`Name` CHAR(16) XPATH='brandName', +`Origin` CHAR(16) XPATH='origin', +`Description` CHAR(32) XPATH='details') ENGINE=CONNECT TABLE_TYPE=XML FILE_NAME='beers.xml' TABNAME='table' OPTION_LIST='xmlsup=libxml2,rownode=tr,colnode=td'; SELECT * FROM beers; diff --git a/storage/connect/mysql-test/connect/r/xml2_mult.result b/storage/connect/mysql-test/connect/r/xml2_mult.result index 87d1118edd5..ac4cdea4e7f 100644 --- a/storage/connect/mysql-test/connect/r/xml2_mult.result +++ b/storage/connect/mysql-test/connect/r/xml2_mult.result @@ 
-5,9 +5,9 @@ SET NAMES utf8; # Testing expanded values # CREATE TABLE `bookstore` ( -`category` CHAR(16) NOT NULL FIELD_FORMAT='@', +`category` CHAR(16) NOT NULL XPATH='@', `title` VARCHAR(50) NOT NULL, -`lang` char(2) NOT NULL FIELD_FORMAT='title/@', +`lang` char(2) NOT NULL XPATH='title/@', `author` VARCHAR(24) NOT NULL, `year` INT(4) NOT NULL, `price` DOUBLE(8,2) NOT NULL) diff --git a/storage/connect/mysql-test/connect/r/xml2_zip.result b/storage/connect/mysql-test/connect/r/xml2_zip.result index f176149c53f..7d67a3d5498 100644 --- a/storage/connect/mysql-test/connect/r/xml2_zip.result +++ b/storage/connect/mysql-test/connect/r/xml2_zip.result @@ -4,17 +4,17 @@ Warning 1105 No file name. Table will use t1.xml # Testing zipped XML tables # CREATE TABLE t1 ( -ISBN CHAR(13) NOT NULL FIELD_FORMAT='@', -LANG CHAR(2) NOT NULL FIELD_FORMAT='@', -SUBJECT CHAR(12) NOT NULL FIELD_FORMAT='@', -AUTHOR_FIRSTNAME CHAR(15) NOT NULL FIELD_FORMAT='AUTHOR/FIRSTNAME', -AUTHOR_LASTNAME CHAR(8) NOT NULL FIELD_FORMAT='AUTHOR/LASTNAME', -TRANSLATOR_PREFIX CHAR(24) DEFAULT NULL FIELD_FORMAT='TRANSLATOR/@PREFIX', -TRANSLATOR_FIRSTNAME CHAR(6) DEFAULT NULL FIELD_FORMAT='TRANSLATOR/FIRSTNAME', -TRANSLATOR_LASTNAME CHAR(6) DEFAULT NULL FIELD_FORMAT='TRANSLATOR/LASTNAME', +ISBN CHAR(13) NOT NULL XPATH='@', +LANG CHAR(2) NOT NULL XPATH='@', +SUBJECT CHAR(12) NOT NULL XPATH='@', +AUTHOR_FIRSTNAME CHAR(15) NOT NULL XPATH='AUTHOR/FIRSTNAME', +AUTHOR_LASTNAME CHAR(8) NOT NULL XPATH='AUTHOR/LASTNAME', +TRANSLATOR_PREFIX CHAR(24) DEFAULT NULL XPATH='TRANSLATOR/@PREFIX', +TRANSLATOR_FIRSTNAME CHAR(6) DEFAULT NULL XPATH='TRANSLATOR/FIRSTNAME', +TRANSLATOR_LASTNAME CHAR(6) DEFAULT NULL XPATH='TRANSLATOR/LASTNAME', TITLE CHAR(30) NOT NULL, -PUBLISHER_NAME CHAR(15) NOT NULL FIELD_FORMAT='PUBLISHER/NAME', -PUBLISHER_PLACE CHAR(5) NOT NULL FIELD_FORMAT='PUBLISHER/PLACE', +PUBLISHER_NAME CHAR(15) NOT NULL XPATH='PUBLISHER/NAME', +PUBLISHER_PLACE CHAR(5) NOT NULL XPATH='PUBLISHER/PLACE', DATEPUB CHAR(4) NOT NULL ) ENGINE=CONNECT TABLE_TYPE=XML FILE_NAME='xsample2.zip' ZIPPED=YES OPTION_LIST='entry=xsample2.xml,load=xsample2.xml,rownode=BOOK,xmlsup=libxml2,expand=1,mulnode=AUTHOR'; diff --git a/storage/connect/mysql-test/connect/r/xml_html.result b/storage/connect/mysql-test/connect/r/xml_html.result index 4b984a49901..308c67ffc28 100644 --- a/storage/connect/mysql-test/connect/r/xml_html.result +++ b/storage/connect/mysql-test/connect/r/xml_html.result @@ -3,9 +3,9 @@ SET NAMES utf8; # Testing HTML like XML file # CREATE TABLE beers ( -`Name` CHAR(16) FIELD_FORMAT='brandName', -`Origin` CHAR(16) FIELD_FORMAT='origin', -`Description` CHAR(32) FIELD_FORMAT='details') +`Name` CHAR(16) XPATH='brandName', +`Origin` CHAR(16) XPATH='origin', +`Description` CHAR(32) XPATH='details') ENGINE=CONNECT TABLE_TYPE=XML FILE_NAME='beers.xml' TABNAME='table' OPTION_LIST='xmlsup=domdoc,rownode=tr,colnode=td'; SELECT * FROM beers; diff --git a/storage/connect/mysql-test/connect/r/xml_mult.result b/storage/connect/mysql-test/connect/r/xml_mult.result index 9922b40060c..427adf94313 100644 --- a/storage/connect/mysql-test/connect/r/xml_mult.result +++ b/storage/connect/mysql-test/connect/r/xml_mult.result @@ -3,9 +3,9 @@ SET NAMES utf8; # Testing expanded values # CREATE TABLE `bookstore` ( -`category` CHAR(16) NOT NULL FIELD_FORMAT='@', +`category` CHAR(16) NOT NULL XPATH='@', `title` VARCHAR(50) NOT NULL, -`lang` char(2) NOT NULL FIELD_FORMAT='title/@', +`lang` char(2) NOT NULL XPATH='title/@', `author` VARCHAR(24) NOT NULL, `year` INT(4) NOT NULL, 
`price` DOUBLE(8,2) NOT NULL) diff --git a/storage/connect/mysql-test/connect/r/xml_zip.result b/storage/connect/mysql-test/connect/r/xml_zip.result index f7790e4cfff..ddde0fa8a35 100644 --- a/storage/connect/mysql-test/connect/r/xml_zip.result +++ b/storage/connect/mysql-test/connect/r/xml_zip.result @@ -2,17 +2,17 @@ # Testing zipped XML tables # CREATE TABLE t1 ( -ISBN CHAR(13) NOT NULL FIELD_FORMAT='@', -LANG CHAR(2) NOT NULL FIELD_FORMAT='@', -SUBJECT CHAR(12) NOT NULL FIELD_FORMAT='@', -AUTHOR_FIRSTNAME CHAR(15) NOT NULL FIELD_FORMAT='AUTHOR/FIRSTNAME', -AUTHOR_LASTNAME CHAR(8) NOT NULL FIELD_FORMAT='AUTHOR/LASTNAME', -TRANSLATOR_PREFIX CHAR(24) DEFAULT NULL FIELD_FORMAT='TRANSLATOR/@PREFIX', -TRANSLATOR_FIRSTNAME CHAR(6) DEFAULT NULL FIELD_FORMAT='TRANSLATOR/FIRSTNAME', -TRANSLATOR_LASTNAME CHAR(6) DEFAULT NULL FIELD_FORMAT='TRANSLATOR/LASTNAME', +ISBN CHAR(13) NOT NULL XPATH='@', +LANG CHAR(2) NOT NULL XPATH='@', +SUBJECT CHAR(12) NOT NULL XPATH='@', +AUTHOR_FIRSTNAME CHAR(15) NOT NULL XPATH='AUTHOR/FIRSTNAME', +AUTHOR_LASTNAME CHAR(8) NOT NULL XPATH='AUTHOR/LASTNAME', +TRANSLATOR_PREFIX CHAR(24) DEFAULT NULL XPATH='TRANSLATOR/@PREFIX', +TRANSLATOR_FIRSTNAME CHAR(6) DEFAULT NULL XPATH='TRANSLATOR/FIRSTNAME', +TRANSLATOR_LASTNAME CHAR(6) DEFAULT NULL XPATH='TRANSLATOR/LASTNAME', TITLE CHAR(30) NOT NULL, -PUBLISHER_NAME CHAR(15) NOT NULL FIELD_FORMAT='PUBLISHER/NAME', -PUBLISHER_PLACE CHAR(5) NOT NULL FIELD_FORMAT='PUBLISHER/PLACE', +PUBLISHER_NAME CHAR(15) NOT NULL XPATH='PUBLISHER/NAME', +PUBLISHER_PLACE CHAR(5) NOT NULL XPATH='PUBLISHER/PLACE', DATEPUB CHAR(4) NOT NULL ) ENGINE=CONNECT TABLE_TYPE=XML FILE_NAME='xsample2.zip' ZIPPED=YES OPTION_LIST='entry=xsample2.xml,load=xsample2.xml,rownode=BOOK,xmlsup=domdoc,expand=1,mulnode=AUTHOR'; diff --git a/storage/connect/mysql-test/connect/t/alter_xml.test b/storage/connect/mysql-test/connect/t/alter_xml.test index 8b2164d5548..4c2e1670f4c 100644 --- a/storage/connect/mysql-test/connect/t/alter_xml.test +++ b/storage/connect/mysql-test/connect/t/alter_xml.test @@ -21,7 +21,7 @@ SELECT * FROM t2; --echo # NOTE: The first (ignored) row is due to the remaining HEADER=1 option. --echo # Testing field option modification -ALTER TABLE t1 MODIFY d CHAR(10) NOT NULL FIELD_FORMAT='@', HEADER=0; +ALTER TABLE t1 MODIFY d CHAR(10) NOT NULL XPATH='@', HEADER=0; SELECT * FROM t1; SHOW CREATE TABLE t1; SELECT * FROM t2; diff --git a/storage/connect/mysql-test/connect/t/alter_xml2.test b/storage/connect/mysql-test/connect/t/alter_xml2.test index d67c80c4e9f..ec4065baa47 100644 --- a/storage/connect/mysql-test/connect/t/alter_xml2.test +++ b/storage/connect/mysql-test/connect/t/alter_xml2.test @@ -21,7 +21,7 @@ SELECT * FROM t2; --echo # NOTE: The first (ignored) row is due to the remaining HEADER=1 option. 
--echo # Testing field option modification -ALTER TABLE t1 MODIFY d CHAR(10) NOT NULL FIELD_FORMAT='@', HEADER=0; +ALTER TABLE t1 MODIFY d CHAR(10) NOT NULL XPATH='@', HEADER=0; SELECT * FROM t1; SHOW CREATE TABLE t1; SELECT * FROM t2; diff --git a/storage/connect/mysql-test/connect/t/jdbc_oracle.test b/storage/connect/mysql-test/connect/t/jdbc_oracle.test index 10cb7a7b77d..1316352d4f5 100644 --- a/storage/connect/mysql-test/connect/t/jdbc_oracle.test +++ b/storage/connect/mysql-test/connect/t/jdbc_oracle.test @@ -8,20 +8,20 @@ CREATE TABLE t2 ( number int(5) not null flag=1, message varchar(255) flag=2) ENGINE=CONNECT TABLE_TYPE=JDBC CONNECTION='jdbc:oracle:thin:@localhost:1521:xe' -OPTION_LIST='User=system,Password=manager,Execsrc=1'; +OPTION_LIST='User=system,Password=Choupy01,Execsrc=1'; SELECT * FROM t2 WHERE command = 'drop table employee'; SELECT * FROM t2 WHERE command = 'create table employee (id int not null, name varchar(32), title char(16), salary number(8,2))'; SELECT * FROM t2 WHERE command = "insert into employee values(4567,'Johnson', 'Engineer', 12560.50)"; CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=JDBC CATFUNC=tables CONNECTION='jdbc:oracle:thin:@localhost:1521:xe' -OPTION_LIST='User=system,Password=manager'; +OPTION_LIST='User=system,Password=Choupy01'; SELECT * FROM t1 WHERE table_name='employee'; DROP TABLE t1; CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=JDBC TABNAME='EMPLOYEE' CATFUNC=columns CONNECTION='jdbc:oracle:thin:@localhost:1521:xe' -OPTION_LIST='User=system,Password=manager'; +OPTION_LIST='User=system,Password=Choupy01'; SELECT * FROM t1; DROP TABLE t1; @@ -32,7 +32,7 @@ CREATE SERVER 'oracle' FOREIGN DATA WRAPPER 'oracle.jdbc.driver.OracleDriver' OP HOST 'jdbc:oracle:thin:@localhost:1521:xe', DATABASE 'SYSTEM', USER 'system', -PASSWORD 'manager', +PASSWORD 'Choupy01', PORT 0, SOCKET '', OWNER 'SYSTEM'); diff --git a/storage/connect/mysql-test/connect/t/json.test b/storage/connect/mysql-test/connect/t/json.test index 018489525f7..8b42ef9cfab 100644 --- a/storage/connect/mysql-test/connect/t/json.test +++ b/storage/connect/mysql-test/connect/t/json.test @@ -35,15 +35,15 @@ DROP TABLE t1; CREATE TABLE t1 ( ISBN CHAR(15), - Language CHAR(2) FIELD_FORMAT='$.LANG', - Subject CHAR(32) FIELD_FORMAT='$.SUBJECT', - Authors INT(2) FIELD_FORMAT='$.AUTHOR[#]', - Title CHAR(32) FIELD_FORMAT='$.TITLE', - Translation CHAR(32) FIELD_FORMAT='$.TRANSLATION', - Translator CHAR(80) FIELD_FORMAT='$.TRANSLATOR', - Publisher CHAR(20) FIELD_FORMAT='$.PUBLISHER.NAME', - Location CHAR(16) FIELD_FORMAT='$.PUBLISHER.PLACE', - Year int(4) FIELD_FORMAT='$.DATEPUB' + Language CHAR(2) JPATH='$.LANG', + Subject CHAR(32) JPATH='$.SUBJECT', + Authors INT(2) JPATH='$.AUTHOR[#]', + Title CHAR(32) JPATH='$.TITLE', + Translation CHAR(32) JPATH='$.TRANSLATION', + Translator CHAR(80) JPATH='$.TRANSLATOR', + Publisher CHAR(20) JPATH='$.PUBLISHER.NAME', + Location CHAR(16) JPATH='$.PUBLISHER.PLACE', + Year int(4) JPATH='$.DATEPUB' ) ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='biblio.json'; SELECT * FROM t1; @@ -55,16 +55,16 @@ DROP TABLE t1; CREATE TABLE t1 ( ISBN CHAR(15), - Language CHAR(2) FIELD_FORMAT='$.LANG', - Subject CHAR(32) FIELD_FORMAT='$.SUBJECT', - AuthorFN CHAR(128) FIELD_FORMAT='$.AUTHOR[" and "].FIRSTNAME', - AuthorLN CHAR(128) FIELD_FORMAT='$.AUTHOR[" and "].LASTNAME', - Title CHAR(32) FIELD_FORMAT='$.TITLE', - Translation CHAR(32) FIELD_FORMAT='$.TRANSLATION', - Translator CHAR(80) FIELD_FORMAT='$.TRANSLATOR', - Publisher CHAR(20) FIELD_FORMAT='$.PUBLISHER.NAME', - Location CHAR(16) 
FIELD_FORMAT='$.PUBLISHER.PLACE', - Year int(4) FIELD_FORMAT='$.DATEPUB' + Language CHAR(2) JPATH='$.LANG', + Subject CHAR(32) JPATH='$.SUBJECT', + AuthorFN CHAR(128) JPATH='$.AUTHOR[" and "].FIRSTNAME', + AuthorLN CHAR(128) JPATH='$.AUTHOR[" and "].LASTNAME', + Title CHAR(32) JPATH='$.TITLE', + Translation CHAR(32) JPATH='$.TRANSLATION', + Translator CHAR(80) JPATH='$.TRANSLATOR', + Publisher CHAR(20) JPATH='$.PUBLISHER.NAME', + Location CHAR(16) JPATH='$.PUBLISHER.PLACE', + Year int(4) JPATH='$.DATEPUB' ) ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='biblio.json'; SELECT * FROM t1; @@ -76,16 +76,16 @@ DROP TABLE t1; CREATE TABLE t1 ( ISBN CHAR(15), - Language CHAR(2) FIELD_FORMAT='$.LANG', - Subject CHAR(32) FIELD_FORMAT='$.SUBJECT', - AuthorFN CHAR(128) FIELD_FORMAT='$.AUTHOR[*].FIRSTNAME', - AuthorLN CHAR(128) FIELD_FORMAT='$.AUTHOR[*].LASTNAME', - Title CHAR(32) FIELD_FORMAT='$.TITLE', - Translation CHAR(32) FIELD_FORMAT='$.TRANSLATION', - Translator CHAR(80) FIELD_FORMAT='$.TRANSLATOR', - Publisher CHAR(20) FIELD_FORMAT='$.PUBLISHER.NAME', - Location CHAR(16) FIELD_FORMAT='$.PUBLISHER.PLACE', - Year int(4) FIELD_FORMAT='$.DATEPUB' + Language CHAR(2) JPATH='$.LANG', + Subject CHAR(32) JPATH='$.SUBJECT', + AuthorFN CHAR(128) JPATH='$.AUTHOR[*].FIRSTNAME', + AuthorLN CHAR(128) JPATH='$.AUTHOR[*].LASTNAME', + Title CHAR(32) JPATH='$.TITLE', + Translation CHAR(32) JPATH='$.TRANSLATION', + Translator CHAR(80) JPATH='$.TRANSLATOR', + Publisher CHAR(20) JPATH='$.PUBLISHER.NAME', + Location CHAR(16) JPATH='$.PUBLISHER.PLACE', + Year int(4) JPATH='$.DATEPUB' ) ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='biblio.json'; SELECT * FROM t1; @@ -122,17 +122,17 @@ DROP TABLE t1; CREATE TABLE t1 ( ISBN CHAR(15) NOT NULL, - Language CHAR(2) FIELD_FORMAT='$.LANG', - Subject CHAR(32) FIELD_FORMAT='$.SUBJECT', - AuthorFN CHAR(128) FIELD_FORMAT='$.AUTHOR[*].FIRSTNAME', - AuthorLN CHAR(128) FIELD_FORMAT='$.AUTHOR[*].LASTNAME', - Title CHAR(32) FIELD_FORMAT='$.TITLE', - Translation CHAR(32) FIELD_FORMAT='$.TRANSLATED.PREFIX', - TranslatorFN CHAR(80) FIELD_FORMAT='$.TRANSLATED.TRANSLATOR.FIRSTNAME', - TranslatorLN CHAR(80) FIELD_FORMAT='$.TRANSLATED.TRANSLATOR.LASTNAME', - Publisher CHAR(20) FIELD_FORMAT='$.PUBLISHER.NAME', - Location CHAR(16) FIELD_FORMAT='$.PUBLISHER.PLACE', - Year int(4) FIELD_FORMAT='$.DATEPUB', + Language CHAR(2) JPATH='$.LANG', + Subject CHAR(32) JPATH='$.SUBJECT', + AuthorFN CHAR(128) JPATH='$.AUTHOR[*].FIRSTNAME', + AuthorLN CHAR(128) JPATH='$.AUTHOR[*].LASTNAME', + Title CHAR(32) JPATH='$.TITLE', + Translation CHAR(32) JPATH='$.TRANSLATED.PREFIX', + TranslatorFN CHAR(80) JPATH='$.TRANSLATED.TRANSLATOR.FIRSTNAME', + TranslatorLN CHAR(80) JPATH='$.TRANSLATED.TRANSLATOR.LASTNAME', + Publisher CHAR(20) JPATH='$.PUBLISHER.NAME', + Location CHAR(16) JPATH='$.PUBLISHER.PLACE', + Year int(4) JPATH='$.DATEPUB', INDEX IX(ISBN) ) ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='bib0.json' LRECL=320 OPTION_LIST='Pretty=0'; @@ -148,9 +148,9 @@ DROP TABLE t1; --echo # CREATE TABLE t1 ( WHO CHAR(12), -WEEK INT(2) FIELD_FORMAT='$.WEEK[*].NUMBER', -WHAT CHAR(32) FIELD_FORMAT='$.WEEK[].EXPENSE["+"].WHAT', -AMOUNT DOUBLE(8,2) FIELD_FORMAT='$.WEEK[].EXPENSE[+].AMOUNT') +WEEK INT(2) JPATH='$.WEEK[*].NUMBER', +WHAT CHAR(32) JPATH='$.WEEK[].EXPENSE["+"].WHAT', +AMOUNT DOUBLE(8,2) JPATH='$.WEEK[].EXPENSE[+].AMOUNT') ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='expense.json'; SELECT * FROM t1; DROP TABLE t1; @@ -160,9 +160,9 @@ DROP TABLE t1; --echo # CREATE TABLE t1 ( WHO CHAR(12), -WEEK INT(2) 
FIELD_FORMAT='$.WEEK[*].NUMBER', -WHAT CHAR(32) FIELD_FORMAT='$.WEEK[*].EXPENSE[*].WHAT', -AMOUNT DOUBLE(8,2) FIELD_FORMAT='$.WEEK[*].EXPENSE[*].AMOUNT') +WEEK INT(2) JPATH='$.WEEK[*].NUMBER', +WHAT CHAR(32) JPATH='$.WEEK[*].EXPENSE[*].WHAT', +AMOUNT DOUBLE(8,2) JPATH='$.WEEK[*].EXPENSE[*].AMOUNT') ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='expense.json'; #--error ER_GET_ERRMSG SELECT * FROM t1; @@ -173,14 +173,14 @@ DROP TABLE t1; --echo # CREATE TABLE t1 ( WHO CHAR(12) NOT NULL, -WEEKS CHAR(12) NOT NULL FIELD_FORMAT='$.WEEK[", "].NUMBER', -SUMS CHAR(64) NOT NULL FIELD_FORMAT='$.WEEK["+"].EXPENSE[+].AMOUNT', -SUM DOUBLE(8,2) NOT NULL FIELD_FORMAT='$.WEEK[+].EXPENSE[+].AMOUNT', -AVGS CHAR(64) NOT NULL FIELD_FORMAT='$.WEEK["+"].EXPENSE[!].AMOUNT', -SUMAVG DOUBLE(8,2) NOT NULL FIELD_FORMAT='$.WEEK[+].EXPENSE[!].AMOUNT', -AVGSUM DOUBLE(8,2) NOT NULL FIELD_FORMAT='$.WEEK[!].EXPENSE[+].AMOUNT', -AVGAVG DOUBLE(8,2) NOT NULL FIELD_FORMAT='$.WEEK[!].EXPENSE[!].AMOUNT', -AVERAGE DOUBLE(8,2) NOT NULL FIELD_FORMAT='$.WEEK[!].EXPENSE[*].AMOUNT') +WEEKS CHAR(12) NOT NULL JPATH='$.WEEK[", "].NUMBER', +SUMS CHAR(64) NOT NULL JPATH='$.WEEK["+"].EXPENSE[+].AMOUNT', +SUM DOUBLE(8,2) NOT NULL JPATH='$.WEEK[+].EXPENSE[+].AMOUNT', +AVGS CHAR(64) NOT NULL JPATH='$.WEEK["+"].EXPENSE[!].AMOUNT', +SUMAVG DOUBLE(8,2) NOT NULL JPATH='$.WEEK[+].EXPENSE[!].AMOUNT', +AVGSUM DOUBLE(8,2) NOT NULL JPATH='$.WEEK[!].EXPENSE[+].AMOUNT', +AVGAVG DOUBLE(8,2) NOT NULL JPATH='$.WEEK[!].EXPENSE[!].AMOUNT', +AVERAGE DOUBLE(8,2) NOT NULL JPATH='$.WEEK[!].EXPENSE[*].AMOUNT') ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='expense.json'; SELECT * FROM t1; DROP TABLE t1; @@ -190,25 +190,25 @@ DROP TABLE t1; --echo # CREATE TABLE t2 ( WHO CHAR(12), -WEEK INT(2) FIELD_FORMAT='$.WEEK[0].NUMBER', -WHAT CHAR(32) FIELD_FORMAT='$.WEEK[0].EXPENSE[*].WHAT', -AMOUNT DOUBLE(8,2) FIELD_FORMAT='$.WEEK[0].EXPENSE[*].AMOUNT') +WEEK INT(2) JPATH='$.WEEK[0].NUMBER', +WHAT CHAR(32) JPATH='$.WEEK[0].EXPENSE[*].WHAT', +AMOUNT DOUBLE(8,2) JPATH='$.WEEK[0].EXPENSE[*].AMOUNT') ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='expense.json'; SELECT * FROM t2; CREATE TABLE t3 ( WHO CHAR(12), -WEEK INT(2) FIELD_FORMAT='$.WEEK[1].NUMBER', -WHAT CHAR(32) FIELD_FORMAT='$.WEEK[1].EXPENSE[*].WHAT', -AMOUNT DOUBLE(8,2) FIELD_FORMAT='$.WEEK[1].EXPENSE[*].AMOUNT') +WEEK INT(2) JPATH='$.WEEK[1].NUMBER', +WHAT CHAR(32) JPATH='$.WEEK[1].EXPENSE[*].WHAT', +AMOUNT DOUBLE(8,2) JPATH='$.WEEK[1].EXPENSE[*].AMOUNT') ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='expense.json'; SELECT * FROM t3; CREATE TABLE t4 ( WHO CHAR(12), -WEEK INT(2) FIELD_FORMAT='$.WEEK[2].NUMBER', -WHAT CHAR(32) FIELD_FORMAT='$.WEEK[2].EXPENSE[*].WHAT', -AMOUNT DOUBLE(8,2) FIELD_FORMAT='$.WEEK[2].EXPENSE[*].AMOUNT') +WEEK INT(2) JPATH='$.WEEK[2].NUMBER', +WHAT CHAR(32) JPATH='$.WEEK[2].EXPENSE[*].WHAT', +AMOUNT DOUBLE(8,2) JPATH='$.WEEK[2].EXPENSE[*].AMOUNT') ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='expense.json'; SELECT * FROM t4; @@ -230,24 +230,24 @@ DROP TABLE t1, t2, t3, t4; CREATE TABLE t2 ( WHO CHAR(12), WEEK INT(2), -WHAT CHAR(32) FIELD_FORMAT='$.EXPENSE[*].WHAT', -AMOUNT DOUBLE(8,2) FIELD_FORMAT='$.EXPENSE.[*].AMOUNT') +WHAT CHAR(32) JPATH='$.EXPENSE[*].WHAT', +AMOUNT DOUBLE(8,2) JPATH='$.EXPENSE.[*].AMOUNT') ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='mulexp3.json'; SELECT * FROM t2; CREATE TABLE t3 ( WHO CHAR(12), WEEK INT(2), -WHAT CHAR(32) FIELD_FORMAT='$.EXPENSE[*].WHAT', -AMOUNT DOUBLE(8,2) FIELD_FORMAT='$.EXPENSE.[*].AMOUNT') +WHAT CHAR(32) JPATH='$.EXPENSE[*].WHAT', +AMOUNT DOUBLE(8,2) 
JPATH='$.EXPENSE.[*].AMOUNT') ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='mulexp4.json'; SELECT * FROM t3; CREATE TABLE t4 ( WHO CHAR(12), WEEK INT(2), -WHAT CHAR(32) FIELD_FORMAT='$.EXPENSE[*].WHAT', -AMOUNT DOUBLE(8,2) FIELD_FORMAT='$.EXPENSE.[*].AMOUNT') +WHAT CHAR(32) JPATH='$.EXPENSE[*].WHAT', +AMOUNT DOUBLE(8,2) JPATH='$.EXPENSE.[*].AMOUNT') ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='mulexp5.json'; SELECT * FROM t4; @@ -257,8 +257,8 @@ SELECT * FROM t4; CREATE TABLE t1 ( WHO CHAR(12), WEEK INT(2), -WHAT CHAR(32) FIELD_FORMAT='$.EXPENSE[*].WHAT', -AMOUNT DOUBLE(8,2) FIELD_FORMAT='$.EXPENSE.[*].AMOUNT') +WHAT CHAR(32) JPATH='$.EXPENSE[*].WHAT', +AMOUNT DOUBLE(8,2) JPATH='$.EXPENSE.[*].AMOUNT') ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='mulexp*.json' MULTIPLE=1; SELECT * FROM t1 ORDER BY WHO, WEEK, WHAT, AMOUNT; DROP TABLE t1; @@ -269,8 +269,8 @@ DROP TABLE t1; CREATE TABLE t1 ( WHO CHAR(12), WEEK INT(2), -WHAT CHAR(32) FIELD_FORMAT='$.EXPENSE[*].WHAT', -AMOUNT DOUBLE(8,2) FIELD_FORMAT='$.EXPENSE.[*].AMOUNT') +WHAT CHAR(32) JPATH='$.EXPENSE[*].WHAT', +AMOUNT DOUBLE(8,2) JPATH='$.EXPENSE.[*].AMOUNT') ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='mulexp%s.json'; ALTER TABLE t1 PARTITION BY LIST COLUMNS(WEEK) ( diff --git a/storage/connect/mysql-test/connect/t/mongo_test.inc b/storage/connect/mysql-test/connect/t/mongo_test.inc index 357fa55240b..1c8bf07184f 100644 --- a/storage/connect/mysql-test/connect/t/mongo_test.inc +++ b/storage/connect/mysql-test/connect/t/mongo_test.inc @@ -3,7 +3,7 @@ set connect_enable_mongo=1; --echo # --echo # Test the MONGO table type --echo # -eval CREATE TABLE t1 (Document varchar(1024) field_format='*') +eval CREATE TABLE t1 (Document varchar(1024) JPATH='*') ENGINE=CONNECT TABLE_TYPE=$TYPE TABNAME=restaurants $CONN OPTION_LIST='Driver=$DRV,Version=$VERS' DATA_CHARSET=utf8; SELECT * from t1 limit 3; @@ -13,7 +13,7 @@ DROP TABLE t1; --echo # Test catfunc --echo # eval CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=$TYPE TABNAME=restaurants CATFUNC=columns -OPTION_LIST='Level=1,Driver=$DRV,Version=$VERS' DATA_CHARSET=utf8 $CONN; +OPTION_LIST='Depth=1,Driver=$DRV,Version=$VERS' DATA_CHARSET=utf8 $CONN; SELECT * from t1; DROP TABLE t1; @@ -36,7 +36,7 @@ DROP TABLE t1; --echo # Test discovery --echo # eval CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=$TYPE TABNAME=restaurants -OPTION_LIST='Level=1,Driver=$DRV,Version=$VERS' $CONN DATA_CHARSET=utf8; +OPTION_LIST='Depth=1,Driver=$DRV,Version=$VERS' $CONN DATA_CHARSET=utf8; SHOW CREATE TABLE t1; SELECT * FROM t1 LIMIT 5; DROP TABLE t1; @@ -58,12 +58,12 @@ _id VARCHAR(24) NOT NULL, name VARCHAR(64) NOT NULL, cuisine CHAR(200) NOT NULL, borough CHAR(16) NOT NULL, -street VARCHAR(65) FIELD_FORMAT='address.street', -building CHAR(16) FIELD_FORMAT='address.building', -zipcode CHAR(5) FIELD_FORMAT='address.zipcode', -grade CHAR(1) FIELD_FORMAT='grades.0.grade', -score INT(4) NOT NULL FIELD_FORMAT='grades.0.score', -`date` DATE FIELD_FORMAT='grades.0.date', +street VARCHAR(65) JPATH='address.street', +building CHAR(16) JPATH='address.building', +zipcode CHAR(5) JPATH='address.zipcode', +grade CHAR(1) JPATH='grades.0.grade', +score INT(4) NOT NULL JPATH='grades.0.score', +`date` DATE JPATH='grades.0.date', restaurant_id VARCHAR(255) NOT NULL) ENGINE=CONNECT TABLE_TYPE=$TYPE TABNAME='restaurants' DATA_CHARSET=utf8 OPTION_LIST='Driver=$DRV,Version=$VERS' $CONN; @@ -156,8 +156,8 @@ DROP TABLE t1; eval CREATE TABLE t1 ( _id char(5) NOT NULL, city char(16) NOT NULL, - loc_0 double(12,6) NOT NULL `FIELD_FORMAT`='loc.0', - loc_1 char(12) NOT 
NULL `FIELD_FORMAT`='loc.1', + loc_0 double(12,6) NOT NULL `JPATH`='loc.0', + loc_1 char(12) NOT NULL `JPATH`='loc.1', pop int(11) NOT NULL, state char(2) NOT NULL) ENGINE=CONNECT CONNECTION='mongodb://localhost:27017' TABLE_TYPE=$TYPE TABNAME='cities' @@ -181,11 +181,11 @@ DROP TABLE t1; eval CREATE TABLE t1 ( _id int(4) NOT NULL, item CHAR(8) NOT NULL, - prices_0 INT(6) FIELD_FORMAT='prices.0', - prices_1 INT(6) FIELD_FORMAT='prices.1', - prices_2 INT(6) FIELD_FORMAT='prices.2', - prices_3 INT(6) FIELD_FORMAT='prices.3', - prices_4 INT(6) FIELD_FORMAT='prices.4') + prices_0 INT(6) JPATH='prices.0', + prices_1 INT(6) JPATH='prices.1', + prices_2 INT(6) JPATH='prices.2', + prices_3 INT(6) JPATH='prices.3', + prices_4 INT(6) JPATH='prices.4') ENGINE=CONNECT TABLE_TYPE=$TYPE TABNAME='testcoll' DATA_CHARSET=utf8 OPTION_LIST='Driver=$DRV,Version=$VERS' $CONN; INSERT INTO t1 VALUES diff --git a/storage/connect/mysql-test/connect/t/odbc_oracle.test b/storage/connect/mysql-test/connect/t/odbc_oracle.test index 9de742a2647..18d29f69f1a 100644 --- a/storage/connect/mysql-test/connect/t/odbc_oracle.test +++ b/storage/connect/mysql-test/connect/t/odbc_oracle.test @@ -78,42 +78,42 @@ SET NAMES utf8; --echo # All tables in all schemas (filtered with WHERE) CREATE TABLE t1 ENGINE=CONNECT -TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=mtr' +TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=newmtr' CATFUNC=Tables; SELECT * FROM t1 WHERE Table_Schema='MTR' ORDER BY Table_Schema, Table_Name; DROP TABLE t1; --echo # All tables in all schemas (filtered with WHERE) CREATE TABLE t1 ENGINE=CONNECT -TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=mtr' +TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=newmtr' CATFUNC=Tables TABNAME='%.%'; SELECT * FROM t1 WHERE Table_Schema='MTR' ORDER BY Table_Schema, Table_Name; DROP TABLE t1; --echo # All tables "T1" in all schemas (filtered with WHERE) CREATE TABLE t1 ENGINE=CONNECT -TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=mtr' +TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=newmtr' CATFUNC=Tables TABNAME='%.T1'; SELECT * FROM t1 WHERE Table_Schema='MTR' ORDER BY Table_Schema, Table_Name; DROP TABLE t1; --echo # All tables "T1" in all schemas (filtered with WHERE) CREATE TABLE t1 ENGINE=CONNECT -TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=mtr' +TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=newmtr' CATFUNC=Tables TABNAME='T1'; SELECT * FROM t1 WHERE Table_Schema='MTR' ORDER BY Table_Schema, Table_Name; DROP TABLE t1; --echo # Table "T1" in the schema "MTR" CREATE TABLE t1 ENGINE=CONNECT -TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=mtr' +TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=newmtr' CATFUNC=Tables TABNAME='MTR.T1'; SELECT * FROM t1 ORDER BY Table_Schema, Table_Name; DROP TABLE t1; --echo # All tables in the schema "MTR" CREATE TABLE t1 ENGINE=CONNECT -TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=mtr' +TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=newmtr' CATFUNC=Tables TABNAME='MTR.%'; SELECT * FROM t1 ORDER BY Table_Schema, Table_Name; DROP TABLE t1; @@ -127,7 +127,7 @@ DROP TABLE t1; --echo # All columns in all schemas (limited with WHERE) CREATE TABLE t1 ENGINE=CONNECT -TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=mtr' +TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=newmtr' CATFUNC=Columns; # Disable warnings to avoid 
"Result limited to 20000 lines" --disable_warnings @@ -137,7 +137,7 @@ DROP TABLE t1; --echo # All columns in all schemas (limited with WHERE) CREATE TABLE t1 ENGINE=CONNECT -TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=mtr' +TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=newmtr' CATFUNC=Columns TABNAME='%.%'; # Disable warnings to avoid "Result limited to 20000 lines" --disable_warnings @@ -146,20 +146,20 @@ SELECT * FROM t1 WHERE Table_Schema='MTR' ORDER BY Table_Schema, Table_Name; DROP TABLE t1; --echo # All tables "T1" in all schemas (limited with WHERE) -CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=mtr' CATFUNC=Columns TABNAME='%.T1'; +CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=newmtr' CATFUNC=Columns TABNAME='%.T1'; SELECT * FROM t1 WHERE Table_Schema='MTR' ORDER BY Table_Schema, Table_Name; DROP TABLE t1; --echo # Table "T1" in the schema "MTR" CREATE TABLE t1 ENGINE=CONNECT -TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=mtr' +TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=newmtr' CATFUNC=Columns TABNAME='MTR.T1'; SELECT * FROM t1 ORDER BY Table_Schema, Table_Name; DROP TABLE t1; --echo # All tables "T1" in all schemas (filtered with WHERE) CREATE TABLE t1 ENGINE=CONNECT -TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=mtr' +TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=newmtr' CATFUNC=Columns TABNAME='%.T1'; SELECT * FROM t1 WHERE Table_Schema='MTR' ORDER BY Table_Schema, Table_Name; DROP TABLE t1; @@ -172,7 +172,7 @@ DROP TABLE t1; --echo # Table "T1" in the default schema ("MTR") CREATE TABLE t1 ENGINE=CONNECT -TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=mtr' +TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=newmtr' TABNAME='T1'; SHOW CREATE TABLE t1; SELECT * FROM t1 ORDER BY A; @@ -189,7 +189,7 @@ DROP TABLE t1; --echo # Table "T1" in the schema "MTR" CREATE TABLE t1 ENGINE=CONNECT -TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=mtr' +TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=newmtr' TABNAME='MTR.T1'; SHOW CREATE TABLE t1; SELECT * FROM t1; @@ -197,7 +197,7 @@ DROP TABLE t1; --echo # View "V1" in the schema "MTR" CREATE TABLE t1 ENGINE=CONNECT -TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=mtr' +TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=newmtr' TABNAME='MTR.V1'; SHOW CREATE TABLE t1; SELECT * FROM t1; @@ -214,7 +214,7 @@ DROP TABLE t1; --echo # Table "T2" in the schema "MTR" CREATE TABLE t1 ENGINE=CONNECT -TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=mtr' +TABLE_TYPE=ODBC CONNECTION='DSN=ConnectEngineOracle;UID=mtr;PWD=newmtr' TABNAME='MTR.T2'; SHOW CREATE TABLE t1; SELECT * FROM t1; diff --git a/storage/connect/mysql-test/connect/t/xml.test b/storage/connect/mysql-test/connect/t/xml.test index 0fdf8e90b6e..669fa3f64dc 100644 --- a/storage/connect/mysql-test/connect/t/xml.test +++ b/storage/connect/mysql-test/connect/t/xml.test @@ -77,9 +77,9 @@ DROP TABLE t1; --echo # Testing mixed tag and attribute values --echo # CREATE TABLE t1 ( - ISBN CHAR(15) FIELD_FORMAT='@', - LANG CHAR(2) FIELD_FORMAT='@', - SUBJECT CHAR(32) FIELD_FORMAT='@', + ISBN CHAR(15) XPATH='@', + LANG CHAR(2) XPATH='@', + SUBJECT CHAR(32) XPATH='@', AUTHOR CHAR(50), TITLE CHAR(32), TRANSLATOR CHAR(40), @@ -98,9 +98,9 @@ DROP TABLE t1; --copy_file 
$MTR_SUITE_DIR/std_data/xsample.xml $MYSQLD_DATADIR/test/xsample2.xml --chmod 0644 $MYSQLD_DATADIR/test/xsample2.xml CREATE TABLE t1 ( - ISBN CHAR(15) FIELD_FORMAT='@', - LANG CHAR(2) FIELD_FORMAT='@', - SUBJECT CHAR(32) FIELD_FORMAT='@', + ISBN CHAR(15) XPATH='@', + LANG CHAR(2) XPATH='@', + SUBJECT CHAR(32) XPATH='@', AUTHOR CHAR(50), TITLE CHAR(32), TRANSLATOR CHAR(40), @@ -123,18 +123,18 @@ DROP TABLE t1; --echo # Testing XPath --echo # CREATE TABLE t1 ( - isbn CHAR(15) FIELD_FORMAT='@ISBN', - language CHAR(2) FIELD_FORMAT='@LANG', - subject CHAR(32) FIELD_FORMAT='@SUBJECT', - authorfn CHAR(20) FIELD_FORMAT='AUTHOR/FIRSTNAME', - authorln CHAR(20) FIELD_FORMAT='AUTHOR/LASTNAME', - title CHAR(32) FIELD_FORMAT='TITLE', - translated CHAR(32) FIELD_FORMAT='TRANSLATOR/@PREFIX', - tranfn CHAR(20) FIELD_FORMAT='TRANSLATOR/FIRSTNAME', - tranln CHAR(20) FIELD_FORMAT='TRANSLATOR/LASTNAME', - publisher CHAR(20) FIELD_FORMAT='PUBLISHER/NAME', - location CHAR(20) FIELD_FORMAT='PUBLISHER/PLACE', - year INT(4) FIELD_FORMAT='DATEPUB' + isbn CHAR(15) XPATH='@ISBN', + language CHAR(2) XPATH='@LANG', + subject CHAR(32) XPATH='@SUBJECT', + authorfn CHAR(20) XPATH='AUTHOR/FIRSTNAME', + authorln CHAR(20) XPATH='AUTHOR/LASTNAME', + title CHAR(32) XPATH='TITLE', + translated CHAR(32) XPATH='TRANSLATOR/@PREFIX', + tranfn CHAR(20) XPATH='TRANSLATOR/FIRSTNAME', + tranln CHAR(20) XPATH='TRANSLATOR/LASTNAME', + publisher CHAR(20) XPATH='PUBLISHER/NAME', + location CHAR(20) XPATH='PUBLISHER/PLACE', + year INT(4) XPATH='DATEPUB' ) ENGINE=CONNECT TABLE_TYPE=XML FILE_NAME='xsample.xml' TABNAME='BIBLIO' OPTION_LIST='rownode=BOOK,skipnull=1,xmlsup=domdoc'; SELECT * FROM t1; @@ -150,8 +150,8 @@ DROP TABLE t1; #--echo # Relative paths are not supported #--echo # #CREATE TABLE t1 ( -# authorfn CHAR(20) FIELD_FORMAT='//FIRSTNAME', -# authorln CHAR(20) FIELD_FORMAT='//LASTNAME' +# authorfn CHAR(20) XPATH='//FIRSTNAME', +# authorln CHAR(20) XPATH='//LASTNAME' #) ENGINE=CONNECT TABLE_TYPE=XML FILE_NAME='xsample.xml' # TABNAME='BIBLIO' OPTION_LIST='rownode=BOOK,skipnull=1'; #SELECT * FROM t1; @@ -165,8 +165,8 @@ DROP TABLE t1; #--echo # Absolute path is not supported #--echo # #CREATE TABLE t1 ( -# authorfn CHAR(20) FIELD_FORMAT='/BIBLIO/BOOK/AUTHOR/FIRSTNAME', -# authorln CHAR(20) FIELD_FORMAT='/BIBLIO/BOOK/AUTHOR/LASTNAME' +# authorfn CHAR(20) XPATH='/BIBLIO/BOOK/AUTHOR/FIRSTNAME', +# authorln CHAR(20) XPATH='/BIBLIO/BOOK/AUTHOR/LASTNAME' #) ENGINE=CONNECT TABLE_TYPE=XML FILE_NAME='xsample.xml' # TABNAME='BIBLIO' OPTION_LIST='rownode=BOOK,skipnull=1'; #SELECT * FROM t1; @@ -178,7 +178,7 @@ DROP TABLE t1; --echo # CREATE TABLE t1 ( - isbn CHAR(15) FIELD_FORMAT='@isbn' + isbn CHAR(15) XPATH='@isbn' ) ENGINE=CONNECT TABLE_TYPE=XML FILE_NAME='xsample.xml' TABNAME='BIBLIO' OPTION_LIST='rownode=BOOK,skipnull=1,xmlsup=domdoc'; SELECT * FROM t1; diff --git a/storage/connect/mysql-test/connect/t/xml2.test b/storage/connect/mysql-test/connect/t/xml2.test index 7bbc3dbd87c..9c5f685d399 100644 --- a/storage/connect/mysql-test/connect/t/xml2.test +++ b/storage/connect/mysql-test/connect/t/xml2.test @@ -77,9 +77,9 @@ DROP TABLE t1; --echo # Testing mixed tag and attribute values --echo # CREATE TABLE t1 ( - ISBN CHAR(15) FIELD_FORMAT='@', - LANG CHAR(2) FIELD_FORMAT='@', - SUBJECT CHAR(32) FIELD_FORMAT='@', + ISBN CHAR(15) XPATH='@', + LANG CHAR(2) XPATH='@', + SUBJECT CHAR(32) XPATH='@', AUTHOR CHAR(50), TITLE CHAR(32), TRANSLATOR CHAR(40), @@ -98,9 +98,9 @@ DROP TABLE t1; --copy_file $MTR_SUITE_DIR/std_data/xsample.xml 
$MYSQLD_DATADIR/test/xsample2.xml --chmod 0644 $MYSQLD_DATADIR/test/xsample2.xml CREATE TABLE t1 ( - ISBN CHAR(15) FIELD_FORMAT='@', - LANG CHAR(2) FIELD_FORMAT='@', - SUBJECT CHAR(32) FIELD_FORMAT='@', + ISBN CHAR(15) XPATH='@', + LANG CHAR(2) XPATH='@', + SUBJECT CHAR(32) XPATH='@', AUTHOR CHAR(50), TITLE CHAR(32), TRANSLATOR CHAR(40), @@ -123,18 +123,18 @@ DROP TABLE t1; --echo # Testing XPath --echo # CREATE TABLE t1 ( - isbn CHAR(15) FIELD_FORMAT='@ISBN', - language CHAR(2) FIELD_FORMAT='@LANG', - subject CHAR(32) FIELD_FORMAT='@SUBJECT', - authorfn CHAR(20) FIELD_FORMAT='AUTHOR/FIRSTNAME', - authorln CHAR(20) FIELD_FORMAT='AUTHOR/LASTNAME', - title CHAR(32) FIELD_FORMAT='TITLE', - translated CHAR(32) FIELD_FORMAT='TRANSLATOR/@PREFIX', - tranfn CHAR(20) FIELD_FORMAT='TRANSLATOR/FIRSTNAME', - tranln CHAR(20) FIELD_FORMAT='TRANSLATOR/LASTNAME', - publisher CHAR(20) FIELD_FORMAT='PUBLISHER/NAME', - location CHAR(20) FIELD_FORMAT='PUBLISHER/PLACE', - year INT(4) FIELD_FORMAT='DATEPUB' + isbn CHAR(15) XPATH='@ISBN', + language CHAR(2) XPATH='@LANG', + subject CHAR(32) XPATH='@SUBJECT', + authorfn CHAR(20) XPATH='AUTHOR/FIRSTNAME', + authorln CHAR(20) XPATH='AUTHOR/LASTNAME', + title CHAR(32) XPATH='TITLE', + translated CHAR(32) XPATH='TRANSLATOR/@PREFIX', + tranfn CHAR(20) XPATH='TRANSLATOR/FIRSTNAME', + tranln CHAR(20) XPATH='TRANSLATOR/LASTNAME', + publisher CHAR(20) XPATH='PUBLISHER/NAME', + location CHAR(20) XPATH='PUBLISHER/PLACE', + year INT(4) XPATH='DATEPUB' ) ENGINE=CONNECT TABLE_TYPE=XML FILE_NAME='xsample.xml' TABNAME='BIBLIO' OPTION_LIST='rownode=BOOK,skipnull=1,xmlsup=libxml2'; SELECT * FROM t1; @@ -150,8 +150,8 @@ DROP TABLE t1; #--echo # Relative paths are not supported #--echo # #CREATE TABLE t1 ( -# authorfn CHAR(20) FIELD_FORMAT='//FIRSTNAME', -# authorln CHAR(20) FIELD_FORMAT='//LASTNAME' +# authorfn CHAR(20) XPATH='//FIRSTNAME', +# authorln CHAR(20) XPATH='//LASTNAME' #) ENGINE=CONNECT TABLE_TYPE=XML FILE_NAME='xsample.xml' # TABNAME='BIBLIO' OPTION_LIST='rownode=BOOK,skipnull=1'; #SELECT * FROM t1; @@ -165,8 +165,8 @@ DROP TABLE t1; #--echo # Absolute path is not supported #--echo # #CREATE TABLE t1 ( -# authorfn CHAR(20) FIELD_FORMAT='/BIBLIO/BOOK/AUTHOR/FIRSTNAME', -# authorln CHAR(20) FIELD_FORMAT='/BIBLIO/BOOK/AUTHOR/LASTNAME' +# authorfn CHAR(20) XPATH='/BIBLIO/BOOK/AUTHOR/FIRSTNAME', +# authorln CHAR(20) XPATH='/BIBLIO/BOOK/AUTHOR/LASTNAME' #) ENGINE=CONNECT TABLE_TYPE=XML FILE_NAME='xsample.xml' # TABNAME='BIBLIO' OPTION_LIST='rownode=BOOK,skipnull=1'; #SELECT * FROM t1; @@ -178,7 +178,7 @@ DROP TABLE t1; --echo # CREATE TABLE t1 ( - isbn CHAR(15) FIELD_FORMAT='@isbn' + isbn CHAR(15) XPATH='@isbn' ) ENGINE=CONNECT TABLE_TYPE=XML FILE_NAME='xsample.xml' TABNAME='BIBLIO' OPTION_LIST='rownode=BOOK,skipnull=1,xmlsup=libxml2'; SELECT * FROM t1; diff --git a/storage/connect/mysql-test/connect/t/xml2_html.test b/storage/connect/mysql-test/connect/t/xml2_html.test index 1c84b46ec38..2f4fc50e5e6 100644 --- a/storage/connect/mysql-test/connect/t/xml2_html.test +++ b/storage/connect/mysql-test/connect/t/xml2_html.test @@ -11,9 +11,9 @@ SET NAMES utf8; --echo # Testing HTML like XML file --echo # CREATE TABLE beers ( -`Name` CHAR(16) FIELD_FORMAT='brandName', -`Origin` CHAR(16) FIELD_FORMAT='origin', -`Description` CHAR(32) FIELD_FORMAT='details') +`Name` CHAR(16) XPATH='brandName', +`Origin` CHAR(16) XPATH='origin', +`Description` CHAR(32) XPATH='details') ENGINE=CONNECT TABLE_TYPE=XML FILE_NAME='beers.xml' TABNAME='table' 
OPTION_LIST='xmlsup=libxml2,rownode=tr,colnode=td'; SELECT * FROM beers; diff --git a/storage/connect/mysql-test/connect/t/xml2_mult.test b/storage/connect/mysql-test/connect/t/xml2_mult.test index cd83827fe34..e9914c71aad 100644 --- a/storage/connect/mysql-test/connect/t/xml2_mult.test +++ b/storage/connect/mysql-test/connect/t/xml2_mult.test @@ -15,9 +15,9 @@ SET NAMES utf8; --echo # Testing expanded values --echo # CREATE TABLE `bookstore` ( - `category` CHAR(16) NOT NULL FIELD_FORMAT='@', + `category` CHAR(16) NOT NULL XPATH='@', `title` VARCHAR(50) NOT NULL, - `lang` char(2) NOT NULL FIELD_FORMAT='title/@', + `lang` char(2) NOT NULL XPATH='title/@', `author` VARCHAR(24) NOT NULL, `year` INT(4) NOT NULL, `price` DOUBLE(8,2) NOT NULL) diff --git a/storage/connect/mysql-test/connect/t/xml2_zip.test b/storage/connect/mysql-test/connect/t/xml2_zip.test index d8c7894f861..46697dc97f7 100644 --- a/storage/connect/mysql-test/connect/t/xml2_zip.test +++ b/storage/connect/mysql-test/connect/t/xml2_zip.test @@ -11,17 +11,17 @@ let $MYSQLD_DATADIR= `select @@datadir`; --echo # Testing zipped XML tables --echo # CREATE TABLE t1 ( -ISBN CHAR(13) NOT NULL FIELD_FORMAT='@', -LANG CHAR(2) NOT NULL FIELD_FORMAT='@', -SUBJECT CHAR(12) NOT NULL FIELD_FORMAT='@', -AUTHOR_FIRSTNAME CHAR(15) NOT NULL FIELD_FORMAT='AUTHOR/FIRSTNAME', -AUTHOR_LASTNAME CHAR(8) NOT NULL FIELD_FORMAT='AUTHOR/LASTNAME', -TRANSLATOR_PREFIX CHAR(24) DEFAULT NULL FIELD_FORMAT='TRANSLATOR/@PREFIX', -TRANSLATOR_FIRSTNAME CHAR(6) DEFAULT NULL FIELD_FORMAT='TRANSLATOR/FIRSTNAME', -TRANSLATOR_LASTNAME CHAR(6) DEFAULT NULL FIELD_FORMAT='TRANSLATOR/LASTNAME', +ISBN CHAR(13) NOT NULL XPATH='@', +LANG CHAR(2) NOT NULL XPATH='@', +SUBJECT CHAR(12) NOT NULL XPATH='@', +AUTHOR_FIRSTNAME CHAR(15) NOT NULL XPATH='AUTHOR/FIRSTNAME', +AUTHOR_LASTNAME CHAR(8) NOT NULL XPATH='AUTHOR/LASTNAME', +TRANSLATOR_PREFIX CHAR(24) DEFAULT NULL XPATH='TRANSLATOR/@PREFIX', +TRANSLATOR_FIRSTNAME CHAR(6) DEFAULT NULL XPATH='TRANSLATOR/FIRSTNAME', +TRANSLATOR_LASTNAME CHAR(6) DEFAULT NULL XPATH='TRANSLATOR/LASTNAME', TITLE CHAR(30) NOT NULL, -PUBLISHER_NAME CHAR(15) NOT NULL FIELD_FORMAT='PUBLISHER/NAME', -PUBLISHER_PLACE CHAR(5) NOT NULL FIELD_FORMAT='PUBLISHER/PLACE', +PUBLISHER_NAME CHAR(15) NOT NULL XPATH='PUBLISHER/NAME', +PUBLISHER_PLACE CHAR(5) NOT NULL XPATH='PUBLISHER/PLACE', DATEPUB CHAR(4) NOT NULL ) ENGINE=CONNECT TABLE_TYPE=XML FILE_NAME='xsample2.zip' ZIPPED=YES OPTION_LIST='entry=xsample2.xml,load=xsample2.xml,rownode=BOOK,xmlsup=libxml2,expand=1,mulnode=AUTHOR'; diff --git a/storage/connect/mysql-test/connect/t/xml_html.test b/storage/connect/mysql-test/connect/t/xml_html.test index 34d29953f68..1430f68d2b2 100644 --- a/storage/connect/mysql-test/connect/t/xml_html.test +++ b/storage/connect/mysql-test/connect/t/xml_html.test @@ -11,9 +11,9 @@ SET NAMES utf8; --echo # Testing HTML like XML file --echo # CREATE TABLE beers ( -`Name` CHAR(16) FIELD_FORMAT='brandName', -`Origin` CHAR(16) FIELD_FORMAT='origin', -`Description` CHAR(32) FIELD_FORMAT='details') +`Name` CHAR(16) XPATH='brandName', +`Origin` CHAR(16) XPATH='origin', +`Description` CHAR(32) XPATH='details') ENGINE=CONNECT TABLE_TYPE=XML FILE_NAME='beers.xml' TABNAME='table' OPTION_LIST='xmlsup=domdoc,rownode=tr,colnode=td'; SELECT * FROM beers; diff --git a/storage/connect/mysql-test/connect/t/xml_mult.test b/storage/connect/mysql-test/connect/t/xml_mult.test index cf703e90da4..221d6734546 100644 --- a/storage/connect/mysql-test/connect/t/xml_mult.test +++ 
b/storage/connect/mysql-test/connect/t/xml_mult.test @@ -15,9 +15,9 @@ SET NAMES utf8; --echo # Testing expanded values --echo # CREATE TABLE `bookstore` ( - `category` CHAR(16) NOT NULL FIELD_FORMAT='@', + `category` CHAR(16) NOT NULL XPATH='@', `title` VARCHAR(50) NOT NULL, - `lang` char(2) NOT NULL FIELD_FORMAT='title/@', + `lang` char(2) NOT NULL XPATH='title/@', `author` VARCHAR(24) NOT NULL, `year` INT(4) NOT NULL, `price` DOUBLE(8,2) NOT NULL) diff --git a/storage/connect/mysql-test/connect/t/xml_zip.test b/storage/connect/mysql-test/connect/t/xml_zip.test index ad31ca46d4c..774021485f9 100644 --- a/storage/connect/mysql-test/connect/t/xml_zip.test +++ b/storage/connect/mysql-test/connect/t/xml_zip.test @@ -11,17 +11,17 @@ let $MYSQLD_DATADIR= `select @@datadir`; --echo # Testing zipped XML tables --echo # CREATE TABLE t1 ( -ISBN CHAR(13) NOT NULL FIELD_FORMAT='@', -LANG CHAR(2) NOT NULL FIELD_FORMAT='@', -SUBJECT CHAR(12) NOT NULL FIELD_FORMAT='@', -AUTHOR_FIRSTNAME CHAR(15) NOT NULL FIELD_FORMAT='AUTHOR/FIRSTNAME', -AUTHOR_LASTNAME CHAR(8) NOT NULL FIELD_FORMAT='AUTHOR/LASTNAME', -TRANSLATOR_PREFIX CHAR(24) DEFAULT NULL FIELD_FORMAT='TRANSLATOR/@PREFIX', -TRANSLATOR_FIRSTNAME CHAR(6) DEFAULT NULL FIELD_FORMAT='TRANSLATOR/FIRSTNAME', -TRANSLATOR_LASTNAME CHAR(6) DEFAULT NULL FIELD_FORMAT='TRANSLATOR/LASTNAME', +ISBN CHAR(13) NOT NULL XPATH='@', +LANG CHAR(2) NOT NULL XPATH='@', +SUBJECT CHAR(12) NOT NULL XPATH='@', +AUTHOR_FIRSTNAME CHAR(15) NOT NULL XPATH='AUTHOR/FIRSTNAME', +AUTHOR_LASTNAME CHAR(8) NOT NULL XPATH='AUTHOR/LASTNAME', +TRANSLATOR_PREFIX CHAR(24) DEFAULT NULL XPATH='TRANSLATOR/@PREFIX', +TRANSLATOR_FIRSTNAME CHAR(6) DEFAULT NULL XPATH='TRANSLATOR/FIRSTNAME', +TRANSLATOR_LASTNAME CHAR(6) DEFAULT NULL XPATH='TRANSLATOR/LASTNAME', TITLE CHAR(30) NOT NULL, -PUBLISHER_NAME CHAR(15) NOT NULL FIELD_FORMAT='PUBLISHER/NAME', -PUBLISHER_PLACE CHAR(5) NOT NULL FIELD_FORMAT='PUBLISHER/PLACE', +PUBLISHER_NAME CHAR(15) NOT NULL XPATH='PUBLISHER/NAME', +PUBLISHER_PLACE CHAR(5) NOT NULL XPATH='PUBLISHER/PLACE', DATEPUB CHAR(4) NOT NULL ) ENGINE=CONNECT TABLE_TYPE=XML FILE_NAME='xsample2.zip' ZIPPED=YES OPTION_LIST='entry=xsample2.xml,load=xsample2.xml,rownode=BOOK,xmlsup=domdoc,expand=1,mulnode=AUTHOR'; diff --git a/storage/connect/tabdos.cpp b/storage/connect/tabdos.cpp index b3147bb7357..a2b5204cd0a 100644 --- a/storage/connect/tabdos.cpp +++ b/storage/connect/tabdos.cpp @@ -1998,7 +1998,7 @@ int TDBDOS::Cardinality(PGLOBAL g) if (Mode == MODE_ANY && ExactInfo()) { // Using index impossible or failed, do it the hard way Mode = MODE_READ; - To_Line = (char*)PlugSubAlloc(g, NULL, Lrecl + 1); + To_Line = (char*)PlugSubAlloc(g, NULL, (size_t)Lrecl + 1); if (Txfp->OpenTableFile(g)) return (Cardinal = Txfp->Cardinality(g)); @@ -2232,7 +2232,7 @@ int TDBDOS::ReadDB(PGLOBAL g) return RC_EF; case -2: // No match for join return RC_NF; - case -3: // Same record as last non null one + case -3: // Same record as non null last one num_there++; return RC_OK; default: diff --git a/storage/connect/tabjson.cpp b/storage/connect/tabjson.cpp index cdf9e40f97c..a79f2a37f32 100644 --- a/storage/connect/tabjson.cpp +++ b/storage/connect/tabjson.cpp @@ -1,6 +1,6 @@ /************* tabjson C++ Program Source Code File (.CPP) *************/ -/* PROGRAM NAME: tabjson Version 1.7 */ -/* (C) Copyright to the author Olivier BERTRAND 2014 - 2019 */ +/* PROGRAM NAME: tabjson Version 1.8 */ +/* (C) Copyright to the author Olivier BERTRAND 2014 - 2020 */ /* This program are the JSON class DB execution routines. 
*/ /***********************************************************************/ @@ -286,15 +286,17 @@ int JSONDISC::GetColumns(PGLOBAL g, PCSZ db, PCSZ dsn, PTOS topt) #endif } // endif Driver - } else + } else if (tdp->Pretty >= 0) tjnp = new(g) TDBJSN(tdp, new(g) DOSFAM(tdp)); + else + tjnp = new(g) TDBJSN(tdp, new(g) BINFAM(tdp)); tjnp->SetMode(MODE_READ); // Allocate the parse work memory PGLOBAL G = (PGLOBAL)PlugSubAlloc(g, NULL, sizeof(GLOBAL)); memset(G, 0, sizeof(GLOBAL)); - G->Sarea_Size = (size_t)tdp->Lrecl * 10; + G->Sarea_Size = (size_t)tdp->Lrecl * (tdp->Pretty >= 0 ? 10 : 2); G->Sarea = PlugSubAlloc(g, NULL, G->Sarea_Size); PlugSubSet(G->Sarea, G->Sarea_Size); G->jump_level = 0; @@ -309,7 +311,7 @@ int JSONDISC::GetColumns(PGLOBAL g, PCSZ db, PCSZ dsn, PTOS topt) case RC_FX: goto err; default: - jsp = tjnp->GetRow(); + jsp = tjnp->FindRow(g); } // endswitch ReadDB } // endif pretty @@ -335,11 +337,11 @@ int JSONDISC::GetColumns(PGLOBAL g, PCSZ db, PCSZ dsn, PTOS topt) /* Analyse the JSON tree and define columns. */ /*********************************************************************/ for (i = 1; ; i++) { - for (jpp = row->GetFirst(); jpp; jpp = jpp->GetNext()) { - strncpy(colname, jpp->GetKey(), 64); + for (jpp = row->GetFirst(); jpp; jpp = jpp->Next) { + strncpy(colname, jpp->Key, 64); fmt[bf] = 0; - if (Find(g, jpp->GetVal(), colname, MY_MIN(lvl, 0))) + if (Find(g, jpp->Val, colname, MY_MIN(lvl, 0))) goto err; } // endfor jpp @@ -359,7 +361,7 @@ int JSONDISC::GetColumns(PGLOBAL g, PCSZ db, PCSZ dsn, PTOS topt) case RC_FX: goto err; default: - jsp = tjnp->GetRow(); + jsp = tjnp->FindRow(g); } // endswitch ReadDB } else @@ -390,14 +392,35 @@ bool JSONDISC::Find(PGLOBAL g, PJVAL jvp, PCSZ key, int j) PJOB job; PJAR jar; - if ((valp = jvp ? jvp->GetValue() : NULL)) { + if ((vlp = jvp ? 
jvp->GetVal() : NULL)) { if (JsonAllPath() && !fmt[bf]) strcat(fmt, colname); - jcol.Type = valp->GetType(); - jcol.Len = valp->GetValLen(); - jcol.Scale = valp->GetValPrec(); - jcol.Cbn = valp->IsNull(); + jcol.Type = vlp->Type; + + switch (vlp->Type) { + case TYPE_STRG: + case TYPE_DTM: + jcol.Len = (int)strlen(vlp->Strp); + break; + case TYPE_INTG: + case TYPE_BINT: + jcol.Len = (int)strlen(jvp->GetString(g)); + break; + case TYPE_DBL: + jcol.Len = (int)strlen(jvp->GetString(g)); + jcol.Scale = vlp->Nd; + break; + case TYPE_BOOL: + jcol.Len = 1; + break; + default: + jcol.Len = 0; + break; + } // endswitch Type + + jcol.Scale = vlp->Nd; + jcol.Cbn = vlp->Type == TYPE_NULL; } else if (!jvp || jvp->IsNull()) { jcol.Type = TYPE_UNKNOWN; jcol.Len = jcol.Scale = 0; @@ -413,8 +436,8 @@ bool JSONDISC::Find(PGLOBAL g, PJVAL jvp, PCSZ key, int j) case TYPE_JOB: job = (PJOB)jsp; - for (PJPR jrp = job->GetFirst(); jrp; jrp = jrp->GetNext()) { - PCSZ k = jrp->GetKey(); + for (PJPR jrp = job->GetFirst(); jrp; jrp = jrp->Next) { + PCSZ k = jrp->Key; if (*k != '$') { n = sizeof(fmt) - strlen(fmt) -1; @@ -423,7 +446,7 @@ bool JSONDISC::Find(PGLOBAL g, PJVAL jvp, PCSZ key, int j) strncat(strncat(colname, "_", n), k, n - 1); } // endif Key - if (Find(g, jrp->GetVal(), k, j + 1)) + if (Find(g, jrp->Val, k, j + 1)) return true; *p = *pc = 0; @@ -625,9 +648,9 @@ PTDB JSONDEF::GetTable(PGLOBAL g, MODE m) PTXF txfp = NULL; // JSN not used for pretty=1 for insert or delete - if (!Pretty || (Pretty == 1 && (m == MODE_READ || m == MODE_UPDATE))) { + if (Pretty <= 0 || (Pretty == 1 && (m == MODE_READ || m == MODE_UPDATE))) { USETEMP tmp = UseTemp(); - bool map = Mapped && m != MODE_INSERT && + bool map = Mapped && Pretty >= 0 && m != MODE_INSERT && !(tmp != TMP_NO && m == MODE_UPDATE) && !(tmp == TMP_FORCE && (m == MODE_UPDATE || m == MODE_DELETE)); @@ -684,17 +707,19 @@ PTDB JSONDEF::GetTable(PGLOBAL g, MODE m) #endif // !GZ_SUPPORT } else if (map) txfp = new(g) MAPFAM(this); - else + else if (Pretty < 0) // BJsonfile + txfp = new(g) BINFAM(this); + else txfp = new(g) DOSFAM(this); - // Txfp must be set for TDBDOS + // Txfp must be set for TDBJSN tdbp = new(g) TDBJSN(this, txfp); if (Lrecl) { // Allocate the parse work memory PGLOBAL G = (PGLOBAL)PlugSubAlloc(g, NULL, sizeof(GLOBAL)); memset(G, 0, sizeof(GLOBAL)); - G->Sarea_Size = Lrecl * 10; + G->Sarea_Size = (size_t)Lrecl * 10; G->Sarea = PlugSubAlloc(g, NULL, G->Sarea_Size); PlugSubSet(G->Sarea, G->Sarea_Size); G->jump_level = 0; @@ -736,10 +761,10 @@ PTDB JSONDEF::GetTable(PGLOBAL g, MODE m) /* --------------------------- Class TDBJSN -------------------------- */ /***********************************************************************/ -/* Implementation of the TDBJSN class. 
*/ +/* Implementation of the TDBJSN class (Pretty < 2) */ /***********************************************************************/ TDBJSN::TDBJSN(PJDEF tdp, PTXF txfp) : TDBDOS(tdp, txfp) - { +{ G = NULL; Top = NULL; Row = NULL; @@ -772,35 +797,35 @@ TDBJSN::TDBJSN(PJDEF tdp, PTXF txfp) : TDBDOS(tdp, txfp) SameRow = 0; Xval = -1; Comma = false; - } // end of TDBJSN standard constructor +} // end of TDBJSN standard constructor -TDBJSN::TDBJSN(TDBJSN *tdbp) : TDBDOS(NULL, tdbp) - { - G = NULL; - Top = tdbp->Top; - Row = tdbp->Row; - Val = tdbp->Val; - Colp = tdbp->Colp; - Jmode = tdbp->Jmode; - Objname = tdbp->Objname; - Xcol = tdbp->Xcol; - Fpos = tdbp->Fpos; - N = tdbp->N; - M = tdbp->M; - Limit = tdbp->Limit; - NextSame = tdbp->NextSame; - SameRow = tdbp->SameRow; - Xval = tdbp->Xval; - B = tdbp->B; - Sep = tdbp->Sep; - Pretty = tdbp->Pretty; - Strict = tdbp->Strict; - Comma = tdbp->Comma; - } // end of TDBJSN copy constructor +TDBJSN::TDBJSN(TDBJSN* tdbp) : TDBDOS(NULL, tdbp) +{ + G = NULL; + Top = tdbp->Top; + Row = tdbp->Row; + Val = tdbp->Val; + Colp = tdbp->Colp; + Jmode = tdbp->Jmode; + Objname = tdbp->Objname; + Xcol = tdbp->Xcol; + Fpos = tdbp->Fpos; + N = tdbp->N; + M = tdbp->M; + Limit = tdbp->Limit; + NextSame = tdbp->NextSame; + SameRow = tdbp->SameRow; + Xval = tdbp->Xval; + B = tdbp->B; + Sep = tdbp->Sep; + Pretty = tdbp->Pretty; + Strict = tdbp->Strict; + Comma = tdbp->Comma; +} // end of TDBJSN copy constructor // Used for update PTDB TDBJSN::Clone(PTABS t) - { +{ G = NULL; PTDB tp; PJCOL cp1, cp2; @@ -814,23 +839,23 @@ PTDB TDBJSN::Clone(PTABS t) } // endfor cp1 return tp; - } // end of Clone +} // end of Clone /***********************************************************************/ /* Allocate JSN column description block. */ /***********************************************************************/ PCOL TDBJSN::MakeCol(PGLOBAL g, PCOLDEF cdp, PCOL cprec, int n) - { +{ PJCOL colp = new(g) JSONCOL(g, cdp, this, cprec, n); return (colp->ParseJpath(g)) ? NULL : colp; - } // end of MakeCol +} // end of MakeCol /***********************************************************************/ /* InsertSpecialColumn: Put a special column ahead of the column list.*/ /***********************************************************************/ PCOL TDBJSN::InsertSpecialColumn(PCOL colp) - { +{ if (!colp->IsSpecial()) return NULL; @@ -840,31 +865,47 @@ PCOL TDBJSN::InsertSpecialColumn(PCOL colp) colp->SetNext(Columns); Columns = colp; return colp; - } // end of InsertSpecialColumn +} // end of InsertSpecialColumn +#if 0 /***********************************************************************/ /* JSON Cardinality: returns table size in number of rows. */ /***********************************************************************/ int TDBJSN::Cardinality(PGLOBAL g) - { +{ if (!g) return 0; - else if (Cardinal < 0) - Cardinal = TDBDOS::Cardinality(g); + else if (Cardinal < 0) { + Cardinal = TDBDOS::Cardinality(g); + + } // endif Cardinal return Cardinal; - } // end of Cardinality +} // end of Cardinality /***********************************************************************/ /* JSON GetMaxSize: returns file size estimate in number of lines. */ /***********************************************************************/ int TDBJSN::GetMaxSize(PGLOBAL g) - { - if (MaxSize < 0) - MaxSize = TDBDOS::GetMaxSize(g) * ((Xcol) ? Limit : 1); +{ + if (MaxSize < 0) + MaxSize = TDBDOS::GetMaxSize(g) * ((Xcol) ? 
Limit : 1); return MaxSize; - } // end of GetMaxSize +} // end of GetMaxSize +#endif // 0 + +/***********************************************************************/ +/* JSON EstimatedLength. Returns an estimated minimum line length. */ +/***********************************************************************/ +int TDBJSN::EstimatedLength(void) +{ + if (AvgLen <= 0) + return (Lrecl ? Lrecl : 1024) / 8; // TODO: make it better + else + return AvgLen; + +} // end of Estimated Length /***********************************************************************/ /* Find the row in the tree structure. */ @@ -904,7 +945,7 @@ PJSON TDBJSN::FindRow(PGLOBAL g) /* OpenDB: Data Base open routine for JSN access method. */ /***********************************************************************/ bool TDBJSN::OpenDB(PGLOBAL g) - { +{ if (Use == USE_OPEN) { /*******************************************************************/ /* Table already open replace it at its beginning. */ @@ -928,7 +969,45 @@ bool TDBJSN::OpenDB(PGLOBAL g) } // endif Use - if (TDBDOS::OpenDB(g)) + if (Pretty < 0) { + /*******************************************************************/ + /* Binary BJSON table. */ + /*******************************************************************/ + xtrc(1, "JSN OpenDB: tdbp=%p tdb=R%d use=%d mode=%d\n", + this, Tdb_No, Use, Mode); + + if (Use == USE_OPEN) { + /*******************************************************************/ + /* Table already open, just replace it at its beginning. */ + /*******************************************************************/ + if (!To_Kindex) { + Txfp->Rewind(); // see comment in Work.log + } else // Table is to be accessed through a sorted index table + To_Kindex->Reset(); + + return false; + } // endif use + + /*********************************************************************/ + /* Open according to logical input/output mode required. */ + /* Use conventionnal input/output functions. */ + /*********************************************************************/ + if (Txfp->OpenTableFile(g)) + return true; + + Use = USE_OPEN; // Do it now in case we are recursively called + + /*********************************************************************/ + /* Lrecl is Ok. */ + /*********************************************************************/ + size_t linelen = Lrecl; + + //To_Line = (char*)PlugSubAlloc(g, NULL, linelen); + //memset(To_Line, 0, linelen); + To_Line = Txfp->GetBuf(); + xtrc(1, "OpenJSN: R%hd mode=%d To_Line=%p\n", Tdb_No, Mode, To_Line); + return false; + } else if (TDBDOS::OpenDB(g)) return true; if (Xcol) @@ -943,7 +1022,7 @@ bool TDBJSN::OpenDB(PGLOBAL g) /* Kindex construction if the file is accessed using an index. 
*/ /***********************************************************************/ bool TDBJSN::SkipHeader(PGLOBAL g) - { +{ int len = GetFileLength(g); bool rc = false; @@ -952,62 +1031,71 @@ bool TDBJSN::SkipHeader(PGLOBAL g) return true; #endif // _DEBUG -#if defined(__WIN__) -#define Ending 2 -#else // !__WIN__ -#define Ending 1 -#endif // !__WIN__ - if (Pretty == 1) { if (Mode == MODE_INSERT || Mode == MODE_DELETE) { // Mode Insert and delete are no more handled here - assert(false); - } else if (len) // !Insert && !Delete + DBUG_ASSERT(false); + } else if (len > 0) // !Insert && !Delete rc = (Txfp->SkipRecord(g, false) == RC_FX || Txfp->RecordPos(g)); - } // endif Pretty + } // endif Pretty return rc; - } // end of SkipHeader +} // end of SkipHeader /***********************************************************************/ /* ReadDB: Data Base read routine for JSN access method. */ /***********************************************************************/ -int TDBJSN::ReadDB(PGLOBAL g) - { - int rc; +int TDBJSN::ReadDB(PGLOBAL g) { + int rc; - N++; + N++; - if (NextSame) { - SameRow = NextSame; - NextSame = 0; - M++; - return RC_OK; - } else if ((rc = TDBDOS::ReadDB(g)) == RC_OK) { - if (!IsRead() && ((rc = ReadBuffer(g)) != RC_OK)) - // Deferred reading failed - return rc; + if (NextSame) { + SameRow = NextSame; + NextSame = 0; + M++; + return RC_OK; + } else if ((rc = TDBDOS::ReadDB(g)) == RC_OK) { + if (!IsRead() && ((rc = ReadBuffer(g)) != RC_OK)) + return rc; // Deferred reading failed - // Recover the memory used for parsing - PlugSubSet(G->Sarea, G->Sarea_Size); + if (Pretty >= 0) { + // Recover the memory used for parsing + PlugSubSet(G->Sarea, G->Sarea_Size); - if ((Row = ParseJson(G, To_Line, strlen(To_Line), &Pretty, &Comma))) { - Row = FindRow(g); - SameRow = 0; - Fpos++; - M = 1; - rc = RC_OK; - } else if (Pretty != 1 || strcmp(To_Line, "]")) { - strcpy(g->Message, G->Message); - rc = RC_FX; - } else - rc = RC_EF; + if ((Row = ParseJson(G, To_Line, strlen(To_Line), &Pretty, &Comma))) { + Row = FindRow(g); + SameRow = 0; + Fpos++; + M = 1; + rc = RC_OK; + } else if (Pretty != 1 || strcmp(To_Line, "]")) { + strcpy(g->Message, G->Message); + rc = RC_FX; + } else + rc = RC_EF; - } // endif ReadDB + } else { + // Here we get a movable Json binary tree + PJSON jsp; + SWAP* swp; - return rc; - } // end of ReadDB + jsp = (PJSON)To_Line; + swp = new(g) SWAP(G, jsp); + swp->SwapJson(jsp, false); // Restore pointers from offsets + Row = jsp; + Row = FindRow(g); + SameRow = 0; + Fpos++; + M = 1; + rc = RC_OK; + } // endif Pretty + + } // endif ReadDB + + return rc; +} // end of ReadDB /***********************************************************************/ /* Make the top tree from the object path. */ @@ -1081,8 +1169,8 @@ int TDBJSN::MakeTopTree(PGLOBAL g, PJSON jsp) /***********************************************************************/ /* PrepareWriting: Prepare the line for WriteDB. */ /***********************************************************************/ - bool TDBJSN::PrepareWriting(PGLOBAL g) - { +bool TDBJSN::PrepareWriting(PGLOBAL g) +{ PSZ s; if (MakeTopTree(g, Row)) @@ -1103,7 +1191,7 @@ int TDBJSN::MakeTopTree(PGLOBAL g, PJSON jsp) } else return true; - } // end of PrepareWriting +} // end of PrepareWriting /***********************************************************************/ /* WriteDB: Data Base write routine for JSON access method. 
*/ @@ -1125,7 +1213,7 @@ int TDBJSN::WriteDB(PGLOBAL g) JSONCOL::JSONCOL(PGLOBAL g, PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i) : DOSCOL(g, cdp, tdbp, cprec, i, "DOS") { - Tjp = (TDBJSN *)(tdbp->GetOrig() ? tdbp->GetOrig() : tdbp); + Tjp = (TDBJSN *)(tdbp->GetOrig() ? tdbp->GetOrig() : tdbp); G = Tjp->G; Jpath = cdp->GetFmt(); MulVal = NULL; @@ -1159,7 +1247,7 @@ JSONCOL::JSONCOL(JSONCOL *col1, PTDB tdbp) : DOSCOL(col1, tdbp) /* SetBuffer: prepare a column block for write operation. */ /***********************************************************************/ bool JSONCOL::SetBuffer(PGLOBAL g, PVAL value, bool ok, bool check) - { +{ if (DOSCOL::SetBuffer(g, value, ok, check)) return true; @@ -1170,13 +1258,13 @@ bool JSONCOL::SetBuffer(PGLOBAL g, PVAL value, bool ok, bool check) Tjp = (TDBJSN*)To_Tdb; G = Tjp->G; return false; - } // end of SetBuffer +} // end of SetBuffer /***********************************************************************/ /* Check whether this object is expanded. */ /***********************************************************************/ bool JSONCOL::CheckExpand(PGLOBAL g, int i, PSZ nm, bool b) - { +{ if ((Tjp->Xcol && nm && !strcmp(nm, Tjp->Xcol) && (Tjp->Xval < 0 || Tjp->Xval == i)) || Xpd) { Xpd = true; // Expandable object @@ -1187,7 +1275,7 @@ bool JSONCOL::CheckExpand(PGLOBAL g, int i, PSZ nm, bool b) } // endif Xcol return false; - } // end of CheckExpand +} // end of CheckExpand /***********************************************************************/ /* Analyse array processing options. */ @@ -1502,13 +1590,13 @@ PVAL JSONCOL::MakeJson(PGLOBAL g, PJSON jsp) Value->SetValue_psz(Serialize(g, jsp, NULL, 0)); return Value; - } // end of MakeJson +} // end of MakeJson /***********************************************************************/ /* SetValue: Set a value from a JVALUE contains. 
*/ /***********************************************************************/ void JSONCOL::SetJsonValue(PGLOBAL g, PVAL vp, PJVAL val, int n) - { +{ if (val) { vp->SetNull(false); @@ -1518,7 +1606,31 @@ void JSONCOL::SetJsonValue(PGLOBAL g, PVAL vp, PJVAL val, int n) case TYPE_BINT: case TYPE_DBL: case TYPE_DTM: - vp->SetValue_pval(val->GetValue()); + switch (vp->GetType()) { + case TYPE_STRING: + case TYPE_DATE: + vp->SetValue_psz(val->GetString(g)); + break; + case TYPE_INT: + case TYPE_SHORT: + case TYPE_TINY: + vp->SetValue(val->GetInteger()); + break; + case TYPE_BIGINT: + vp->SetValue(val->GetBigint()); + break; + case TYPE_DOUBLE: + vp->SetValue(val->GetFloat()); + + if (val->GetValType() == TYPE_DBL) + vp->SetPrec(val->Val->Nd); + + break; + default: + sprintf(g->Message, "Unsupported column type %d\n", vp->GetType()); + throw 888; + } // endswitch Type + break; case TYPE_BOOL: if (vp->IsTypeNum()) @@ -1546,13 +1658,13 @@ void JSONCOL::SetJsonValue(PGLOBAL g, PVAL vp, PJVAL val, int n) vp->SetNull(true); } // endif val - } // end of SetJsonValue +} // end of SetJsonValue /***********************************************************************/ /* ReadColumn: */ /***********************************************************************/ void JSONCOL::ReadColumn(PGLOBAL g) - { +{ if (!Tjp->SameRow || Xnod >= Tjp->SameRow) Value->SetValue_pval(GetColumnValue(g, Tjp->Row, 0)); @@ -1563,20 +1675,20 @@ void JSONCOL::ReadColumn(PGLOBAL g) if (!Nullable) Value->SetNull(false); - } // end of ReadColumn +} // end of ReadColumn /***********************************************************************/ /* GetColumnValue: */ /***********************************************************************/ PVAL JSONCOL::GetColumnValue(PGLOBAL g, PJSON row, int i) - { +{ int n = Nod - 1; PJAR arp; PJVAL val = NULL; for (; i < Nod && row; i++) { if (Nodes[i].Op == OP_NUM) { - Value->SetValue(row->GetType() == TYPE_JAR ? row->size() : 1); + Value->SetValue(row->GetType() == TYPE_JAR ? 
((PJAR)row)->size() : 1); return(Value); } else if (Nodes[i].Op == OP_XX) { return MakeJson(G, row); @@ -1626,13 +1738,13 @@ PVAL JSONCOL::GetColumnValue(PGLOBAL g, PJSON row, int i) SetJsonValue(g, Value, val, n); return Value; - } // end of GetColumnValue +} // end of GetColumnValue /***********************************************************************/ /* ExpandArray: */ /***********************************************************************/ PVAL JSONCOL::ExpandArray(PGLOBAL g, PJAR arp, int n) - { +{ int ars = MY_MIN(Tjp->Limit, arp->size()); PJVAL jvp; JVALUE jval; @@ -1650,7 +1762,7 @@ PVAL JSONCOL::ExpandArray(PGLOBAL g, PJAR arp, int n) } // endif jvp if (n < Nod - 1 && jvp->GetJson()) { - jval.SetValue(GetColumnValue(g, jvp->GetJson(), n + 1)); + jval.SetValue(g, GetColumnValue(g, jvp->GetJson(), n + 1)); jvp = &jval; } // endif n @@ -1666,13 +1778,13 @@ PVAL JSONCOL::ExpandArray(PGLOBAL g, PJAR arp, int n) SetJsonValue(g, Value, jvp, n); return Value; - } // end of ExpandArray +} // end of ExpandArray /***********************************************************************/ /* CalculateArray: */ /***********************************************************************/ PVAL JSONCOL::CalculateArray(PGLOBAL g, PJAR arp, int n) - { +{ int i, ars, nv = 0, nextsame = Tjp->NextSame; bool err; OPVAL op = Nodes[n].Op; @@ -1695,11 +1807,12 @@ PVAL JSONCOL::CalculateArray(PGLOBAL g, PJAR arp, int n) if (!jvrp->IsNull() || (op == OP_CNC && GetJsonNull())) do { if (jvrp->IsNull()) { - jvrp->Value = AllocateValue(g, GetJsonNull(), TYPE_STRING); - jvp = jvrp; + jvrp->Val = AllocVal(g, TYPE_STRG); + jvrp->Val->Strp = PlugDup(g, GetJsonNull()); + jvp = jvrp; } else if (n < Nod - 1 && jvrp->GetJson()) { Tjp->NextSame = nextsame; - jval.SetValue(GetColumnValue(g, jvrp->GetJson(), n + 1)); + jval.SetValue(g, GetColumnValue(g, jvrp->GetJson(), n + 1)); jvp = &jval; } else jvp = jvrp; @@ -1767,13 +1880,13 @@ PVAL JSONCOL::CalculateArray(PGLOBAL g, PJAR arp, int n) Tjp->NextSame = nextsame; return vp; - } // end of CalculateArray +} // end of CalculateArray /***********************************************************************/ /* GetRow: Get the object containing this column. */ /***********************************************************************/ PJSON JSONCOL::GetRow(PGLOBAL g) - { +{ PJVAL val = NULL; PJAR arp; PJSON nwr, row = Tjp->Row; @@ -1845,13 +1958,13 @@ PJSON JSONCOL::GetRow(PGLOBAL g) } // endfor i return row; - } // end of GetRow +} // end of GetRow /***********************************************************************/ /* WriteColumn: */ /***********************************************************************/ void JSONCOL::WriteColumn(PGLOBAL g) - { +{ if (Xpd && Tjp->Pretty < 2) { strcpy(g->Message, "Cannot write expanded column when Pretty is not 2"); throw 666; @@ -1887,7 +2000,7 @@ void JSONCOL::WriteColumn(PGLOBAL g) if (Nodes[Nod-1].Op == OP_XX) { s = Value->GetCharValue(); - if (!(jsp = ParseJson(G, s, (int)strlen(s)))) { + if (!(jsp = ParseJson(G, s, strlen(s)))) { strcpy(g->Message, s); throw 666; } // endif jsp @@ -1928,14 +2041,14 @@ void JSONCOL::WriteColumn(PGLOBAL g) objp->SetValue(G, new(G) JVALUE(G, Value), Nodes[Nod-1].Key); } else if (jvp) - jvp->SetValue(Value); + jvp->SetValue(g, Value); break; default: // ?????????? 
sprintf(g->Message, "Invalid column type %d", Buf_Type); } // endswitch Type - } // end of WriteColumn +} // end of WriteColumn /* -------------------------- Class TDBJSON -------------------------- */ @@ -1943,23 +2056,23 @@ void JSONCOL::WriteColumn(PGLOBAL g) /* Implementation of the TDBJSON class. */ /***********************************************************************/ TDBJSON::TDBJSON(PJDEF tdp, PTXF txfp) : TDBJSN(tdp, txfp) - { +{ Doc = NULL; Multiple = tdp->Multiple; Done = Changed = false; - } // end of TDBJSON standard constructor +} // end of TDBJSON standard constructor TDBJSON::TDBJSON(PJTDB tdbp) : TDBJSN(tdbp) - { +{ Doc = tdbp->Doc; Multiple = tdbp->Multiple; Done = tdbp->Done; Changed = tdbp->Changed; - } // end of TDBJSON copy constructor +} // end of TDBJSON copy constructor // Used for update PTDB TDBJSON::Clone(PTABS t) - { +{ PTDB tp; PJCOL cp1, cp2; PGLOBAL g = t->G; @@ -1972,13 +2085,13 @@ PTDB TDBJSON::Clone(PTABS t) } // endfor cp1 return tp; - } // end of Clone +} // end of Clone /***********************************************************************/ /* Make the document tree from the object path. */ /***********************************************************************/ int TDBJSON::MakeNewDoc(PGLOBAL g) - { +{ // Create a void table that will be populated Doc = new(g) JARRAY; @@ -1987,15 +2100,16 @@ int TDBJSON::MakeNewDoc(PGLOBAL g) Done = true; return RC_OK; - } // end of MakeNewDoc +} // end of MakeNewDoc /***********************************************************************/ /* Make the document tree from a file. */ /***********************************************************************/ int TDBJSON::MakeDocument(PGLOBAL g) - { +{ char *p, *p1, *p2, *memory, *objpath, *key = NULL; - int len, i = 0; + int i = 0; + size_t len; my_bool a; MODE mode = Mode; PJSON jsp; @@ -2139,13 +2253,13 @@ int TDBJSON::MakeDocument(PGLOBAL g) Done = true; return RC_OK; - } // end of MakeDocument +} // end of MakeDocument /***********************************************************************/ /* JSON Cardinality: returns table size in number of rows. */ /***********************************************************************/ int TDBJSON::Cardinality(PGLOBAL g) - { +{ if (!g) return (Xcol || Multiple) ? 0 : 1; else if (Cardinal < 0) { @@ -2159,48 +2273,48 @@ int TDBJSON::Cardinality(PGLOBAL g) } // endif Cardinal return Cardinal; - } // end of Cardinality +} // end of Cardinality /***********************************************************************/ /* JSON GetMaxSize: returns table size estimate in number of rows. */ /***********************************************************************/ int TDBJSON::GetMaxSize(PGLOBAL g) - { +{ if (MaxSize < 0) MaxSize = Cardinality(g) * ((Xcol) ? Limit : 1); return MaxSize; - } // end of GetMaxSize +} // end of GetMaxSize /***********************************************************************/ /* ResetSize: call by TDBMUL when calculating size estimate. */ /***********************************************************************/ void TDBJSON::ResetSize(void) - { +{ MaxSize = Cardinal = -1; Fpos = -1; N = 0; Done = false; - } // end of ResetSize +} // end of ResetSize /***********************************************************************/ /* TDBJSON is not indexable. 
*/ /***********************************************************************/ int TDBJSON::MakeIndex(PGLOBAL g, PIXDEF pxdf, bool) - { +{ if (pxdf) { strcpy(g->Message, "JSON not indexable when pretty = 2"); return RC_FX; } else return RC_OK; - } // end of MakeIndex +} // end of MakeIndex /***********************************************************************/ /* Return the position in the table. */ /***********************************************************************/ int TDBJSON::GetRecpos(void) - { +{ #if 0 union { uint Rpos; @@ -2212,13 +2326,13 @@ int TDBJSON::GetRecpos(void) return Rpos; #endif // 0 return Fpos; - } // end of GetRecpos +} // end of GetRecpos /***********************************************************************/ /* Set the position in the table. */ /***********************************************************************/ bool TDBJSON::SetRecpos(PGLOBAL, int recpos) - { +{ #if 0 union { uint Rpos; @@ -2239,13 +2353,13 @@ bool TDBJSON::SetRecpos(PGLOBAL, int recpos) Fpos = recpos - 1; return false; - } // end of SetRecpos +} // end of SetRecpos /***********************************************************************/ /* JSON Access Method opening routine. */ /***********************************************************************/ bool TDBJSON::OpenDB(PGLOBAL g) - { +{ if (Use == USE_OPEN) { /*******************************************************************/ /* Table already open replace it at its beginning. */ @@ -2277,13 +2391,13 @@ bool TDBJSON::OpenDB(PGLOBAL g) Use = USE_OPEN; return false; - } // end of OpenDB +} // end of OpenDB /***********************************************************************/ /* ReadDB: Data Base read routine for JSON access method. */ /***********************************************************************/ int TDBJSON::ReadDB(PGLOBAL) - { +{ int rc; N++; @@ -2301,18 +2415,18 @@ int TDBJSON::ReadDB(PGLOBAL) SameRow = 0; M = 1; - rc = RC_OK; + rc = RC_OK; } else rc = RC_EF; return rc; - } // end of ReadDB +} // end of ReadDB /***********************************************************************/ /* WriteDB: Data Base write routine for JSON access method. */ /***********************************************************************/ int TDBJSON::WriteDB(PGLOBAL g) - { +{ if (Jmode == MODE_OBJECT) { PJVAL vp = new(g) JVALUE(Row); @@ -2342,13 +2456,13 @@ int TDBJSON::WriteDB(PGLOBAL g) Changed = true; return RC_OK; - } // end of WriteDB +} // end of WriteDB /***********************************************************************/ /* Data Base delete line routine for JSON access method. */ /***********************************************************************/ int TDBJSON::DeleteDB(PGLOBAL g, int irc) - { +{ if (irc == RC_OK) { // Deleted current row if (Doc->DeleteValue(Fpos)) { @@ -2365,13 +2479,13 @@ int TDBJSON::DeleteDB(PGLOBAL g, int irc) } // endfor i return RC_OK; - } // end of DeleteDB +} // end of DeleteDB /***********************************************************************/ /* Data Base close routine for JSON access methods. */ /***********************************************************************/ void TDBJSON::CloseDB(PGLOBAL g) - { +{ if (!Changed) return; @@ -2387,7 +2501,7 @@ void TDBJSON::CloseDB(PGLOBAL g) if (!Serialize(g, Top, filename, Pretty)) puts(g->Message); - } // end of CloseDB +} // end of CloseDB /* ---------------------------TDBJCL class --------------------------- */ @@ -2395,18 +2509,18 @@ void TDBJSON::CloseDB(PGLOBAL g) /* TDBJCL class constructor. 
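
The TDBJSON routines above implement the in-memory (pretty = 2) flavour of the table: MakeDocument parses the whole file once into Doc, the read cursor is simply Fpos (which is also what GetRecpos and SetRecpos trade in), MakeIndex declines to build an index on top of it, and CloseDB serializes the document back to the file only when the Changed flag is set. A standalone sketch of that access pattern follows; MemTable and its members are made-up names, not the engine classes.

#include <cstdio>
#include <string>
#include <vector>

struct MemTable {
  std::vector<std::string> rows;   // the parsed document, one entry per row
  int  fpos    = -1;               // current row, what GetRecpos would report
  bool changed = false;

  bool Open()  { fpos = -1; return false; }                      // false = OK
  int  Read()  { return (++fpos < (int)rows.size()) ? 0 : 1; }   // 0 = OK, 1 = EOF
  void Write(const std::string &s) { rows.push_back(s); changed = true; }
  void Close() { if (changed) puts("rewrite the JSON file"); }   // else nothing to do
};

int main() {
  MemTable t;
  t.rows = {"row 0", "row 1"};
  t.Open();
  while (t.Read() == 0)
    puts(t.rows[t.fpos].c_str());
  t.Write("row 2");
  t.Close();                       // document changed, so it gets serialized
  return 0;
}
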
*/ /***********************************************************************/ TDBJCL::TDBJCL(PJDEF tdp) : TDBCAT(tdp) - { +{ Topt = tdp->GetTopt(); Db = tdp->Schema; Dsn = tdp->Uri; - } // end of TDBJCL constructor +} // end of TDBJCL constructor /***********************************************************************/ /* GetResult: Get the list the JSON file columns. */ /***********************************************************************/ PQRYRES TDBJCL::GetResult(PGLOBAL g) - { +{ return JSONColumns(g, Db, Dsn, Topt, false); - } // end of GetResult +} // end of GetResult /* --------------------------- End of json --------------------------- */ diff --git a/storage/connect/tabjson.h b/storage/connect/tabjson.h index 88aa5e2ee8b..b9313e4d809 100644 --- a/storage/connect/tabjson.h +++ b/storage/connect/tabjson.h @@ -58,7 +58,7 @@ public: // Members JCOL jcol; PJCL jcp, fjcp, pjcp; - PVAL valp; + PVL vlp; PJDEF tdp; TDBJSN *tjnp; PJTDB tjsp; @@ -126,6 +126,7 @@ public: class DllExport TDBJSN : public TDBDOS { friend class JSONCOL; friend class JSONDEF; + friend class JSONDISC; #if defined(CMGO_SUPPORT) friend class CMGFAM; #endif // CMGO_SUPPORT @@ -154,14 +155,17 @@ public: {return Txfp->GetAmType() == TYPE_AM_MGO || !Xcol;} // Database routines - virtual int Cardinality(PGLOBAL g); - virtual int GetMaxSize(PGLOBAL g); + //virtual int Cardinality(PGLOBAL g); + //virtual int GetMaxSize(PGLOBAL g); virtual bool OpenDB(PGLOBAL g); virtual int ReadDB(PGLOBAL g); virtual bool PrepareWriting(PGLOBAL g); virtual int WriteDB(PGLOBAL g); - protected: + // Specific routine + virtual int EstimatedLength(void); + +protected: PJSON FindRow(PGLOBAL g); int MakeTopTree(PGLOBAL g, PJSON jsp); diff --git a/storage/connect/value.cpp b/storage/connect/value.cpp index de04f7678f9..bca6d26d1e9 100644 --- a/storage/connect/value.cpp +++ b/storage/connect/value.cpp @@ -380,8 +380,8 @@ PVAL AllocateValue(PGLOBAL g, int type, int len, int prec, case TYPE_STRING: valp = new(g) TYPVAL(g, (PSZ)NULL, len, prec); break; - case TYPE_DATE: - valp = new(g) DTVAL(g, len, prec, fmt); + case TYPE_DATE: + valp = new(g) DTVAL(g, len, prec, fmt); break; case TYPE_INT: if (uns) diff --git a/storage/connect/value.h b/storage/connect/value.h index ee7a1c8032f..3b907c2bf41 100644 --- a/storage/connect/value.h +++ b/storage/connect/value.h @@ -65,7 +65,8 @@ DllExport BYTE OpBmp(PGLOBAL g, OPVAL opc); /***********************************************************************/ class DllExport VALUE : public BLOCK { friend class CONSTANT; // The only object allowed to use SetConstFormat - public: + friend class SWAP; // The only class allowed to access protected +public: // Constructors // Implementation @@ -260,7 +261,8 @@ class DllExport TYPVAL : public VALUE { /***********************************************************************/ template <> class DllExport TYPVAL: public VALUE { - public: + friend class SWAP; // The only class allowed to offsets Strg +public: // Constructors TYPVAL(PSZ s, short c = 0); TYPVAL(PGLOBAL g, PSZ s, int n, int c); @@ -346,7 +348,8 @@ class DllExport DECVAL: public TYPVAL { /* Specific BINARY class. 
*/ /***********************************************************************/ class DllExport BINVAL: public VALUE { - public: + friend class SWAP; // The only class allowed to offsets pointers +public: // Constructors //BINVAL(void *p); BINVAL(PGLOBAL g, void *p, int cl, int n); diff --git a/storage/connect/xindex.cpp b/storage/connect/xindex.cpp index 95f038d494c..4d0f198705a 100644 --- a/storage/connect/xindex.cpp +++ b/storage/connect/xindex.cpp @@ -2471,7 +2471,7 @@ void XFILE::Close(void) } // endif Xfile #if defined(XMAP) - if (Mmp && CloseMemMap(Mmp->memory, Mmp->lenL)) + if (Mmp && CloseMemMap(Mmp->memory, (size_t)Mmp->sz.QuadPart)) printf("Error closing mapped index\n"); #endif // XMAP } // end of Close @@ -2487,7 +2487,7 @@ void *XFILE::FileView(PGLOBAL g, char *fn) Mmp = (MMP)PlugSubAlloc(g, NULL, sizeof(MEMMAP)); h = CreateFileMap(g, fn, Mmp, MODE_READ, false); - if (h == INVALID_HANDLE_VALUE || (!Mmp->lenH && !Mmp->lenL)) { + if (h == INVALID_HANDLE_VALUE || (!Mmp->sz.QuadPart)) { if (!(*g->Message)) strcpy(g->Message, MSG(FILE_MAP_ERR)); From 78ccc605a56d9f6d7a7fd4dc8bc1003145ec3901 Mon Sep 17 00:00:00 2001 From: Olivier Bertrand Date: Tue, 3 Nov 2020 23:19:22 +0100 Subject: [PATCH 002/150] Fix compile error on LINUX (LARGE_INTEGER) --- storage/connect/filamap.cpp | 8 ++++++-- storage/connect/filamvct.cpp | 18 +++++++++++++----- storage/connect/jsonudf.cpp | 19 ++++++++++++++----- storage/connect/maputil.cpp | 4 ++-- storage/connect/maputil.h | 3 ++- storage/connect/xindex.cpp | 4 ++-- 6 files changed, 39 insertions(+), 17 deletions(-) diff --git a/storage/connect/filamap.cpp b/storage/connect/filamap.cpp index 66cf081e5af..007968e0d44 100644 --- a/storage/connect/filamap.cpp +++ b/storage/connect/filamap.cpp @@ -5,7 +5,7 @@ /* */ /* COPYRIGHT: */ /* ---------- */ -/* (C) Copyright to the author Olivier BERTRAND 2005-2017 */ +/* (C) Copyright to the author Olivier BERTRAND 2005-2020 */ /* */ /* WHAT THIS PROGRAM DOES: */ /* ----------------------- */ @@ -176,7 +176,11 @@ bool MAPFAM::OpenTableFile(PGLOBAL g) /*******************************************************************/ /* Get the file size. */ /*******************************************************************/ - len = (size_t)mm.sz.QuadPart; + len = (size_t)mm.lenL; + + if (mm.lenH) + len += ((size_t)mm.lenH * 0x000000001LL); + Memory = (char *)mm.memory; if (!len) { // Empty or deleted file diff --git a/storage/connect/filamvct.cpp b/storage/connect/filamvct.cpp index bf4ef8557ad..5a3e025cd55 100644 --- a/storage/connect/filamvct.cpp +++ b/storage/connect/filamvct.cpp @@ -5,7 +5,7 @@ /* */ /* COPYRIGHT: */ /* ---------- */ -/* (C) Copyright to the author Olivier BERTRAND 2005-2017 */ +/* (C) Copyright to the author Olivier BERTRAND 2005-2020 */ /* */ /* WHAT THIS PROGRAM DOES: */ /* ----------------------- */ @@ -1423,8 +1423,12 @@ bool VCMFAM::OpenTableFile(PGLOBAL g) /*******************************************************************/ /* Get the file size. 
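
The size computation that follows (and its twins in filamap.cpp above and in jsonudf.cpp further down) goes with the MEMMAP change from a LARGE_INTEGER, which did not compile on Linux, to two plain DWORDs filled by GetFileSize: lenL is the mapping size modulo 2^32 and lenH counts the full 0x100000000-byte units above it, so the 64-bit length is recovered by scaling lenH by 2^32 before adding lenL. A short sketch of that recombination, assuming a 64-bit size_t; full_length is a made-up helper, not part of the engine:

#include <cstdint>
#include <cstdio>

// lenL is the size modulo 2^32; lenH counts the 0x100000000-byte units above.
static uint64_t full_length(uint32_t lenL, uint32_t lenH) {
  return ((uint64_t)lenH << 32) | lenL;   // lenH * 0x100000000 + lenL
}

int main() {
  // A 6 GiB mapping: high dword = 1, low dword = 2 GiB.
  printf("%llu\n", (unsigned long long)full_length(0x80000000u, 1u));  // 6442450944
  return 0;
}
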
*/ /*******************************************************************/ - len = (size_t)mm.sz.QuadPart; - Memory = (char *)mm.memory; + len = (size_t)mm.lenL; + + if (mm.lenH) + len += ((size_t)mm.lenH * 0x000000001LL); + + Memory = (char *)mm.memory; if (!len) { // Empty or deleted file CloseFileHandle(hFile); @@ -2816,8 +2820,12 @@ bool VMPFAM::MapColumnFile(PGLOBAL g, MODE mode, int i) /*****************************************************************/ /* Get the file size (assuming file is smaller than 4 GB) */ /*****************************************************************/ - len = (size_t)mm.sz.QuadPart; - Memcol[i] = (char *)mm.memory; + len = (size_t)mm.lenL; + + if (mm.lenH) + len += ((size_t)mm.lenH * 0x000000001LL); + + Memcol[i] = (char *)mm.memory; if (!len) { // Empty or deleted file CloseFileHandle(hFile); diff --git a/storage/connect/jsonudf.cpp b/storage/connect/jsonudf.cpp index 6354b92107a..6c334111db0 100644 --- a/storage/connect/jsonudf.cpp +++ b/storage/connect/jsonudf.cpp @@ -1768,9 +1768,13 @@ static PJSON ParseJsonFile(PGLOBAL g, char *fn, int *pretty, size_t& len) } // endif hFile /*******************************************************************************/ - /* Get the file size (assuming file is smaller than 4 GB) */ + /* Get the file size. */ /*******************************************************************************/ - len = (size_t)mm.sz.QuadPart; + len = (size_t)mm.lenL; + + if (mm.lenH) + len += ((size_t)mm.lenH * 0x000000001LL); + memory = (char *)mm.memory; if (!len) { // Empty or deleted file @@ -6041,11 +6045,16 @@ char* JUP::UnprettyJsonFile(PGLOBAL g, char *fn, char *outfn, int lrecl) { /*******************************************************************************/ /* Get the file size (assuming file is smaller than 4 GB) */ /*******************************************************************************/ - if (!mm.sz.QuadPart) { // Empty or deleted file + if (!mm.lenL && !mm.lenH) { // Empty or deleted file CloseFileHandle(hFile); return NULL; - } else - len = (size_t)mm.sz.QuadPart; + } else { + len = (size_t)mm.lenL; + + if (mm.lenH) + len += ((size_t)mm.lenH * 0x000000001LL); + + } // endif size if (!mm.memory) { CloseFileHandle(hFile); diff --git a/storage/connect/maputil.cpp b/storage/connect/maputil.cpp index 86300f17200..87263b3adf6 100644 --- a/storage/connect/maputil.cpp +++ b/storage/connect/maputil.cpp @@ -90,8 +90,8 @@ HANDLE CreateFileMap(PGLOBAL g, LPCSTR filename, return INVALID_HANDLE_VALUE; } // endif memory - // HighPart is the high-order word of the file size - mm->sz.LowPart = GetFileSize(hFile, (LPDWORD)&mm->sz.HighPart); + // lenH is the high-order word of the file size + mm->lenL = GetFileSize(hFile, &mm->lenH); CloseHandle(hFileMap); // Not used anymore } else // MODE_INSERT /*****************************************************************/ diff --git a/storage/connect/maputil.h b/storage/connect/maputil.h index fd62fbcfeae..e310488eb5d 100644 --- a/storage/connect/maputil.h +++ b/storage/connect/maputil.h @@ -7,7 +7,8 @@ extern "C" { typedef struct { void *memory; - LARGE_INTEGER sz; + DWORD lenL; + DWORD lenH; } MEMMAP; DllExport HANDLE CreateFileMap(PGLOBAL, LPCSTR, MEMMAP *, MODE, bool); diff --git a/storage/connect/xindex.cpp b/storage/connect/xindex.cpp index 4d0f198705a..95f038d494c 100644 --- a/storage/connect/xindex.cpp +++ b/storage/connect/xindex.cpp @@ -2471,7 +2471,7 @@ void XFILE::Close(void) } // endif Xfile #if defined(XMAP) - if (Mmp && CloseMemMap(Mmp->memory, (size_t)Mmp->sz.QuadPart)) + if 
(Mmp && CloseMemMap(Mmp->memory, Mmp->lenL)) printf("Error closing mapped index\n"); #endif // XMAP } // end of Close @@ -2487,7 +2487,7 @@ void *XFILE::FileView(PGLOBAL g, char *fn) Mmp = (MMP)PlugSubAlloc(g, NULL, sizeof(MEMMAP)); h = CreateFileMap(g, fn, Mmp, MODE_READ, false); - if (h == INVALID_HANDLE_VALUE || (!Mmp->sz.QuadPart)) { + if (h == INVALID_HANDLE_VALUE || (!Mmp->lenH && !Mmp->lenL)) { if (!(*g->Message)) strcpy(g->Message, MSG(FILE_MAP_ERR)); From 49428c8fa6905c87b2b89e094c72f48a1e75b383 Mon Sep 17 00:00:00 2001 From: Olivier Bertrand Date: Wed, 4 Nov 2020 11:36:29 +0100 Subject: [PATCH 003/150] Fix compile error on LINUX (no suitable operator delete) --- storage/connect/block.h | 6 +++--- storage/connect/json.cpp | 6 +++--- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/storage/connect/block.h b/storage/connect/block.h index 479bee373fa..2d922808a67 100644 --- a/storage/connect/block.h +++ b/storage/connect/block.h @@ -44,8 +44,8 @@ class DllExport BLOCK { return PlugSubAlloc(g, mp, size); } // end of new - void* operator new(size_t size, size_t mp) { - xtrc(256, "Realloc at: mp=%zd\n", mp); + void* operator new(size_t size, long long mp) { + xtrc(256, "Realloc at: mp=%lld\n", mp); return (void*)mp; } // end of new @@ -55,7 +55,7 @@ class DllExport BLOCK { #if !defined(__BORLANDC__) // Avoid warning C4291 by defining a matching dummy delete operator void operator delete(void*, PGLOBAL, void *) {} - //void operator delete(void*, size_t) {} + void operator delete(void*, long long) {} #endif virtual ~BLOCK() {} diff --git a/storage/connect/json.cpp b/storage/connect/json.cpp index ce3ddd865a5..d461b60470d 100644 --- a/storage/connect/json.cpp +++ b/storage/connect/json.cpp @@ -1820,7 +1820,7 @@ PJSON SWAP::MptrJson(PJSON ojp) { // ojp is an offset PJAR SWAP::MptrArray(PJAR ojar) { PJAR jarp = (PJAR)MakePtr(Base, (size_t)ojar); - jarp = (PJAR)new((size_t)jarp) JARRAY(NULL); + jarp = (PJAR)new((long long)jarp) JARRAY(NULL); if (jarp->First) { for (int i = 0; i < jarp->Size; i++) @@ -1839,7 +1839,7 @@ PJAR SWAP::MptrArray(PJAR ojar) { PJOB SWAP::MptrObject(PJOB ojob) { PJOB jobp = (PJOB)MakePtr(Base, (size_t)ojob); - jobp = (PJOB)new((size_t)jobp) JOBJECT(NULL); + jobp = (PJOB)new((long long)jobp) JOBJECT(NULL); if (jobp->First) { jobp->First = (PJPR)MptrPair(jobp->First); @@ -1872,7 +1872,7 @@ PJPR SWAP::MptrPair(PJPR ojp) { PJVAL SWAP::MptrJValue(PJVAL ojv) { PJVAL jvp = (PJVAL)MakePtr(Base, (size_t)ojv); - jvp = (PJVAL)new((size_t)jvp) JVALUE(0); + jvp = (PJVAL)new((long long)jvp) JVALUE(0); if (!jvp->Del) { if (jvp->Jsp) From 6a94ad98fb8da214a9d917a72f704e6da795e338 Mon Sep 17 00:00:00 2001 From: Olivier Bertrand Date: Wed, 4 Nov 2020 15:46:02 +0100 Subject: [PATCH 004/150] Fix crash on Json date columns --- storage/connect/json.cpp | 12 +++++++++++- storage/connect/plgdbsem.h | 2 +- storage/connect/value.h | 3 ++- 3 files changed, 14 insertions(+), 3 deletions(-) diff --git a/storage/connect/json.cpp b/storage/connect/json.cpp index d461b60470d..c4d2c006da1 100644 --- a/storage/connect/json.cpp +++ b/storage/connect/json.cpp @@ -1587,7 +1587,17 @@ void JVALUE::SetValue(PGLOBAL g, PVAL valp) Val->Type = TYPE_NULL; } else switch (valp->GetType()) { case TYPE_STRING: - case TYPE_DATE: + if (((DTVAL*)valp)->IsFormatted()) + Val->Strp = valp->GetCharValue(); + else { + char buf[32]; + + Val->Strp = PlugDup(g, valp->GetCharString(buf)); + } // endif Formatted + + Val->Type = TYPE_DTM; + break; + case TYPE_DATE: Val->Strp = valp->GetCharValue(); Val->Type = 
TYPE_STRG; break; diff --git a/storage/connect/plgdbsem.h b/storage/connect/plgdbsem.h index a40e32bcfb2..1d644cb75c2 100644 --- a/storage/connect/plgdbsem.h +++ b/storage/connect/plgdbsem.h @@ -160,7 +160,7 @@ enum RECFM {RECFM_DFLT = 0, /* Default table type */ RECFM_FMT = 8, /* FMT formatted file */ RECFM_VCT = 9, /* VCT formatted files */ RECFM_XML = 10, /* XML formatted files */ - RECFM_JASON = 11, /* JASON formatted files */ + RECFM_JSON = 11, /* JSON formatted files */ RECFM_DIR = 12, /* DIR table */ RECFM_ODBC = 13, /* Table accessed via ODBC */ RECFM_JDBC = 14, /* Table accessed via JDBC */ diff --git a/storage/connect/value.h b/storage/connect/value.h index 3b907c2bf41..df6a55501b6 100644 --- a/storage/connect/value.h +++ b/storage/connect/value.h @@ -418,7 +418,8 @@ class DllExport DTVAL : public TYPVAL { virtual bool SetValue_char(const char *p, int n); virtual void SetValue_psz(PCSZ s); virtual void SetValue_pvblk(PVBLK blk, int n); - virtual char *GetCharString(char *p); + virtual PSZ GetCharValue(void) { return Sdate; } + virtual char *GetCharString(char *p); virtual int ShowValue(char *buf, int len); virtual bool FormatValue(PVAL vp, PCSZ fmt); bool SetFormat(PGLOBAL g, PCSZ fmt, int len, int year = 0); From a13642a82f1f61a115c7f42605e8b3a97f2f184f Mon Sep 17 00:00:00 2001 From: Olivier Bertrand Date: Wed, 4 Nov 2020 16:33:10 +0100 Subject: [PATCH 005/150] Try to fix that F..k gcc operator delete error --- storage/connect/block.h | 1 + 1 file changed, 1 insertion(+) diff --git a/storage/connect/block.h b/storage/connect/block.h index 2d922808a67..7cd2eb7ed19 100644 --- a/storage/connect/block.h +++ b/storage/connect/block.h @@ -56,6 +56,7 @@ class DllExport BLOCK { // Avoid warning C4291 by defining a matching dummy delete operator void operator delete(void*, PGLOBAL, void *) {} void operator delete(void*, long long) {} + void operator delete(void*) {} #endif virtual ~BLOCK() {} From ecb00f3cd8cd55b402f504d4edc9e2ff08360809 Mon Sep 17 00:00:00 2001 From: Olivier Bertrand Date: Thu, 5 Nov 2020 19:13:26 +0100 Subject: [PATCH 006/150] Try to fix failing tests --- storage/connect/block.h | 8 ++------ storage/connect/ha_connect.cc | 29 +++++++++++++++-------------- storage/connect/json.cpp | 7 ++++--- 3 files changed, 21 insertions(+), 23 deletions(-) diff --git a/storage/connect/block.h b/storage/connect/block.h index 7cd2eb7ed19..6ac3b73bf13 100644 --- a/storage/connect/block.h +++ b/storage/connect/block.h @@ -30,8 +30,6 @@ #define DllExport #endif // !__WIN__ -typedef class JSON *PJSON; - /***********************************************************************/ /* Definition of class BLOCK with its method function new. 
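
The repeated block.h adjustments in this series (patches 003, 005 and 006, plus the hunk below) revolve around one C++ rule. BLOCK objects are only ever created through placement forms of operator new (sub-allocation via PlugSubAlloc, or re-construction at a given address), and every placement new a class declares wants a matching operator delete: the placement form is what the compiler would call if a constructor threw, and a usual operator delete(void*) is needed as well because the destructor is virtual and member deletes hide the global one. The dummy bodies free nothing; they exist only to satisfy that lookup. A minimal standalone reproduction, with made-up Arena and Base classes:

#include <cstddef>
#include <cstdio>
#include <new>

struct Arena { alignas(std::max_align_t) char buf[256]; size_t used = 0; };

struct Base {
  // Placement new: objects are sub-allocated from the arena, never freed.
  void *operator new(size_t sz, Arena *a) {
    void *p = a->buf + a->used;
    a->used += sz;
    return p;
  }
  // Matching placement delete: called only if the constructor throws;
  // without it MSVC emits warning C4291 at the new-expression.
  void operator delete(void *, Arena *) {}
  // Usual delete: because the destructor is virtual and the class now
  // declares member operator deletes, gcc also requires this form
  // ("no suitable operator delete" otherwise).
  void operator delete(void *) {}

  virtual ~Base() {}
};

int main() {
  Arena a;
  Base *b = new (&a) Base;   // allocates from the arena, not the heap
  b->~Base();                // the arena is reclaimed wholesale elsewhere
  printf("used %zu bytes\n", a.used);
  return 0;
}
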
*/ /***********************************************************************/ @@ -52,14 +50,12 @@ class DllExport BLOCK { virtual void Printf(PGLOBAL, FILE *, uint) {} // Produce file desc virtual void Prints(PGLOBAL, char *, uint) {} // Produce string desc -#if !defined(__BORLANDC__) - // Avoid warning C4291 by defining a matching dummy delete operator + // Avoid gcc errors by defining matching dummy delete operators void operator delete(void*, PGLOBAL, void *) {} void operator delete(void*, long long) {} void operator delete(void*) {} -#endif - virtual ~BLOCK() {} + virtual ~BLOCK() {} }; // end of class BLOCK #endif // !BLOCK_DEFINED diff --git a/storage/connect/ha_connect.cc b/storage/connect/ha_connect.cc index 859d50b9a2c..d06ee5a6415 100644 --- a/storage/connect/ha_connect.cc +++ b/storage/connect/ha_connect.cc @@ -170,7 +170,7 @@ #define JSONMAX 10 // JSON Default max grp size extern "C" { - char version[]= "Version 1.07.0002 October 18, 2020"; + char version[]= "Version 1.07.0002 November 05, 2020"; #if defined(__WIN__) char compver[]= "Version 1.07.0002 " __DATE__ " " __TIME__; char slash= '\\'; @@ -398,7 +398,7 @@ static MYSQL_THDVAR_ENUM( // Adding JPATH to all Json table columns static MYSQL_THDVAR_BOOL(json_all_path, PLUGIN_VAR_RQCMDARG, "Adding JPATH to all Json table columns", - NULL, NULL, 0); // NO by default + NULL, NULL, 1); // YES by default // Null representation for JSON values static MYSQL_THDVAR_STR(json_null, @@ -411,7 +411,7 @@ static MYSQL_THDVAR_STR(json_null, static MYSQL_THDVAR_INT(default_depth, PLUGIN_VAR_RQCMDARG, "Default depth used by Json, XML and Mongo discovery", - NULL, NULL, 0, -1, 16, 1); + NULL, NULL, 5, -1, 16, 1); // Defaults to 5 // Estimate max number of rows for JSON aggregate functions static MYSQL_THDVAR_UINT(json_grp_size, @@ -4543,14 +4543,12 @@ bool ha_connect::check_privileges(THD *thd, PTOS options, char *dbn, bool quick) case TAB_DIR: case TAB_ZIP: case TAB_OEM: - if (table && table->pos_in_table_list) // if SELECT - { + if (table && table->pos_in_table_list) { // if SELECT #if MYSQL_VERSION_ID > 100200 Switch_to_definer_security_ctx backup_ctx(thd, table->pos_in_table_list); #endif // VERSION_ID > 100200 return check_global_access(thd, FILE_ACL); - } - else + } else return check_global_access(thd, FILE_ACL); case TAB_ODBC: case TAB_JDBC: @@ -5360,7 +5358,8 @@ static char *encode(PGLOBAL g, const char *cnm) */ static bool add_field(String* sql, TABTYPE ttp, const char* field_name, int typ, int len, int dec, char* key, uint tm, const char* rem, - char* dft, char* xtra, char* fmt, int flag, bool dbf, char v) { + char* dft, char* xtra, char* fmt, int flag, bool dbf, char v) +{ #if defined(DEVELOPMENT) // Some client programs regard CHAR(36) as GUID char var = (len > 255 || len == 36) ? 
'V' : v; @@ -5604,8 +5603,8 @@ static int connect_assisted_discovery(handlerton *, THD* thd, String sql(buf, sizeof(buf), system_charset_info); sql.copy(STRING_WITH_LEN("CREATE TABLE whatever ("), system_charset_info); - user = host = pwd = tbl = src = col = ocl = pic = fcl = skc = rnk = zfn = NULL; - dsn = url = NULL; + user= host= pwd= tbl= src= col= ocl= pic= fcl= skc= rnk= zfn= NULL; + dsn= url= NULL; // Get the useful create options ttp= GetTypeID(topt->type); @@ -6268,7 +6267,7 @@ static int connect_assisted_discovery(handlerton *, THD* thd, // Now add the field if (add_field(&sql, ttp, cnm, typ, prec, dec, key, tm, rem, dft, xtra, - fmt, flg, dbf, v)) + fmt, flg, dbf, v)) rc= HA_ERR_OUT_OF_MEM; } // endfor i @@ -6767,8 +6766,8 @@ int ha_connect::create(const char *name, TABLE *table_arg, if (trace(1)) htrc("xchk=%p createas=%d\n", g->Xchk, g->Createas); -#if defined(ZIP_SUPPORT) if (options->zipped) { +#if defined(ZIP_SUPPORT) // Check whether the zip entry must be made from a file PCSZ fn= GetListOption(g, "Load", options->oplist, NULL); @@ -6790,9 +6789,11 @@ int ha_connect::create(const char *name, TABLE *table_arg, } // endif LoadFile } // endif fn - +#else // !ZIP_SUPPORT + my_message(ER_UNKNOWN_ERROR, "Option ZIP not supported", MYF(0)); + DBUG_RETURN(HA_ERR_INTERNAL_ERROR); +#endif // !ZIP_SUPPORT } // endif zipped -#endif // ZIP_SUPPORT // To check whether indexes have to be made or remade if (!g->Xchk) { diff --git a/storage/connect/json.cpp b/storage/connect/json.cpp index c4d2c006da1..dd12bf546de 100644 --- a/storage/connect/json.cpp +++ b/storage/connect/json.cpp @@ -1533,10 +1533,11 @@ PSZ JVALUE::GetString(PGLOBAL g) sprintf(buf, "%.*lf", Val->Nd, Val->F); break; case TYPE_BOOL: - p = (Val->B) ? "true" : "false"; + p = (char*)PlugDup(g, (Val->B) ? "true" : "false"); break; case TYPE_NULL: - p = "null"; + p = (char*)PlugDup(g, "null") + ; break; default: p = NULL; @@ -1545,7 +1546,7 @@ PSZ JVALUE::GetString(PGLOBAL g) } else p = NULL; - return p; + return (p == buf)? 
(char*)PlugDup(g, buf) : p; } // end of GetString /***********************************************************************/ From addb28f62dc205e61c70e6ca8ef8547bfa44c50a Mon Sep 17 00:00:00 2001 From: Olivier Bertrand Date: Thu, 5 Nov 2020 22:14:01 +0100 Subject: [PATCH 007/150] Try to fix failing tests --- storage/connect/json.cpp | 17 ++++++++--------- 1 file changed, 8 insertions(+), 9 deletions(-) diff --git a/storage/connect/json.cpp b/storage/connect/json.cpp index dd12bf546de..11553e2f0dd 100644 --- a/storage/connect/json.cpp +++ b/storage/connect/json.cpp @@ -1053,7 +1053,7 @@ PSZ JOBJECT::GetText(PGLOBAL g, PSZ text) return text; if (!text) { - text = (char*)PlugSubAlloc(g, NULL, 0); + text = (char*)PlugSubAlloc(g, NULL, 512); // TODO: get size text[0] = 0; n = 1; } else @@ -1079,8 +1079,8 @@ PSZ JOBJECT::GetText(PGLOBAL g, PSZ text) } else for (PJPR jp = First; jp; jp = jp->Next) jp->Val->GetText(g, text); - if (n) - PlugSubAlloc(g, NULL, strlen(text) + 1); + //if (n) + // PlugSubAlloc(g, NULL, strlen(text) + 1); return text + n; } // end of GetText; @@ -1290,7 +1290,7 @@ PSZ JARRAY::GetText(PGLOBAL g, PSZ text) PJVAL jp; if (!text) { - text = (char*)PlugSubAlloc(g, NULL, 0); + text = (char*)PlugSubAlloc(g, NULL, 512); text[0] = 0; n = 1; } else @@ -1299,8 +1299,8 @@ PSZ JARRAY::GetText(PGLOBAL g, PSZ text) for (jp = First; jp; jp = jp->Next) jp->GetText(g, text); - if (n) - PlugSubAlloc(g, NULL, strlen(text) + 1); + //if (n) + // PlugSubAlloc(g, NULL, strlen(text) + 1); return text + n; } // end of GetText; @@ -1536,8 +1536,7 @@ PSZ JVALUE::GetString(PGLOBAL g) p = (char*)PlugDup(g, (Val->B) ? "true" : "false"); break; case TYPE_NULL: - p = (char*)PlugDup(g, "null") - ; + p = (char*)PlugDup(g, "null"); break; default: p = NULL; @@ -1546,7 +1545,7 @@ PSZ JVALUE::GetString(PGLOBAL g) } else p = NULL; - return (p == buf)? (char*)PlugDup(g, buf) : p; + return (p == buf) ? 
(char*)PlugDup(g, buf) : p; } // end of GetString /***********************************************************************/ From a4e999eec5ad08eb663f94fef0e9590daf1e5b5f Mon Sep 17 00:00:00 2001 From: Olivier Bertrand Date: Thu, 5 Nov 2020 23:04:37 +0100 Subject: [PATCH 008/150] Try to fix failing tests --- storage/connect/json.cpp | 24 ++++++++++++++++-------- 1 file changed, 16 insertions(+), 8 deletions(-) diff --git a/storage/connect/json.cpp b/storage/connect/json.cpp index 11553e2f0dd..1006c9c9de1 100644 --- a/storage/connect/json.cpp +++ b/storage/connect/json.cpp @@ -1053,7 +1053,7 @@ PSZ JOBJECT::GetText(PGLOBAL g, PSZ text) return text; if (!text) { - text = (char*)PlugSubAlloc(g, NULL, 512); // TODO: get size + text = (PSZ)malloc(1024); // TODO: get size text[0] = 0; n = 1; } else @@ -1079,10 +1079,14 @@ PSZ JOBJECT::GetText(PGLOBAL g, PSZ text) } else for (PJPR jp = First; jp; jp = jp->Next) jp->Val->GetText(g, text); - //if (n) - // PlugSubAlloc(g, NULL, strlen(text) + 1); + if (n) { + PSZ txt = (PSZ)PlugSubAlloc(g, NULL, strlen(text)); + strcpy(txt, text + 1); // Remove leading blank + free(text); + text = txt; + } // endif n - return text + n; + return text; } // end of GetText; /***********************************************************************/ @@ -1290,7 +1294,7 @@ PSZ JARRAY::GetText(PGLOBAL g, PSZ text) PJVAL jp; if (!text) { - text = (char*)PlugSubAlloc(g, NULL, 512); + text = (char*)malloc(1024); // Should be large enough text[0] = 0; n = 1; } else @@ -1299,10 +1303,14 @@ PSZ JARRAY::GetText(PGLOBAL g, PSZ text) for (jp = First; jp; jp = jp->Next) jp->GetText(g, text); - //if (n) - // PlugSubAlloc(g, NULL, strlen(text) + 1); + if (n) { + PSZ txt = (PSZ)PlugSubAlloc(g, NULL, strlen(text)); + strcpy(txt, text + 1); // Remove leading blank + free(text); + text = txt; + } // endif n - return text + n; + return text; } // end of GetText; /***********************************************************************/ From d3372258b0cda3f1f84128958bb0da151fc0d951 Mon Sep 17 00:00:00 2001 From: Olivier Bertrand Date: Fri, 6 Nov 2020 15:34:13 +0100 Subject: [PATCH 009/150] Update tests to cope with changes --- storage/connect/mysql-test/connect/r/json_java_2.result | 1 + storage/connect/mysql-test/connect/r/json_java_3.result | 1 + storage/connect/mysql-test/connect/r/json_mongo_c.result | 1 + storage/connect/mysql-test/connect/r/mongo_c.result | 1 + storage/connect/mysql-test/connect/r/mongo_java_2.result | 1 + storage/connect/mysql-test/connect/r/mongo_java_3.result | 1 + storage/connect/mysql-test/connect/r/xml2_zip.result | 4 ++-- storage/connect/mysql-test/connect/r/xml_zip.result | 4 ++-- storage/connect/mysql-test/connect/t/json_java_2.test | 1 + storage/connect/mysql-test/connect/t/json_java_3.test | 1 + storage/connect/mysql-test/connect/t/mongo_test.inc | 1 + storage/connect/mysql-test/connect/t/xml2_zip.test | 4 ++-- storage/connect/mysql-test/connect/t/xml_zip.test | 4 ++-- 13 files changed, 17 insertions(+), 8 deletions(-) diff --git a/storage/connect/mysql-test/connect/r/json_java_2.result b/storage/connect/mysql-test/connect/r/json_java_2.result index 2ce89f971b2..bc6ad16772e 100644 --- a/storage/connect/mysql-test/connect/r/json_java_2.result +++ b/storage/connect/mysql-test/connect/r/json_java_2.result @@ -1,4 +1,5 @@ set connect_enable_mongo=1; +set connect_json_all_path=0; # # Test the MONGO table type # diff --git a/storage/connect/mysql-test/connect/r/json_java_3.result b/storage/connect/mysql-test/connect/r/json_java_3.result index d914b507f52..e7dd9468845 
100644 --- a/storage/connect/mysql-test/connect/r/json_java_3.result +++ b/storage/connect/mysql-test/connect/r/json_java_3.result @@ -1,4 +1,5 @@ set connect_enable_mongo=1; +set connect_json_all_path=0; # # Test the MONGO table type # diff --git a/storage/connect/mysql-test/connect/r/json_mongo_c.result b/storage/connect/mysql-test/connect/r/json_mongo_c.result index 454743e679b..7d26e0152f2 100644 --- a/storage/connect/mysql-test/connect/r/json_mongo_c.result +++ b/storage/connect/mysql-test/connect/r/json_mongo_c.result @@ -1,4 +1,5 @@ set connect_enable_mongo=1; +set connect_json_all_path=0; # # Test the MONGO table type # diff --git a/storage/connect/mysql-test/connect/r/mongo_c.result b/storage/connect/mysql-test/connect/r/mongo_c.result index cabdf713d16..8b86ce32943 100644 --- a/storage/connect/mysql-test/connect/r/mongo_c.result +++ b/storage/connect/mysql-test/connect/r/mongo_c.result @@ -1,4 +1,5 @@ set connect_enable_mongo=1; +set connect_json_all_path=0; # # Test the MONGO table type # diff --git a/storage/connect/mysql-test/connect/r/mongo_java_2.result b/storage/connect/mysql-test/connect/r/mongo_java_2.result index 890b88324bb..cccda2760d6 100644 --- a/storage/connect/mysql-test/connect/r/mongo_java_2.result +++ b/storage/connect/mysql-test/connect/r/mongo_java_2.result @@ -1,4 +1,5 @@ set connect_enable_mongo=1; +set connect_json_all_path=0; # # Test the MONGO table type # diff --git a/storage/connect/mysql-test/connect/r/mongo_java_3.result b/storage/connect/mysql-test/connect/r/mongo_java_3.result index f6f9895a29e..ae39148a156 100644 --- a/storage/connect/mysql-test/connect/r/mongo_java_3.result +++ b/storage/connect/mysql-test/connect/r/mongo_java_3.result @@ -1,4 +1,5 @@ set connect_enable_mongo=1; +set connect_json_all_path=0; # # Test the MONGO table type # diff --git a/storage/connect/mysql-test/connect/r/xml2_zip.result b/storage/connect/mysql-test/connect/r/xml2_zip.result index 7d67a3d5498..e743af32418 100644 --- a/storage/connect/mysql-test/connect/r/xml2_zip.result +++ b/storage/connect/mysql-test/connect/r/xml2_zip.result @@ -17,7 +17,7 @@ PUBLISHER_NAME CHAR(15) NOT NULL XPATH='PUBLISHER/NAME', PUBLISHER_PLACE CHAR(5) NOT NULL XPATH='PUBLISHER/PLACE', DATEPUB CHAR(4) NOT NULL ) ENGINE=CONNECT TABLE_TYPE=XML FILE_NAME='xsample2.zip' ZIPPED=YES -OPTION_LIST='entry=xsample2.xml,load=xsample2.xml,rownode=BOOK,xmlsup=libxml2,expand=1,mulnode=AUTHOR'; +OPTION_LIST='depth=0,entry=xsample2.xml,load=xsample2.xml,rownode=BOOK,xmlsup=libxml2,expand=1,mulnode=AUTHOR'; SELECT * FROM t1; ISBN 9782212090819 LANG fr @@ -69,7 +69,7 @@ PUBLISHER_PLACE Paris DATEPUB 2003 CREATE TABLE t2 ENGINE=CONNECT TABLE_TYPE=XML FILE_NAME='xsample2.zip' ZIPPED=YES -OPTION_LIST='xmlsup=libxml2'; +OPTION_LIST='depth=0,xmlsup=libxml2'; SELECT * FROM t2; ISBN 9782212090819 LANG fr diff --git a/storage/connect/mysql-test/connect/r/xml_zip.result b/storage/connect/mysql-test/connect/r/xml_zip.result index ddde0fa8a35..5f17249b390 100644 --- a/storage/connect/mysql-test/connect/r/xml_zip.result +++ b/storage/connect/mysql-test/connect/r/xml_zip.result @@ -15,7 +15,7 @@ PUBLISHER_NAME CHAR(15) NOT NULL XPATH='PUBLISHER/NAME', PUBLISHER_PLACE CHAR(5) NOT NULL XPATH='PUBLISHER/PLACE', DATEPUB CHAR(4) NOT NULL ) ENGINE=CONNECT TABLE_TYPE=XML FILE_NAME='xsample2.zip' ZIPPED=YES -OPTION_LIST='entry=xsample2.xml,load=xsample2.xml,rownode=BOOK,xmlsup=domdoc,expand=1,mulnode=AUTHOR'; +OPTION_LIST='depth=0,entry=xsample2.xml,load=xsample2.xml,rownode=BOOK,xmlsup=domdoc,expand=1,mulnode=AUTHOR'; SELECT * 
FROM t1; ISBN 9782212090819 LANG fr @@ -67,7 +67,7 @@ PUBLISHER_PLACE Paris DATEPUB 2003 CREATE TABLE t2 ENGINE=CONNECT TABLE_TYPE=XML FILE_NAME='xsample2.zip' ZIPPED=YES -OPTION_LIST='xmlsup=domdoc'; +OPTION_LIST='depth=0,xmlsup=domdoc'; SELECT * FROM t2; ISBN 9782212090819 LANG fr diff --git a/storage/connect/mysql-test/connect/t/json_java_2.test b/storage/connect/mysql-test/connect/t/json_java_2.test index 2f64d8e2eed..03202828bb1 100644 --- a/storage/connect/mysql-test/connect/t/json_java_2.test +++ b/storage/connect/mysql-test/connect/t/json_java_2.test @@ -3,6 +3,7 @@ --disable_query_log eval SET GLOBAL connect_class_path='$MTR_SUITE_DIR/std_data/Mongo2.jar'; +set connect_json_all_path=0; --enable_query_log let $DRV= Java; let $VERS= 2; diff --git a/storage/connect/mysql-test/connect/t/json_java_3.test b/storage/connect/mysql-test/connect/t/json_java_3.test index cee8343772a..238808a833f 100644 --- a/storage/connect/mysql-test/connect/t/json_java_3.test +++ b/storage/connect/mysql-test/connect/t/json_java_3.test @@ -3,6 +3,7 @@ --disable_query_log eval SET GLOBAL connect_class_path='$MTR_SUITE_DIR/std_data/Mongo3.jar'; +set connect_json_all_path=0; --enable_query_log let $DRV= Java; let $VERS= 3; diff --git a/storage/connect/mysql-test/connect/t/mongo_test.inc b/storage/connect/mysql-test/connect/t/mongo_test.inc index 1c8bf07184f..0a9c80f5ba5 100644 --- a/storage/connect/mysql-test/connect/t/mongo_test.inc +++ b/storage/connect/mysql-test/connect/t/mongo_test.inc @@ -1,4 +1,5 @@ set connect_enable_mongo=1; +set connect_json_all_path=0; --echo # --echo # Test the MONGO table type diff --git a/storage/connect/mysql-test/connect/t/xml2_zip.test b/storage/connect/mysql-test/connect/t/xml2_zip.test index 46697dc97f7..df69f9dace3 100644 --- a/storage/connect/mysql-test/connect/t/xml2_zip.test +++ b/storage/connect/mysql-test/connect/t/xml2_zip.test @@ -24,13 +24,13 @@ PUBLISHER_NAME CHAR(15) NOT NULL XPATH='PUBLISHER/NAME', PUBLISHER_PLACE CHAR(5) NOT NULL XPATH='PUBLISHER/PLACE', DATEPUB CHAR(4) NOT NULL ) ENGINE=CONNECT TABLE_TYPE=XML FILE_NAME='xsample2.zip' ZIPPED=YES -OPTION_LIST='entry=xsample2.xml,load=xsample2.xml,rownode=BOOK,xmlsup=libxml2,expand=1,mulnode=AUTHOR'; +OPTION_LIST='depth=0,entry=xsample2.xml,load=xsample2.xml,rownode=BOOK,xmlsup=libxml2,expand=1,mulnode=AUTHOR'; SELECT * FROM t1; #testing discovery CREATE TABLE t2 ENGINE=CONNECT TABLE_TYPE=XML FILE_NAME='xsample2.zip' ZIPPED=YES -OPTION_LIST='xmlsup=libxml2'; +OPTION_LIST='depth=0,xmlsup=libxml2'; SELECT * FROM t2; DROP TABLE t1,t2; diff --git a/storage/connect/mysql-test/connect/t/xml_zip.test b/storage/connect/mysql-test/connect/t/xml_zip.test index 774021485f9..29ee2e0e607 100644 --- a/storage/connect/mysql-test/connect/t/xml_zip.test +++ b/storage/connect/mysql-test/connect/t/xml_zip.test @@ -24,13 +24,13 @@ PUBLISHER_NAME CHAR(15) NOT NULL XPATH='PUBLISHER/NAME', PUBLISHER_PLACE CHAR(5) NOT NULL XPATH='PUBLISHER/PLACE', DATEPUB CHAR(4) NOT NULL ) ENGINE=CONNECT TABLE_TYPE=XML FILE_NAME='xsample2.zip' ZIPPED=YES -OPTION_LIST='entry=xsample2.xml,load=xsample2.xml,rownode=BOOK,xmlsup=domdoc,expand=1,mulnode=AUTHOR'; +OPTION_LIST='depth=0,entry=xsample2.xml,load=xsample2.xml,rownode=BOOK,xmlsup=domdoc,expand=1,mulnode=AUTHOR'; SELECT * FROM t1; #testing discovery CREATE TABLE t2 ENGINE=CONNECT TABLE_TYPE=XML FILE_NAME='xsample2.zip' ZIPPED=YES -OPTION_LIST='xmlsup=domdoc'; +OPTION_LIST='depth=0,xmlsup=domdoc'; SELECT * FROM t2; DROP TABLE t1,t2; From 46edfd6338f7709aca65070496019c9970bf8ecf Mon Sep 17 00:00:00 
2001 From: Olivier Bertrand Date: Sat, 7 Nov 2020 15:40:46 +0100 Subject: [PATCH 010/150] - Getting text of json items now includes all array members modified: storage/connect/json.cpp modified: storage/connect/json.h modified: storage/connect/jsonudf.cpp modified: storage/connect/jsonudf.h modified: storage/connect/mysql-test/connect/r/json.result modified: storage/connect/mysql-test/connect/r/json_java_2.result modified: storage/connect/mysql-test/connect/r/json_mongo_c.result modified: storage/connect/mysql-test/connect/r/json_udf.result modified: storage/connect/mysql-test/connect/r/json_udf_bin.result modified: storage/connect/mysql-test/connect/r/zip.result modified: storage/connect/mysql-test/connect/t/zip.test modified: storage/connect/tabjson.cpp modified: storage/connect/tabjson.h modified: storage/connect/xobject.h --- storage/connect/json.cpp | 149 ++++++++++-------- storage/connect/json.h | 13 +- storage/connect/jsonudf.cpp | 16 +- storage/connect/jsonudf.h | 2 +- .../connect/mysql-test/connect/r/json.result | 2 +- .../mysql-test/connect/r/json_java_2.result | 30 ++-- .../mysql-test/connect/r/json_mongo_c.result | 30 ++-- .../mysql-test/connect/r/json_udf.result | 6 +- .../mysql-test/connect/r/json_udf_bin.result | 2 +- .../connect/mysql-test/connect/r/zip.result | 42 ++--- storage/connect/mysql-test/connect/t/zip.test | 30 ++-- storage/connect/tabjson.cpp | 15 +- storage/connect/tabjson.h | 4 +- storage/connect/xobject.h | 1 + 14 files changed, 182 insertions(+), 160 deletions(-) diff --git a/storage/connect/json.cpp b/storage/connect/json.cpp index 1006c9c9de1..9af5761c9ee 100644 --- a/storage/connect/json.cpp +++ b/storage/connect/json.cpp @@ -1045,48 +1045,57 @@ PJVAL JOBJECT::GetValue(const char* key) /***********************************************************************/ /* Return the text corresponding to all keys (XML like). */ /***********************************************************************/ -PSZ JOBJECT::GetText(PGLOBAL g, PSZ text) +PSZ JOBJECT::GetText(PGLOBAL g, PSTRG text) { - int n; + if (First) { + bool b; - if (!First) - return text; + if (!text) { + text = new(g) STRING(g, 256); + b = true; + } else { + if (text->GetLastChar() != ' ') + text->Append(' '); - if (!text) { - text = (PSZ)malloc(1024); // TODO: get size - text[0] = 0; - n = 1; - } else - n = 0; + b = false; + } // endif text - if (n == 1 && !First->Next && !strcmp(First->Key, "$date")) { - int i; + if (b && !First->Next && !strcmp(First->Key, "$date")) { + int i; + PSZ s; - First->Val->GetText(g, text); - i = (text[1] == '-' ? 2 : 1); + First->Val->GetText(g, text); + s = text->GetStr(); + i = (s[1] == '-' ? 
2 : 1); - if (IsNum(text + i)) { - // Date is in milliseconds - int j = (int)strlen(text); + if (IsNum(s + i)) { + // Date is in milliseconds + int j = text->GetLength(); - if (j >= 4 + i) - text[j - 3] = 0; // Change it to seconds - else - strcpy(text, " 0"); + if (j >= 4 + i) { + s[j - 3] = 0; // Change it to seconds + text->SetLength((uint)strlen(s)); + } else + text->Set(" 0"); - } // endif text + } // endif text - } else for (PJPR jp = First; jp; jp = jp->Next) - jp->Val->GetText(g, text); + } else for (PJPR jp = First; jp; jp = jp->Next) { + jp->Val->GetText(g, text); - if (n) { - PSZ txt = (PSZ)PlugSubAlloc(g, NULL, strlen(text)); - strcpy(txt, text + 1); // Remove leading blank - free(text); - text = txt; - } // endif n + if (jp->Next) + text->Append(' '); - return text; + } // endfor jp + + if (b) { + text->Trim(); + return text->GetStr(); + } // endif b + + } // endif First + + return NULL; } // end of GetText; /***********************************************************************/ @@ -1288,29 +1297,42 @@ bool JARRAY::SetValue(PGLOBAL g, PJVAL jvp, int n) /***********************************************************************/ /* Return the text corresponding to all values. */ /***********************************************************************/ -PSZ JARRAY::GetText(PGLOBAL g, PSZ text) +PSZ JARRAY::GetText(PGLOBAL g, PSTRG text) { - int n; - PJVAL jp; + if (First) { + bool b; + PJVAL jp; - if (!text) { - text = (char*)malloc(1024); // Should be large enough - text[0] = 0; - n = 1; - } else - n = 0; + if (!text) { + text = new(g) STRING(g, 256); + b = true; + } else { + if (text->GetLastChar() != ' ') + text->Append(" ("); + else + text->Append('('); - for (jp = First; jp; jp = jp->Next) - jp->GetText(g, text); + b = false; + } - if (n) { - PSZ txt = (PSZ)PlugSubAlloc(g, NULL, strlen(text)); - strcpy(txt, text + 1); // Remove leading blank - free(text); - text = txt; - } // endif n + for (jp = First; jp; jp = jp->Next) { + jp->GetText(g, text); - return text; + if (jp->Next) + text->Append(", "); + else if (!b) + text->Append(')'); + + } // endfor jp + + if (b) { + text->Trim(); + return text->GetStr(); + } // endif b + + } // endif First + + return NULL; } // end of GetText; /***********************************************************************/ @@ -1520,10 +1542,10 @@ double JVALUE::GetFloat(void) /***********************************************************************/ /* Return the Value's String value. */ /***********************************************************************/ -PSZ JVALUE::GetString(PGLOBAL g) +PSZ JVALUE::GetString(PGLOBAL g, char *buff) { char buf[32]; - char *p = buf; + char *p = (buff) ? buff : buf; if (Val) { switch (Val->Type) { @@ -1532,19 +1554,19 @@ PSZ JVALUE::GetString(PGLOBAL g) p = Val->Strp; break; case TYPE_INTG: - sprintf(buf, "%d", Val->N); + sprintf(p, "%d", Val->N); break; case TYPE_BINT: - sprintf(buf, "%lld", Val->LLn); + sprintf(p, "%lld", Val->LLn); break; case TYPE_DBL: - sprintf(buf, "%.*lf", Val->Nd, Val->F); + sprintf(p, "%.*lf", Val->Nd, Val->F); break; case TYPE_BOOL: - p = (char*)PlugDup(g, (Val->B) ? "true" : "false"); + p = (char*)((Val->B) ? "true" : "false"); break; case TYPE_NULL: - p = (char*)PlugDup(g, "null"); + p = (char*)"null"; break; default: p = NULL; @@ -1559,19 +1581,20 @@ PSZ JVALUE::GetString(PGLOBAL g) /***********************************************************************/ /* Return the Value's String value. 
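
The GetText rework above replaces the fixed malloc buffers of the previous attempts with the growable STRING class: object members are appended with separating blanks, and a nested array is rendered with ", " between its members and wrapped in parentheses, which is exactly what turns the coordinate columns in the updated result files below into values such as (-73.856077, 40.848447). A sketch of that accumulation; append_array_text and the std::string buffer stand in for the engine's STRING class and are not its API:

#include <cstdio>
#include <string>
#include <vector>

// Append the members of an array to an accumulating text buffer, the way a
// nested array is rendered after this patch: comma-separated, parenthesized.
static void append_array_text(std::string &text,
                              const std::vector<std::string> &items,
                              bool nested) {
  if (nested) text += '(';
  for (size_t i = 0; i < items.size(); i++) {
    if (i) text += ", ";
    text += items[i];
  }
  if (nested) text += ')';
}

int main() {
  std::string txt = "1007";
  txt += ' ';
  append_array_text(txt, {"-73.856077", "40.848447"}, true);
  txt += " Morris Park Ave";
  // prints: 1007 (-73.856077, 40.848447) Morris Park Ave
  printf("%s\n", txt.c_str());
  return 0;
}
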
*/ /***********************************************************************/ -PSZ JVALUE::GetText(PGLOBAL g, PSZ text) +PSZ JVALUE::GetText(PGLOBAL g, PSTRG text) { if (Jsp) return Jsp->GetText(g, text); - PSZ s = (Val) ? GetString(g) : NULL; + char buff[32]; + PSZ s = (Val) ? GetString(g, buff) : NULL; - if (s) - strcat(strcat(text, " "), s); - else if (GetJsonNull()) - strcat(strcat(text, " "), GetJsonNull()); + if (s) + text->Append(s); + else if (GetJsonNull()) + text->Append(GetJsonNull()); - return text; + return NULL; } // end of GetText void JVALUE::SetValue(PJSON jsp) diff --git a/storage/connect/json.h b/storage/connect/json.h index c457a3fec45..d04577f1734 100644 --- a/storage/connect/json.h +++ b/storage/connect/json.h @@ -7,6 +7,7 @@ /***********************************************************************/ #include #include "value.h" +#include "xobject.h" #if defined(_DEBUG) #define X assert(false); @@ -157,8 +158,8 @@ public: virtual PJPR GetFirst(void) { X return NULL; } virtual int GetInteger(void) { X return 0; } virtual double GetFloat() { X return 0.0; } - virtual PSZ GetString(PGLOBAL g) { X return NULL; } - virtual PSZ GetText(PGLOBAL g, PSZ text) { X return NULL; } + virtual PSZ GetString(PGLOBAL g, char *buff = NULL) { X return NULL; } + virtual PSZ GetText(PGLOBAL g, PSTRG text) { X return NULL; } virtual bool Merge(PGLOBAL g, PJSON jsp) { X return true; } virtual bool SetValue(PGLOBAL g, PJVAL jvp, int i) { X return true; } virtual void SetValue(PGLOBAL g, PJVAL jvp, PCSZ key) { X } @@ -198,7 +199,7 @@ public: virtual PJVAL GetValue(const char* key); virtual PJAR GetKeyList(PGLOBAL g); virtual PJAR GetValList(PGLOBAL g); - virtual PSZ GetText(PGLOBAL g, PSZ text); + virtual PSZ GetText(PGLOBAL g, PSTRG text); virtual bool Merge(PGLOBAL g, PJSON jsp); virtual void SetValue(PGLOBAL g, PJVAL jvp, PCSZ key); virtual void DeleteKey(PCSZ k); @@ -229,7 +230,7 @@ class JARRAY : public JSON { PJVAL AddValue(PGLOBAL g, PJVAL jvp = NULL, int *x = NULL); virtual void InitArray(PGLOBAL g); virtual PJVAL GetValue(int i); - virtual PSZ GetText(PGLOBAL g, PSZ text); + virtual PSZ GetText(PGLOBAL g, PSTRG text); virtual bool Merge(PGLOBAL g, PJSON jsp); virtual bool SetValue(PGLOBAL g, PJVAL jvp, int i); virtual bool DeleteValue(int n); @@ -277,8 +278,8 @@ public: virtual int GetInteger(void); virtual long long GetBigint(void); virtual double GetFloat(void); - virtual PSZ GetString(PGLOBAL g); - virtual PSZ GetText(PGLOBAL g, PSZ text); + virtual PSZ GetString(PGLOBAL g, char *buff = NULL); + virtual PSZ GetText(PGLOBAL g, PSTRG text); virtual void SetValue(PJSON jsp); virtual void SetValue(PGLOBAL g, PVAL valp); inline void SetVal(PVL vlp) { Val = vlp; } diff --git a/storage/connect/jsonudf.cpp b/storage/connect/jsonudf.cpp index 6c334111db0..bdd75a7fc3b 100644 --- a/storage/connect/jsonudf.cpp +++ b/storage/connect/jsonudf.cpp @@ -347,7 +347,7 @@ PVAL JSNX::MakeJson(PGLOBAL g, PJSON jsp) /*********************************************************************************/ /* SetValue: Set a value from a JVALUE contains. 
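
The jsonudf.cpp hunks that follow mirror the tabjson.cpp changes earlier in this section: JSNX::SetJsonValue drops its node-index argument and now renders a whole array or object through GetText instead of keeping only the first element, and CalculateArray goes on folding the members of an array into a single scalar, where null members are skipped unless the operation is concatenation and a null representation is configured. A sketch of that folding; fold, OP_ADD and the "<null>" marker are illustrative stand-ins (only OP_CNC appears in the hunks themselves):

#include <cstdio>
#include <optional>
#include <string>
#include <vector>

enum Op { OP_ADD, OP_CNC };

// Fold the members of an array into one value with the requested operator,
// skipping nulls except when concatenating, where they become null_repr.
static std::string fold(const std::vector<std::optional<double>> &arr, Op op,
                        const std::string &null_repr) {
  double sum = 0;
  std::string cat;
  int nv = 0;

  for (const auto &m : arr) {
    if (!m && op != OP_CNC)
      continue;                       // nulls are ignored unless concatenating
    if (op == OP_ADD) {
      sum += *m;
      nv++;
    } else {
      if (nv++) cat += " ";
      cat += m ? std::to_string(*m) : null_repr;
    }
  }
  return (op == OP_ADD) ? std::to_string(sum) : cat;
}

int main() {
  std::vector<std::optional<double>> a = {2.0, std::nullopt, 8.0};
  printf("%s\n", fold(a, OP_ADD, "<null>").c_str());  // 10.000000
  printf("%s\n", fold(a, OP_CNC, "<null>").c_str());  // 2.000000 <null> 8.000000
  return 0;
}
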
*/ /*********************************************************************************/ -void JSNX::SetJsonValue(PGLOBAL g, PVAL vp, PJVAL val, int n) +void JSNX::SetJsonValue(PGLOBAL g, PVAL vp, PJVAL val) { if (val) { vp->SetNull(false); @@ -378,14 +378,11 @@ void JSNX::SetJsonValue(PGLOBAL g, PVAL vp, PJVAL val, int n) break; case TYPE_JAR: - SetJsonValue(g, vp, val->GetArray()->GetValue(0), n); + vp->SetValue_psz(val->GetArray()->GetText(g, NULL)); break; case TYPE_JOB: -// if (!vp->IsTypeNum() || !Strict) { vp->SetValue_psz(val->GetObject()->GetText(g, NULL)); break; -// } // endif Type - case TYPE_NULL: vp->SetNull(true); default: @@ -420,11 +417,10 @@ void JSNX::ReadValue(PGLOBAL g) /*********************************************************************************/ PVAL JSNX::GetColumnValue(PGLOBAL g, PJSON row, int i) { - int n = Nod - 1; PJVAL val = NULL; val = GetRowValue(g, row, i); - SetJsonValue(g, Value, val, n); + SetJsonValue(g, Value, val); return Value; } // end of GetColumnValue @@ -497,7 +493,7 @@ PJVAL JSNX::GetRowValue(PGLOBAL g, PJSON row, int i, my_bool b) } // endfor i - // SetJsonValue(g, Value, val, n); + // SetJsonValue(g, Value, val); return val; } // end of GetRowValue @@ -548,10 +544,10 @@ PVAL JSNX::CalculateArray(PGLOBAL g, PJAR arp, int n) jvp->GetString(g), jvp->IsNull() ? 1 : 0); if (!nv++) { - SetJsonValue(g, vp, jvp, n); + SetJsonValue(g, vp, jvp); continue; } else - SetJsonValue(g, MulVal, jvp, n); + SetJsonValue(g, MulVal, jvp); if (!MulVal->IsNull()) { switch (op) { diff --git a/storage/connect/jsonudf.h b/storage/connect/jsonudf.h index f5b2bf75654..e4785bc5c38 100644 --- a/storage/connect/jsonudf.h +++ b/storage/connect/jsonudf.h @@ -294,7 +294,7 @@ protected: PVAL ExpandArray(PGLOBAL g, PJAR arp, int n); PVAL CalculateArray(PGLOBAL g, PJAR arp, int n); PVAL MakeJson(PGLOBAL g, PJSON jsp); - void SetJsonValue(PGLOBAL g, PVAL vp, PJVAL val, int n); + void SetJsonValue(PGLOBAL g, PVAL vp, PJVAL val); PJSON GetRow(PGLOBAL g); my_bool CompareValues(PVL v1, PVL v2); my_bool LocateArray(PGLOBAL g, PJAR jarp); diff --git a/storage/connect/mysql-test/connect/r/json.result b/storage/connect/mysql-test/connect/r/json.result index affaea604a8..dc527acd4a3 100644 --- a/storage/connect/mysql-test/connect/r/json.result +++ b/storage/connect/mysql-test/connect/r/json.result @@ -15,7 +15,7 @@ DATEPUB int(4) ) ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='biblio.json'; SELECT * FROM t1; ISBN LANG SUBJECT AUTHOR TITLE TRANSLATION TRANSLATOR PUBLISHER DATEPUB -9782212090819 fr applications Jean-Christophe Bernadac Construire une application XML NULL NULL Eyrolles Paris 1999 +9782212090819 fr applications Jean-Christophe Bernadac, Franois Knab Construire une application XML NULL NULL Eyrolles Paris 1999 9782840825685 fr applications William J. 
Pardi XML en Action adapt de l'anglais par James Guerin Microsoft Press Paris 1999 DROP TABLE t1; # diff --git a/storage/connect/mysql-test/connect/r/json_java_2.result b/storage/connect/mysql-test/connect/r/json_java_2.result index bc6ad16772e..e0b08889f40 100644 --- a/storage/connect/mysql-test/connect/r/json_java_2.result +++ b/storage/connect/mysql-test/connect/r/json_java_2.result @@ -80,11 +80,11 @@ t1 CREATE TABLE `t1` ( ) ENGINE=CONNECT DEFAULT CHARSET=latin1 CONNECTION='mongodb://localhost:27017' `TABLE_TYPE`='JSON' `TABNAME`='restaurants' `OPTION_LIST`='Depth=1,Driver=Java,Version=2' `DATA_CHARSET`='utf8' `LRECL`=4096 SELECT * FROM t1 LIMIT 5; _id address_building address_coord address_street address_zipcode borough cuisine grades_date grades_grade grades_score name restaurant_id -58ada47de5a51ddfcd5ed51c 1007 -73.856077 Morris Park Ave 10462 Bronx Bakery 2014-03-03T00:00:00.000Z A 2 Morris Park Bake Shop 30075445 -58ada47de5a51ddfcd5ed51d 469 -73.961704 Flatbush Avenue 11225 Brooklyn Hamburgers 2014-12-30T00:00:00.000Z A 8 Wendy'S 30112340 -58ada47de5a51ddfcd5ed51e 351 -73.98513559999999 West 57 Street 10019 Manhattan Irish 2014-09-06T00:00:00.000Z A 2 Dj Reynolds Pub And Restaurant 30191841 -58ada47de5a51ddfcd5ed51f 2780 -73.98241999999999 Stillwell Avenue 11224 Brooklyn American 2014-06-10T00:00:00.000Z A 5 Riviera Caterer 40356018 -58ada47de5a51ddfcd5ed520 97-22 -73.8601152 63 Road 11374 Queens Jewish/Kosher 2014-11-24T00:00:00.000Z Z 20 Tov Kosher Kitchen 40356068 +58ada47de5a51ddfcd5ed51c 1007 -73.856077, 40.848447 Morris Park Ave 10462 Bronx Bakery 2014-03-03T00:00:00.000Z A 2 Morris Park Bake Shop 30075445 +58ada47de5a51ddfcd5ed51d 469 -73.961704, 40.662942 Flatbush Avenue 11225 Brooklyn Hamburgers 2014-12-30T00:00:00.000Z A 8 Wendy'S 30112340 +58ada47de5a51ddfcd5ed51e 351 -73.98513559999999, 40.7676919 West 57 Street 10019 Manhattan Irish 2014-09-06T00:00:00.000Z A 2 Dj Reynolds Pub And Restaurant 30191841 +58ada47de5a51ddfcd5ed51f 2780 -73.98241999999999, 40.579505 Stillwell Avenue 11224 Brooklyn American 2014-06-10T00:00:00.000Z A 5 Riviera Caterer 40356018 +58ada47de5a51ddfcd5ed520 97-22 -73.8601152, 40.7311739 63 Road 11374 Queens Jewish/Kosher 2014-11-24T00:00:00.000Z Z 20 Tov Kosher Kitchen 40356068 DROP TABLE t1; # # Dropping a column @@ -93,16 +93,16 @@ CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=JSON TABNAME=restaurants DATA_CHARSET= COLIST='{"grades":0}' OPTION_LIST='Driver=Java,Version=2,level=0' CONNECTION='mongodb://localhost:27017' LRECL=4096; SELECT * FROM t1 LIMIT 10; _id address borough cuisine name restaurant_id -58ada47de5a51ddfcd5ed51c 1007 -73.856077 40.848447 Morris Park Ave 10462 Bronx Bakery Morris Park Bake Shop 30075445 -58ada47de5a51ddfcd5ed51d 469 -73.961704 40.662942 Flatbush Avenue 11225 Brooklyn Hamburgers Wendy'S 30112340 -58ada47de5a51ddfcd5ed51e 351 -73.98513559999999 40.7676919 West 57 Street 10019 Manhattan Irish Dj Reynolds Pub And Restaurant 30191841 -58ada47de5a51ddfcd5ed51f 2780 -73.98241999999999 40.579505 Stillwell Avenue 11224 Brooklyn American Riviera Caterer 40356018 -58ada47de5a51ddfcd5ed520 97-22 -73.8601152 40.7311739 63 Road 11374 Queens Jewish/Kosher Tov Kosher Kitchen 40356068 -58ada47de5a51ddfcd5ed521 8825 -73.8803827 40.7643124 Astoria Boulevard 11369 Queens American Brunos On The Boulevard 40356151 -58ada47de5a51ddfcd5ed522 2206 -74.1377286 40.6119572 Victory Boulevard 10314 Staten Island Jewish/Kosher Kosher Island 40356442 -58ada47de5a51ddfcd5ed523 7114 -73.9068506 40.6199034 Avenue U 11234 Brooklyn Delicatessen 
Wilken'S Fine Food 40356483 -58ada47de5a51ddfcd5ed524 6409 -74.00528899999999 40.628886 11 Avenue 11219 Brooklyn American Regina Caterers 40356649 -58ada47de5a51ddfcd5ed525 1839 -73.9482609 40.6408271 Nostrand Avenue 11226 Brooklyn Ice Cream, Gelato, Yogurt, Ices Taste The Tropics Ice Cream 40356731 +58ada47de5a51ddfcd5ed51c 1007 (-73.856077, 40.848447) Morris Park Ave 10462 Bronx Bakery Morris Park Bake Shop 30075445 +58ada47de5a51ddfcd5ed51d 469 (-73.961704, 40.662942) Flatbush Avenue 11225 Brooklyn Hamburgers Wendy'S 30112340 +58ada47de5a51ddfcd5ed51e 351 (-73.98513559999999, 40.7676919) West 57 Street 10019 Manhattan Irish Dj Reynolds Pub And Restaurant 30191841 +58ada47de5a51ddfcd5ed51f 2780 (-73.98241999999999, 40.579505) Stillwell Avenue 11224 Brooklyn American Riviera Caterer 40356018 +58ada47de5a51ddfcd5ed520 97-22 (-73.8601152, 40.7311739) 63 Road 11374 Queens Jewish/Kosher Tov Kosher Kitchen 40356068 +58ada47de5a51ddfcd5ed521 8825 (-73.8803827, 40.7643124) Astoria Boulevard 11369 Queens American Brunos On The Boulevard 40356151 +58ada47de5a51ddfcd5ed522 2206 (-74.1377286, 40.6119572) Victory Boulevard 10314 Staten Island Jewish/Kosher Kosher Island 40356442 +58ada47de5a51ddfcd5ed523 7114 (-73.9068506, 40.6199034) Avenue U 11234 Brooklyn Delicatessen Wilken'S Fine Food 40356483 +58ada47de5a51ddfcd5ed524 6409 (-74.00528899999999, 40.628886) 11 Avenue 11219 Brooklyn American Regina Caterers 40356649 +58ada47de5a51ddfcd5ed525 1839 (-73.9482609, 40.6408271) Nostrand Avenue 11226 Brooklyn Ice Cream, Gelato, Yogurt, Ices Taste The Tropics Ice Cream 40356731 DROP TABLE t1; # # Specifying Jpath diff --git a/storage/connect/mysql-test/connect/r/json_mongo_c.result b/storage/connect/mysql-test/connect/r/json_mongo_c.result index 7d26e0152f2..482ccc85b57 100644 --- a/storage/connect/mysql-test/connect/r/json_mongo_c.result +++ b/storage/connect/mysql-test/connect/r/json_mongo_c.result @@ -80,11 +80,11 @@ t1 CREATE TABLE `t1` ( ) ENGINE=CONNECT DEFAULT CHARSET=latin1 CONNECTION='mongodb://localhost:27017' `TABLE_TYPE`='JSON' `TABNAME`='restaurants' `OPTION_LIST`='Depth=1,Driver=C,Version=0' `DATA_CHARSET`='utf8' `LRECL`=1024 SELECT * FROM t1 LIMIT 5; _id address_building address_coord address_street address_zipcode borough cuisine grades_date grades_grade grades_score name restaurant_id -58ada47de5a51ddfcd5ed51c 1007 -73.856076999999999089 Morris Park Ave 10462 Bronx Bakery 1393804800 A 2 Morris Park Bake Shop 30075445 -58ada47de5a51ddfcd5ed51d 469 -73.96170399999999745 Flatbush Avenue 11225 Brooklyn Hamburgers 1419897600 A 8 Wendy'S 30112340 -58ada47de5a51ddfcd5ed51e 351 -73.985135599999992451 West 57 Street 10019 Manhattan Irish 1409961600 A 2 Dj Reynolds Pub And Restaurant 30191841 -58ada47de5a51ddfcd5ed51f 2780 -73.982419999999990523 Stillwell Avenue 11224 Brooklyn American 1402358400 A 5 Riviera Caterer 40356018 -58ada47de5a51ddfcd5ed520 97-22 -73.860115199999995639 63 Road 11374 Queens Jewish/Kosher 1416787200 Z 20 Tov Kosher Kitchen 40356068 +58ada47de5a51ddfcd5ed51c 1007 -73.856076999999999089, 40.848447000000000173 Morris Park Ave 10462 Bronx Bakery 1393804800 A 2 Morris Park Bake Shop 30075445 +58ada47de5a51ddfcd5ed51d 469 -73.96170399999999745, 40.66294200000000103 Flatbush Avenue 11225 Brooklyn Hamburgers 1419897600 A 8 Wendy'S 30112340 +58ada47de5a51ddfcd5ed51e 351 -73.985135599999992451, 40.767691900000002647 West 57 Street 10019 Manhattan Irish 1409961600 A 2 Dj Reynolds Pub And Restaurant 30191841 +58ada47de5a51ddfcd5ed51f 2780 -73.982419999999990523, 40.579504999999997494 
Stillwell Avenue 11224 Brooklyn American 1402358400 A 5 Riviera Caterer 40356018 +58ada47de5a51ddfcd5ed520 97-22 -73.860115199999995639, 40.731173900000001709 63 Road 11374 Queens Jewish/Kosher 1416787200 Z 20 Tov Kosher Kitchen 40356068 DROP TABLE t1; # # Dropping a column @@ -93,16 +93,16 @@ CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=JSON TABNAME=restaurants DATA_CHARSET= COLIST='{"projection":{"grades":0}}' OPTION_LIST='Driver=C,Version=0,level=0' CONNECTION='mongodb://localhost:27017' LRECL=1024; SELECT * FROM t1 LIMIT 10; _id address borough cuisine name restaurant_id -58ada47de5a51ddfcd5ed51c 1007 -73.856076999999999089 40.848447000000000173 Morris Park Ave 10462 Bronx Bakery Morris Park Bake Shop 30075445 -58ada47de5a51ddfcd5ed51d 469 -73.96170399999999745 40.66294200000000103 Flatbush Avenue 11225 Brooklyn Hamburgers Wendy'S 30112340 -58ada47de5a51ddfcd5ed51e 351 -73.985135599999992451 40.767691900000002647 West 57 Street 10019 Manhattan Irish Dj Reynolds Pub And Restaurant 30191841 -58ada47de5a51ddfcd5ed51f 2780 -73.982419999999990523 40.579504999999997494 Stillwell Avenue 11224 Brooklyn American Riviera Caterer 40356018 -58ada47de5a51ddfcd5ed520 97-22 -73.860115199999995639 40.731173900000001709 63 Road 11374 Queens Jewish/Kosher Tov Kosher Kitchen 40356068 -58ada47de5a51ddfcd5ed521 8825 -73.880382699999998408 40.764312400000001446 Astoria Boulevard 11369 Queens American Brunos On The Boulevard 40356151 -58ada47de5a51ddfcd5ed522 2206 -74.137728600000002643 40.611957199999999091 Victory Boulevard 10314 Staten Island Jewish/Kosher Kosher Island 40356442 -58ada47de5a51ddfcd5ed523 7114 -73.906850599999998508 40.619903399999998328 Avenue U 11234 Brooklyn Delicatessen Wilken'S Fine Food 40356483 -58ada47de5a51ddfcd5ed524 6409 -74.005288999999990551 40.628886000000001388 11 Avenue 11219 Brooklyn American Regina Caterers 40356649 -58ada47de5a51ddfcd5ed525 1839 -73.948260899999993967 40.640827100000002758 Nostrand Avenue 11226 Brooklyn Ice Cream, Gelato, Yogurt, Ices Taste The Tropics Ice Cream 40356731 +58ada47de5a51ddfcd5ed51c 1007 (-73.856076999999999089, 40.848447000000000173) Morris Park Ave 10462 Bronx Bakery Morris Park Bake Shop 30075445 +58ada47de5a51ddfcd5ed51d 469 (-73.96170399999999745, 40.66294200000000103) Flatbush Avenue 11225 Brooklyn Hamburgers Wendy'S 30112340 +58ada47de5a51ddfcd5ed51e 351 (-73.985135599999992451, 40.767691900000002647) West 57 Street 10019 Manhattan Irish Dj Reynolds Pub And Restaurant 30191841 +58ada47de5a51ddfcd5ed51f 2780 (-73.982419999999990523, 40.579504999999997494) Stillwell Avenue 11224 Brooklyn American Riviera Caterer 40356018 +58ada47de5a51ddfcd5ed520 97-22 (-73.860115199999995639, 40.731173900000001709) 63 Road 11374 Queens Jewish/Kosher Tov Kosher Kitchen 40356068 +58ada47de5a51ddfcd5ed521 8825 (-73.880382699999998408, 40.764312400000001446) Astoria Boulevard 11369 Queens American Brunos On The Boulevard 40356151 +58ada47de5a51ddfcd5ed522 2206 (-74.137728600000002643, 40.611957199999999091) Victory Boulevard 10314 Staten Island Jewish/Kosher Kosher Island 40356442 +58ada47de5a51ddfcd5ed523 7114 (-73.906850599999998508, 40.619903399999998328) Avenue U 11234 Brooklyn Delicatessen Wilken'S Fine Food 40356483 +58ada47de5a51ddfcd5ed524 6409 (-74.005288999999990551, 40.628886000000001388) 11 Avenue 11219 Brooklyn American Regina Caterers 40356649 +58ada47de5a51ddfcd5ed525 1839 (-73.948260899999993967, 40.640827100000002758) Nostrand Avenue 11226 Brooklyn Ice Cream, Gelato, Yogurt, Ices Taste The Tropics Ice Cream 40356731 DROP TABLE t1; # # 
Specifying Jpath diff --git a/storage/connect/mysql-test/connect/r/json_udf.result b/storage/connect/mysql-test/connect/r/json_udf.result index 09544bb1ecb..8315fc3f3bf 100644 --- a/storage/connect/mysql-test/connect/r/json_udf.result +++ b/storage/connect/mysql-test/connect/r/json_udf.result @@ -187,11 +187,11 @@ DATEPUB int(4) ) ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='biblio.json'; SELECT Json_Make_Array(AUTHOR, TITLE, DATEPUB) FROM t2; Json_Make_Array(AUTHOR, TITLE, DATEPUB) -["Jean-Christophe Bernadac","Construire une application XML",1999] +[" Jean-Christophe Bernadac, Franois Knab","Construire une application XML",1999] ["William J. Pardi","XML en Action",1999] SELECT Json_Make_Object(AUTHOR, TITLE, DATEPUB) FROM t2; Json_Make_Object(AUTHOR, TITLE, DATEPUB) -{"AUTHOR":"Jean-Christophe Bernadac","TITLE":"Construire une application XML","DATEPUB":1999} +{"AUTHOR":" Jean-Christophe Bernadac, Franois Knab","TITLE":"Construire une application XML","DATEPUB":1999} {"AUTHOR":"William J. Pardi","TITLE":"XML en Action","DATEPUB":1999} SELECT Json_Array_Grp(TITLE, DATEPUB) FROM t2; ERROR HY000: Can't initialize function 'json_array_grp'; This function can only accept 1 argument @@ -610,7 +610,7 @@ JsonGet_String(Json_File('test/fx.json'), '1.*') {"_id":6,"type":"car","item":"roadster","mileage":56000,"ratings":[6,9]} SELECT JsonGet_String(Json_File('test/fx.json'), '1'); JsonGet_String(Json_File('test/fx.json'), '1') -6 car roadster 56000 6 9 +6 car roadster 56000 (6, 9) SELECT JsonGet_Int(Json_File('test/fx.json'), '1.mileage') AS Mileage; Mileage 56000 diff --git a/storage/connect/mysql-test/connect/r/json_udf_bin.result b/storage/connect/mysql-test/connect/r/json_udf_bin.result index d0819619c33..c20cf7ce632 100644 --- a/storage/connect/mysql-test/connect/r/json_udf_bin.result +++ b/storage/connect/mysql-test/connect/r/json_udf_bin.result @@ -87,7 +87,7 @@ Json_Get_Item(Jbin_File('gloss.json'),'$.glossary.GlossDiv') {"title":"S","GlossList":{"GlossEntry":{"ID":"SGML","SortAs":"SGML","GlossTerm":"Standard Generalized Markup Language","Acronym":"SGML","Abbrev":"ISO 8879:1986","GlossDef":{"para":"A meta-markup language, used to create markup languages such as DocBook.","GlossSeeAlso":["GML","XML"]},"GlossSee":"markup"}}} SELECT JsonGet_String(Json_File('gloss.json'),'$.glossary.GlossDiv.GlossList.GlossEntry.GlossDef.GlossSeeAlso') lang; lang -GML +GML, XML SELECT Json_Get_Item(Jbin_File('gloss.json'),'$.glossary.GlossDiv.GlossList.GlossEntry.GlossDef.GlossSeeAlso') "See also"; See also ["GML","XML"] diff --git a/storage/connect/mysql-test/connect/r/zip.result b/storage/connect/mysql-test/connect/r/zip.result index c81546a4689..c696252ca43 100644 --- a/storage/connect/mysql-test/connect/r/zip.result +++ b/storage/connect/mysql-test/connect/r/zip.result @@ -171,32 +171,32 @@ DROP TABLE t1,t2,t3,t4; # CREATE TABLE t1 ( _id INT(2) NOT NULL, -name_first CHAR(9) NOT NULL FIELD_FORMAT='$.name.first', -name_aka CHAR(4) DEFAULT NULL FIELD_FORMAT='$.name.aka', -name_last CHAR(10) NOT NULL FIELD_FORMAT='$.name.last', +name_first CHAR(9) NOT NULL JPATH='$.name.first', +name_aka CHAR(4) DEFAULT NULL JPATH='$.name.aka', +name_last CHAR(10) NOT NULL JPATH='$.name.last', title CHAR(12) DEFAULT NULL, birth CHAR(20) DEFAULT NULL, death CHAR(20) DEFAULT NULL, -contribs CHAR(7) NOT NULL FIELD_FORMAT='$.contribs', -awards_award CHAR(42) DEFAULT NULL FIELD_FORMAT='$.awards.award', -awards_year CHAR(4) DEFAULT NULL FIELD_FORMAT='$.awards.year', -awards_by CHAR(38) DEFAULT NULL FIELD_FORMAT='$.awards.by' +contribs 
CHAR(50) NOT NULL JPATH='$.contribs', +awards_award CHAR(42) DEFAULT NULL JPATH='$.awards.award', +awards_year CHAR(4) DEFAULT NULL JPATH='$.awards.year', +awards_by CHAR(38) DEFAULT NULL JPATH='$.awards.by' ) ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='bios.zip' OPTION_LIST='ENTRY=bios.json,LOAD=bios.json' ZIPPED=YES; SELECT * FROM t1; _id name_first name_aka name_last title birth death contribs awards_award awards_year awards_by -1 John NULL Backus NULL 1924-12-03T05:00:00Z 2007-03-17T04:00:00Z Fortran W.W. McDowell Award 1967 IEEE Computer Society -2 John NULL McCarthy NULL 1927-09-04T04:00:00Z 2011-12-24T05:00:00Z Lisp Turing Award 1971 ACM -3 Grace NULL Hopper Rear Admiral 1906-12-09T05:00:00Z 1992-01-01T05:00:00Z UNIVAC Computer Sciences Man of the Year 1969 Data Processing Management Association -4 Kristen NULL Nygaard NULL 1926-08-27T04:00:00Z 2002-08-10T04:00:00Z OOP Rosing Prize 1999 Norwegian Data Association -5 Ole-Johan NULL Dahl NULL 1931-10-12T04:00:00Z 2002-06-29T04:00:00Z OOP Rosing Prize 1999 Norwegian Data Association +1 John NULL Backus NULL 1924-12-03T05:00:00Z 2007-03-17T04:00:00Z Fortran, ALGOL, Backus-Naur Form, FP W.W. McDowell Award 1967 IEEE Computer Society +2 John NULL McCarthy NULL 1927-09-04T04:00:00Z 2011-12-24T05:00:00Z Lisp, Artificial Intelligence, ALGOL Turing Award 1971 ACM +3 Grace NULL Hopper Rear Admiral 1906-12-09T05:00:00Z 1992-01-01T05:00:00Z UNIVAC, compiler, FLOW-MATIC, COBOL Computer Sciences Man of the Year 1969 Data Processing Management Association +4 Kristen NULL Nygaard NULL 1926-08-27T04:00:00Z 2002-08-10T04:00:00Z OOP, Simula Rosing Prize 1999 Norwegian Data Association +5 Ole-Johan NULL Dahl NULL 1931-10-12T04:00:00Z 2002-06-29T04:00:00Z OOP, Simula Rosing Prize 1999 Norwegian Data Association 6 Guido NULL van Rossum NULL 1956-01-31T05:00:00Z NULL Python Award for the Advancement of Free Software 2001 Free Software Foundation -7 Dennis NULL Ritchie NULL 1941-09-09T04:00:00Z 2011-10-12T04:00:00Z UNIX Turing Award 1983 ACM +7 Dennis NULL Ritchie NULL 1941-09-09T04:00:00Z 2011-10-12T04:00:00Z UNIX, C Turing Award 1983 ACM 8 Yukihiro Matz Matsumoto NULL 1965-04-14T04:00:00Z NULL Ruby Award for the Advancement of Free Software 2011 Free Software Foundation 9 James NULL Gosling NULL 1955-05-19T04:00:00Z NULL Java The Economist Innovation Award 2002 The Economist 10 Martin NULL Odersky NULL NULL NULL Scala NULL NULL NULL CREATE TABLE t2 ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='bios.zip' ZIPPED=1 -OPTION_LIST='LEVEL=5'; +OPTION_LIST='DEPTH=5'; SELECT * FROM t2; _id name_first name_aka name_last title birth death contribs awards_award awards_year awards_by 1 John NULL Backus NULL 1924-12-03T05:00:00Z 2007-03-17T04:00:00Z Fortran W.W. 
McDowell Award 1967 IEEE Computer Society @@ -211,16 +211,16 @@ _id name_first name_aka name_last title birth death contribs awards_award awards 10 Martin NULL Odersky NULL NULL NULL Scala NULL NULL NULL CREATE TABLE t3 ( _id INT(2) NOT NULL, -firstname CHAR(9) NOT NULL FIELD_FORMAT='$.name.first', -aka CHAR(4) DEFAULT NULL FIELD_FORMAT='$.name.aka', -lastname CHAR(10) NOT NULL FIELD_FORMAT='$.name.last', +firstname CHAR(9) NOT NULL JPATH='$.name.first', +aka CHAR(4) DEFAULT NULL JPATH='$.name.aka', +lastname CHAR(10) NOT NULL JPATH='$.name.last', title CHAR(12) DEFAULT NULL, birth date DEFAULT NULL date_format="YYYY-DD-MM'T'hh:mm:ss'Z'", death date DEFAULT NULL date_format="YYYY-DD-MM'T'hh:mm:ss'Z'", -contribs CHAR(64) NOT NULL FIELD_FORMAT='$.contribs.[", "]', -award CHAR(42) DEFAULT NULL FIELD_FORMAT='$.awards[*].award', -year CHAR(4) DEFAULT NULL FIELD_FORMAT='$.awards[*].year', -`by` CHAR(38) DEFAULT NULL FIELD_FORMAT='$.awards[*].by' +contribs CHAR(64) NOT NULL JPATH='$.contribs.[", "]', +award CHAR(42) DEFAULT NULL JPATH='$.awards[*].award', +year CHAR(4) DEFAULT NULL JPATH='$.awards[*].year', +`by` CHAR(38) DEFAULT NULL JPATH='$.awards[*].by' ) ENGINE=CONNECT TABLE_TYPE='json' FILE_NAME='bios.zip' ZIPPED=YES; SELECT * FROM t3 WHERE _id = 1; _id firstname aka lastname title birth death contribs award year by diff --git a/storage/connect/mysql-test/connect/t/zip.test b/storage/connect/mysql-test/connect/t/zip.test index dce68c17eee..1f0a4eedee9 100644 --- a/storage/connect/mysql-test/connect/t/zip.test +++ b/storage/connect/mysql-test/connect/t/zip.test @@ -83,37 +83,37 @@ DROP TABLE t1,t2,t3,t4; --echo # CREATE TABLE t1 ( _id INT(2) NOT NULL, -name_first CHAR(9) NOT NULL FIELD_FORMAT='$.name.first', -name_aka CHAR(4) DEFAULT NULL FIELD_FORMAT='$.name.aka', -name_last CHAR(10) NOT NULL FIELD_FORMAT='$.name.last', +name_first CHAR(9) NOT NULL JPATH='$.name.first', +name_aka CHAR(4) DEFAULT NULL JPATH='$.name.aka', +name_last CHAR(10) NOT NULL JPATH='$.name.last', title CHAR(12) DEFAULT NULL, birth CHAR(20) DEFAULT NULL, death CHAR(20) DEFAULT NULL, -contribs CHAR(7) NOT NULL FIELD_FORMAT='$.contribs', -awards_award CHAR(42) DEFAULT NULL FIELD_FORMAT='$.awards.award', -awards_year CHAR(4) DEFAULT NULL FIELD_FORMAT='$.awards.year', -awards_by CHAR(38) DEFAULT NULL FIELD_FORMAT='$.awards.by' +contribs CHAR(50) NOT NULL JPATH='$.contribs', +awards_award CHAR(42) DEFAULT NULL JPATH='$.awards.award', +awards_year CHAR(4) DEFAULT NULL JPATH='$.awards.year', +awards_by CHAR(38) DEFAULT NULL JPATH='$.awards.by' ) ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='bios.zip' OPTION_LIST='ENTRY=bios.json,LOAD=bios.json' ZIPPED=YES; SELECT * FROM t1; # Test discovery CREATE TABLE t2 ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='bios.zip' ZIPPED=1 -OPTION_LIST='LEVEL=5'; +OPTION_LIST='DEPTH=5'; SELECT * FROM t2; CREATE TABLE t3 ( _id INT(2) NOT NULL, -firstname CHAR(9) NOT NULL FIELD_FORMAT='$.name.first', -aka CHAR(4) DEFAULT NULL FIELD_FORMAT='$.name.aka', -lastname CHAR(10) NOT NULL FIELD_FORMAT='$.name.last', +firstname CHAR(9) NOT NULL JPATH='$.name.first', +aka CHAR(4) DEFAULT NULL JPATH='$.name.aka', +lastname CHAR(10) NOT NULL JPATH='$.name.last', title CHAR(12) DEFAULT NULL, birth date DEFAULT NULL date_format="YYYY-DD-MM'T'hh:mm:ss'Z'", death date DEFAULT NULL date_format="YYYY-DD-MM'T'hh:mm:ss'Z'", -contribs CHAR(64) NOT NULL FIELD_FORMAT='$.contribs.[", "]', -award CHAR(42) DEFAULT NULL FIELD_FORMAT='$.awards[*].award', -year CHAR(4) DEFAULT NULL FIELD_FORMAT='$.awards[*].year', -`by` CHAR(38) 
DEFAULT NULL FIELD_FORMAT='$.awards[*].by' +contribs CHAR(64) NOT NULL JPATH='$.contribs.[", "]', +award CHAR(42) DEFAULT NULL JPATH='$.awards[*].award', +year CHAR(4) DEFAULT NULL JPATH='$.awards[*].year', +`by` CHAR(38) DEFAULT NULL JPATH='$.awards[*].by' ) ENGINE=CONNECT TABLE_TYPE='json' FILE_NAME='bios.zip' ZIPPED=YES; SELECT * FROM t3 WHERE _id = 1; diff --git a/storage/connect/tabjson.cpp b/storage/connect/tabjson.cpp index a79f2a37f32..3de7ea0a35d 100644 --- a/storage/connect/tabjson.cpp +++ b/storage/connect/tabjson.cpp @@ -1595,7 +1595,7 @@ PVAL JSONCOL::MakeJson(PGLOBAL g, PJSON jsp) /***********************************************************************/ /* SetValue: Set a value from a JVALUE contains. */ /***********************************************************************/ -void JSONCOL::SetJsonValue(PGLOBAL g, PVAL vp, PJVAL val, int n) +void JSONCOL::SetJsonValue(PGLOBAL g, PVAL vp, PJVAL val) { if (val) { vp->SetNull(false); @@ -1640,8 +1640,9 @@ void JSONCOL::SetJsonValue(PGLOBAL g, PVAL vp, PJVAL val, int n) break; case TYPE_JAR: - SetJsonValue(g, vp, val->GetArray()->GetValue(0), n); - break; +// SetJsonValue(g, vp, val->GetArray()->GetValue(0)); + vp->SetValue_psz(val->GetArray()->GetText(g, NULL)); + break; case TYPE_JOB: // if (!vp->IsTypeNum() || !Strict) { vp->SetValue_psz(val->GetObject()->GetText(g, NULL)); @@ -1736,7 +1737,7 @@ PVAL JSONCOL::GetColumnValue(PGLOBAL g, PJSON row, int i) } // endfor i - SetJsonValue(g, Value, val, n); + SetJsonValue(g, Value, val); return Value; } // end of GetColumnValue @@ -1776,7 +1777,7 @@ PVAL JSONCOL::ExpandArray(PGLOBAL g, PJAR arp, int n) Tjp->NextSame = Xnod; } // endif NextSame - SetJsonValue(g, Value, jvp, n); + SetJsonValue(g, Value, jvp); return Value; } // end of ExpandArray @@ -1822,10 +1823,10 @@ PVAL JSONCOL::CalculateArray(PGLOBAL g, PJAR arp, int n) jvp->GetString(g), jvp->IsNull() ? 1 : 0); if (!nv++) { - SetJsonValue(g, vp, jvp, n); + SetJsonValue(g, vp, jvp); continue; } else - SetJsonValue(g, MulVal, jvp, n); + SetJsonValue(g, MulVal, jvp); if (!MulVal->IsNull()) { switch (op) { diff --git a/storage/connect/tabjson.h b/storage/connect/tabjson.h index b9313e4d809..ef944021d7f 100644 --- a/storage/connect/tabjson.h +++ b/storage/connect/tabjson.h @@ -228,8 +228,8 @@ public: PVAL ExpandArray(PGLOBAL g, PJAR arp, int n); PVAL CalculateArray(PGLOBAL g, PJAR arp, int n); PVAL MakeJson(PGLOBAL g, PJSON jsp); - void SetJsonValue(PGLOBAL g, PVAL vp, PJVAL val, int n); - PJSON GetRow(PGLOBAL g); + void SetJsonValue(PGLOBAL g, PVAL vp, PJVAL val); + PJSON GetRow(PGLOBAL g); // Default constructor not to be used JSONCOL(void) {} diff --git a/storage/connect/xobject.h b/storage/connect/xobject.h index bc5912d3054..5b50e9320f5 100644 --- a/storage/connect/xobject.h +++ b/storage/connect/xobject.h @@ -130,6 +130,7 @@ class DllExport STRING : public BLOCK { inline void SetLength(uint n) {Length = n;} inline PSZ GetStr(void) {return Strp;} inline uint32 GetSize(void) {return Size;} + inline char GetLastChar(void) {return Length ? Strp[Length - 1] : 0;} inline bool IsTruncated(void) {return Trc;} // Methods From 3ad6c0ef8a10aa3a2ba89126f54aac811a717285 Mon Sep 17 00:00:00 2001 From: Olivier Bertrand Date: Sat, 7 Nov 2020 16:33:01 +0100 Subject: [PATCH 011/150] Fix compile error. 
Modified ha_connect.cc --- storage/connect/ha_connect.cc | 1 + 1 file changed, 1 insertion(+) diff --git a/storage/connect/ha_connect.cc b/storage/connect/ha_connect.cc index d06ee5a6415..06029a3f670 100644 --- a/storage/connect/ha_connect.cc +++ b/storage/connect/ha_connect.cc @@ -251,6 +251,7 @@ bool ExactInfo(void); USETEMP UseTemp(void); int GetConvSize(void); TYPCONV GetTypeConv(void); +int GetDefaultDepth(void); bool JsonAllPath(void); char *GetJsonNull(void); uint GetJsonGrpSize(void); From e4294729d4bc4a2feafff673b3847905f8188b8e Mon Sep 17 00:00:00 2001 From: Olivier Bertrand Date: Sat, 7 Nov 2020 19:50:29 +0100 Subject: [PATCH 012/150] Fix compile error (overloaded-virtual) --- storage/connect/json.h | 100 ++++++++++-------- .../mysql-test/connect/r/json_java_3.result | 30 +++--- storage/connect/tabjson.h | 2 +- 3 files changed, 72 insertions(+), 60 deletions(-) diff --git a/storage/connect/json.h b/storage/connect/json.h index d04577f1734..9ebfbefdf6f 100644 --- a/storage/connect/json.h +++ b/storage/connect/json.h @@ -140,36 +140,36 @@ public: // Methods virtual int size(void) { return 1; } - virtual JTYP GetValType(void) { X return TYPE_JSON; } - virtual void InitArray(PGLOBAL g) { X } +// virtual JTYP GetValType(void) { X return TYPE_JSON; } +// virtual void InitArray(PGLOBAL g) { X } //virtual PJVAL AddValue(PGLOBAL g, PJVAL jvp = NULL, int *x = NULL) {X return NULL;} - virtual PJPR AddPair(PGLOBAL g, PCSZ key) { X return NULL; } +// virtual PJPR AddPair(PGLOBAL g, PCSZ key) { X return NULL; } virtual void Clear(void) { X } - virtual PJAR GetKeyList(PGLOBAL g) { X return NULL; } - virtual PJAR GetValList(PGLOBAL g) { X return NULL; } - virtual PJVAL GetValue(const char* key) { X return NULL; } +// virtual PJAR GetKeyList(PGLOBAL g) { X return NULL; } +// virtual PJAR GetValList(PGLOBAL g) { X return NULL; } +// virtual PJVAL GetValue(const char* key) { X return NULL; } virtual PJOB GetObject(void) { return NULL; } virtual PJAR GetArray(void) { return NULL; } virtual PJVAL GetValue(int i) { X return NULL; } virtual int GetSize(bool b) { X return 0; } //virtual PVL GetVal(void) { X return NULL; } virtual PJSON GetJsp(void) { X return NULL; } - virtual PJSON GetJson(void) { X return NULL; } +// virtual PJSON GetJson(void) { X return NULL; } virtual PJPR GetFirst(void) { X return NULL; } - virtual int GetInteger(void) { X return 0; } - virtual double GetFloat() { X return 0.0; } - virtual PSZ GetString(PGLOBAL g, char *buff = NULL) { X return NULL; } +// virtual int GetInteger(void) { X return 0; } +// virtual double GetFloat() { X return 0.0; } +// virtual PSZ GetString(PGLOBAL g, char *buff = NULL) { X return NULL; } virtual PSZ GetText(PGLOBAL g, PSTRG text) { X return NULL; } virtual bool Merge(PGLOBAL g, PJSON jsp) { X return true; } - virtual bool SetValue(PGLOBAL g, PJVAL jvp, int i) { X return true; } - virtual void SetValue(PGLOBAL g, PJVAL jvp, PCSZ key) { X } - virtual void SetVal(PVL vlp) { X } - virtual void SetValue(PGLOBAL g, PVAL valp) { X } +// virtual bool SetValue(PGLOBAL g, PJVAL jvp, int i) { X return true; } +// virtual void SetValue(PGLOBAL g, PJVAL jvp, PCSZ key) { X } +// virtual void SetVal(PVL vlp) { X } +// virtual void SetValue(PGLOBAL g, PVAL valp) { X } virtual void SetValue(PJSON jsp) { X } - virtual void SetString(PGLOBAL g, PSZ s, short c) { X } - virtual void SetInteger(PGLOBAL g, int n) { X } - virtual void SetFloat(PGLOBAL g, double f) { X } - virtual void DeleteKey(PCSZ k) { X } +// virtual void SetString(PGLOBAL g, PSZ s, short c) { X } +// 
virtual void SetInteger(PGLOBAL g, int n) { X } +// virtual void SetFloat(PGLOBAL g, double f) { X } +// virtual void DeleteKey(PCSZ k) { X } virtual bool DeleteValue(int i) { X return true; } virtual bool IsNull(void) { X return true; } @@ -190,21 +190,25 @@ public: //using JSON::GetVal; //using JSON::SetVal; + + // Methods virtual void Clear(void) {First = Last = NULL;} //virtual JTYP GetValType(void) {return TYPE_JOB;} virtual PJPR GetFirst(void) {return First;} virtual int GetSize(bool b); - virtual PJPR AddPair(PGLOBAL g, PCSZ key); virtual PJOB GetObject(void) {return this;} - virtual PJVAL GetValue(const char* key); - virtual PJAR GetKeyList(PGLOBAL g); - virtual PJAR GetValList(PGLOBAL g); virtual PSZ GetText(PGLOBAL g, PSTRG text); virtual bool Merge(PGLOBAL g, PJSON jsp); - virtual void SetValue(PGLOBAL g, PJVAL jvp, PCSZ key); - virtual void DeleteKey(PCSZ k); virtual bool IsNull(void); + // Specific + PJPR AddPair(PGLOBAL g, PCSZ key); + PJVAL GetValue(const char* key); + PJAR GetKeyList(PGLOBAL g); + PJAR GetValList(PGLOBAL g); + void SetValue(PGLOBAL g, PJVAL jvp, PCSZ key); + void DeleteKey(PCSZ k); + protected: PJPR First; PJPR Last; @@ -222,20 +226,24 @@ class JARRAY : public JSON { //using JSON::GetVal; //using JSON::SetVal; + + // Methods virtual void Clear(void) {First = Last = NULL; Size = 0;} virtual int size(void) { return Size; } - virtual JTYP GetType(void) {return TYPE_JAR;} +//virtual JTYP GetType(void) {return TYPE_JAR;} virtual PJAR GetArray(void) {return this;} virtual int GetSize(bool b); - PJVAL AddValue(PGLOBAL g, PJVAL jvp = NULL, int *x = NULL); - virtual void InitArray(PGLOBAL g); virtual PJVAL GetValue(int i); virtual PSZ GetText(PGLOBAL g, PSTRG text); virtual bool Merge(PGLOBAL g, PJSON jsp); - virtual bool SetValue(PGLOBAL g, PJVAL jvp, int i); virtual bool DeleteValue(int n); virtual bool IsNull(void); + // Specific + PJVAL AddValue(PGLOBAL g, PJVAL jvp = NULL, int* x = NULL); + bool SetValue(PGLOBAL g, PJVAL jvp, int i); + void InitArray(PGLOBAL g); + protected: // Members int Size; // The number of items in the array @@ -265,31 +273,35 @@ public: //using JSON::GetVal; //using JSON::SetVal; + + // Methods virtual void Clear(void) {Jsp = NULL; Val = NULL; Next = NULL; Del = false;} - virtual JTYP GetType(void) {return TYPE_JVAL;} +//virtual JTYP GetType(void) {return TYPE_JVAL;} virtual JTYP GetValType(void); virtual PJOB GetObject(void); virtual PJAR GetArray(void); - inline PVL GetVal(void) {return Val;} - PVAL GetValue(PGLOBAL g); virtual PJSON GetJsp(void) {return Jsp;} - virtual PJSON GetJson(void) { return (Jsp ? Jsp : this); } - virtual int GetInteger(void); - virtual long long GetBigint(void); - virtual double GetFloat(void); - virtual PSZ GetString(PGLOBAL g, char *buff = NULL); virtual PSZ GetText(PGLOBAL g, PSTRG text); - virtual void SetValue(PJSON jsp); - virtual void SetValue(PGLOBAL g, PVAL valp); - inline void SetVal(PVL vlp) { Val = vlp; } - virtual void SetString(PGLOBAL g, PSZ s, int ci = 0); - virtual void SetInteger(PGLOBAL g, int n); - virtual void SetBigint(PGLOBAL g, longlong ll); - virtual void SetFloat(PGLOBAL g, double f); - virtual void SetBool(PGLOBAL g, bool b); virtual bool IsNull(void); + // Specific + inline PVL GetVal(void) { return Val; } + inline void SetVal(PVL vlp) { Val = vlp; } + inline PJSON GetJson(void) { return (Jsp ? 
Jsp : this); } + PSZ GetString(PGLOBAL g, char* buff = NULL); + int GetInteger(void); + long long GetBigint(void); + double GetFloat(void); + PVAL GetValue(PGLOBAL g); + void SetValue(PJSON jsp); + void SetValue(PGLOBAL g, PVAL valp); + void SetString(PGLOBAL g, PSZ s, int ci = 0); + void SetInteger(PGLOBAL g, int n); + void SetBigint(PGLOBAL g, longlong ll); + void SetFloat(PGLOBAL g, double f); + void SetBool(PGLOBAL g, bool b); + protected: PJSON Jsp; // To the json value PVL Val; // To the string or numeric value diff --git a/storage/connect/mysql-test/connect/r/json_java_3.result b/storage/connect/mysql-test/connect/r/json_java_3.result index e7dd9468845..b9ba919507d 100644 --- a/storage/connect/mysql-test/connect/r/json_java_3.result +++ b/storage/connect/mysql-test/connect/r/json_java_3.result @@ -80,11 +80,11 @@ t1 CREATE TABLE `t1` ( ) ENGINE=CONNECT DEFAULT CHARSET=latin1 CONNECTION='mongodb://localhost:27017' `TABLE_TYPE`='JSON' `TABNAME`='restaurants' `OPTION_LIST`='Depth=1,Driver=Java,Version=3' `DATA_CHARSET`='utf8' `LRECL`=4096 SELECT * FROM t1 LIMIT 5; _id address_building address_coord address_street address_zipcode borough cuisine grades_date grades_grade grades_score name restaurant_id -58ada47de5a51ddfcd5ed51c 1007 -73.856077 Morris Park Ave 10462 Bronx Bakery 1393804800 A 2 Morris Park Bake Shop 30075445 -58ada47de5a51ddfcd5ed51d 469 -73.961704 Flatbush Avenue 11225 Brooklyn Hamburgers 1419897600 A 8 Wendy'S 30112340 -58ada47de5a51ddfcd5ed51e 351 -73.98513559999999 West 57 Street 10019 Manhattan Irish 1409961600 A 2 Dj Reynolds Pub And Restaurant 30191841 -58ada47de5a51ddfcd5ed51f 2780 -73.98241999999999 Stillwell Avenue 11224 Brooklyn American 1402358400 A 5 Riviera Caterer 40356018 -58ada47de5a51ddfcd5ed520 97-22 -73.8601152 63 Road 11374 Queens Jewish/Kosher 1416787200 Z 20 Tov Kosher Kitchen 40356068 +58ada47de5a51ddfcd5ed51c 1007 -73.856077, 40.848447 Morris Park Ave 10462 Bronx Bakery 1393804800 A 2 Morris Park Bake Shop 30075445 +58ada47de5a51ddfcd5ed51d 469 -73.961704, 40.662942 Flatbush Avenue 11225 Brooklyn Hamburgers 1419897600 A 8 Wendy'S 30112340 +58ada47de5a51ddfcd5ed51e 351 -73.98513559999999, 40.7676919 West 57 Street 10019 Manhattan Irish 1409961600 A 2 Dj Reynolds Pub And Restaurant 30191841 +58ada47de5a51ddfcd5ed51f 2780 -73.98241999999999, 40.579505 Stillwell Avenue 11224 Brooklyn American 1402358400 A 5 Riviera Caterer 40356018 +58ada47de5a51ddfcd5ed520 97-22 -73.8601152, 40.7311739 63 Road 11374 Queens Jewish/Kosher 1416787200 Z 20 Tov Kosher Kitchen 40356068 DROP TABLE t1; # # Dropping a column @@ -93,16 +93,16 @@ CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=JSON TABNAME=restaurants DATA_CHARSET= COLIST='{"grades":0}' OPTION_LIST='Driver=Java,Version=3,level=0' CONNECTION='mongodb://localhost:27017' LRECL=4096; SELECT * FROM t1 LIMIT 10; _id address borough cuisine name restaurant_id -58ada47de5a51ddfcd5ed51c 1007 -73.856077 40.848447 Morris Park Ave 10462 Bronx Bakery Morris Park Bake Shop 30075445 -58ada47de5a51ddfcd5ed51d 469 -73.961704 40.662942 Flatbush Avenue 11225 Brooklyn Hamburgers Wendy'S 30112340 -58ada47de5a51ddfcd5ed51e 351 -73.98513559999999 40.7676919 West 57 Street 10019 Manhattan Irish Dj Reynolds Pub And Restaurant 30191841 -58ada47de5a51ddfcd5ed51f 2780 -73.98241999999999 40.579505 Stillwell Avenue 11224 Brooklyn American Riviera Caterer 40356018 -58ada47de5a51ddfcd5ed520 97-22 -73.8601152 40.7311739 63 Road 11374 Queens Jewish/Kosher Tov Kosher Kitchen 40356068 -58ada47de5a51ddfcd5ed521 8825 -73.8803827 40.7643124 Astoria 
Boulevard 11369 Queens American Brunos On The Boulevard 40356151 -58ada47de5a51ddfcd5ed522 2206 -74.1377286 40.6119572 Victory Boulevard 10314 Staten Island Jewish/Kosher Kosher Island 40356442 -58ada47de5a51ddfcd5ed523 7114 -73.9068506 40.6199034 Avenue U 11234 Brooklyn Delicatessen Wilken'S Fine Food 40356483 -58ada47de5a51ddfcd5ed524 6409 -74.00528899999999 40.628886 11 Avenue 11219 Brooklyn American Regina Caterers 40356649 -58ada47de5a51ddfcd5ed525 1839 -73.9482609 40.6408271 Nostrand Avenue 11226 Brooklyn Ice Cream, Gelato, Yogurt, Ices Taste The Tropics Ice Cream 40356731 +58ada47de5a51ddfcd5ed51c 1007 (-73.856077, 40.848447) Morris Park Ave 10462 Bronx Bakery Morris Park Bake Shop 30075445 +58ada47de5a51ddfcd5ed51d 469 (-73.961704, 40.662942) Flatbush Avenue 11225 Brooklyn Hamburgers Wendy'S 30112340 +58ada47de5a51ddfcd5ed51e 351 (-73.98513559999999, 40.7676919) West 57 Street 10019 Manhattan Irish Dj Reynolds Pub And Restaurant 30191841 +58ada47de5a51ddfcd5ed51f 2780 (-73.98241999999999, 40.579505) Stillwell Avenue 11224 Brooklyn American Riviera Caterer 40356018 +58ada47de5a51ddfcd5ed520 97-22 (-73.8601152, 40.7311739) 63 Road 11374 Queens Jewish/Kosher Tov Kosher Kitchen 40356068 +58ada47de5a51ddfcd5ed521 8825 (-73.8803827, 40.7643124) Astoria Boulevard 11369 Queens American Brunos On The Boulevard 40356151 +58ada47de5a51ddfcd5ed522 2206 (-74.1377286, 40.6119572) Victory Boulevard 10314 Staten Island Jewish/Kosher Kosher Island 40356442 +58ada47de5a51ddfcd5ed523 7114 (-73.9068506, 40.6199034) Avenue U 11234 Brooklyn Delicatessen Wilken'S Fine Food 40356483 +58ada47de5a51ddfcd5ed524 6409 (-74.00528899999999, 40.628886) 11 Avenue 11219 Brooklyn American Regina Caterers 40356649 +58ada47de5a51ddfcd5ed525 1839 (-73.9482609, 40.6408271) Nostrand Avenue 11226 Brooklyn Ice Cream, Gelato, Yogurt, Ices Taste The Tropics Ice Cream 40356731 DROP TABLE t1; # # Specifying Jpath diff --git a/storage/connect/tabjson.h b/storage/connect/tabjson.h index ef944021d7f..f5d7b3153b2 100644 --- a/storage/connect/tabjson.h +++ b/storage/connect/tabjson.h @@ -173,7 +173,7 @@ protected: PGLOBAL G; // Support of parse memory PJSON Top; // The top JSON tree PJSON Row; // The current row - PJSON Val; // The value of the current row + PJVAL Val; // The value of the current row PJCOL Colp; // The multiple column JMODE Jmode; // MODE_OBJECT by default PCSZ Objname; // The table object name From fb86a496c04d688d4d4973d958e664f866b04881 Mon Sep 17 00:00:00 2001 From: Olivier Bertrand Date: Sat, 7 Nov 2020 22:36:50 +0100 Subject: [PATCH 013/150] Re-fix compile error (overloaded-virtual) --- storage/connect/json.cpp | 42 ++++++++++++------- storage/connect/json.h | 42 ++++--------------- storage/connect/jsonudf.cpp | 82 ++++++++++++++++++------------------- storage/connect/tabjson.cpp | 74 ++++++++++++++++----------------- 4 files changed, 112 insertions(+), 128 deletions(-) diff --git a/storage/connect/json.cpp b/storage/connect/json.cpp index 9af5761c9ee..6fdb7e9c15c 100644 --- a/storage/connect/json.cpp +++ b/storage/connect/json.cpp @@ -531,7 +531,7 @@ PJAR JDOC::ParseArray(PGLOBAL g, int& i) sprintf(g->Message, "Unexpected value near %.*s", ARGS); throw 1; } else - jarp->AddValue(g, ParseValue(g, i)); + jarp->AddArrayValue(g, ParseValue(g, i)); level = (b) ? 
1 : 2; break; @@ -886,7 +886,7 @@ bool JDOC::SerializeArray(PJAR jarp, bool b) } // endif b - if (SerializeValue(jarp->GetValue(i))) + if (SerializeValue(jarp->GetArrayValue(i))) return true; } // endfor i @@ -1010,7 +1010,7 @@ PJAR JOBJECT::GetKeyList(PGLOBAL g) PJAR jarp = new(g) JARRAY(); for (PJPR jpp = First; jpp; jpp = jpp->Next) - jarp->AddValue(g, new(g) JVALUE(g, jpp->Key)); + jarp->AddArrayValue(g, new(g) JVALUE(g, jpp->Key)); jarp->InitArray(g); return jarp; @@ -1024,7 +1024,7 @@ PJAR JOBJECT::GetValList(PGLOBAL g) PJAR jarp = new(g) JARRAY(); for (PJPR jpp = First; jpp; jpp = jpp->Next) - jarp->AddValue(g, jpp->Val); + jarp->AddArrayValue(g, jpp->Val); jarp->InitArray(g); return jarp; @@ -1033,7 +1033,7 @@ PJAR JOBJECT::GetValList(PGLOBAL g) /***********************************************************************/ /* Get the value corresponding to the given key. */ /***********************************************************************/ -PJVAL JOBJECT::GetValue(const char* key) +PJVAL JOBJECT::GetKeyValue(const char* key) { for (PJPR jp = First; jp; jp = jp->Next) if (!strcmp(jp->Key, key)) @@ -1111,7 +1111,7 @@ bool JOBJECT::Merge(PGLOBAL g, PJSON jsp) PJOB jobp = (PJOB)jsp; for (PJPR jpp = jobp->First; jpp; jpp = jpp->Next) - SetValue(g, jpp->Val, jpp->Key); + SetKeyValue(g, jpp->Val, jpp->Key); return false; } // end of Marge; @@ -1119,7 +1119,7 @@ bool JOBJECT::Merge(PGLOBAL g, PJSON jsp) /***********************************************************************/ /* Set or add a value corresponding to the given key. */ /***********************************************************************/ -void JOBJECT::SetValue(PGLOBAL g, PJVAL jvp, PCSZ key) +void JOBJECT::SetKeyValue(PGLOBAL g, PJVAL jvp, PCSZ key) { PJPR jp; @@ -1166,6 +1166,18 @@ bool JOBJECT::IsNull(void) /* -------------------------- Class JARRAY --------------------------- */ +/***********************************************************************/ +/* JARRAY constructor. */ +/***********************************************************************/ +JARRAY::JARRAY(void) : JSON() +{ + Type = TYPE_JAR; + Size = 0; + Alloc = 0; + First = Last = NULL; + Mvals = NULL; +} // end of JARRAY constructor + /***********************************************************************/ /* Return the number of values in this object. */ /***********************************************************************/ @@ -1216,7 +1228,7 @@ void JARRAY::InitArray(PGLOBAL g) /***********************************************************************/ /* Get the Nth value of an Array. */ /***********************************************************************/ -PJVAL JARRAY::GetValue(int i) +PJVAL JARRAY::GetArrayValue(int i) { if (Mvals && i >= 0 && i < Size) return Mvals[i]; @@ -1227,7 +1239,7 @@ PJVAL JARRAY::GetValue(int i) /***********************************************************************/ /* Add a Value to the Array Value list. 
*/ /***********************************************************************/ -PJVAL JARRAY::AddValue(PGLOBAL g, PJVAL jvp, int *x) +PJVAL JARRAY::AddArrayValue(PGLOBAL g, PJVAL jvp, int *x) { if (!jvp) jvp = new(g) JVALUE; @@ -1271,7 +1283,7 @@ bool JARRAY::Merge(PGLOBAL g, PJSON jsp) PJAR arp = (PJAR)jsp; for (int i = 0; i < arp->size(); i++) - AddValue(g, arp->GetValue(i)); + AddArrayValue(g, arp->GetArrayValue(i)); InitArray(g); return false; @@ -1280,7 +1292,7 @@ bool JARRAY::Merge(PGLOBAL g, PJSON jsp) /***********************************************************************/ /* Set the nth Value of the Array Value list. */ /***********************************************************************/ -bool JARRAY::SetValue(PGLOBAL g, PJVAL jvp, int n) +bool JARRAY::SetArrayValue(PGLOBAL g, PJVAL jvp, int n) { int i = 0; PJVAL jp, *jpp = &First; @@ -1340,7 +1352,7 @@ PSZ JARRAY::GetText(PGLOBAL g, PSTRG text) /***********************************************************************/ bool JARRAY::DeleteValue(int n) { - PJVAL jvp = GetValue(n); + PJVAL jvp = GetArrayValue(n); if (jvp) { jvp->Del = true; @@ -1365,7 +1377,7 @@ bool JARRAY::IsNull(void) /* -------------------------- Class JVALUE- -------------------------- */ /***********************************************************************/ -/* Constructor for a JSON. */ +/* Constructor for a JVALUE. */ /***********************************************************************/ JVALUE::JVALUE(PJSON jsp) : JSON() { @@ -1383,7 +1395,7 @@ JVALUE::JVALUE(PJSON jsp) : JSON() } // end of JVALUE constructor /***********************************************************************/ -/* Constructor for a Val with a given string or numeric value. */ +/* Constructor for a JVALUE with a given string or numeric value. */ /***********************************************************************/ JVALUE::JVALUE(PGLOBAL g, PVL vlp) : JSON() { @@ -1395,7 +1407,7 @@ JVALUE::JVALUE(PGLOBAL g, PVL vlp) : JSON() } // end of JVALUE constructor /***********************************************************************/ -/* Constructor for a Value with a given string or numeric value. */ +/* Constructor for a JVALUE with a given string or numeric value. 
*/ /***********************************************************************/ JVALUE::JVALUE(PGLOBAL g, PVAL valp) : JSON() { Jsp = NULL; diff --git a/storage/connect/json.h b/storage/connect/json.h index 9ebfbefdf6f..31175777985 100644 --- a/storage/connect/json.h +++ b/storage/connect/json.h @@ -140,36 +140,16 @@ public: // Methods virtual int size(void) { return 1; } -// virtual JTYP GetValType(void) { X return TYPE_JSON; } -// virtual void InitArray(PGLOBAL g) { X } - //virtual PJVAL AddValue(PGLOBAL g, PJVAL jvp = NULL, int *x = NULL) {X return NULL;} -// virtual PJPR AddPair(PGLOBAL g, PCSZ key) { X return NULL; } virtual void Clear(void) { X } -// virtual PJAR GetKeyList(PGLOBAL g) { X return NULL; } -// virtual PJAR GetValList(PGLOBAL g) { X return NULL; } -// virtual PJVAL GetValue(const char* key) { X return NULL; } virtual PJOB GetObject(void) { return NULL; } virtual PJAR GetArray(void) { return NULL; } - virtual PJVAL GetValue(int i) { X return NULL; } + virtual PJVAL GetArrayValue(int i) { X return NULL; } virtual int GetSize(bool b) { X return 0; } - //virtual PVL GetVal(void) { X return NULL; } virtual PJSON GetJsp(void) { X return NULL; } -// virtual PJSON GetJson(void) { X return NULL; } virtual PJPR GetFirst(void) { X return NULL; } -// virtual int GetInteger(void) { X return 0; } -// virtual double GetFloat() { X return 0.0; } -// virtual PSZ GetString(PGLOBAL g, char *buff = NULL) { X return NULL; } virtual PSZ GetText(PGLOBAL g, PSTRG text) { X return NULL; } virtual bool Merge(PGLOBAL g, PJSON jsp) { X return true; } -// virtual bool SetValue(PGLOBAL g, PJVAL jvp, int i) { X return true; } -// virtual void SetValue(PGLOBAL g, PJVAL jvp, PCSZ key) { X } -// virtual void SetVal(PVL vlp) { X } -// virtual void SetValue(PGLOBAL g, PVAL valp) { X } virtual void SetValue(PJSON jsp) { X } -// virtual void SetString(PGLOBAL g, PSZ s, short c) { X } -// virtual void SetInteger(PGLOBAL g, int n) { X } -// virtual void SetFloat(PGLOBAL g, double f) { X } -// virtual void DeleteKey(PCSZ k) { X } virtual bool DeleteValue(int i) { X return true; } virtual bool IsNull(void) { X return true; } @@ -188,9 +168,6 @@ public: JOBJECT(void) : JSON() { Type = TYPE_JOB; First = Last = NULL; } JOBJECT(int i) : JSON(i) {} - //using JSON::GetVal; - //using JSON::SetVal; - // Methods virtual void Clear(void) {First = Last = NULL;} //virtual JTYP GetValType(void) {return TYPE_JOB;} @@ -203,10 +180,10 @@ public: // Specific PJPR AddPair(PGLOBAL g, PCSZ key); - PJVAL GetValue(const char* key); + PJVAL GetKeyValue(const char* key); PJAR GetKeyList(PGLOBAL g); PJAR GetValList(PGLOBAL g); - void SetValue(PGLOBAL g, PJVAL jvp, PCSZ key); + void SetKeyValue(PGLOBAL g, PJVAL jvp, PCSZ key); void DeleteKey(PCSZ k); protected: @@ -220,28 +197,23 @@ public: class JARRAY : public JSON { friend class SWAP; public: - JARRAY(void) : JSON() - { Type = TYPE_JAR; Alloc = 0; First = Last = NULL; Mvals = NULL; } + JARRAY(void); JARRAY(int i) : JSON(i) {} - //using JSON::GetVal; - //using JSON::SetVal; - // Methods virtual void Clear(void) {First = Last = NULL; Size = 0;} virtual int size(void) { return Size; } -//virtual JTYP GetType(void) {return TYPE_JAR;} virtual PJAR GetArray(void) {return this;} virtual int GetSize(bool b); - virtual PJVAL GetValue(int i); + virtual PJVAL GetArrayValue(int i); virtual PSZ GetText(PGLOBAL g, PSTRG text); virtual bool Merge(PGLOBAL g, PJSON jsp); virtual bool DeleteValue(int n); virtual bool IsNull(void); // Specific - PJVAL AddValue(PGLOBAL g, PJVAL jvp = NULL, int* x = NULL); - 
bool SetValue(PGLOBAL g, PJVAL jvp, int i); + PJVAL AddArrayValue(PGLOBAL g, PJVAL jvp = NULL, int* x = NULL); + bool SetArrayValue(PGLOBAL g, PJVAL jvp, int i); void InitArray(PGLOBAL g); protected: diff --git a/storage/connect/jsonudf.cpp b/storage/connect/jsonudf.cpp index bdd75a7fc3b..407661004b3 100644 --- a/storage/connect/jsonudf.cpp +++ b/storage/connect/jsonudf.cpp @@ -457,7 +457,7 @@ PJVAL JSNX::GetRowValue(PGLOBAL g, PJSON row, int i, my_bool b) } //endif Op } else - val = ((PJOB)row)->GetValue(Nodes[i].Key); + val = ((PJOB)row)->GetKeyValue(Nodes[i].Key); break; case TYPE_JAR: @@ -465,7 +465,7 @@ PJVAL JSNX::GetRowValue(PGLOBAL g, PJSON row, int i, my_bool b) if (!Nodes[i].Key) { if (Nodes[i].Op == OP_EQ || Nodes[i].Op == OP_LE) - val = arp->GetValue(Nodes[i].Rank); + val = arp->GetArrayValue(Nodes[i].Rank); else if (Nodes[i].Op == OP_EXP) return (PJVAL)ExpandArray(g, arp, i); else @@ -473,7 +473,7 @@ PJVAL JSNX::GetRowValue(PGLOBAL g, PJSON row, int i, my_bool b) } else { // Unexpected array, unwrap it as [0] - val = arp->GetValue(0); + val = arp->GetArrayValue(0); i--; } // endif's @@ -524,7 +524,7 @@ PVAL JSNX::CalculateArray(PGLOBAL g, PJAR arp, int n) htrc("CalculateArray size=%d op=%d\n", ars, op); for (i = 0; i < ars; i++) { - jvrp = arp->GetValue(i); + jvrp = arp->GetArrayValue(i); if (trace(1)) htrc("i=%d nv=%d\n", i, nv); @@ -617,13 +617,13 @@ my_bool JSNX::CheckPath(PGLOBAL g) } else switch (row->GetType()) { case TYPE_JOB: if (Nodes[i].Key) - val = ((PJOB)row)->GetValue(Nodes[i].Key); + val = ((PJOB)row)->GetKeyValue(Nodes[i].Key); break; case TYPE_JAR: if (!Nodes[i].Key) if (Nodes[i].Op == OP_EQ || Nodes[i].Op == OP_LE) - val = ((PJAR)row)->GetValue(Nodes[i].Rank); + val = ((PJAR)row)->GetArrayValue(Nodes[i].Rank); break; case TYPE_JVAL: @@ -660,20 +660,20 @@ PJSON JSNX::GetRow(PGLOBAL g) // Expected Array was not there, wrap the value continue; - val = ((PJOB)row)->GetValue(Nodes[i].Key); + val = ((PJOB)row)->GetKeyValue(Nodes[i].Key); break; case TYPE_JAR: arp = (PJAR)row; if (!Nodes[i].Key) { if (Nodes[i].Op == OP_EQ) - val = arp->GetValue(Nodes[i].Rank); + val = arp->GetArrayValue(Nodes[i].Rank); else - val = arp->GetValue(Nodes[i].Rx); + val = arp->GetArrayValue(Nodes[i].Rx); } else { // Unexpected array, unwrap it as [0] - val = arp->GetValue(0); + val = arp->GetArrayValue(0); i--; } // endif Nodes @@ -700,9 +700,9 @@ PJSON JSNX::GetRow(PGLOBAL g) nwr = new(g)JOBJECT; if (row->GetType() == TYPE_JOB) { - ((PJOB)row)->SetValue(g, new(g)JVALUE(nwr), Nodes[i-1].Key); + ((PJOB)row)->SetKeyValue(g, new(g)JVALUE(nwr), Nodes[i-1].Key); } else if (row->GetType() == TYPE_JAR) { - ((PJAR)row)->AddValue(g, new(g)JVALUE(nwr)); + ((PJAR)row)->AddArrayValue(g, new(g)JVALUE(nwr)); ((PJAR)row)->InitArray(g); } else { strcpy(g->Message, "Wrong type when writing new row"); @@ -745,16 +745,16 @@ my_bool JSNX::WriteValue(PGLOBAL g, PJVAL jvalp) if (arp) { if (!Nodes[Nod-1].Key) { if (Nodes[Nod-1].Op == OP_EQ) - arp->SetValue(g, jvalp, Nodes[Nod-1].Rank); + arp->SetArrayValue(g, jvalp, Nodes[Nod-1].Rank); else - arp->AddValue(g, jvalp); + arp->AddArrayValue(g, jvalp); arp->InitArray(g); } // endif Key } else if (objp) { if (Nodes[Nod-1].Key) - objp->SetValue(g, jvalp, Nodes[Nod-1].Key); + objp->SetKeyValue(g, jvalp, Nodes[Nod-1].Key); } else if (jvp) jvp->SetValue(jvalp); @@ -835,7 +835,7 @@ my_bool JSNX::LocateArray(PGLOBAL g, PJAR jarp) if (Jp->WriteStr(s)) return true; - if (LocateValue(g, jarp->GetValue(i))) + if (LocateValue(g, jarp->GetArrayValue(i))) return true; } // 
endfor i @@ -958,7 +958,7 @@ my_bool JSNX::LocateArrayAll(PGLOBAL g, PJAR jarp) for (int i = 0; i < jarp->size(); i++) { Jpnp[I].N = i; - if (LocateValueAll(g, jarp->GetValue(i))) + if (LocateValueAll(g, jarp->GetArrayValue(i))) return true; } // endfor i @@ -1027,7 +1027,7 @@ my_bool JSNX::CompareTree(PGLOBAL g, PJSON jp1, PJSON jp2) } else if (jp1->GetType() == TYPE_JAR) { for (int i = 0; found && i < jp1->size(); i++) - found = (CompareTree(g, jp1->GetValue(i), jp2->GetValue(i))); + found = (CompareTree(g, jp1->GetArrayValue(i), jp2->GetArrayValue(i))); } else if (jp1->GetType() == TYPE_JOB) { PJPR p1 = jp1->GetFirst(), p2 = jp2->GetFirst(); @@ -2018,7 +2018,7 @@ char *json_make_array(UDF_INIT *initid, UDF_ARGS *args, char *result, PJAR arp = new(g)JARRAY; for (uint i = 0; i < args->arg_count; i++) - arp->AddValue(g, MakeValue(g, args, i)); + arp->AddArrayValue(g, MakeValue(g, args, i)); arp->InitArray(g); @@ -2088,13 +2088,13 @@ char *json_array_add_values(UDF_INIT *initid, UDF_ARGS *args, char *result, if (jvp->GetValType() != TYPE_JAR) { arp = new(g)JARRAY; - arp->AddValue(g, jvp); + arp->AddArrayValue(g, jvp); top = arp; } else arp = jvp->GetArray(); for (uint i = 1; i < args->arg_count; i++) - arp->AddValue(g, MakeValue(g, args, i)); + arp->AddArrayValue(g, MakeValue(g, args, i)); arp->InitArray(g); str = MakeResult(g, args, top, args->arg_count); @@ -2186,7 +2186,7 @@ char *json_array_add(UDF_INIT *initid, UDF_ARGS *args, char *result, if (jvp->GetValType() != TYPE_JAR) { if ((arp = (PJAR)JsonNew(gb, TYPE_JAR))) { - arp->AddValue(gb, JvalNew(gb, TYPE_JVAL, jvp)); + arp->AddArrayValue(gb, JvalNew(gb, TYPE_JVAL, jvp)); jvp->SetValue(arp); if (!top) @@ -2198,7 +2198,7 @@ char *json_array_add(UDF_INIT *initid, UDF_ARGS *args, char *result, arp = jvp->GetArray(); if (arp) { - arp->AddValue(gb, MakeValue(gb, args, 1), x); + arp->AddArrayValue(gb, MakeValue(gb, args, 1), x); arp->InitArray(gb); str = MakeResult(g, args, top, n); } else @@ -2367,7 +2367,7 @@ long long jsonsum_int(UDF_INIT *initid, UDF_ARGS *args, char *is_null, char *err PJAR arp = jvp->GetArray(); for (int i = 0; i < arp->size(); i++) - n += arp->GetValue(i)->GetBigint(); + n += arp->GetArrayValue(i)->GetBigint(); } else { PUSH_WARNING("First argument target is not an array"); @@ -2442,7 +2442,7 @@ double jsonsum_real(UDF_INIT *initid, UDF_ARGS *args, char *is_null, char *error PJAR arp = jvp->GetArray(); for (int i = 0; i < arp->size(); i++) - n += arp->GetValue(i)->GetFloat(); + n += arp->GetArrayValue(i)->GetFloat(); } else { PUSH_WARNING("First argument target is not an array"); @@ -2507,7 +2507,7 @@ double jsonavg_real(UDF_INIT *initid, UDF_ARGS *args, char *is_null, char *error if (arp->size()) { for (int i = 0; i < arp->size(); i++) - n += arp->GetValue(i)->GetFloat(); + n += arp->GetArrayValue(i)->GetFloat(); n /= arp->size(); } // endif size @@ -2566,7 +2566,7 @@ char *json_make_object(UDF_INIT *initid, UDF_ARGS *args, char *result, if ((objp = (PJOB)JsonNew(g, TYPE_JOB))) { for (uint i = 0; i < args->arg_count; i++) - objp->SetValue(g, MakeValue(g, args, i), MakeKey(g, args, i)); + objp->SetKeyValue(g, MakeValue(g, args, i), MakeKey(g, args, i)); str = Serialize(g, objp, NULL, 0); } // endif objp @@ -2616,7 +2616,7 @@ char *json_object_nonull(UDF_INIT *initid, UDF_ARGS *args, char *result, if ((objp = (PJOB)JsonNew(g, TYPE_JOB))) { for (uint i = 0; i < args->arg_count; i++) if (!(jvp = MakeValue(g, args, i))->IsNull()) - objp->SetValue(g, jvp, MakeKey(g, args, i)); + objp->SetKeyValue(g, jvp, MakeKey(g, args, 
i)); str = Serialize(g, objp, NULL, 0); } // endif objp @@ -2668,7 +2668,7 @@ char *json_object_key(UDF_INIT *initid, UDF_ARGS *args, char *result, if ((objp = (PJOB)JsonNew(g, TYPE_JOB))) { for (uint i = 0; i < args->arg_count; i += 2) - objp->SetValue(g, MakeValue(g, args, i + 1), MakePSZ(g, args, i)); + objp->SetKeyValue(g, MakeValue(g, args, i + 1), MakePSZ(g, args, i)); str = Serialize(g, objp, NULL, 0); } // endif objp @@ -2752,7 +2752,7 @@ char *json_object_add(UDF_INIT *initid, UDF_ARGS *args, char *result, jobp = jvp->GetObject(); jvp = MakeValue(gb, args, 1); key = MakeKey(gb, args, 1); - jobp->SetValue(gb, jvp, key); + jobp->SetKeyValue(gb, jvp, key); str = MakeResult(g, args, top); } else { PUSH_WARNING("First argument target is not an object"); @@ -3105,7 +3105,7 @@ void json_array_grp_add(UDF_INIT *initid, UDF_ARGS *args, char*, char*) PJAR arp = (PJAR)g->Activityp; if (arp && g->N-- > 0) - arp->AddValue(g, MakeValue(g, args, 0)); + arp->AddArrayValue(g, MakeValue(g, args, 0)); } // end of json_array_grp_add @@ -3182,7 +3182,7 @@ void json_object_grp_add(UDF_INIT *initid, UDF_ARGS *args, char*, char*) PJOB objp = (PJOB)g->Activityp; if (g->N-- > 0) - objp->SetValue(g, MakeValue(g, args, 1), MakePSZ(g, args, 0)); + objp->SetKeyValue(g, MakeValue(g, args, 1), MakePSZ(g, args, 0)); } // end of json_object_grp_add @@ -4664,7 +4664,7 @@ char *jbin_array(UDF_INIT *initid, UDF_ARGS *args, char *result, strcat(bsp->Msg, " array"); for (uint i = 0; i < args->arg_count; i++) - arp->AddValue(g, MakeValue(g, args, i)); + arp->AddArrayValue(g, MakeValue(g, args, i)); arp->InitArray(g); } // endif arp && bsp @@ -4725,7 +4725,7 @@ char *jbin_array_add_values(UDF_INIT *initid, UDF_ARGS *args, char *result, if (jvp->GetValType() != TYPE_JAR) { if ((arp = (PJAR)JsonNew(gb, TYPE_JAR))) { - arp->AddValue(gb, jvp); + arp->AddArrayValue(gb, jvp); top = arp; } // endif arp @@ -4733,7 +4733,7 @@ char *jbin_array_add_values(UDF_INIT *initid, UDF_ARGS *args, char *result, arp = jvp->GetArray(); for (uint i = 1; i < args->arg_count; i++) - arp->AddValue(gb, MakeValue(gb, args, i)); + arp->AddArrayValue(gb, MakeValue(gb, args, i)); arp->InitArray(gb); @@ -4816,7 +4816,7 @@ char *jbin_array_add(UDF_INIT *initid, UDF_ARGS *args, char *result, if (jvp->GetValType() != TYPE_JAR) { if ((arp = (PJAR)JsonNew(gb, TYPE_JAR))) { - arp->AddValue(gb, (PJVAL)JvalNew(gb, TYPE_JVAL, jvp)); + arp->AddArrayValue(gb, (PJVAL)JvalNew(gb, TYPE_JVAL, jvp)); jvp->SetValue(arp); if (!top) @@ -4827,7 +4827,7 @@ char *jbin_array_add(UDF_INIT *initid, UDF_ARGS *args, char *result, } else arp = jvp->GetArray(); - arp->AddValue(gb, MakeValue(gb, args, 1), x); + arp->AddArrayValue(gb, MakeValue(gb, args, 1), x); arp->InitArray(gb); } else { PUSH_WARNING("First argument target is not an array"); @@ -4955,7 +4955,7 @@ char *jbin_object(UDF_INIT *initid, UDF_ARGS *args, char *result, if ((objp = (PJOB)JsonNew(g, TYPE_JOB))) { for (uint i = 0; i < args->arg_count; i++) - objp->SetValue(g, MakeValue(g, args, i), MakeKey(g, args, i)); + objp->SetKeyValue(g, MakeValue(g, args, i), MakeKey(g, args, i)); if ((bsp = JbinAlloc(g, args, initid->max_length, objp))) @@ -5012,7 +5012,7 @@ char *jbin_object_nonull(UDF_INIT *initid, UDF_ARGS *args, char *result, if ((objp = (PJOB)JsonNew(g, TYPE_JOB))) { for (uint i = 0; i < args->arg_count; i++) if (!(jvp = MakeValue(g, args, i))->IsNull()) - objp->SetValue(g, jvp, MakeKey(g, args, i)); + objp->SetKeyValue(g, jvp, MakeKey(g, args, i)); if ((bsp = JbinAlloc(g, args, initid->max_length, objp))) 
strcat(bsp->Msg, " object"); @@ -5071,7 +5071,7 @@ char *jbin_object_key(UDF_INIT *initid, UDF_ARGS *args, char *result, if ((objp = (PJOB)JsonNew(g, TYPE_JOB))) { for (uint i = 0; i < args->arg_count; i += 2) - objp->SetValue(g, MakeValue(g, args, i + 1), MakePSZ(g, args, i)); + objp->SetKeyValue(g, MakeValue(g, args, i + 1), MakePSZ(g, args, i)); if ((bsp = JbinAlloc(g, args, initid->max_length, objp))) strcat(bsp->Msg, " object"); @@ -5149,7 +5149,7 @@ char *jbin_object_add(UDF_INIT *initid, UDF_ARGS *args, char *result, jobp = jvp->GetObject(); jvp = MakeValue(gb, args, 1); key = MakeKey(gb, args, 1); - jobp->SetValue(gb, jvp, key); + jobp->SetKeyValue(gb, jvp, key); } else { PUSH_WARNING("First argument target is not an object"); // if (g->Mrr) *error = 1; (only if no path) diff --git a/storage/connect/tabjson.cpp b/storage/connect/tabjson.cpp index 3de7ea0a35d..9ebece5d4e7 100644 --- a/storage/connect/tabjson.cpp +++ b/storage/connect/tabjson.cpp @@ -240,7 +240,7 @@ int JSONDISC::GetColumns(PGLOBAL g, PCSZ db, PCSZ dsn, PTOS topt) if (tjsp->MakeDocument(g)) return 0; - jsp = (tjsp->GetDoc()) ? tjsp->GetDoc()->GetValue(0) : NULL; + jsp = (tjsp->GetDoc()) ? tjsp->GetDoc()->GetArrayValue(0) : NULL; } else { if (!((tdp->Lrecl = GetIntegerTableOption(g, topt, "Lrecl", 0)))) { if (!mgo) { @@ -365,7 +365,7 @@ int JSONDISC::GetColumns(PGLOBAL g, PCSZ db, PCSZ dsn, PTOS topt) } // endswitch ReadDB } else - jsp = tjsp->GetDoc()->GetValue(i); + jsp = tjsp->GetDoc()->GetArrayValue(i); if (!(row = (jsp) ? jsp->GetObject() : NULL)) break; @@ -483,7 +483,7 @@ bool JSONDISC::Find(PGLOBAL g, PJVAL jvp, PCSZ key, int j) strncat(fmt, (tdp->Uri ? sep : "[*]"), n); } - if (Find(g, jar->GetValue(k), "", j)) + if (Find(g, jar->GetArrayValue(k), "", j)) return true; *p = *pc = 0; @@ -922,7 +922,7 @@ PJSON TDBJSN::FindRow(PGLOBAL g) if (*objpath != '[' && !IsNum(objpath)) { // objpass is a key val = (jsp->GetType() == TYPE_JOB) ? - jsp->GetObject()->GetValue(objpath) : NULL; + jsp->GetObject()->GetKeyValue(objpath) : NULL; } else { if (*objpath == '[') { if (objpath[strlen(objpath) - 1] == ']') @@ -932,7 +932,7 @@ PJSON TDBJSN::FindRow(PGLOBAL g) } // endif [ val = (jsp->GetType() == TYPE_JAR) ? - jsp->GetArray()->GetValue(atoi(objpath) - B) : NULL; + jsp->GetArray()->GetArrayValue(atoi(objpath) - B) : NULL; } // endif objpath jsp = (val) ? 
val->GetJson() : NULL; @@ -1128,7 +1128,7 @@ int TDBJSN::MakeTopTree(PGLOBAL g, PJSON jsp) val->SetValue(objp); val = new(g) JVALUE; - objp->SetValue(g, val, objpath); + objp->SetKeyValue(g, val, objpath); } else { if (*objpath == '[') { // Old style @@ -1150,7 +1150,7 @@ int TDBJSN::MakeTopTree(PGLOBAL g, PJSON jsp) val = new(g) JVALUE; i = atoi(objpath) - B; - arp->SetValue(g, val, i); + arp->SetArrayValue(g, val, i); arp->InitArray(g); } // endif objpath @@ -1703,7 +1703,7 @@ PVAL JSONCOL::GetColumnValue(PGLOBAL g, PJSON row, int i) val = new(G) JVALUE(row); } else - val = ((PJOB)row)->GetValue(Nodes[i].Key); + val = ((PJOB)row)->GetKeyValue(Nodes[i].Key); break; case TYPE_JAR: @@ -1711,7 +1711,7 @@ PVAL JSONCOL::GetColumnValue(PGLOBAL g, PJSON row, int i) if (!Nodes[i].Key) { if (Nodes[i].Op == OP_EQ) - val = arp->GetValue(Nodes[i].Rank); + val = arp->GetArrayValue(Nodes[i].Rank); else if (Nodes[i].Op == OP_EXP) return ExpandArray(g, arp, i); else @@ -1719,7 +1719,7 @@ PVAL JSONCOL::GetColumnValue(PGLOBAL g, PJSON row, int i) } else { // Unexpected array, unwrap it as [0] - val = arp->GetValue(0); + val = arp->GetArrayValue(0); i--; } // endif's @@ -1757,7 +1757,7 @@ PVAL JSONCOL::ExpandArray(PGLOBAL g, PJAR arp, int n) return Value; } // endif ars - if (!(jvp = arp->GetValue((Nodes[n].Rx = Nodes[n].Nx)))) { + if (!(jvp = arp->GetArrayValue((Nodes[n].Rx = Nodes[n].Nx)))) { strcpy(g->Message, "Logical error expanding array"); throw 666; } // endif jvp @@ -1801,7 +1801,7 @@ PVAL JSONCOL::CalculateArray(PGLOBAL g, PJAR arp, int n) ars, op, nextsame); for (i = 0; i < ars; i++) { - jvrp = arp->GetValue(i); + jvrp = arp->GetArrayValue(i); if (trace(1)) htrc("i=%d nv=%d\n", i, nv); @@ -1901,20 +1901,20 @@ PJSON JSONCOL::GetRow(PGLOBAL g) // Expected Array was not there, wrap the value continue; - val = ((PJOB)row)->GetValue(Nodes[i].Key); + val = ((PJOB)row)->GetKeyValue(Nodes[i].Key); break; case TYPE_JAR: arp = (PJAR)row; if (!Nodes[i].Key) { if (Nodes[i].Op == OP_EQ) - val = arp->GetValue(Nodes[i].Rank); + val = arp->GetArrayValue(Nodes[i].Rank); else - val = arp->GetValue(Nodes[i].Rx); + val = arp->GetArrayValue(Nodes[i].Rx); } else { // Unexpected array, unwrap it as [0] - val = arp->GetValue(0); + val = arp->GetArrayValue(0); i--; } // endif Nodes @@ -1941,9 +1941,9 @@ PJSON JSONCOL::GetRow(PGLOBAL g) nwr = new(G) JOBJECT; if (row->GetType() == TYPE_JOB) { - ((PJOB)row)->SetValue(G, new(G) JVALUE(nwr), Nodes[i-1].Key); + ((PJOB)row)->SetKeyValue(G, new(G) JVALUE(nwr), Nodes[i-1].Key); } else if (row->GetType() == TYPE_JAR) { - ((PJAR)row)->AddValue(G, new(G) JVALUE(nwr)); + ((PJAR)row)->AddArrayValue(G, new(G) JVALUE(nwr)); ((PJAR)row)->InitArray(G); } else { strcpy(g->Message, "Wrong type when writing new row"); @@ -2008,14 +2008,14 @@ void JSONCOL::WriteColumn(PGLOBAL g) if (arp) { if (Nod > 1 && Nodes[Nod-2].Op == OP_EQ) - arp->SetValue(G, new(G) JVALUE(jsp), Nodes[Nod-2].Rank); + arp->SetArrayValue(G, new(G) JVALUE(jsp), Nodes[Nod-2].Rank); else - arp->AddValue(G, new(G) JVALUE(jsp)); + arp->AddArrayValue(G, new(G) JVALUE(jsp)); arp->InitArray(G); } else if (objp) { if (Nod > 1 && Nodes[Nod-2].Key) - objp->SetValue(G, new(G) JVALUE(jsp), Nodes[Nod-2].Key); + objp->SetKeyValue(G, new(G) JVALUE(jsp), Nodes[Nod-2].Key); } else if (jvp) jvp->SetValue(jsp); @@ -2032,14 +2032,14 @@ void JSONCOL::WriteColumn(PGLOBAL g) case TYPE_DOUBLE: if (arp) { if (Nodes[Nod-1].Op == OP_EQ) - arp->SetValue(G, new(G) JVALUE(G, Value), Nodes[Nod-1].Rank); + arp->SetArrayValue(G, new(G) JVALUE(G, Value), 
Nodes[Nod-1].Rank); else - arp->AddValue(G, new(G) JVALUE(G, Value)); + arp->AddArrayValue(G, new(G) JVALUE(G, Value)); arp->InitArray(G); } else if (objp) { if (Nodes[Nod-1].Key) - objp->SetValue(G, new(G) JVALUE(G, Value), Nodes[Nod-1].Key); + objp->SetKeyValue(G, new(G) JVALUE(G, Value), Nodes[Nod-1].Key); } else if (jvp) jvp->SetValue(g, Value); @@ -2189,7 +2189,7 @@ int TDBJSON::MakeDocument(PGLOBAL g) key = p; objp = jsp->GetObject(); arp = NULL; - val = objp->GetValue(key); + val = objp->GetKeyValue(key); if (!val || !(jsp = val->GetJson())) { sprintf(g->Message, "Cannot find object key %s", key); @@ -2215,7 +2215,7 @@ int TDBJSON::MakeDocument(PGLOBAL g) arp = jsp->GetArray(); objp = NULL; i = atoi(p) - B; - val = arp->GetValue(i); + val = arp->GetArrayValue(i); if (!val) { sprintf(g->Message, "Cannot find array value %d", i); @@ -2236,17 +2236,17 @@ int TDBJSON::MakeDocument(PGLOBAL g) Doc = new(g) JARRAY; if (val) { - Doc->AddValue(g, val); + Doc->AddArrayValue(g, val); Doc->InitArray(g); } else if (jsp) { - Doc->AddValue(g, new(g) JVALUE(jsp)); + Doc->AddArrayValue(g, new(g) JVALUE(jsp)); Doc->InitArray(g); } // endif val if (objp) - objp->SetValue(g, new(g) JVALUE(Doc), key); + objp->SetKeyValue(g, new(g) JVALUE(Doc), key); else if (arp) - arp->SetValue(g, new(g) JVALUE(Doc), i); + arp->SetArrayValue(g, new(g) JVALUE(Doc), i); else Top = Doc; @@ -2409,7 +2409,7 @@ int TDBJSON::ReadDB(PGLOBAL) M++; rc = RC_OK; } else if (++Fpos < (signed)Doc->size()) { - Row = Doc->GetValue(Fpos); + Row = Doc->GetArrayValue(Fpos); if (Row->GetType() == TYPE_JVAL) Row = ((PJVAL)Row)->GetJson(); @@ -2432,25 +2432,25 @@ int TDBJSON::WriteDB(PGLOBAL g) PJVAL vp = new(g) JVALUE(Row); if (Mode == MODE_INSERT) { - Doc->AddValue(g, vp); + Doc->AddArrayValue(g, vp); Row = new(g) JOBJECT; - } else if (Doc->SetValue(g, vp, Fpos)) + } else if (Doc->SetArrayValue(g, vp, Fpos)) return RC_FX; } else if (Jmode == MODE_ARRAY) { PJVAL vp = new(g) JVALUE(Row); if (Mode == MODE_INSERT) { - Doc->AddValue(g, vp); + Doc->AddArrayValue(g, vp); Row = new(g) JARRAY; - } else if (Doc->SetValue(g, vp, Fpos)) + } else if (Doc->SetArrayValue(g, vp, Fpos)) return RC_FX; } else { // if (Jmode == MODE_VALUE) if (Mode == MODE_INSERT) { - Doc->AddValue(g, (PJVAL)Row); + Doc->AddArrayValue(g, (PJVAL)Row); Row = new(g) JVALUE; - } else if (Doc->SetValue(g, (PJVAL)Row, Fpos)) + } else if (Doc->SetArrayValue(g, (PJVAL)Row, Fpos)) return RC_FX; } // endif Jmode From 8985933881e94374a10481aeb10714fd77470d48 Mon Sep 17 00:00:00 2001 From: Olivier Bertrand Date: Sat, 7 Nov 2020 23:22:28 +0100 Subject: [PATCH 014/150] Re-fix compile error (sign-unsign) Modified filamtxt.cpp --- storage/connect/filamtxt.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/storage/connect/filamtxt.cpp b/storage/connect/filamtxt.cpp index 28a6894325b..758a4b1d8cf 100644 --- a/storage/connect/filamtxt.cpp +++ b/storage/connect/filamtxt.cpp @@ -1833,7 +1833,7 @@ int BINFAM::ReadBuffer(PGLOBAL g) { } else return RC_EF; - } else if (Recsize > Buflen) { + } else if (Recsize > (unsigned)Buflen) { sprintf(g->Message, "Record too big (Recsize=%zd Buflen=%d)\n", Recsize, Buflen); return RC_FX; } // endif Recsize From 90405763cf71a18ae1c7556fe51e13feb5333e46 Mon Sep 17 00:00:00 2001 From: Olivier Bertrand Date: Sat, 7 Nov 2020 23:58:57 +0100 Subject: [PATCH 015/150] Re-fix compile error (sign-unsign) Modified filamtxt.cpp --- storage/connect/json.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/storage/connect/json.cpp 
b/storage/connect/json.cpp index 6fdb7e9c15c..5a7570567c8 100644 --- a/storage/connect/json.cpp +++ b/storage/connect/json.cpp @@ -1873,7 +1873,7 @@ PJSON SWAP::MptrJson(PJSON ojp) { // ojp is an offset PJAR SWAP::MptrArray(PJAR ojar) { PJAR jarp = (PJAR)MakePtr(Base, (size_t)ojar); - jarp = (PJAR)new((long long)jarp) JARRAY(NULL); + jarp = (PJAR)new((long long)jarp) JARRAY(0); if (jarp->First) { for (int i = 0; i < jarp->Size; i++) From 73850edd044130402abf796beaa8c4bc94b6d81e Mon Sep 17 00:00:00 2001 From: Olivier Bertrand Date: Sun, 8 Nov 2020 12:14:33 +0100 Subject: [PATCH 016/150] Re-fix compile error (conversion-null) Modified json.cpp --- storage/connect/json.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/storage/connect/json.cpp b/storage/connect/json.cpp index 5a7570567c8..5e04f65f748 100644 --- a/storage/connect/json.cpp +++ b/storage/connect/json.cpp @@ -1892,7 +1892,7 @@ PJAR SWAP::MptrArray(PJAR ojar) { PJOB SWAP::MptrObject(PJOB ojob) { PJOB jobp = (PJOB)MakePtr(Base, (size_t)ojob); - jobp = (PJOB)new((long long)jobp) JOBJECT(NULL); + jobp = (PJOB)new((long long)jobp) JOBJECT(0); if (jobp->First) { jobp->First = (PJPR)MptrPair(jobp->First); From 878065b4dfe5d6d08f99fce17c6ecf6f6707a109 Mon Sep 17 00:00:00 2001 From: Olivier Bertrand Date: Sun, 8 Nov 2020 14:25:35 +0100 Subject: [PATCH 017/150] Fix compile error (sign-compare) Modified jsonudf.cpp jsonudf.h --- storage/connect/jsonudf.cpp | 3 ++- storage/connect/jsonudf.h | 4 ++-- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/storage/connect/jsonudf.cpp b/storage/connect/jsonudf.cpp index 407661004b3..3908e2a37a8 100644 --- a/storage/connect/jsonudf.cpp +++ b/storage/connect/jsonudf.cpp @@ -6013,7 +6013,8 @@ JUP::JUP(PGLOBAL g) { fs = NULL; s = buff = NULL; len = 0; - i = k = recl = 0; + k = recl = 0; + i = 0; } // end of JUP constructor /*********************************************************************************/ diff --git a/storage/connect/jsonudf.h b/storage/connect/jsonudf.h index e4785bc5c38..9b3d478b018 100644 --- a/storage/connect/jsonudf.h +++ b/storage/connect/jsonudf.h @@ -364,7 +364,7 @@ public: char *s; char *buff; size_t len; - int recl; - int i, k; + uint i; + int k, recl; }; // end of class JUP From 8c617e99015c8aec1eb597698ebbdc0fcbd068fb Mon Sep 17 00:00:00 2001 From: Olivier Bertrand Date: Wed, 11 Nov 2020 12:55:07 +0100 Subject: [PATCH 018/150] Add getting REST query answer via curl. modified storage/connect/tabrest.cpp --- storage/connect/tabrest.cpp | 85 +++++++++++++++++++++++++++++-------- 1 file changed, 68 insertions(+), 17 deletions(-) diff --git a/storage/connect/tabrest.cpp b/storage/connect/tabrest.cpp index b1bdeffc880..a41ca5dec87 100644 --- a/storage/connect/tabrest.cpp +++ b/storage/connect/tabrest.cpp @@ -42,7 +42,13 @@ #include "tabfmt.h" #include "tabrest.h" +#if defined(__WIN__) || defined(_WINDOWS) +#define popen _popen +#define pclose _pclose +#endif + static XGETREST getRestFnc = NULL; +static int Xcurl(PGLOBAL g, PCSZ Http, PCSZ Uri, PCSZ filename); #if !defined(MARIADB) /***********************************************************************/ @@ -71,6 +77,40 @@ PTABDEF __stdcall GetREST(PGLOBAL g, void *memp) } // end of GetREST #endif // !MARIADB +/***********************************************************************/ +/* Xcurl: retrieve the REST answer by executing curl. 
*/ +/***********************************************************************/ +int Xcurl(PGLOBAL g, PCSZ Http, PCSZ Uri, PCSZ filename) +{ + char buf[1024]; + int rc; + FILE *pipe; + + if (Uri) { + if (*Uri == '/' || Http[strlen(Http) - 1] == '/') + sprintf(buf, "curl %s%s > %s", Http, Uri, filename); + else + sprintf(buf, "curl %s/%s > %s", Http, Uri, filename); + + } else + sprintf(buf, "curl %s > %s", Http, filename); + + if ((pipe = popen(buf, "rt"))) { + if (trace(515)) + while (fgets(buf, sizeof(buf), pipe)) { + htrc("%s", buf); + } // endwhile + + pclose(pipe); + rc = 0; + } else { + sprintf(g->Message, "curl failed, errno =%d", errno); + rc = 1; + } // endif pipe + + return rc; +} // end od Xcurl + /***********************************************************************/ /* GetREST: get the external TABDEF from OEM module. */ /***********************************************************************/ @@ -148,13 +188,15 @@ PQRYRES RESTColumns(PGLOBAL g, PTOS tp, char *tab, char *db, bool info) PQRYRES __stdcall ColREST(PGLOBAL g, PTOS tp, char *tab, char *db, bool info) #endif // !MARIADB { - PQRYRES qrp= NULL; - char filename[_MAX_PATH + 1]; // MAX PATH ??? - PCSZ http, uri, fn, ftype; + PQRYRES qrp= NULL; + char filename[_MAX_PATH + 1]; // MAX PATH ??? + int rc; + bool curl = false; + PCSZ http, uri, fn, ftype; XGETREST grf = GetRestFunction(g); if (!grf) - return NULL; + curl = true; http = GetStringTableOption(g, tp, "Http", NULL); uri = GetStringTableOption(g, tp, "Uri", NULL); @@ -182,13 +224,17 @@ PQRYRES __stdcall ColREST(PGLOBAL g, PTOS tp, char *tab, char *db, bool info) // We used the file name relative to recorded datapath PlugSetPath(filename, fn, db); - //strcat(strcat(strcat(strcpy(filename, "."), slash), db), slash); - //strncat(filename, fn, _MAX_PATH - strlen(filename)); + curl = GetBooleanTableOption(g, tp, "Curl", curl); // Retrieve the file from the web and copy it locally - if (http && grf(g->Message, trace(515), http, uri, filename)) { - // sprintf(g->Message, "Failed to get file at %s", http); - } else if (!stricmp(ftype, "JSON")) + if (curl) + rc = Xcurl(g, http, uri, filename); + else + rc = grf(g->Message, trace(515), http, uri, filename); + + if (rc) + return NULL; + else if (!stricmp(ftype, "JSON")) qrp = JSONColumns(g, db, NULL, tp, info); else if (!stricmp(ftype, "CSV")) qrp = CSVColumns(g, NULL, tp, info); @@ -209,14 +255,14 @@ PQRYRES __stdcall ColREST(PGLOBAL g, PTOS tp, char *tab, char *db, bool info) /***********************************************************************/ bool RESTDEF::DefineAM(PGLOBAL g, LPCSTR am, int poff) { - char filename[_MAX_PATH + 1]; - int rc = 0, n; - bool xt = trace(515); - LPCSTR ftype; + char filename[_MAX_PATH + 1]; + int rc = 0, n; + bool curl = false, xt = trace(515); + LPCSTR ftype; XGETREST grf = GetRestFunction(g); if (!grf) - return true; + curl = true; #if defined(MARIADB) ftype = GetStringCatInfo(g, "Type", "JSON"); @@ -235,8 +281,8 @@ bool RESTDEF::DefineAM(PGLOBAL g, LPCSTR am, int poff) : (!stricmp(ftype, "CSV")) ? 
3 : 0; if (n == 0) { - htrc("DefineAM: Unsupported REST table type %s", am); - sprintf(g->Message, "Unsupported REST table type %s", am); + htrc("DefineAM: Unsupported REST table type %s\n", ftype); + sprintf(g->Message, "Unsupported REST table type %s", ftype); return true; } // endif n @@ -247,8 +293,13 @@ bool RESTDEF::DefineAM(PGLOBAL g, LPCSTR am, int poff) // We used the file name relative to recorded datapath PlugSetPath(filename, Fn, GetPath()); + curl = GetBoolCatInfo("Curl", curl); + // Retrieve the file from the web and copy it locally - rc = grf(g->Message, xt, Http, Uri, filename); + if (curl) + rc = Xcurl(g, Http, Uri, filename); + else + rc = grf(g->Message, xt, Http, Uri, filename); if (xt) htrc("Return from restGetFile: rc=%d\n", rc); From 17ea1efe0a06e95a15f7355986f890f995530c56 Mon Sep 17 00:00:00 2001 From: Olivier Bertrand Date: Wed, 11 Nov 2020 17:41:11 +0100 Subject: [PATCH 019/150] Fix using a null pointer. modified storage/connect/tabrest.cpp --- storage/connect/tabrest.cpp | 19 +++++++++++++------ 1 file changed, 13 insertions(+), 6 deletions(-) diff --git a/storage/connect/tabrest.cpp b/storage/connect/tabrest.cpp index a41ca5dec87..a38e8bb5fec 100644 --- a/storage/connect/tabrest.cpp +++ b/storage/connect/tabrest.cpp @@ -229,8 +229,12 @@ PQRYRES __stdcall ColREST(PGLOBAL g, PTOS tp, char *tab, char *db, bool info) // Retrieve the file from the web and copy it locally if (curl) rc = Xcurl(g, http, uri, filename); - else + else if (grf) rc = grf(g->Message, trace(515), http, uri, filename); + else { + strcpy(g->Message, "Cannot access to curl nor casablanca"); + rc = 1; + } // endif !grf if (rc) return NULL; @@ -296,13 +300,16 @@ bool RESTDEF::DefineAM(PGLOBAL g, LPCSTR am, int poff) curl = GetBoolCatInfo("Curl", curl); // Retrieve the file from the web and copy it locally - if (curl) + if (curl) { rc = Xcurl(g, Http, Uri, filename); - else + xtrc(515, "Return from Xcurl: rc=%d\n", rc); + } else if (grf) { rc = grf(g->Message, xt, Http, Uri, filename); - - if (xt) - htrc("Return from restGetFile: rc=%d\n", rc); + xtrc(515, "Return from restGetFile: rc=%d\n", rc); + } else { + strcpy(g->Message, "Cannot access to curl nor casablanca"); + rc = 1; + } // endif !grf if (rc) return true; From 9193ceb2c4ed32757f567e3db865ab93f66b91a8 Mon Sep 17 00:00:00 2001 From: Olivier Bertrand Date: Fri, 13 Nov 2020 19:42:56 +0100 Subject: [PATCH 020/150] Fix getting proper table type in discovery. modified storage/connect/ha_connect.cc --- storage/connect/ha_connect.cc | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/storage/connect/ha_connect.cc b/storage/connect/ha_connect.cc index 06029a3f670..e9b9e5c24aa 100644 --- a/storage/connect/ha_connect.cc +++ b/storage/connect/ha_connect.cc @@ -170,7 +170,7 @@ #define JSONMAX 10 // JSON Default max grp size extern "C" { - char version[]= "Version 1.07.0002 November 05, 2020"; + char version[]= "Version 1.07.0002 November 13, 2020"; #if defined(__WIN__) char compver[]= "Version 1.07.0002 " __DATE__ " " __TIME__; char slash= '\\'; @@ -6093,6 +6093,10 @@ static int connect_assisted_discovery(handlerton *, THD* thd, goto err; } // endif !nblin + // Restore language type + if (ttp == TAB_REST) + ttp = GetTypeID(topt->type); + for (i= 0; !rc && i < qrp->Nblin; i++) { typ= len= prec= dec= flg= 0; tm= NOT_NULL_FLAG; From 8771390dfd3a98ae47b76b5f262af76aa8232cd7 Mon Sep 17 00:00:00 2001 From: Olivier Bertrand Date: Sat, 14 Nov 2020 18:28:16 +0100 Subject: [PATCH 021/150] Change cURL option from > to -o. 
modified storage/connect/tabrest.cpp --- storage/connect/tabrest.cpp | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/storage/connect/tabrest.cpp b/storage/connect/tabrest.cpp index a38e8bb5fec..ee3a289c2e3 100644 --- a/storage/connect/tabrest.cpp +++ b/storage/connect/tabrest.cpp @@ -88,12 +88,12 @@ int Xcurl(PGLOBAL g, PCSZ Http, PCSZ Uri, PCSZ filename) if (Uri) { if (*Uri == '/' || Http[strlen(Http) - 1] == '/') - sprintf(buf, "curl %s%s > %s", Http, Uri, filename); + sprintf(buf, "curl %s%s -o %s", Http, Uri, filename); else - sprintf(buf, "curl %s/%s > %s", Http, Uri, filename); + sprintf(buf, "curl %s/%s -o %s", Http, Uri, filename); } else - sprintf(buf, "curl %s > %s", Http, filename); + sprintf(buf, "curl %s -o %s", Http, filename); if ((pipe = popen(buf, "rt"))) { if (trace(515)) From da10bf2d5694759cf435190150cf82f597902057 Mon Sep 17 00:00:00 2001 From: Olivier Bertrand Date: Wed, 18 Nov 2020 16:10:23 +0100 Subject: [PATCH 022/150] remove large file From 000268d46fea93655c8e8652f73a97210bc539da Mon Sep 17 00:00:00 2001 From: Olivier Bertrand Date: Thu, 19 Nov 2020 19:05:04 +0100 Subject: [PATCH 023/150] Fix some json discovery problems. Modified tabjson.cpp tabjson.h --- storage/connect/tabjson.cpp | 32 ++++++++++++++++++++++++++------ storage/connect/tabjson.h | 2 +- 2 files changed, 27 insertions(+), 7 deletions(-) diff --git a/storage/connect/tabjson.cpp b/storage/connect/tabjson.cpp index b0866ba1b18..d5aa1be892d 100644 --- a/storage/connect/tabjson.cpp +++ b/storage/connect/tabjson.cpp @@ -159,7 +159,7 @@ JSONDISC::JSONDISC(PGLOBAL g, uint *lg) jsp = NULL; row = NULL; sep = NULL; - i = n = bf = ncol = lvl = sz = 0; + i = n = bf = ncol = lvl = sz = limit = 0; all = strfy = false; } // end of JSONDISC constructor @@ -172,7 +172,8 @@ int JSONDISC::GetColumns(PGLOBAL g, PCSZ db, PCSZ dsn, PTOS topt) lvl = GetIntegerTableOption(g, topt, "Depth", lvl); sep = GetStringTableOption(g, topt, "Separator", "."); sz = GetIntegerTableOption(g, topt, "Jsize", 1024); - strfy = GetBooleanTableOption(g, topt, "Stringify", false); + limit = GetIntegerTableOption(g, topt, "Limit", 10); + strfy = GetBooleanTableOption(g, topt, "Stringify", false); /*********************************************************************/ /* Open the input file. 
*/ @@ -458,7 +459,7 @@ bool JSONDISC::Find(PGLOBAL g, PJVAL jvp, PCSZ key, int j) jar = (PJAR)jsp; if (all || (tdp->Xcol && !stricmp(tdp->Xcol, key))) - ars = jar->GetSize(false); + ars = MY_MIN(jar->GetSize(false), limit); else ars = MY_MIN(jar->GetSize(false), 1); @@ -527,10 +528,29 @@ void JSONDISC::AddColumn(PGLOBAL g) if (jcp) { if (jcp->Type != jcol.Type) { - if (jcp->Type == TYPE_UNKNOWN) + if (jcp->Type == TYPE_UNKNOWN || jcol.Type == TYPE_VOID) jcp->Type = jcol.Type; - else if (jcol.Type != TYPE_UNKNOWN && jcol.Type != TYPE_VOID) - jcp->Type = TYPE_STRING; +// else if (jcol.Type != TYPE_UNKNOWN && jcol.Type != TYPE_VOID) +// jcp->Type = TYPE_STRING; + else if (jcp->Type != TYPE_STRING) + switch (jcol.Type) { + case TYPE_STRING: + case TYPE_DOUBLE: + jcp->Type = jcol.Type; + break; + case TYPE_BIGINT: + if (jcp->Type == TYPE_INT || jcp->Type == TYPE_TINY) + jcp->Type = jcol.Type; + + break; + case TYPE_INT: + if (jcp->Type == TYPE_TINY) + jcp->Type = jcol.Type; + + break; + default: + break; + } // endswith Type } // endif Type diff --git a/storage/connect/tabjson.h b/storage/connect/tabjson.h index de5115a4e09..9994c9106ca 100644 --- a/storage/connect/tabjson.h +++ b/storage/connect/tabjson.h @@ -68,7 +68,7 @@ public: PCSZ sep; char colname[65], fmt[129], buf[16]; uint *length; - int i, n, bf, ncol, lvl, sz; + int i, n, bf, ncol, lvl, sz, limit; bool all, strfy; }; // end of JSONDISC From eb21ac65c1b4e8be9087d7b7f0993218e1d7e810 Mon Sep 17 00:00:00 2001 From: Olivier Bertrand Date: Fri, 20 Nov 2020 11:43:39 +0100 Subject: [PATCH 024/150] ??? --- .gitignore | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitignore b/.gitignore index 8477515105b..b9490fd8312 100644 --- a/.gitignore +++ b/.gitignore @@ -1,3 +1,4 @@ +bld2/ *-t *.ctest *.reject From a526965c61de49dd5698fa6cefa9dc73ed51f8ae Mon Sep 17 00:00:00 2001 From: Olivier Bertrand Date: Fri, 20 Nov 2020 12:17:50 +0100 Subject: [PATCH 025/150] delete bld2 From 038381e110c4cb03d79839f5672299a22bbc1489 Mon Sep 17 00:00:00 2001 From: Olivier Bertrand Date: Fri, 20 Nov 2020 15:21:06 +0100 Subject: [PATCH 026/150] Fix compile error. Modified json.cpp --- storage/connect/json.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/storage/connect/json.cpp b/storage/connect/json.cpp index eba9d0d8575..bf7ff7170ff 100644 --- a/storage/connect/json.cpp +++ b/storage/connect/json.cpp @@ -1780,7 +1780,7 @@ void SWAP::SwapJson(PJSON jsp, bool move) /* Replace all pointers by offsets. 
*/ /***********************************************************************/ size_t SWAP::MoffJson(PJSON jsp) { - size_t res = NULL; + size_t res = 0; if (jsp) switch (jsp->Type) { From 477b5256ddca1431b94aad7bc78e339ee399e5bb Mon Sep 17 00:00:00 2001 From: Olivier Bertrand Date: Sat, 21 Nov 2020 21:52:48 +0100 Subject: [PATCH 027/150] Fix some test failure --- storage/connect/Header.h | 395 +++++++++ storage/connect/bson.cpp | 1601 +++++++++++++++++++++++++++++++++++ storage/connect/bson.h | 291 +++++++ storage/connect/json.h | 28 +- storage/connect/jsonudf.cpp | 8 +- storage/connect/jsonudf.h | 2 +- storage/connect/myutil.cpp | 3 +- storage/connect/tabfmt.cpp | 2 +- storage/connect/tabjson.cpp | 24 +- storage/connect/tabjson.h | 2 +- 10 files changed, 2323 insertions(+), 33 deletions(-) create mode 100644 storage/connect/Header.h create mode 100644 storage/connect/bson.cpp create mode 100644 storage/connect/bson.h diff --git a/storage/connect/Header.h b/storage/connect/Header.h new file mode 100644 index 00000000000..f9664befaa3 --- /dev/null +++ b/storage/connect/Header.h @@ -0,0 +1,395 @@ +#pragma once +/**************** json H Declares Source Code File (.H) ****************/ +/* Name: json.h Version 1.2 */ +/* */ +/* (C) Copyright to the author Olivier BERTRAND 2014 - 2020 */ +/* */ +/* This file contains the JSON classes declares. */ +/***********************************************************************/ +#include +#include "value.h" +#include "xobject.h" + +#if defined(_DEBUG) +#define X assert(false); +#else +#define X +#endif + +enum JTYP { + TYPE_NULL = TYPE_VOID, + TYPE_STRG = TYPE_STRING, + TYPE_DBL = TYPE_DOUBLE, + TYPE_BOOL = TYPE_TINY, + TYPE_BINT = TYPE_BIGINT, + TYPE_DTM = TYPE_DATE, + TYPE_INTG = TYPE_INT, + TYPE_VAL = 12, + TYPE_JSON, + TYPE_JAR, + TYPE_JOB, + TYPE_JVAL +}; + +class JDOC; +class JOUT; +class JSON; +class JVALUE; +class JOBJECT; +class JARRAY; + +typedef class JDOC* PJDOC; +typedef class JSON* PJSON; +typedef class JVALUE* PJVAL; +typedef class JOBJECT* PJOB; +typedef class JARRAY* PJAR; + +// BSON size should be equal on Linux and Windows +#define BMX 255 +typedef struct BSON* PBSON; +typedef struct JPAIR* PJPR; +//typedef struct VAL *PVL; + +/***********************************************************************/ +/* Structure JPAIR. The pairs of a json Object. */ +/***********************************************************************/ +struct JPAIR { + PCSZ Key; // This pair key name + PJVAL Val; // To the value of the pair + PJPR Next; // To the next pair +}; // end of struct JPAIR + +#if 0 +/***********************************************************************/ +/* Structure VAL (string, int, float, bool or null) */ +/***********************************************************************/ +struct VAL { + union { + char* Strp; // Ptr to a string + int N; // An integer value + long long LLn; // A big integer value + double F; // A float value + bool B; // True or false + }; + int Nd; // Decimal number + JTYP Type; // The value type +}; // end of struct VAL +#endif // 0 + +/***********************************************************************/ +/* Structure used to return binary json to Json UDF functions. 
*/ +/***********************************************************************/ +struct BSON { + char Msg[BMX + 1]; + char* Filename; + PGLOBAL G; + int Pretty; + ulong Reslen; + my_bool Changed; + PJSON Top; + PJSON Jsp; + PBSON Bsp; +}; // end of struct BSON + +PBSON JbinAlloc(PGLOBAL g, UDF_ARGS* args, ulong len, PJSON jsp); +//PVL AllocVal(PGLOBAL g, JTYP type); +char* NextChr(PSZ s, char sep); +char* GetJsonNull(void); +const char* GetFmt(int type, bool un); + +PJSON ParseJson(PGLOBAL g, char* s, size_t n, int* prty = NULL, bool* b = NULL); +PSZ Serialize(PGLOBAL g, PJSON jsp, char* fn, int pretty); +DllExport bool IsNum(PSZ s); + +/***********************************************************************/ +/* Class JDOC. The class for parsing and serializing json documents. */ +/***********************************************************************/ +class JDOC : public BLOCK { + friend PJSON ParseJson(PGLOBAL, char*, size_t, int*, bool*); + friend PSZ Serialize(PGLOBAL, PJSON, char*, int); +public: + JDOC(void) : js(NULL), s(NULL), len(0), pty(NULL) {} + + void SetJp(JOUT* jp) { js = jp; } + +protected: + PJAR ParseArray(PGLOBAL g, int& i); + PJOB ParseObject(PGLOBAL g, int& i); + PJVAL ParseValue(PGLOBAL g, int& i); + char* ParseString(PGLOBAL g, int& i); + void ParseNumeric(PGLOBAL g, int& i, PJVAL jvp); + PJAR ParseAsArray(PGLOBAL g, int& i, int pretty, int* ptyp); + bool SerializeArray(PJAR jarp, bool b); + bool SerializeObject(PJOB jobp); + bool SerializeValue(PJVAL jvp); + + // Members used when parsing and serializing +private: + JOUT* js; + char* s; + int len; + bool* pty; +}; // end of class JDOC + +/***********************************************************************/ +/* Class JSON. The base class for all other json classes. */ +/***********************************************************************/ +class JSON : public BLOCK { +public: + // Constructor + JSON(void) { Type = TYPE_JSON; } + JSON(int) {} + + // Implementation + inline JTYP GetType(void) { return Type; } + + // Methods + virtual int size(void) { return 1; } + virtual void Clear(void) { X } + virtual PJOB GetObject(void) { return NULL; } + virtual PJAR GetArray(void) { return NULL; } + virtual PJVAL GetArrayValue(int i) { X return NULL; } + virtual int GetSize(bool b) { X return 0; } + virtual PJSON GetJsp(void) { X return NULL; } + virtual PJPR GetFirst(void) { X return NULL; } + virtual PSZ GetText(PGLOBAL g, PSTRG text) { X return NULL; } + virtual bool Merge(PGLOBAL g, PJSON jsp) { X return true; } + virtual void SetValue(PJSON jsp) { X } + virtual bool DeleteValue(int i) { X return true; } + virtual bool IsNull(void) { X return true; } + + // Members + JTYP Type; +}; // end of class JSON + +/***********************************************************************/ +/* Class JOBJECT: contains a list of value pairs. 
*/ +/***********************************************************************/ +class JOBJECT : public JSON { + friend class JDOC; + friend class JSNX; + friend class SWAP; +public: + JOBJECT(void) : JSON() { Type = TYPE_JOB; First = Last = NULL; } + JOBJECT(int i) : JSON(i) {} + + // Methods + virtual void Clear(void) { First = Last = NULL; } + //virtual JTYP GetValType(void) {return TYPE_JOB;} + virtual PJPR GetFirst(void) { return First; } + virtual int GetSize(bool b); + virtual PJOB GetObject(void) { return this; } + virtual PSZ GetText(PGLOBAL g, PSTRG text); + virtual bool Merge(PGLOBAL g, PJSON jsp); + virtual bool IsNull(void); + + // Specific + PJPR AddPair(PGLOBAL g, PCSZ key); + PJVAL GetKeyValue(const char* key); + PJAR GetKeyList(PGLOBAL g); + PJAR GetValList(PGLOBAL g); + void SetKeyValue(PGLOBAL g, PJVAL jvp, PCSZ key); + void DeleteKey(PCSZ k); + +protected: + PJPR First; + PJPR Last; +}; // end of class JOBJECT + +/***********************************************************************/ +/* Class JARRAY. */ +/***********************************************************************/ +class JARRAY : public JSON { + friend class SWAP; +public: + JARRAY(void); + JARRAY(int i) : JSON(i) {} + + // Methods + virtual void Clear(void) { First = Last = NULL; Size = 0; } + virtual int size(void) { return Size; } + virtual PJAR GetArray(void) { return this; } + virtual int GetSize(bool b); + virtual PJVAL GetArrayValue(int i); + virtual PSZ GetText(PGLOBAL g, PSTRG text); + virtual bool Merge(PGLOBAL g, PJSON jsp); + virtual bool DeleteValue(int n); + virtual bool IsNull(void); + + // Specific + PJVAL AddArrayValue(PGLOBAL g, PJVAL jvp = NULL, int* x = NULL); + bool SetArrayValue(PGLOBAL g, PJVAL jvp, int i); + void InitArray(PGLOBAL g); + +protected: + // Members + int Size; // The number of items in the array + int Alloc; // The Mvals allocated size + PJVAL First; // Used when constructing + PJVAL Last; // Last constructed value + PJVAL* Mvals; // Allocated when finished +}; // end of class JARRAY + +/***********************************************************************/ +/* Class JVALUE. */ +/***********************************************************************/ +class JVALUE : public JSON { + friend class JARRAY; + friend class JSNX; + friend class JSONDISC; + friend class JSONCOL; + friend class JSON; + friend class JDOC; + friend class SWAP; +public: + JVALUE(void) : JSON() { Type = TYPE_JVAL; Clear(); } + JVALUE(PJSON jsp); + //JVALUE(PGLOBAL g, PVL vlp); + JVALUE(PGLOBAL g, PVAL valp); + JVALUE(PGLOBAL g, PCSZ strp); + JVALUE(int i) : JSON(i) {} + + //using JSON::GetVal; + //using JSON::SetVal; + + // Methods + virtual void Clear(void); + //virtual JTYP GetType(void) {return TYPE_JVAL;} + virtual JTYP GetValType(void); + virtual PJOB GetObject(void); + virtual PJAR GetArray(void); + virtual PJSON GetJsp(void) { return (DataType == TYPE_JSON ? Jsp : NULL); } + virtual PSZ GetText(PGLOBAL g, PSTRG text); + virtual bool IsNull(void); + + // Specific + //inline PVL GetVal(void) { return Val; } + //inline void SetVal(PVL vlp) { Val = vlp; } + inline PJSON GetJson(void) { return (DataType == TYPE_JSON ? 
Jsp : this); } + PSZ GetString(PGLOBAL g, char* buff = NULL); + int GetInteger(void); + long long GetBigint(void); + double GetFloat(void); + PVAL GetValue(PGLOBAL g); + void SetValue(PJSON jsp); + void SetValue(PGLOBAL g, PVAL valp); + void SetString(PGLOBAL g, PSZ s, int ci = 0); + void SetInteger(PGLOBAL g, int n); + void SetBigint(PGLOBAL g, longlong ll); + void SetFloat(PGLOBAL g, double f); + void SetBool(PGLOBAL g, bool b); + +protected: + union { + PJSON Jsp; // To the json value + char* Strp; // Ptr to a string + int N; // An integer value + long long LLn; // A big integer value + double F; // A (double) float value + bool B; // True or false + }; + //PVL Val; // To the string or numeric value + PJVAL Next; // Next value in array + JTYP DataType; // The data value type + int Nd; // Decimal number + bool Del; // True when deleted +}; // end of class JVALUE + + +/***********************************************************************/ +/* Class JOUT. Used by Serialize. */ +/***********************************************************************/ +class JOUT : public BLOCK { +public: + JOUT(PGLOBAL gp) : BLOCK() { g = gp; Pretty = 3; } + + virtual bool WriteStr(const char* s) = 0; + virtual bool WriteChr(const char c) = 0; + virtual bool Escape(const char* s) = 0; + int Prty(void) { return Pretty; } + + // Member + PGLOBAL g; + int Pretty; +}; // end of class JOUT + +/***********************************************************************/ +/* Class JOUTSTR. Used to Serialize to a string. */ +/***********************************************************************/ +class JOUTSTR : public JOUT { +public: + JOUTSTR(PGLOBAL g); + + virtual bool WriteStr(const char* s); + virtual bool WriteChr(const char c); + virtual bool Escape(const char* s); + + // Member + char* Strp; // The serialized string + size_t N; // Position of next char + size_t Max; // String max size +}; // end of class JOUTSTR + +/***********************************************************************/ +/* Class JOUTFILE. Used to Serialize to a file. */ +/***********************************************************************/ +class JOUTFILE : public JOUT { +public: + JOUTFILE(PGLOBAL g, FILE* str, int pty) : JOUT(g) { Stream = str; Pretty = pty; } + + virtual bool WriteStr(const char* s); + virtual bool WriteChr(const char c); + virtual bool Escape(const char* s); + + // Member + FILE* Stream; +}; // end of class JOUTFILE + +/***********************************************************************/ +/* Class JOUTPRT. Used to Serialize to a pretty file. */ +/***********************************************************************/ +class JOUTPRT : public JOUTFILE { +public: + JOUTPRT(PGLOBAL g, FILE* str) : JOUTFILE(g, str, 2) { M = 0; B = false; } + + virtual bool WriteStr(const char* s); + virtual bool WriteChr(const char c); + + // Member + int M; + bool B; +}; // end of class JOUTPRT + + +/***********************************************************************/ +/* Class SWAP. Used to make or unmake a JSON tree movable. */ +/* This is done by making all pointers to offsets. 
*/ +/***********************************************************************/ +class SWAP : public BLOCK { +public: + // Constructor + SWAP(PGLOBAL g, PJSON jsp) { + G = g, Base = (char*)jsp - 8; + } + + // Methods + void SwapJson(PJSON jsp, bool move); + +protected: + size_t MoffJson(PJSON jnp); + size_t MoffArray(PJAR jarp); + size_t MoffObject(PJOB jobp); + size_t MoffJValue(PJVAL jvp); + size_t MoffPair(PJPR jpp); + //size_t MoffVal(PVL vlp); + PJSON MptrJson(PJSON jnp); + PJAR MptrArray(PJAR jarp); + PJOB MptrObject(PJOB jobp); + PJVAL MptrJValue(PJVAL jvp); + PJPR MptrPair(PJPR jpp); + //PVL MptrVal(PVL vlp); + + // Member + PGLOBAL G; + void* Base; +}; // end of class SWAP diff --git a/storage/connect/bson.cpp b/storage/connect/bson.cpp new file mode 100644 index 00000000000..86825f5808e --- /dev/null +++ b/storage/connect/bson.cpp @@ -0,0 +1,1601 @@ +/*************** json CPP Declares Source Code File (.H) ***************/ +/* Name: json.cpp Version 1.5 */ +/* */ +/* (C) Copyright to the author Olivier BERTRAND 2014 - 2020 */ +/* */ +/* This file contains the JSON classes functions. */ +/***********************************************************************/ + +/***********************************************************************/ +/* Include relevant sections of the MariaDB header file. */ +/***********************************************************************/ +#include + +/***********************************************************************/ +/* Include application header files: */ +/* global.h is header containing all global declarations. */ +/* plgdbsem.h is header containing the DB application declarations. */ +/* xjson.h is header containing the JSON classes declarations. */ +/***********************************************************************/ +#include "global.h" +#include "plgdbsem.h" +#include "bson.h" + +#define ARGS MY_MIN(24,(int)len-i),s+MY_MAX(i-3,0) + +#if defined(__WIN__) +#define EL "\r\n" +#else +#define EL "\n" +#undef SE_CATCH // Does not work for Linux +#endif + +#if defined(SE_CATCH) +/**************************************************************************/ +/* This is the support of catching C interrupts to prevent crashes. */ +/**************************************************************************/ +#include + +class SE_Exception { +public: + SE_Exception(unsigned int n, PEXCEPTION_RECORD p) : nSE(n), eRec(p) {} + ~SE_Exception() {} + + unsigned int nSE; + PEXCEPTION_RECORD eRec; +}; // end of class SE_Exception + +void trans_func(unsigned int u, _EXCEPTION_POINTERS* pExp) { + throw SE_Exception(u, pExp->ExceptionRecord); +} // end of trans_func + +char* GetExceptionDesc(PGLOBAL g, unsigned int e); +#endif // SE_CATCH + +#if 0 +char* GetJsonNull(void); + +/***********************************************************************/ +/* IsNum: check whether this string is all digits. */ +/***********************************************************************/ +bool IsNum(PSZ s) { + for (char* p = s; *p; p++) + if (*p == ']') + break; + else if (!isdigit(*p) || *p == '-') + return false; + + return true; +} // end of IsNum + +/***********************************************************************/ +/* NextChr: return the first found '[' or Sep pointer. 
*/ +/***********************************************************************/ +char* NextChr(PSZ s, char sep) { + char* p1 = strchr(s, '['); + char* p2 = strchr(s, sep); + + if (!p2) + return p1; + else if (p1) + return MY_MIN(p1, p2); + + return p2; +} // end of NextChr +#endif // 0 + +/* --------------------------- Class BDOC ---------------------------- */ + +/***********************************************************************/ +/* BDOC constructor. */ +/***********************************************************************/ +BDOC::BDOC(void) : jp(NULL), base(NULL), s(NULL), len(0) +{ + pty[0] = pty[1] = pty[2] = true; +} // end of BDOC constructor + +/***********************************************************************/ +/* Program for sub-allocating Bson structures. */ +/***********************************************************************/ +void* BDOC::BsonSubAlloc(PGLOBAL g, size_t size) { + PPOOLHEADER pph; /* Points on area header. */ + void* memp = g->Sarea; + + size = ((size + 3) / 4) * 4; /* Round up size to multiple of 4 */ + pph = (PPOOLHEADER)memp; + + xtrc(16, "SubAlloc in %p size=%zd used=%zd free=%zd\n", + memp, size, pph->To_Free, pph->FreeBlk); + + if (size > pph->FreeBlk) { /* Not enough memory left in pool */ + sprintf(g->Message, + "Not enough memory for request of %zd (used=%zd free=%zd)", + size, pph->To_Free, pph->FreeBlk); + xtrc(1, "BsonSubAlloc: %s\n", g->Message); + throw(1234); + } /* endif size OS32 code */ + + // Do the suballocation the simplest way + memp = MakePtr(memp, pph->To_Free); /* Points to suballocated block */ + pph->To_Free += size; /* New offset of pool free block */ + pph->FreeBlk -= size; /* New size of pool free block */ + xtrc(16, "Done memp=%p used=%zd free=%zd\n", + memp, pph->To_Free, pph->FreeBlk); + return memp; +} /* end of BsonSubAlloc */ + + + +/***********************************************************************/ +/* Parse a json string. */ +/* Note: when pretty is not known, the caller set pretty to 3. */ +/***********************************************************************/ +PBVAL BDOC::ParseJson(PGLOBAL g, char* js, size_t lng, int* ptyp, bool* comma) { + int i, pretty = (ptyp) ? 
*ptyp : 3; + bool b = false; + PBVAL bvp = NULL; + + xtrc(1, "ParseJson: s=%.10s len=%zd\n", s, len); + + if (!s || !len) { + strcpy(g->Message, "Void JSON object"); + return NULL; + } else if (comma) + *comma = false; + + // Trying to guess the pretty format + if (s[0] == '[' && (s[1] == '\n' || (s[1] == '\r' && s[2] == '\n'))) + pty[0] = false; + + s = js; + len = lng; + + try { + bvp = (PBVAL)PlugSubAlloc(g, NULL, sizeof(BVAL)); + bvp->Type = TYPE_UNKNOWN; + base = bvp; + + for (i = 0; i < len; i++) + switch (s[i]) { + case '[': + if (bvp->Type != TYPE_UNKNOWN) + bvp->To_Val = ParseAsArray(g, i, pretty, ptyp); + else + bvp->To_Val = ParseArray(g, ++i); + + bvp->Type = TYPE_JAR; + break; + case '{': + if (bvp->Type != TYPE_UNKNOWN) { + bvp->To_Val = ParseAsArray(g, i, pretty, ptyp); + bvp->Type = TYPE_JAR; + } else if ((bvp->To_Val = ParseObject(g, ++i))) + bvp->Type = TYPE_JOB; + else + throw 2; + + break; + case ' ': + case '\t': + case '\n': + case '\r': + break; + case ',': + if (bvp->Type != TYPE_UNKNOWN && (pretty == 1 || pretty == 3)) { + if (comma) + *comma = true; + + pty[0] = pty[2] = false; + break; + } // endif pretty + + sprintf(g->Message, "Unexpected ',' (pretty=%d)", pretty); + throw 3; + case '(': + b = true; + break; + case ')': + if (b) { + b = false; + break; + } // endif b + + default: + if (bvp->Type != TYPE_UNKNOWN) { + bvp->To_Val = ParseAsArray(g, i, pretty, ptyp); + bvp->Type = TYPE_JAR; + } else if ((bvp->To_Val = MakeOff(base, ParseValue(g, i)))) + bvp->Type = TYPE_JVAL; + else + throw 4; + + break; + }; // endswitch s[i] + + if (bvp->Type == TYPE_UNKNOWN) + sprintf(g->Message, "Invalid Json string '%.*s'", MY_MIN((int)len, 50), s); + else if (ptyp && pretty == 3) { + *ptyp = 3; // Not recognized pretty + + for (i = 0; i < 3; i++) + if (pty[i]) { + *ptyp = i; + break; + } // endif pty + + } // endif ptyp + + } catch (int n) { + if (trace(1)) + htrc("Exception %d: %s\n", n, g->Message); + bvp = NULL; + } catch (const char* msg) { + strcpy(g->Message, msg); + bvp = NULL; + } // end catch + + return bvp; +} // end of ParseJson + +/***********************************************************************/ +/* Parse several items as being in an array. */ +/***********************************************************************/ +OFFSET BDOC::ParseAsArray(PGLOBAL g, int& i, int pretty, int* ptyp) { + if (pty[0] && (!pretty || pretty > 2)) { + OFFSET jsp; + + if ((jsp = ParseArray(g, (i = 0))) && ptyp && pretty == 3) + *ptyp = (pty[0]) ? 0 : 3; + + return jsp; + } else + strcpy(g->Message, "More than one item in file"); + + return 0; +} // end of ParseAsArray + +/***********************************************************************/ +/* Parse a JSON Array. 
*/ +/***********************************************************************/ +OFFSET BDOC::ParseArray(PGLOBAL g, int& i) { + int level = 0; + bool b = (!i); + PBVAL vlp, firstvlp, lastvlp; + + vlp = firstvlp = lastvlp = NULL; + + for (; i < len; i++) + switch (s[i]) { + case ',': + if (level < 2) { + sprintf(g->Message, "Unexpected ',' near %.*s", ARGS); + throw 1; + } else + level = 1; + + break; + case ']': + if (level == 1) { + sprintf(g->Message, "Unexpected ',]' near %.*s", ARGS); + throw 1; + } // endif level + + return MakeOff(base, vlp); + case '\n': + if (!b) + pty[0] = pty[1] = false; + case '\r': + case ' ': + case '\t': + break; + default: + if (level == 2) { + sprintf(g->Message, "Unexpected value near %.*s", ARGS); + throw 1; + } else if (lastvlp) { + vlp = ParseValue(g, i); + lastvlp->Next = MakeOff(base, vlp); + lastvlp = vlp; + } else + firstvlp = lastvlp = ParseValue(g, i); + + level = (b) ? 1 : 2; + break; + }; // endswitch s[i] + + if (b) { + // Case of Pretty == 0 + return MakeOff(base, vlp); + } // endif b + + throw ("Unexpected EOF in array"); +} // end of ParseArray + +/***********************************************************************/ +/* Sub-allocate and initialize a BPAIR. */ +/***********************************************************************/ +PBPR BDOC::SubAllocPair(PGLOBAL g, OFFSET key) +{ + PBPR bpp = (PBPR)BsonSubAlloc(g, sizeof(BPAIR)); + + bpp->Key = key; + bpp->Vlp = 0; + bpp->Next = 0; + return bpp; +} // end of SubAllocPair + +/***********************************************************************/ +/* Parse a JSON Object. */ +/***********************************************************************/ +OFFSET BDOC::ParseObject(PGLOBAL g, int& i) { + OFFSET key; + int level = 0; + PBPR bpp, firstbpp, lastbpp; + + bpp = firstbpp = lastbpp = NULL; + + for (; i < len; i++) + switch (s[i]) { + case '"': + if (level < 2) { + key = ParseString(g, ++i); + bpp = SubAllocPair(g, key); + + if (lastbpp) { + lastbpp->Next = MakeOff(base, bpp); + lastbpp = bpp; + } else + firstbpp = lastbpp = bpp; + + level = 1; + } else { + sprintf(g->Message, "misplaced string near %.*s", ARGS); + throw 2; + } // endif level + + break; + case ':': + if (level == 1) { + lastbpp->Vlp = MakeOff(base, ParseValue(g, ++i)); + level = 2; + } else { + sprintf(g->Message, "Unexpected ':' near %.*s", ARGS); + throw 2; + } // endif level + + break; + case ',': + if (level < 2) { + sprintf(g->Message, "Unexpected ',' near %.*s", ARGS); + throw 2; + } else + level = 0; + + break; + case '}': + if (level < 2) { + sprintf(g->Message, "Unexpected '}' near %.*s", ARGS); + throw 2; + } // endif level + + return MakeOff(base, firstbpp); + case '\n': + pty[0] = pty[1] = false; + case '\r': + case ' ': + case '\t': + break; + default: + sprintf(g->Message, "Unexpected character '%c' near %.*s", + s[i], ARGS); + throw 2; + }; // endswitch s[i] + + strcpy(g->Message, "Unexpected EOF in Object"); + throw 2; +} // end of ParseObject + +/***********************************************************************/ +/* Sub-allocate and initialize a BVAL. */ +/***********************************************************************/ +PBVAL BDOC::SubAllocVal(PGLOBAL g) +{ + PBVAL bvp = (PBVAL)BsonSubAlloc(g, sizeof(BVAL)); + + bvp->To_Val = 0; + bvp->Nd = 0; + bvp->Type = TYPE_UNKNOWN; + bvp->Next = 0; + return bvp; +} // end of SubAllocVal + +/***********************************************************************/ +/* Parse a JSON Value. 
*/ +/***********************************************************************/ +PBVAL BDOC::ParseValue(PGLOBAL g, int& i) { + PBVAL bvp = SubAllocVal(g); + + for (; i < len; i++) + switch (s[i]) { + case '\n': + pty[0] = pty[1] = false; + case '\r': + case ' ': + case '\t': + break; + default: + goto suite; + } // endswitch + +suite: + switch (s[i]) { + case '[': + bvp->To_Val = ParseArray(g, ++i); + bvp->Type = TYPE_JAR; + break; + case '{': + bvp->To_Val = ParseObject(g, ++i); + bvp->Type = TYPE_JOB; + break; + case '"': + // jvp->Val = AllocVal(g, TYPE_STRG); + bvp->To_Val = ParseString(g, ++i); + bvp->Type = TYPE_STRG; + break; + case 't': + if (!strncmp(s + i, "true", 4)) { + // jvp->Val = AllocVal(g, TYPE_BOOL); + bvp->B = true; + bvp->Type = TYPE_BOOL; + i += 3; + } else + goto err; + + break; + case 'f': + if (!strncmp(s + i, "false", 5)) { + // jvp->Val = AllocVal(g, TYPE_BOOL); + bvp->B = false; + bvp->Type = TYPE_BOOL; + i += 4; + } else + goto err; + + break; + case 'n': + if (!strncmp(s + i, "null", 4)) { + bvp->Type = TYPE_NULL; + i += 3; + } else + goto err; + + break; + case '-': + default: + if (s[i] == '-' || isdigit(s[i])) + ParseNumeric(g, i, bvp); + else + goto err; + + }; // endswitch s[i] + + return bvp; + +err: + sprintf(g->Message, "Unexpected character '%c' near %.*s", s[i], ARGS); + throw 3; +} // end of ParseValue + +/***********************************************************************/ +/* Unescape and parse a JSON string. */ +/***********************************************************************/ +OFFSET BDOC::ParseString(PGLOBAL g, int& i) { + uchar* p; + int n = 0; + + // Be sure of memory availability + if (((size_t)len + 1 - i) > ((PPOOLHEADER)g->Sarea)->FreeBlk) + throw("ParseString: Out of memory"); + + // The size to allocate is not known yet + p = (uchar*)PlugSubAlloc(g, NULL, 0); + + for (; i < len; i++) + switch (s[i]) { + case '"': + p[n++] = 0; + PlugSubAlloc(g, NULL, n); + return MakeOff(base, p); + case '\\': + if (++i < len) { + if (s[i] == 'u') { + if (len - i > 5) { + // if (charset == utf8) { + char xs[5]; + uint hex; + + xs[0] = s[++i]; + xs[1] = s[++i]; + xs[2] = s[++i]; + xs[3] = s[++i]; + xs[4] = 0; + hex = strtoul(xs, NULL, 16); + + if (hex < 0x80) { + p[n] = (uchar)hex; + } else if (hex < 0x800) { + p[n++] = (uchar)(0xC0 | (hex >> 6)); + p[n] = (uchar)(0x80 | (hex & 0x3F)); + } else if (hex < 0x10000) { + p[n++] = (uchar)(0xE0 | (hex >> 12)); + p[n++] = (uchar)(0x80 | ((hex >> 6) & 0x3f)); + p[n] = (uchar)(0x80 | (hex & 0x3f)); + } else + p[n] = '?'; + +#if 0 + } else { + char xs[3]; + UINT hex; + + i += 2; + xs[0] = s[++i]; + xs[1] = s[++i]; + xs[2] = 0; + hex = strtoul(xs, NULL, 16); + p[n] = (char)hex; + } // endif charset +#endif // 0 + } else + goto err; + + } else switch (s[i]) { + case 't': p[n] = '\t'; break; + case 'n': p[n] = '\n'; break; + case 'r': p[n] = '\r'; break; + case 'b': p[n] = '\b'; break; + case 'f': p[n] = '\f'; break; + default: p[n] = s[i]; break; + } // endswitch + + n++; + } else + goto err; + + break; + default: + p[n++] = s[i]; + break; +}; // endswitch s[i] + +err: +throw("Unexpected EOF in String"); +} // end of ParseString + +/***********************************************************************/ +/* Parse a JSON numeric value. 
*/ +/***********************************************************************/ +void BDOC::ParseNumeric(PGLOBAL g, int& i, PBVAL vlp) { + char buf[50]; + int n = 0; + short nd = 0; + bool has_dot = false; + bool has_e = false; + bool found_digit = false; + + for (; i < len; i++) { + switch (s[i]) { + case '.': + if (!found_digit || has_dot || has_e) + goto err; + + has_dot = true; + break; + case 'e': + case 'E': + if (!found_digit || has_e) + goto err; + + has_e = true; + found_digit = false; + break; + case '+': + if (!has_e) + goto err; + + // fall through + case '-': + if (found_digit) + goto err; + + break; + default: + if (isdigit(s[i])) { + if (has_dot && !has_e) + nd++; // Number of decimals + + found_digit = true; + } else + goto fin; + + }; // endswitch s[i] + + buf[n++] = s[i]; + } // endfor i + +fin: + if (found_digit) { + buf[n] = 0; + + if (has_dot || has_e) { + double dv = strtod(buf, NULL); + + if (nd > 6) { + double* dvp = (double*)PlugSubAlloc(g, NULL, sizeof(double)); + + *dvp = dv; + vlp->To_Val = MakeOff(base, dvp); + vlp->Type = TYPE_DBL; + } else { + vlp->F = (float)dv; + vlp->Type = TYPE_FLOAT; + } // endif nd + + vlp->Nd = nd; + } else { + long long iv = strtoll(buf, NULL, 10); + + if (iv > INT_MAX32 || iv < INT_MIN32) { + long long *llp = (long long*)PlugSubAlloc(g, NULL, sizeof(long long)); + + *llp = iv; + vlp->To_Val = MakeOff(base, llp); + vlp->Type = TYPE_BINT; + } else { + vlp->N = (int)iv; + vlp->Type = TYPE_INTG; + } // endif iv + + } // endif has + + i--; // Unstack following character + return; + } else + throw("No digit found"); + +err: + throw("Unexpected EOF in number"); +} // end of ParseNumeric + +/***********************************************************************/ +/* Serialize a JSON document tree: */ +/***********************************************************************/ +PSZ BDOC::Serialize(PGLOBAL g, PBVAL bvp, char* fn, int pretty) { + PSZ str = NULL; + bool b = false, err = true; + JOUT* jp; + FILE* fs = NULL; + + g->Message[0] = 0; + + try { + if (!bvp) { + strcpy(g->Message, "Null json tree"); + throw 1; + } else if (!fn) { + // Serialize to a string + jp = new(g) JOUTSTR(g); + b = pretty == 1; + } else { + if (!(fs = fopen(fn, "wb"))) { + sprintf(g->Message, MSG(OPEN_MODE_ERROR), + "w", (int)errno, fn); + strcat(strcat(g->Message, ": "), strerror(errno)); + throw 2; + } else if (pretty >= 2) { + // Serialize to a pretty file + jp = new(g)JOUTPRT(g, fs); + } else { + // Serialize to a flat file + b = true; + jp = new(g)JOUTFILE(g, fs, pretty); + } // endif's + + } // endif's + + switch (bvp->Type) { + case TYPE_JAR: + err = SerializeArray(bvp->To_Val, b); + break; + case TYPE_JOB: + err = ((b && jp->Prty()) && jp->WriteChr('\t')); + err |= SerializeObject(bvp->To_Val); + break; + case TYPE_JVAL: + err = SerializeValue((PBVAL)MakePtr(base, bvp->To_Val)); + break; + default: + strcpy(g->Message, "Invalid json tree"); + } // endswitch Type + + if (fs) { + fputs(EL, fs); + fclose(fs); + str = (err) ? 
NULL : strcpy(g->Message, "Ok"); + } else if (!err) { + str = ((JOUTSTR*)jp)->Strp; + jp->WriteChr('\0'); + PlugSubAlloc(g, NULL, ((JOUTSTR*)jp)->N); + } else { + if (!g->Message[0]) + strcpy(g->Message, "Error in Serialize"); + + } // endif's + + } catch (int n) { + if (trace(1)) + htrc("Exception %d: %s\n", n, g->Message); + str = NULL; + } catch (const char* msg) { + strcpy(g->Message, msg); + str = NULL; + } // end catch + + return str; +} // end of Serialize + + +/***********************************************************************/ +/* Serialize a JSON Array. */ +/***********************************************************************/ +bool BDOC::SerializeArray(OFFSET arp, bool b) { + bool first = true; + PBVAL vp = (PBVAL)MakePtr(base, arp); + + if (b) { + if (jp->Prty()) { + if (jp->WriteChr('[')) + return true; + else if (jp->Prty() == 1 && (jp->WriteStr(EL) || jp->WriteChr('\t'))) + return true; + + } // endif Prty + + } else if (jp->WriteChr('[')) + return true; + + for (vp; vp; vp = (PBVAL)MakePtr(base, vp->Next)) { + if (first) + first = false; + else if ((!b || jp->Prty()) && jp->WriteChr(',')) + return true; + else if (b) { + if (jp->Prty() < 2 && jp->WriteStr(EL)) + return true; + else if (jp->Prty() == 1 && jp->WriteChr('\t')) + return true; + + } // endif b + + if (SerializeValue(vp)) + return true; + + } // endfor i + + if (b && jp->Prty() == 1 && jp->WriteStr(EL)) + return true; + + return ((!b || jp->Prty()) && jp->WriteChr(']')); +} // end of SerializeArray + +/***********************************************************************/ +/* Serialize a JSON Object. */ +/***********************************************************************/ +bool BDOC::SerializeObject(OFFSET obp) { + bool first = true; + PBPR prp = (PBPR)MakePtr(base, obp); + + if (jp->WriteChr('{')) + return true; + + for (prp; prp; prp = (PBPR)MakePtr(base, prp->Next)) { + if (first) + first = false; + else if (jp->WriteChr(',')) + return true; + + if (jp->WriteChr('"') || + jp->WriteStr((const char*)MakePtr(base, prp->Key)) || + jp->WriteChr('"') || + jp->WriteChr(':') || + SerializeValue((PBVAL)MakePtr(base, prp->Vlp))) + return true; + + } // endfor i + + return jp->WriteChr('}'); +} // end of SerializeObject + +/***********************************************************************/ +/* Serialize a JSON Value. */ +/***********************************************************************/ +bool BDOC::SerializeValue(PBVAL jvp) { + char buf[64]; + + switch (jvp->Type) { + case TYPE_JAR: + return SerializeArray(jvp->To_Val, false); + case TYPE_JOB: + return SerializeObject(jvp->To_Val); + case TYPE_BOOL: + return jp->WriteStr(jvp->B ? 
"true" : "false"); + case TYPE_STRG: + case TYPE_DTM: + return jp->Escape((const char*)MakePtr(base, jvp->To_Val)); + case TYPE_INTG: + sprintf(buf, "%d", jvp->N); + return jp->WriteStr(buf); + case TYPE_BINT: + sprintf(buf, "%lld", *(long long*)MakePtr(base, jvp->To_Val)); + return jp->WriteStr(buf); + case TYPE_FLOAT: + sprintf(buf, "%.*f", jvp->Nd, jvp->F); + return jp->WriteStr(buf); + case TYPE_DBL: + sprintf(buf, "%.*lf", jvp->Nd, *(double*)MakePtr(base, jvp->To_Val)); + return jp->WriteStr(buf); + case TYPE_NULL: + return jp->WriteStr("null"); + default: + return jp->WriteStr("???"); // TODO + } // endswitch Type + + strcpy(jp->g->Message, "Unrecognized value"); + return true; +} // end of SerializeValue + +#if 0 +/* -------------------------- Class JOBJECT -------------------------- */ + +/***********************************************************************/ +/* Return the number of pairs in this object. */ +/***********************************************************************/ +int JOBJECT::GetSize(bool b) { + int n = 0; + + for (PJPR jpp = First; jpp; jpp = jpp->Next) + // If b return only non null pairs + if (!b || jpp->Val && !jpp->Val->IsNull()) + n++; + + return n; +} // end of GetSize + +/***********************************************************************/ +/* Add a new pair to an Object. */ +/***********************************************************************/ +PJPR JOBJECT::AddPair(PGLOBAL g, PCSZ key) { + PJPR jpp = (PJPR)PlugSubAlloc(g, NULL, sizeof(JPAIR)); + + jpp->Key = key; + jpp->Next = NULL; + jpp->Val = NULL; + + if (Last) + Last->Next = jpp; + else + First = jpp; + + Last = jpp; + return jpp; +} // end of AddPair + +/***********************************************************************/ +/* Return all keys as an array. */ +/***********************************************************************/ +PJAR JOBJECT::GetKeyList(PGLOBAL g) { + PJAR jarp = new(g) JARRAY(); + + for (PJPR jpp = First; jpp; jpp = jpp->Next) + jarp->AddArrayValue(g, new(g) JVALUE(g, jpp->Key)); + + jarp->InitArray(g); + return jarp; +} // end of GetKeyList + +/***********************************************************************/ +/* Return all values as an array. */ +/***********************************************************************/ +PJAR JOBJECT::GetValList(PGLOBAL g) { + PJAR jarp = new(g) JARRAY(); + + for (PJPR jpp = First; jpp; jpp = jpp->Next) + jarp->AddArrayValue(g, jpp->Val); + + jarp->InitArray(g); + return jarp; +} // end of GetValList + +/***********************************************************************/ +/* Get the value corresponding to the given key. */ +/***********************************************************************/ +PJVAL JOBJECT::GetKeyValue(const char* key) { + for (PJPR jp = First; jp; jp = jp->Next) + if (!strcmp(jp->Key, key)) + return jp->Val; + + return NULL; +} // end of GetValue; + +/***********************************************************************/ +/* Return the text corresponding to all keys (XML like). */ +/***********************************************************************/ +PSZ JOBJECT::GetText(PGLOBAL g, PSTRG text) { + if (First) { + bool b; + + if (!text) { + text = new(g) STRING(g, 256); + b = true; + } else { + if (text->GetLastChar() != ' ') + text->Append(' '); + + b = false; + } // endif text + + if (b && !First->Next && !strcmp(First->Key, "$date")) { + int i; + PSZ s; + + First->Val->GetText(g, text); + s = text->GetStr(); + i = (s[1] == '-' ? 
2 : 1); + + if (IsNum(s + i)) { + // Date is in milliseconds + int j = text->GetLength(); + + if (j >= 4 + i) { + s[j - 3] = 0; // Change it to seconds + text->SetLength((uint)strlen(s)); + } else + text->Set(" 0"); + + } // endif text + + } else for (PJPR jp = First; jp; jp = jp->Next) { + jp->Val->GetText(g, text); + + if (jp->Next) + text->Append(' '); + + } // endfor jp + + if (b) { + text->Trim(); + return text->GetStr(); + } // endif b + + } // endif First + + return NULL; +} // end of GetText; + +/***********************************************************************/ +/* Merge two objects. */ +/***********************************************************************/ +bool JOBJECT::Merge(PGLOBAL g, PJSON jsp) { + if (jsp->GetType() != TYPE_JOB) { + strcpy(g->Message, "Second argument is not an object"); + return true; + } // endif Type + + PJOB jobp = (PJOB)jsp; + + for (PJPR jpp = jobp->First; jpp; jpp = jpp->Next) + SetKeyValue(g, jpp->Val, jpp->Key); + + return false; +} // end of Marge; + +/***********************************************************************/ +/* Set or add a value corresponding to the given key. */ +/***********************************************************************/ +void JOBJECT::SetKeyValue(PGLOBAL g, PJVAL jvp, PCSZ key) { + PJPR jp; + + for (jp = First; jp; jp = jp->Next) + if (!strcmp(jp->Key, key)) { + jp->Val = jvp; + break; + } // endif key + + if (!jp) { + jp = AddPair(g, key); + jp->Val = jvp; + } // endif jp + +} // end of SetValue + +/***********************************************************************/ +/* Delete a value corresponding to the given key. */ +/***********************************************************************/ +void JOBJECT::DeleteKey(PCSZ key) { + PJPR jp, * pjp = &First; + + for (jp = First; jp; jp = jp->Next) + if (!strcmp(jp->Key, key)) { + *pjp = jp->Next; + break; + } else + pjp = &jp->Next; + +} // end of DeleteKey + +/***********************************************************************/ +/* True if void or if all members are nulls. */ +/***********************************************************************/ +bool JOBJECT::IsNull(void) { + for (PJPR jp = First; jp; jp = jp->Next) + if (!jp->Val->IsNull()) + return false; + + return true; +} // end of IsNull + +/* -------------------------- Class JARRAY --------------------------- */ + +/***********************************************************************/ +/* JARRAY constructor. */ +/***********************************************************************/ +JARRAY::JARRAY(void) : JSON() { + Type = TYPE_JAR; + Size = 0; + Alloc = 0; + First = Last = NULL; + Mvals = NULL; +} // end of JARRAY constructor + +/***********************************************************************/ +/* Return the number of values in this object. */ +/***********************************************************************/ +int JARRAY::GetSize(bool b) { + if (b) { + // Return only non null values + int n = 0; + + for (PJVAL jvp = First; jvp; jvp = jvp->Next) + if (!jvp->IsNull()) + n++; + + return n; + } else + return Size; + +} // end of GetSize + +/***********************************************************************/ +/* Make the array of values from the values list. 
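// A minimal sketch of the unlink idiom JOBJECT::DeleteKey uses above: walk the
// singly linked pair list while keeping the address of the link that points at
// the current node, so removal is a single assignment and no special case is
// needed for the head. Pair is a simplified stand-in for JPAIR.
#include <cstring>

struct Pair { const char* Key; Pair* Next; };

static void UnlinkKey(Pair** first, const char* key) {
  for (Pair** pjp = first; *pjp; pjp = &(*pjp)->Next)
    if (!strcmp((*pjp)->Key, key)) {
      *pjp = (*pjp)->Next;                  // bypass the matching pair
      break;
    }
}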
*/ +/***********************************************************************/ +void JARRAY::InitArray(PGLOBAL g) { + int i; + PJVAL jvp, * pjvp = &First; + + for (Size = 0, jvp = First; jvp; jvp = jvp->Next) + if (!jvp->Del) + Size++; + + if (Size > Alloc) { + // No need to realloc after deleting values + Mvals = (PJVAL*)PlugSubAlloc(g, NULL, Size * sizeof(PJVAL)); + Alloc = Size; + } // endif Size + + for (i = 0, jvp = First; jvp; jvp = jvp->Next) + if (!jvp->Del) { + Mvals[i++] = jvp; + pjvp = &jvp->Next; + Last = jvp; + } else + *pjvp = jvp->Next; + +} // end of InitArray + +/***********************************************************************/ +/* Get the Nth value of an Array. */ +/***********************************************************************/ +PJVAL JARRAY::GetArrayValue(int i) { + if (Mvals && i >= 0 && i < Size) + return Mvals[i]; + else + return NULL; +} // end of GetValue + +/***********************************************************************/ +/* Add a Value to the Array Value list. */ +/***********************************************************************/ +PJVAL JARRAY::AddArrayValue(PGLOBAL g, PJVAL jvp, int* x) { + if (!jvp) + jvp = new(g) JVALUE; + + if (x) { + int i = 0, n = *x; + PJVAL jp, * jpp = &First; + + for (jp = First; jp && i < n; i++, jp = *(jpp = &jp->Next)); + + (*jpp) = jvp; + + if (!(jvp->Next = jp)) + Last = jvp; + + } else { + if (!First) + First = jvp; + else if (Last == First) + First->Next = Last = jvp; + else + Last->Next = jvp; + + Last = jvp; + Last->Next = NULL; + } // endif x + + return jvp; +} // end of AddValue + +/***********************************************************************/ +/* Merge two arrays. */ +/***********************************************************************/ +bool JARRAY::Merge(PGLOBAL g, PJSON jsp) { + if (jsp->GetType() != TYPE_JAR) { + strcpy(g->Message, "Second argument is not an array"); + return true; + } // endif Type + + PJAR arp = (PJAR)jsp; + + for (int i = 0; i < arp->size(); i++) + AddArrayValue(g, arp->GetArrayValue(i)); + + InitArray(g); + return false; +} // end of Merge + +/***********************************************************************/ +/* Set the nth Value of the Array Value list. */ +/***********************************************************************/ +bool JARRAY::SetArrayValue(PGLOBAL g, PJVAL jvp, int n) { + int i = 0; + PJVAL jp, * jpp = &First; + + for (jp = First; i < n; i++, jp = *(jpp = &jp->Next)) + if (!jp) + *jpp = jp = new(g) JVALUE; + + *jpp = jvp; + jvp->Next = (jp ? jp->Next : NULL); + return false; +} // end of SetValue + +/***********************************************************************/ +/* Return the text corresponding to all values. */ +/***********************************************************************/ +PSZ JARRAY::GetText(PGLOBAL g, PSTRG text) { + if (First) { + bool b; + PJVAL jp; + + if (!text) { + text = new(g) STRING(g, 256); + b = true; + } else { + if (text->GetLastChar() != ' ') + text->Append(" ("); + else + text->Append('('); + + b = false; + } + + for (jp = First; jp; jp = jp->Next) { + jp->GetText(g, text); + + if (jp->Next) + text->Append(", "); + else if (!b) + text->Append(')'); + + } // endfor jp + + if (b) { + text->Trim(); + return text->GetStr(); + } // endif b + + } // endif First + + return NULL; +} // end of GetText; + +/***********************************************************************/ +/* Delete a Value from the Arrays Value list. 
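// A sketch of what JARRAY::InitArray does above, with std::vector standing in
// for the engine's sub-allocated Mvals block: values flagged as deleted are
// skipped and the survivors become directly indexable, which is what makes
// GetArrayValue(i) a constant-time lookup. Val is a stand-in for JVALUE.
#include <vector>

struct Val { bool Del; Val* Next; };

static std::vector<Val*> CompactArray(Val* first) {
  std::vector<Val*> mvals;
  for (Val* vp = first; vp; vp = vp->Next)
    if (!vp->Del)
      mvals.push_back(vp);               // keep only live values, in list order
  return mvals;                          // Size is mvals.size()
}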
*/ +/***********************************************************************/ +bool JARRAY::DeleteValue(int n) { + PJVAL jvp = GetArrayValue(n); + + if (jvp) { + jvp->Del = true; + return false; + } else + return true; + +} // end of DeleteValue + +/***********************************************************************/ +/* True if void or if all members are nulls. */ +/***********************************************************************/ +bool JARRAY::IsNull(void) { + for (int i = 0; i < Size; i++) + if (!Mvals[i]->IsNull()) + return false; + + return true; +} // end of IsNull + +/* -------------------------- Class JVALUE- -------------------------- */ + +/***********************************************************************/ +/* Constructor for a JVALUE. */ +/***********************************************************************/ +JVALUE::JVALUE(PJSON jsp) : JSON() { + if (jsp->GetType() == TYPE_JVAL) { + PJVAL jvp = (PJVAL)jsp; + + // Val = ((PJVAL)jsp)->GetVal(); + if (jvp->DataType == TYPE_JSON) { + Jsp = jvp->GetJsp(); + DataType = TYPE_JSON; + Nd = 0; + } else { + LLn = jvp->LLn; // Must be LLn on 32 bit machines + Nd = jvp->Nd; + DataType = jvp->DataType; + } // endelse Jsp + + } else { + Jsp = jsp; + // Val = NULL; + DataType = TYPE_JSON; + Nd = 0; + } // endif Type + + Next = NULL; + Del = false; + Type = TYPE_JVAL; +} // end of JVALUE constructor + +#if 0 +/***********************************************************************/ +/* Constructor for a JVALUE with a given string or numeric value. */ +/***********************************************************************/ +JVALUE::JVALUE(PGLOBAL g, PVL vlp) : JSON() { + Jsp = NULL; + Val = vlp; + Next = NULL; + Del = false; + Type = TYPE_JVAL; +} // end of JVALUE constructor +#endif // 0 + +/***********************************************************************/ +/* Constructor for a JVALUE with a given string or numeric value. */ +/***********************************************************************/ +JVALUE::JVALUE(PGLOBAL g, PVAL valp) : JSON() { + Jsp = NULL; + //Val = NULL; + SetValue(g, valp); + Next = NULL; + Del = false; + Type = TYPE_JVAL; +} // end of JVALUE constructor + +/***********************************************************************/ +/* Constructor for a given string. */ +/***********************************************************************/ +JVALUE::JVALUE(PGLOBAL g, PCSZ strp) : JSON() { + Jsp = NULL; + //Val = AllocVal(g, TYPE_STRG); + Strp = (char*)strp; + DataType = TYPE_STRG; + Nd = 0; + Next = NULL; + Del = false; + Type = TYPE_JVAL; +} // end of JVALUE constructor + +/***********************************************************************/ +/* Set or reset all Jvalue members. */ +/***********************************************************************/ +void JVALUE::Clear(void) { + Jsp = NULL; + Next = NULL; + Type = TYPE_JVAL; + Del = false; + Nd = 0; + DataType = TYPE_NULL; +} // end of Clear + +/***********************************************************************/ +/* Returns the type of the Value's value. */ +/***********************************************************************/ +JTYP JVALUE::GetValType(void) { + if (DataType == TYPE_JSON) + return Jsp->GetType(); + //else if (Val) + // return Val->Type; + else + return DataType; + +} // end of GetValType + +/***********************************************************************/ +/* Return the Value's Object value. 
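// A compact sketch of the tagged-union layout JVALUE uses above: one union for
// the payload plus a DataType discriminant saying which member is currently
// valid, which every getter then switches on. Names are local stand-ins and the
// string/date coercions of the real getters are omitted.
enum DemoTyp { D_NULL, D_INTG, D_BINT, D_DBL, D_BOOL };

struct TaggedVal {
  union {
    int       N;                         // TYPE_INTG
    long long LLn;                       // TYPE_BINT
    double    F;                         // TYPE_DBL
    bool      B;                         // TYPE_BOOL
  };
  DemoTyp DataType;                      // tells which member is active
};

static double AsFloat(const TaggedVal& v) {   // numeric cases of GetFloat
  switch (v.DataType) {
  case D_DBL:  return v.F;
  case D_BINT: return (double)v.LLn;
  case D_INTG: return (double)v.N;
  case D_BOOL: return v.B ? 1.0 : 0.0;
  default:     return 0.0;
  }
}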
*/ +/***********************************************************************/ +PJOB JVALUE::GetObject(void) { + if (DataType == TYPE_JSON && Jsp->GetType() == TYPE_JOB) + return (PJOB)Jsp; + + return NULL; +} // end of GetObject + +/***********************************************************************/ +/* Return the Value's Array value. */ +/***********************************************************************/ +PJAR JVALUE::GetArray(void) { + if (DataType == TYPE_JSON && Jsp->GetType() == TYPE_JAR) + return (PJAR)Jsp; + + return NULL; +} // end of GetArray + +/***********************************************************************/ +/* Return the Value's as a Value class. */ +/***********************************************************************/ +PVAL JVALUE::GetValue(PGLOBAL g) { + PVAL valp = NULL; + + if (DataType != TYPE_JSON) + if (DataType == TYPE_STRG) + valp = AllocateValue(g, Strp, DataType, Nd); + else + valp = AllocateValue(g, &LLn, DataType, Nd); + + return valp; +} // end of GetValue + +/***********************************************************************/ +/* Return the Value's Integer value. */ +/***********************************************************************/ +int JVALUE::GetInteger(void) { + int n; + + switch (DataType) { + case TYPE_INTG: n = N; break; + case TYPE_DBL: n = (int)F; break; + case TYPE_DTM: + case TYPE_STRG: n = atoi(Strp); break; + case TYPE_BOOL: n = (B) ? 1 : 0; break; + case TYPE_BINT: n = (int)LLn; break; + default: + n = 0; + } // endswitch Type + + return n; +} // end of GetInteger + +/***********************************************************************/ +/* Return the Value's Big integer value. */ +/***********************************************************************/ +long long JVALUE::GetBigint(void) { + long long lln; + + switch (DataType) { + case TYPE_BINT: lln = LLn; break; + case TYPE_INTG: lln = (long long)N; break; + case TYPE_DBL: lln = (long long)F; break; + case TYPE_DTM: + case TYPE_STRG: lln = atoll(Strp); break; + case TYPE_BOOL: lln = (B) ? 1 : 0; break; + default: + lln = 0; + } // endswitch Type + + return lln; +} // end of GetBigint + +/***********************************************************************/ +/* Return the Value's Double value. */ +/***********************************************************************/ +double JVALUE::GetFloat(void) { + double d; + + switch (DataType) { + case TYPE_DBL: d = F; break; + case TYPE_BINT: d = (double)LLn; break; + case TYPE_INTG: d = (double)N; break; + case TYPE_DTM: + case TYPE_STRG: d = atof(Strp); break; + case TYPE_BOOL: d = (B) ? 1.0 : 0.0; break; + default: + d = 0.0; + } // endswitch Type + + return d; +} // end of GetFloat + +/***********************************************************************/ +/* Return the Value's String value. */ +/***********************************************************************/ +PSZ JVALUE::GetString(PGLOBAL g, char* buff) { + char buf[32]; + char* p = (buff) ? buff : buf; + + switch (DataType) { + case TYPE_DTM: + case TYPE_STRG: + p = Strp; + break; + case TYPE_INTG: + sprintf(p, "%d", N); + break; + case TYPE_BINT: + sprintf(p, "%lld", LLn); + break; + case TYPE_DBL: + sprintf(p, "%.*lf", Nd, F); + break; + case TYPE_BOOL: + p = (char*)((B) ? "true" : "false"); + break; + case TYPE_NULL: + p = (char*)"null"; + break; + default: + p = NULL; + } // endswitch Type + + + return (p == buf) ? 
(char*)PlugDup(g, buf) : p; +} // end of GetString + +/***********************************************************************/ +/* Return the Value's String value. */ +/***********************************************************************/ +PSZ JVALUE::GetText(PGLOBAL g, PSTRG text) { + if (DataType == TYPE_JSON) + return Jsp->GetText(g, text); + + char buff[32]; + PSZ s = (DataType == TYPE_NULL) ? NULL : GetString(g, buff); + + if (s) + text->Append(s); + else if (GetJsonNull()) + text->Append(GetJsonNull()); + + return NULL; +} // end of GetText + +void JVALUE::SetValue(PJSON jsp) { + if (DataType == TYPE_JSON && jsp->GetType() == TYPE_JVAL) { + Jsp = jsp->GetJsp(); + Nd = ((PJVAL)jsp)->Nd; + DataType = ((PJVAL)jsp)->DataType; + // Val = ((PJVAL)jsp)->GetVal(); + } else { + Jsp = jsp; + DataType = TYPE_JSON; + } // endif Type + +} // end of SetValue; + +void JVALUE::SetValue(PGLOBAL g, PVAL valp) { + //if (!Val) + // Val = AllocVal(g, TYPE_VAL); + + if (!valp || valp->IsNull()) { + DataType = TYPE_NULL; + } else switch (valp->GetType()) { + case TYPE_DATE: + if (((DTVAL*)valp)->IsFormatted()) + Strp = valp->GetCharValue(); + else { + char buf[32]; + + Strp = PlugDup(g, valp->GetCharString(buf)); + } // endif Formatted + + DataType = TYPE_DTM; + break; + case TYPE_STRING: + Strp = valp->GetCharValue(); + DataType = TYPE_STRG; + break; + case TYPE_DOUBLE: + case TYPE_DECIM: + F = valp->GetFloatValue(); + + if (IsTypeNum(valp->GetType())) + Nd = valp->GetValPrec(); + + DataType = TYPE_DBL; + break; + case TYPE_TINY: + B = valp->GetTinyValue() != 0; + DataType = TYPE_BOOL; + case TYPE_INT: + N = valp->GetIntValue(); + DataType = TYPE_INTG; + break; + case TYPE_BIGINT: + LLn = valp->GetBigintValue(); + DataType = TYPE_BINT; + break; + default: + sprintf(g->Message, "Unsupported typ %d\n", valp->GetType()); + throw(777); + } // endswitch Type + +} // end of SetValue + +/***********************************************************************/ +/* Set the Value's value as the given integer. */ +/***********************************************************************/ +void JVALUE::SetInteger(PGLOBAL g, int n) { + N = n; + DataType = TYPE_INTG; +} // end of SetInteger + +/***********************************************************************/ +/* Set the Value's Boolean value as a tiny integer. */ +/***********************************************************************/ +void JVALUE::SetBool(PGLOBAL g, bool b) { + B = b; + DataType = TYPE_BOOL; +} // end of SetTiny + +/***********************************************************************/ +/* Set the Value's value as the given big integer. */ +/***********************************************************************/ +void JVALUE::SetBigint(PGLOBAL g, long long ll) { + LLn = ll; + DataType = TYPE_BINT; +} // end of SetBigint + +/***********************************************************************/ +/* Set the Value's value as the given DOUBLE. */ +/***********************************************************************/ +void JVALUE::SetFloat(PGLOBAL g, double f) { + F = f; + Nd = 6; + DataType = TYPE_DBL; +} // end of SetFloat + +/***********************************************************************/ +/* Set the Value's value as the given string. 
*/ +/***********************************************************************/ +void JVALUE::SetString(PGLOBAL g, PSZ s, int ci) { + Strp = s; + Nd = ci; + DataType = TYPE_STRG; +} // end of SetString + +/***********************************************************************/ +/* True when its JSON or normal value is null. */ +/***********************************************************************/ +bool JVALUE::IsNull(void) { + return (DataType == TYPE_JSON) ? Jsp->IsNull() : DataType == TYPE_NULL; +} // end of IsNull +#endif // 0 diff --git a/storage/connect/bson.h b/storage/connect/bson.h new file mode 100644 index 00000000000..7cf0820dc7a --- /dev/null +++ b/storage/connect/bson.h @@ -0,0 +1,291 @@ +#pragma once +/**************** bson H Declares Source Code File (.H) ****************/ +/* Name: bson.h Version 1.0 */ +/* */ +/* (C) Copyright to the author Olivier BERTRAND 2020 */ +/* */ +/* This file contains the BSON classe declares. */ +/***********************************************************************/ +#include +#include "json.h" +#include "xobject.h" + +#if defined(_DEBUG) +#define X assert(false); +#else +#define X +#endif + +class BDOC; +class BOUT; +//class JSON; + +typedef class BDOC* PBDOC; +//typedef class BJSON* PBSON; + +// BSON size should be equal on Linux and Windows +#define BMX 255 + +typedef uint OFFSET; + +/***********************************************************************/ +/* Structure JVALUE. */ +/***********************************************************************/ +typedef struct _jvalue { + union { + OFFSET To_Val; // Offset to a value + int N; // An integer value + float F; // A float value + bool B; // A boolean value True or false (0) + }; + short Nd; // Number of decimals + JTYP Type; // The value type + OFFSET Next; // Offset to the next value in array +} BVAL, *PBVAL; // end of struct BVALUE + +/***********************************************************************/ +/* Structure JPAIR. The pairs of a json Object. */ +/***********************************************************************/ +typedef struct _jpair { + OFFSET Key; // Offset to this pair key name + OFFSET Vlp; // To the value of the pair + OFFSET Next; // Offset to the next pair in object +} BPAIR, *PBPR; // end of struct BPAIR + +#if 0 +/***********************************************************************/ +/* Structure used to return binary json to Json UDF functions. */ +/* (should be moved to jsonudf.h). */ +/***********************************************************************/ +typedef struct _JsonBin { + char Msg[BMX + 1]; + char *Filename; + PGLOBAL G; + int Pretty; + ulong Reslen; + my_bool Changed; + PBSON Top; + PBSON Jsp; + PBJN Bsp; +} BJSON, *PBJN ; // end of struct BJSON + +PBJN JbinAlloc(PGLOBAL g, UDF_ARGS* args, ulong len, PJSON jsp); +#endif // 0 + +char* NextChr(PSZ s, char sep); +char* GetJsonNull(void); +const char* GetFmt(int type, bool un); + +DllExport bool IsNum(PSZ s); + +/***********************************************************************/ +/* Class JDOC. The class for parsing and serializing json documents. 
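// A minimal sketch of why the BVAL and BPAIR structures above link nodes with
// OFFSET fields instead of pointers: offsets are relative to the arena base, so
// the whole parsed tree can be copied to a file or to another address and still
// resolves, which is what the commit calls swapping. The plain buffers below
// stand in for the engine's memory pool; offset 0 is treated as null, an
// assumption made only for this sketch.
#include <cstring>
#include <new>

struct RelocNode { unsigned int Next; int N; };

static inline RelocNode* RelocPtr(void* base, unsigned int o) {
  return o ? (RelocNode*)((char*)base + o) : nullptr;
}

static int SumAfterRelocation() {
  alignas(8) char arena1[256] = {};
  alignas(8) char arena2[256];

  // Build a two-value list inside arena1, linking by offset rather than address.
  new (arena1 + 8)  RelocNode{24, 1};    // first node, Next points at offset 24
  new (arena1 + 24) RelocNode{0, 2};     // second node, end of list

  // "Relocate" the arena: a raw copy is enough because no pointers are stored.
  memcpy(arena2, arena1, sizeof(arena1));

  int sum = 0;
  for (RelocNode* p = RelocPtr(arena2, 8); p; p = RelocPtr(arena2, p->Next))
    sum += p->N;                         // the list still walks at the new base
  return sum;                            // 3
}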
*/ +/***********************************************************************/ +class BDOC : public BLOCK { +public: + BDOC(void); + + void *BsonSubAlloc(PGLOBAL g, size_t size); + PBPR SubAllocPair(PGLOBAL g, OFFSET key); + PBVAL SubAllocVal(PGLOBAL g); + PBVAL ParseJson(PGLOBAL g, char* s, size_t n, int* prty = NULL, bool* b = NULL); + PSZ Serialize(PGLOBAL g, PBVAL bvp, char* fn, int pretty); + +protected: + OFFSET ParseArray(PGLOBAL g, int& i); + OFFSET ParseObject(PGLOBAL g, int& i); + PBVAL ParseValue(PGLOBAL g, int& i); + OFFSET ParseString(PGLOBAL g, int& i); + void ParseNumeric(PGLOBAL g, int& i, PBVAL bvp); + OFFSET ParseAsArray(PGLOBAL g, int& i, int pretty, int* ptyp); + bool SerializeArray(OFFSET arp, bool b); + bool SerializeObject(OFFSET obp); + bool SerializeValue(PBVAL vp); + + // Members used when parsing and serializing +private: + JOUT* jp; // Used with serialize + void* base; // The base for making offsets or pointers + char* s; // The Json string to parse + int len; // The Json string length + bool pty[3]; // Used to guess what pretty is +}; // end of class BDOC + +#if 0 +/***********************************************************************/ +/* Class BJSON. The class handling all BSON operations. */ +/***********************************************************************/ +class BJSON : public BLOCK { +public: + // Constructor + BJSON(PBVAL vp, void* base) { Vlp = vp; Base = base; } + + // Array functions + int GetSize(bool b); + PBVAL GetArrayValue(int i); + PSZ GetText(PGLOBAL g, PSTRG text); + bool Merge(PGLOBAL g, PBVAL jsp); + bool DeleteValue(int n); + PBVAL AddArrayValue(PGLOBAL g, PBVAL jvp = NULL, int* x = NULL); + bool SetArrayValue(PGLOBAL g, PBVAL jvp, int i); + + // Object functions + int GetObjectSize(PBPR prp, bool b); + PSZ GetObjectText(PGLOBAL g, PBPR prp, PSTRG text); + bool MergeObject(PGLOBAL g, PBPR prp); + PJPR AddPair(PGLOBAL g, PCSZ key); + PJVAL GetKeyValue(const char* key); + PJAR GetKeyList(PGLOBAL g); + PJAR GetValList(PGLOBAL g); + void SetKeyValue(PGLOBAL g, PBVAL jvp, PCSZ key); + void DeleteKey(PCSZ k); + + // Value functions + PBPR GetObject(void); + PBVAL GetArray(void); + PJSON GetJsp(void) { return (DataType == TYPE_JSON ? Jsp : NULL); } + PSZ GetValueText(PGLOBAL g, PSTRG text); + inline PJSON GetJson(void) { return (DataType == TYPE_JSON ? Jsp : this); } + PSZ GetString(PGLOBAL g, char* buff = NULL); + int GetInteger(void); + long long GetBigint(void); + double GetFloat(void); + PVAL GetValue(PGLOBAL g); + void SetValue(PJSON jsp); + void SetValue(PGLOBAL g, PVAL valp); + void SetString(PGLOBAL g, PSZ s, int ci = 0); + void SetInteger(PGLOBAL g, int n); + void SetBigint(PGLOBAL g, longlong ll); + void SetFloat(PGLOBAL g, double f); + void SetBool(PGLOBAL g, bool b); + + // Members + PBVAL Vlp; + void* Base; +}; // end of class BJSON + +/***********************************************************************/ +/* Class JOBJECT: contains a list of value pairs. 
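// A small sketch of the output selection that BDOC::Serialize performs for the
// class declared above: no file name means building the text in memory
// (JOUTSTR), pretty >= 2 selects the indented file writer (JOUTPRT), and lower
// pretty values the flat file writer (JOUTFILE). The enum and function are
// illustrative stand-ins, not part of the class's API.
enum OutKind { OUT_STRING, OUT_PRETTY_FILE, OUT_FLAT_FILE };

static OutKind ChooseOutput(const char* fn, int pretty) {
  if (!fn)
    return OUT_STRING;                   // serialize to a memory string
  return (pretty >= 2) ? OUT_PRETTY_FILE // indented, multi-line document
                       : OUT_FLAT_FILE;  // one flat line per document
}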
*/ +/***********************************************************************/ +class JOBJECT : public JSON { + friend class JDOC; + friend class JSNX; + friend class SWAP; +public: + JOBJECT(void) : JSON() { Type = TYPE_JOB; First = Last = NULL; } + JOBJECT(int i) : JSON(i) {} + + // Methods + virtual void Clear(void) { First = Last = NULL; } + virtual PJPR GetFirst(void) { return First; } + virtual int GetSize(PBPR prp, bool b); + virtual PJOB GetObject(void) { return this; } + virtual PSZ GetText(PGLOBAL g, PSTRG text); + virtual bool Merge(PGLOBAL g, PJSON jsp); + virtual bool IsNull(void); + + // Specific + PJPR AddPair(PGLOBAL g, PCSZ key); + PJVAL GetKeyValue(const char* key); + PJAR GetKeyList(PGLOBAL g); + PJAR GetValList(PGLOBAL g); + void SetKeyValue(PGLOBAL g, PJVAL jvp, PCSZ key); + void DeleteKey(PCSZ k); + +protected: + PJPR First; + PJPR Last; +}; // end of class JOBJECT + +/***********************************************************************/ +/* Class JARRAY. */ +/***********************************************************************/ +class JARRAY : public JSON { + friend class SWAP; +public: + JARRAY(void); + JARRAY(int i) : JSON(i) {} + + // Methods + virtual void Clear(void) { First = Last = NULL; Size = 0; } + virtual int size(void) { return Size; } + virtual PJAR GetArray(void) { return this; } + virtual int GetSize(bool b); + virtual PJVAL GetArrayValue(int i); + virtual PSZ GetText(PGLOBAL g, PSTRG text); + virtual bool Merge(PGLOBAL g, PJSON jsp); + virtual bool DeleteValue(int n); + virtual bool IsNull(void); + + // Specific + PJVAL AddArrayValue(PGLOBAL g, PJVAL jvp = NULL, int* x = NULL); + bool SetArrayValue(PGLOBAL g, PJVAL jvp, int i); + void InitArray(PGLOBAL g); + +protected: + // Members + int Size; // The number of items in the array + int Alloc; // The Mvals allocated size + PJVAL First; // Used when constructing + PJVAL Last; // Last constructed value + PJVAL* Mvals; // Allocated when finished +}; // end of class JARRAY + +/***********************************************************************/ +/* Class JVALUE. */ +/***********************************************************************/ +class JVALUE : public JSON { + friend class JARRAY; + friend class JSNX; + friend class JSONDISC; + friend class JSONCOL; + friend class JSON; + friend class JDOC; + friend class SWAP; +public: + JVALUE(void) : JSON() { Type = TYPE_JVAL; Clear(); } + JVALUE(PJSON jsp); + JVALUE(PGLOBAL g, PVAL valp); + JVALUE(PGLOBAL g, PCSZ strp); + JVALUE(int i) : JSON(i) {} + + // Methods + virtual void Clear(void); + //virtual JTYP GetType(void) {return TYPE_JVAL;} + virtual JTYP GetValType(void); + virtual PJOB GetObject(void); + virtual PJAR GetArray(void); + virtual PJSON GetJsp(void) { return (DataType == TYPE_JSON ? Jsp : NULL); } + virtual PSZ GetText(PGLOBAL g, PSTRG text); + virtual bool IsNull(void); + + // Specific + inline PJSON GetJson(void) { return (DataType == TYPE_JSON ? 
Jsp : this); } + PSZ GetString(PGLOBAL g, char* buff = NULL); + int GetInteger(void); + long long GetBigint(void); + double GetFloat(void); + PVAL GetValue(PGLOBAL g); + void SetValue(PJSON jsp); + void SetValue(PGLOBAL g, PVAL valp); + void SetString(PGLOBAL g, PSZ s, int ci = 0); + void SetInteger(PGLOBAL g, int n); + void SetBigint(PGLOBAL g, longlong ll); + void SetFloat(PGLOBAL g, double f); + void SetBool(PGLOBAL g, bool b); + +protected: + union { + PJSON Jsp; // To the json value + char* Strp; // Ptr to a string + int N; // An integer value + long long LLn; // A big integer value + double F; // A (double) float value + bool B; // True or false + }; + PJVAL Next; // Next value in array + JTYP DataType; // The data value type + int Nd; // Decimal number + bool Del; // True when deleted +}; // end of class JVALUE +#endif // 0 diff --git a/storage/connect/json.h b/storage/connect/json.h index 999b2cd8a1c..97931b173e8 100644 --- a/storage/connect/json.h +++ b/storage/connect/json.h @@ -15,18 +15,22 @@ #define X #endif -enum JTYP {TYPE_NULL = TYPE_VOID, - TYPE_STRG = TYPE_STRING, - TYPE_DBL = TYPE_DOUBLE, - TYPE_BOOL = TYPE_TINY, - TYPE_BINT = TYPE_BIGINT, - TYPE_DTM = TYPE_DATE, - TYPE_INTG = TYPE_INT, - TYPE_VAL = 12, - TYPE_JSON, - TYPE_JAR, - TYPE_JOB, - TYPE_JVAL}; +enum JTYP : short { + TYPE_NULL = TYPE_VOID, + TYPE_STRG = TYPE_STRING, + TYPE_DBL = TYPE_DOUBLE, + TYPE_BOOL = TYPE_TINY, + TYPE_BINT = TYPE_BIGINT, + TYPE_INTG = TYPE_INT, + TYPE_DTM = TYPE_DATE, + TYPE_FLOAT, + TYPE_JAR, + TYPE_JOB, + TYPE_JVAL, + TYPE_JSON, + TYPE_DEL, + TYPE_UNKNOWN +}; class JDOC; class JOUT; diff --git a/storage/connect/jsonudf.cpp b/storage/connect/jsonudf.cpp index 9ec70512823..c9f0ea9239a 100644 --- a/storage/connect/jsonudf.cpp +++ b/storage/connect/jsonudf.cpp @@ -1252,9 +1252,9 @@ static PJVAL JvalNew(PGLOBAL g, JTYP type, void *vp) case TYPE_JOB: jvp = new(g) JVALUE((PJSON)vp); break; - case TYPE_VAL: - jvp = new(g) JVALUE(g, (PVAL)vp); - break; +// case TYPE_VAL: +// jvp = new(g) JVALUE(g, (PVAL)vp); +// break; case TYPE_DTM: case TYPE_STRG: jvp = new(g) JVALUE(g, (PCSZ)vp); @@ -5376,7 +5376,7 @@ char *jbin_get_item(UDF_INIT *initid, UDF_ARGS *args, char *result, // Get the json tree if ((jvp = jsx->GetRowValue(g, jsp, 0, false))) { - jsp = (jvp->GetJsp()) ? jvp->GetJsp() : JvalNew(g, TYPE_VAL, jvp->GetValue(g)); + jsp = (jvp->GetJsp()) ? jvp->GetJsp() : JvalNew(g, TYPE_JVAL, jvp->GetValue(g)); if ((bsp = JbinAlloc(g, args, initid->max_length, jsp))) strcat(bsp->Msg, " item"); diff --git a/storage/connect/jsonudf.h b/storage/connect/jsonudf.h index 69d6c644d9c..46bac66b607 100644 --- a/storage/connect/jsonudf.h +++ b/storage/connect/jsonudf.h @@ -10,7 +10,7 @@ #include "block.h" #include "osutil.h" #include "maputil.h" -#include "json.h" +#include "bson.h" #define UDF_EXEC_ARGS \ UDF_INIT*, UDF_ARGS*, char*, unsigned long*, char*, char* diff --git a/storage/connect/myutil.cpp b/storage/connect/myutil.cpp index 338a79d9455..ed25abc4bc5 100644 --- a/storage/connect/myutil.cpp +++ b/storage/connect/myutil.cpp @@ -168,10 +168,9 @@ const char *PLGtoMYSQLtype(int type, bool dbf, char v) case TYPE_BIGINT: return "BIGINT"; case TYPE_TINY: return "TINYINT"; case TYPE_DECIM: return "DECIMAL"; - default: return "CHAR(0)"; + default: return (v) ? 
"VARCHAR" : "CHAR"; } // endswitch mytype - return "CHAR(0)"; } // end of PLGtoMYSQLtype /************************************************************************/ diff --git a/storage/connect/tabfmt.cpp b/storage/connect/tabfmt.cpp index b395c49c95d..9a1e43dd798 100644 --- a/storage/connect/tabfmt.cpp +++ b/storage/connect/tabfmt.cpp @@ -67,7 +67,7 @@ /* This should be an option. */ /***********************************************************************/ #define MAXCOL 200 /* Default max column nb in result */ -#define TYPE_UNKNOWN 10 /* Must be greater than other types */ +#define TYPE_UNKNOWN 12 /* Must be greater than other types */ /***********************************************************************/ /* External function. */ diff --git a/storage/connect/tabjson.cpp b/storage/connect/tabjson.cpp index d5aa1be892d..a9aeadd7bf4 100644 --- a/storage/connect/tabjson.cpp +++ b/storage/connect/tabjson.cpp @@ -46,7 +46,7 @@ /* This should be an option. */ /***********************************************************************/ #define MAXCOL 200 /* Default max column nb in result */ -#define TYPE_UNKNOWN 12 /* Must be greater than other types */ +//#define TYPE_UNKNOWN 12 /* Must be greater than other types */ /***********************************************************************/ /* External functions. */ @@ -114,7 +114,7 @@ PQRYRES JSONColumns(PGLOBAL g, PCSZ db, PCSZ dsn, PTOS topt, bool info) /*********************************************************************/ for (i = 0, jcp = pjdc->fjcp; jcp; i++, jcp = jcp->Next) { if (jcp->Type == TYPE_UNKNOWN) - jcp->Type = TYPE_STRING; // Void column + jcp->Type = TYPE_STRG; // Void column crp = qrp->Colresp; // Column Name crp->Kdata->SetValue(jcp->Name, i); @@ -395,7 +395,7 @@ bool JSONDISC::Find(PGLOBAL g, PJVAL jvp, PCSZ key, int j) PJAR jar; if (jvp && jvp->DataType != TYPE_JSON) { - if (JsonAllPath() && !fmt[bf]) + if (JsonAllPath() && !fmt[bf]) strcat(fmt, colname); jcol.Type = jvp->DataType; @@ -506,7 +506,7 @@ bool JSONDISC::Find(PGLOBAL g, PJVAL jvp, PCSZ key, int j) } else if (JsonAllPath() && !fmt[bf]) strcat(fmt, colname); - jcol.Type = TYPE_STRING; + jcol.Type = TYPE_STRG; jcol.Len = sz; jcol.Scale = 0; jcol.Cbn = true; @@ -528,23 +528,23 @@ void JSONDISC::AddColumn(PGLOBAL g) if (jcp) { if (jcp->Type != jcol.Type) { - if (jcp->Type == TYPE_UNKNOWN || jcol.Type == TYPE_VOID) + if (jcp->Type == TYPE_UNKNOWN || jcp->Type == TYPE_NULL) jcp->Type = jcol.Type; // else if (jcol.Type != TYPE_UNKNOWN && jcol.Type != TYPE_VOID) // jcp->Type = TYPE_STRING; - else if (jcp->Type != TYPE_STRING) + else if (jcp->Type != TYPE_STRG) switch (jcol.Type) { - case TYPE_STRING: - case TYPE_DOUBLE: + case TYPE_STRG: + case TYPE_DBL: jcp->Type = jcol.Type; break; - case TYPE_BIGINT: - if (jcp->Type == TYPE_INT || jcp->Type == TYPE_TINY) + case TYPE_BINT: + if (jcp->Type == TYPE_INTG || jcp->Type == TYPE_BOOL) jcp->Type = jcol.Type; break; - case TYPE_INT: - if (jcp->Type == TYPE_TINY) + case TYPE_INTG: + if (jcp->Type == TYPE_BOOL) jcp->Type = jcol.Type; break; diff --git a/storage/connect/tabjson.h b/storage/connect/tabjson.h index 9994c9106ca..c254c3429de 100644 --- a/storage/connect/tabjson.h +++ b/storage/connect/tabjson.h @@ -35,7 +35,7 @@ typedef struct _jncol { struct _jncol *Next; char *Name; char *Fmt; - int Type; + JTYP Type; int Len; int Scale; bool Cbn; From dae4bd0b36b83cc50d827c62f02ed1b8b1aa2045 Mon Sep 17 00:00:00 2001 From: Olivier Bertrand Date: Sat, 21 Nov 2020 23:14:06 +0100 Subject: [PATCH 028/150] Fix xml.test failure. 
Fix compile error modified json.h --- storage/connect/json.h | 3 + .../connect/mysql-test/connect/r/xml.result | 138 +++++++++--------- storage/connect/mysql-test/connect/t/xml.test | 46 +++--- 3 files changed, 95 insertions(+), 92 deletions(-) diff --git a/storage/connect/json.h b/storage/connect/json.h index 97931b173e8..c5251af01a9 100644 --- a/storage/connect/json.h +++ b/storage/connect/json.h @@ -15,6 +15,9 @@ #define X #endif +// Required by some compilers +enum JTYP : short; + enum JTYP : short { TYPE_NULL = TYPE_VOID, TYPE_STRG = TYPE_STRING, diff --git a/storage/connect/mysql-test/connect/r/xml.result b/storage/connect/mysql-test/connect/r/xml.result index 92e1abb615d..99739b1ec10 100644 --- a/storage/connect/mysql-test/connect/r/xml.result +++ b/storage/connect/mysql-test/connect/r/xml.result @@ -85,9 +85,9 @@ DROP TABLE t1; # Testing mixed tag and attribute values # CREATE TABLE t1 ( -ISBN CHAR(15) XPATH='@', -LANG CHAR(2) XPATH='@', -SUBJECT CHAR(32) XPATH='@', +ISBN CHAR(15) FIELD_FORMAT='@', +LANG CHAR(2) FIELD_FORMAT='@', +SUBJECT CHAR(32) FIELD_FORMAT='@', AUTHOR CHAR(50), TITLE CHAR(32), TRANSLATOR CHAR(40), @@ -118,9 +118,9 @@ DROP TABLE t1; # Testing INSERT on mixed tag and attribute values # CREATE TABLE t1 ( -ISBN CHAR(15) XPATH='@', -LANG CHAR(2) XPATH='@', -SUBJECT CHAR(32) XPATH='@', +ISBN CHAR(15) FIELD_FORMAT='@', +LANG CHAR(2) FIELD_FORMAT='@', +SUBJECT CHAR(32) FIELD_FORMAT='@', AUTHOR CHAR(50), TITLE CHAR(32), TRANSLATOR CHAR(40), @@ -158,65 +158,65 @@ TRANSLATOR NULL PUBLISHER Eyrolles Paris DATEPUB 1998 SELECT LOAD_FILE('MYSQLD_DATADIR/test/xsample2.xml') AS xml; -xml - - - - Jean-Christophe - Bernadac - - - François - Knab - - Construire une application XML - - Eyrolles - Paris - - 1999 - - - - William J. - Pardi - - - James - Guerin - - XML en Action - - Microsoft Press - Paris - - 1999 - - - Alain Michard - XML, Langage et Applications - Eyrolles Paris - 1998 - - +xml + + + + Jean-Christophe + Bernadac + + + François + Knab + + Construire une application XML + + Eyrolles + Paris + + 1999 + + + + William J. 
+ Pardi + + + James + Guerin + + XML en Action + + Microsoft Press + Paris + + 1999 + + + Alain Michard + XML, Langage et Applications + Eyrolles Paris + 1998 + + DROP TABLE t1; # # Testing XPath # CREATE TABLE t1 ( -isbn CHAR(15) XPATH='@ISBN', -language CHAR(2) XPATH='@LANG', -subject CHAR(32) XPATH='@SUBJECT', -authorfn CHAR(20) XPATH='AUTHOR/FIRSTNAME', -authorln CHAR(20) XPATH='AUTHOR/LASTNAME', -title CHAR(32) XPATH='TITLE', -translated CHAR(32) XPATH='TRANSLATOR/@PREFIX', -tranfn CHAR(20) XPATH='TRANSLATOR/FIRSTNAME', -tranln CHAR(20) XPATH='TRANSLATOR/LASTNAME', -publisher CHAR(20) XPATH='PUBLISHER/NAME', -location CHAR(20) XPATH='PUBLISHER/PLACE', -year INT(4) XPATH='DATEPUB' +isbn CHAR(15) FIELD_FORMAT='@ISBN', +language CHAR(2) FIELD_FORMAT='@LANG', +subject CHAR(32) FIELD_FORMAT='@SUBJECT', +authorfn CHAR(20) FIELD_FORMAT='AUTHOR/FIRSTNAME', +authorln CHAR(20) FIELD_FORMAT='AUTHOR/LASTNAME', +title CHAR(32) FIELD_FORMAT='TITLE', +translated CHAR(32) FIELD_FORMAT='TRANSLATOR/@PREFIX', +tranfn CHAR(20) FIELD_FORMAT='TRANSLATOR/FIRSTNAME', +tranln CHAR(20) FIELD_FORMAT='TRANSLATOR/LASTNAME', +publisher CHAR(20) FIELD_FORMAT='PUBLISHER/NAME', +location CHAR(20) FIELD_FORMAT='PUBLISHER/PLACE', +year INT(4) FIELD_FORMAT='DATEPUB' ) ENGINE=CONNECT TABLE_TYPE=XML FILE_NAME='xsample.xml' TABNAME='BIBLIO' OPTION_LIST='rownode=BOOK,skipnull=1,xmlsup=domdoc'; SELECT * FROM t1; @@ -258,7 +258,7 @@ DROP TABLE t1; # CREATE TABLE t1 ( -isbn CHAR(15) XPATH='@isbn' +isbn CHAR(15) FIELD_FORMAT='@isbn' ) ENGINE=CONNECT TABLE_TYPE=XML FILE_NAME='xsample.xml' TABNAME='BIBLIO' OPTION_LIST='rownode=BOOK,skipnull=1,xmlsup=domdoc'; SELECT * FROM t1; @@ -323,7 +323,7 @@ HEX(c) 3F3F3F3F3F3F3F Warnings: Level Warning Code 1366 -Message Incorrect string value: '\xC3\x81\xC3\x82\xC3\x83...' for column `test`.`t1`.`c` at row 1 +Message Incorrect string value: '\xC3\x81\xC3\x82\xC3\x83...' for column 'c' at row 1 Level Warning Code 1105 Message Out of range value ÁÂÃÄÅÆÇ for column 'c' at row 1 @@ -374,7 +374,7 @@ INSERT INTO t1 VALUES (_cp1251 0xC0C1C2C3); Warnings: Level Warning Code 1105 -Message Com error: Unable to save character to 'iso-8859-1' encoding. +Message Com error: Unable to save character to 'iso-8859-1' encoding. 
INSERT INTO t1 VALUES ('&<>"\''); SELECT node, hex(node) FROM t1; @@ -383,11 +383,11 @@ hex(node) 263C3E2227 DROP TABLE t1; SET @a=LOAD_FILE('MYSQLD_DATADIR/test/t1.xml'); SELECT CAST(@a AS CHAR CHARACTER SET latin1); -CAST(@a AS CHAR CHARACTER SET latin1) - - - - &<>"' - - +CAST(@a AS CHAR CHARACTER SET latin1) + + + + &<>"' + + diff --git a/storage/connect/mysql-test/connect/t/xml.test b/storage/connect/mysql-test/connect/t/xml.test index 669fa3f64dc..0fdf8e90b6e 100644 --- a/storage/connect/mysql-test/connect/t/xml.test +++ b/storage/connect/mysql-test/connect/t/xml.test @@ -77,9 +77,9 @@ DROP TABLE t1; --echo # Testing mixed tag and attribute values --echo # CREATE TABLE t1 ( - ISBN CHAR(15) XPATH='@', - LANG CHAR(2) XPATH='@', - SUBJECT CHAR(32) XPATH='@', + ISBN CHAR(15) FIELD_FORMAT='@', + LANG CHAR(2) FIELD_FORMAT='@', + SUBJECT CHAR(32) FIELD_FORMAT='@', AUTHOR CHAR(50), TITLE CHAR(32), TRANSLATOR CHAR(40), @@ -98,9 +98,9 @@ DROP TABLE t1; --copy_file $MTR_SUITE_DIR/std_data/xsample.xml $MYSQLD_DATADIR/test/xsample2.xml --chmod 0644 $MYSQLD_DATADIR/test/xsample2.xml CREATE TABLE t1 ( - ISBN CHAR(15) XPATH='@', - LANG CHAR(2) XPATH='@', - SUBJECT CHAR(32) XPATH='@', + ISBN CHAR(15) FIELD_FORMAT='@', + LANG CHAR(2) FIELD_FORMAT='@', + SUBJECT CHAR(32) FIELD_FORMAT='@', AUTHOR CHAR(50), TITLE CHAR(32), TRANSLATOR CHAR(40), @@ -123,18 +123,18 @@ DROP TABLE t1; --echo # Testing XPath --echo # CREATE TABLE t1 ( - isbn CHAR(15) XPATH='@ISBN', - language CHAR(2) XPATH='@LANG', - subject CHAR(32) XPATH='@SUBJECT', - authorfn CHAR(20) XPATH='AUTHOR/FIRSTNAME', - authorln CHAR(20) XPATH='AUTHOR/LASTNAME', - title CHAR(32) XPATH='TITLE', - translated CHAR(32) XPATH='TRANSLATOR/@PREFIX', - tranfn CHAR(20) XPATH='TRANSLATOR/FIRSTNAME', - tranln CHAR(20) XPATH='TRANSLATOR/LASTNAME', - publisher CHAR(20) XPATH='PUBLISHER/NAME', - location CHAR(20) XPATH='PUBLISHER/PLACE', - year INT(4) XPATH='DATEPUB' + isbn CHAR(15) FIELD_FORMAT='@ISBN', + language CHAR(2) FIELD_FORMAT='@LANG', + subject CHAR(32) FIELD_FORMAT='@SUBJECT', + authorfn CHAR(20) FIELD_FORMAT='AUTHOR/FIRSTNAME', + authorln CHAR(20) FIELD_FORMAT='AUTHOR/LASTNAME', + title CHAR(32) FIELD_FORMAT='TITLE', + translated CHAR(32) FIELD_FORMAT='TRANSLATOR/@PREFIX', + tranfn CHAR(20) FIELD_FORMAT='TRANSLATOR/FIRSTNAME', + tranln CHAR(20) FIELD_FORMAT='TRANSLATOR/LASTNAME', + publisher CHAR(20) FIELD_FORMAT='PUBLISHER/NAME', + location CHAR(20) FIELD_FORMAT='PUBLISHER/PLACE', + year INT(4) FIELD_FORMAT='DATEPUB' ) ENGINE=CONNECT TABLE_TYPE=XML FILE_NAME='xsample.xml' TABNAME='BIBLIO' OPTION_LIST='rownode=BOOK,skipnull=1,xmlsup=domdoc'; SELECT * FROM t1; @@ -150,8 +150,8 @@ DROP TABLE t1; #--echo # Relative paths are not supported #--echo # #CREATE TABLE t1 ( -# authorfn CHAR(20) XPATH='//FIRSTNAME', -# authorln CHAR(20) XPATH='//LASTNAME' +# authorfn CHAR(20) FIELD_FORMAT='//FIRSTNAME', +# authorln CHAR(20) FIELD_FORMAT='//LASTNAME' #) ENGINE=CONNECT TABLE_TYPE=XML FILE_NAME='xsample.xml' # TABNAME='BIBLIO' OPTION_LIST='rownode=BOOK,skipnull=1'; #SELECT * FROM t1; @@ -165,8 +165,8 @@ DROP TABLE t1; #--echo # Absolute path is not supported #--echo # #CREATE TABLE t1 ( -# authorfn CHAR(20) XPATH='/BIBLIO/BOOK/AUTHOR/FIRSTNAME', -# authorln CHAR(20) XPATH='/BIBLIO/BOOK/AUTHOR/LASTNAME' +# authorfn CHAR(20) FIELD_FORMAT='/BIBLIO/BOOK/AUTHOR/FIRSTNAME', +# authorln CHAR(20) FIELD_FORMAT='/BIBLIO/BOOK/AUTHOR/LASTNAME' #) ENGINE=CONNECT TABLE_TYPE=XML FILE_NAME='xsample.xml' # TABNAME='BIBLIO' OPTION_LIST='rownode=BOOK,skipnull=1'; #SELECT * FROM t1; 
@@ -178,7 +178,7 @@ DROP TABLE t1; --echo # CREATE TABLE t1 ( - isbn CHAR(15) XPATH='@isbn' + isbn CHAR(15) FIELD_FORMAT='@isbn' ) ENGINE=CONNECT TABLE_TYPE=XML FILE_NAME='xsample.xml' TABNAME='BIBLIO' OPTION_LIST='rownode=BOOK,skipnull=1,xmlsup=domdoc'; SELECT * FROM t1; From dc8f914c383366d11b6a995ba184b99d5ec663cf Mon Sep 17 00:00:00 2001 From: Olivier Bertrand Date: Wed, 25 Nov 2020 12:56:45 +0100 Subject: [PATCH 029/150] Remove based enum not accepted by most gcc compilers --- storage/connect/bson.cpp | 1026 +++++++++++++----------- storage/connect/bson.h | 187 ++--- storage/connect/json.h | 41 +- storage/connect/jsonudf.cpp | 1476 ++++++++++++++++++++++++++++++++++- storage/connect/jsonudf.h | 114 ++- storage/connect/tabjson.cpp | 2 + storage/connect/value.cpp | 2 +- 7 files changed, 2275 insertions(+), 573 deletions(-) diff --git a/storage/connect/bson.cpp b/storage/connect/bson.cpp index 86825f5808e..2f380752c0d 100644 --- a/storage/connect/bson.cpp +++ b/storage/connect/bson.cpp @@ -21,8 +21,6 @@ #include "plgdbsem.h" #include "bson.h" -#define ARGS MY_MIN(24,(int)len-i),s+MY_MAX(i-3,0) - #if defined(__WIN__) #define EL "\r\n" #else @@ -89,43 +87,14 @@ char* NextChr(PSZ s, char sep) { /***********************************************************************/ /* BDOC constructor. */ /***********************************************************************/ -BDOC::BDOC(void) : jp(NULL), base(NULL), s(NULL), len(0) +BDOC::BDOC(void *base) : BJSON(base, NULL) { + jp = NULL; + s = NULL; + len = 0; pty[0] = pty[1] = pty[2] = true; } // end of BDOC constructor -/***********************************************************************/ -/* Program for sub-allocating Bson structures. */ -/***********************************************************************/ -void* BDOC::BsonSubAlloc(PGLOBAL g, size_t size) { - PPOOLHEADER pph; /* Points on area header. */ - void* memp = g->Sarea; - - size = ((size + 3) / 4) * 4; /* Round up size to multiple of 4 */ - pph = (PPOOLHEADER)memp; - - xtrc(16, "SubAlloc in %p size=%zd used=%zd free=%zd\n", - memp, size, pph->To_Free, pph->FreeBlk); - - if (size > pph->FreeBlk) { /* Not enough memory left in pool */ - sprintf(g->Message, - "Not enough memory for request of %zd (used=%zd free=%zd)", - size, pph->To_Free, pph->FreeBlk); - xtrc(1, "BsonSubAlloc: %s\n", g->Message); - throw(1234); - } /* endif size OS32 code */ - - // Do the suballocation the simplest way - memp = MakePtr(memp, pph->To_Free); /* Points to suballocated block */ - pph->To_Free += size; /* New offset of pool free block */ - pph->FreeBlk -= size; /* New size of pool free block */ - xtrc(16, "Done memp=%p used=%zd free=%zd\n", - memp, pph->To_Free, pph->FreeBlk); - return memp; -} /* end of BsonSubAlloc */ - - - /***********************************************************************/ /* Parse a json string. */ /* Note: when pretty is not known, the caller set pretty to 3. */ @@ -133,9 +102,10 @@ void* BDOC::BsonSubAlloc(PGLOBAL g, size_t size) { PBVAL BDOC::ParseJson(PGLOBAL g, char* js, size_t lng, int* ptyp, bool* comma) { int i, pretty = (ptyp) ? 
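// Sketched in isolation, the construct this commit backs out: an enum with an
// explicit underlying type (a "based enum") can be forward-declared, as the
// json.h hunk of the previous commit does, and keeps a BVAL-like Type field two
// bytes wide; the commit message states that most of the targeted gcc builds
// did not accept it. All names below are illustrative stand-ins.
enum BasedTyp : short;                   // opaque declaration, legal only when
                                         // the underlying type is fixed
struct BasedVal {
  unsigned int To_Val;                   // offset to the value
  short        Nd;                       // number of decimals
  BasedTyp     Type;                     // stays 2 bytes wide thanks to ": short"
  unsigned int Next;                     // offset to the next value
};

enum BasedTyp : short { BT_NULL = 0, BT_STRG, BT_INTG };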
*ptyp : 3; bool b = false; - PBVAL bvp = NULL; - xtrc(1, "ParseJson: s=%.10s len=%zd\n", s, len); + s = js; + len = lng; + xtrc(1, "BDOC::ParseJson: s=%.10s len=%zd\n", s, len); if (!s || !len) { strcpy(g->Message, "Void JSON object"); @@ -147,30 +117,26 @@ PBVAL BDOC::ParseJson(PGLOBAL g, char* js, size_t lng, int* ptyp, bool* comma) { if (s[0] == '[' && (s[1] == '\n' || (s[1] == '\r' && s[2] == '\n'))) pty[0] = false; - s = js; - len = lng; - try { - bvp = (PBVAL)PlugSubAlloc(g, NULL, sizeof(BVAL)); - bvp->Type = TYPE_UNKNOWN; - base = bvp; + Bvp = SubAllocVal(g); + Bvp->Type = TYPE_UNKNOWN; for (i = 0; i < len; i++) switch (s[i]) { case '[': - if (bvp->Type != TYPE_UNKNOWN) - bvp->To_Val = ParseAsArray(g, i, pretty, ptyp); + if (Bvp->Type != TYPE_UNKNOWN) + Bvp->To_Val = ParseAsArray(g, i, pretty, ptyp); else - bvp->To_Val = ParseArray(g, ++i); + Bvp->To_Val = ParseArray(g, ++i); - bvp->Type = TYPE_JAR; + Bvp->Type = TYPE_JAR; break; case '{': - if (bvp->Type != TYPE_UNKNOWN) { - bvp->To_Val = ParseAsArray(g, i, pretty, ptyp); - bvp->Type = TYPE_JAR; - } else if ((bvp->To_Val = ParseObject(g, ++i))) - bvp->Type = TYPE_JOB; + if (Bvp->Type != TYPE_UNKNOWN) { + Bvp->To_Val = ParseAsArray(g, i, pretty, ptyp); + Bvp->Type = TYPE_JAR; + } else if ((Bvp->To_Val = ParseObject(g, ++i))) + Bvp->Type = TYPE_JOB; else throw 2; @@ -181,7 +147,7 @@ PBVAL BDOC::ParseJson(PGLOBAL g, char* js, size_t lng, int* ptyp, bool* comma) { case '\r': break; case ',': - if (bvp->Type != TYPE_UNKNOWN && (pretty == 1 || pretty == 3)) { + if (Bvp->Type != TYPE_UNKNOWN && (pretty == 1 || pretty == 3)) { if (comma) *comma = true; @@ -201,18 +167,18 @@ PBVAL BDOC::ParseJson(PGLOBAL g, char* js, size_t lng, int* ptyp, bool* comma) { } // endif b default: - if (bvp->Type != TYPE_UNKNOWN) { - bvp->To_Val = ParseAsArray(g, i, pretty, ptyp); - bvp->Type = TYPE_JAR; - } else if ((bvp->To_Val = MakeOff(base, ParseValue(g, i)))) - bvp->Type = TYPE_JVAL; + if (Bvp->Type != TYPE_UNKNOWN) { + Bvp->To_Val = ParseAsArray(g, i, pretty, ptyp); + Bvp->Type = TYPE_JAR; + } else if ((Bvp->To_Val = MOF(ParseValue(g, i)))) + Bvp->Type = TYPE_JVAL; else throw 4; break; }; // endswitch s[i] - if (bvp->Type == TYPE_UNKNOWN) + if (Bvp->Type == TYPE_UNKNOWN) sprintf(g->Message, "Invalid Json string '%.*s'", MY_MIN((int)len, 50), s); else if (ptyp && pretty == 3) { *ptyp = 3; // Not recognized pretty @@ -228,13 +194,13 @@ PBVAL BDOC::ParseJson(PGLOBAL g, char* js, size_t lng, int* ptyp, bool* comma) { } catch (int n) { if (trace(1)) htrc("Exception %d: %s\n", n, g->Message); - bvp = NULL; + Bvp = NULL; } catch (const char* msg) { strcpy(g->Message, msg); - bvp = NULL; + Bvp = NULL; } // end catch - return bvp; + return Bvp; } // end of ParseJson /***********************************************************************/ @@ -280,7 +246,7 @@ OFFSET BDOC::ParseArray(PGLOBAL g, int& i) { throw 1; } // endif level - return MakeOff(base, vlp); + return MOF(firstvlp); case '\n': if (!b) pty[0] = pty[1] = false; @@ -294,7 +260,7 @@ OFFSET BDOC::ParseArray(PGLOBAL g, int& i) { throw 1; } else if (lastvlp) { vlp = ParseValue(g, i); - lastvlp->Next = MakeOff(base, vlp); + lastvlp->Next = MOF(vlp); lastvlp = vlp; } else firstvlp = lastvlp = ParseValue(g, i); @@ -305,25 +271,12 @@ OFFSET BDOC::ParseArray(PGLOBAL g, int& i) { if (b) { // Case of Pretty == 0 - return MakeOff(base, vlp); + return MOF(firstvlp); } // endif b throw ("Unexpected EOF in array"); } // end of ParseArray -/***********************************************************************/ -/* 
Sub-allocate and initialize a BPAIR. */ -/***********************************************************************/ -PBPR BDOC::SubAllocPair(PGLOBAL g, OFFSET key) -{ - PBPR bpp = (PBPR)BsonSubAlloc(g, sizeof(BPAIR)); - - bpp->Key = key; - bpp->Vlp = 0; - bpp->Next = 0; - return bpp; -} // end of SubAllocPair - /***********************************************************************/ /* Parse a JSON Object. */ /***********************************************************************/ @@ -342,7 +295,7 @@ OFFSET BDOC::ParseObject(PGLOBAL g, int& i) { bpp = SubAllocPair(g, key); if (lastbpp) { - lastbpp->Next = MakeOff(base, bpp); + lastbpp->Next = MOF(bpp); lastbpp = bpp; } else firstbpp = lastbpp = bpp; @@ -356,7 +309,7 @@ OFFSET BDOC::ParseObject(PGLOBAL g, int& i) { break; case ':': if (level == 1) { - lastbpp->Vlp = MakeOff(base, ParseValue(g, ++i)); + lastbpp->Vlp = MOF(ParseValue(g, ++i)); level = 2; } else { sprintf(g->Message, "Unexpected ':' near %.*s", ARGS); @@ -378,7 +331,7 @@ OFFSET BDOC::ParseObject(PGLOBAL g, int& i) { throw 2; } // endif level - return MakeOff(base, firstbpp); + return MOF(firstbpp); case '\n': pty[0] = pty[1] = false; case '\r': @@ -395,20 +348,6 @@ OFFSET BDOC::ParseObject(PGLOBAL g, int& i) { throw 2; } // end of ParseObject -/***********************************************************************/ -/* Sub-allocate and initialize a BVAL. */ -/***********************************************************************/ -PBVAL BDOC::SubAllocVal(PGLOBAL g) -{ - PBVAL bvp = (PBVAL)BsonSubAlloc(g, sizeof(BVAL)); - - bvp->To_Val = 0; - bvp->Nd = 0; - bvp->Type = TYPE_UNKNOWN; - bvp->Next = 0; - return bvp; -} // end of SubAllocVal - /***********************************************************************/ /* Parse a JSON Value. 
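// A sketch of the first/last append pattern ParseArray and ParseObject use above
// while building their lists: keep the head for the value that will be returned
// and the tail for constant-time appends. Plain pointers stand in for the
// offset links of the real code.
struct DemoItem { DemoItem* Next; int N; };

struct DemoListBuilder {
  DemoItem* first = nullptr;
  DemoItem* last  = nullptr;

  void Append(DemoItem* ip) {
    ip->Next = nullptr;
    if (last)
      last->Next = ip;                   // link after the current tail
    else
      first = ip;                        // very first element
    last = ip;
  }
};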
*/ /***********************************************************************/ @@ -505,7 +444,7 @@ OFFSET BDOC::ParseString(PGLOBAL g, int& i) { case '"': p[n++] = 0; PlugSubAlloc(g, NULL, n); - return MakeOff(base, p); + return MOF(p); case '\\': if (++i < len) { if (s[i] == 'u') { @@ -634,7 +573,7 @@ fin: double* dvp = (double*)PlugSubAlloc(g, NULL, sizeof(double)); *dvp = dv; - vlp->To_Val = MakeOff(base, dvp); + vlp->To_Val = MOF(dvp); vlp->Type = TYPE_DBL; } else { vlp->F = (float)dv; @@ -643,13 +582,13 @@ fin: vlp->Nd = nd; } else { - long long iv = strtoll(buf, NULL, 10); + longlong iv = strtoll(buf, NULL, 10); if (iv > INT_MAX32 || iv < INT_MIN32) { - long long *llp = (long long*)PlugSubAlloc(g, NULL, sizeof(long long)); + longlong *llp = (longlong*)PlugSubAlloc(g, NULL, sizeof(longlong)); *llp = iv; - vlp->To_Val = MakeOff(base, llp); + vlp->To_Val = MOF(llp); vlp->Type = TYPE_BINT; } else { vlp->N = (int)iv; @@ -668,12 +607,11 @@ err: } // end of ParseNumeric /***********************************************************************/ -/* Serialize a JSON document tree: */ +/* Serialize a BJSON document tree: */ /***********************************************************************/ PSZ BDOC::Serialize(PGLOBAL g, PBVAL bvp, char* fn, int pretty) { PSZ str = NULL; bool b = false, err = true; - JOUT* jp; FILE* fs = NULL; g->Message[0] = 0; @@ -712,7 +650,7 @@ PSZ BDOC::Serialize(PGLOBAL g, PBVAL bvp, char* fn, int pretty) { err |= SerializeObject(bvp->To_Val); break; case TYPE_JVAL: - err = SerializeValue((PBVAL)MakePtr(base, bvp->To_Val)); + err = SerializeValue(MVP(bvp->To_Val)); break; default: strcpy(g->Message, "Invalid json tree"); @@ -750,7 +688,7 @@ PSZ BDOC::Serialize(PGLOBAL g, PBVAL bvp, char* fn, int pretty) { /***********************************************************************/ bool BDOC::SerializeArray(OFFSET arp, bool b) { bool first = true; - PBVAL vp = (PBVAL)MakePtr(base, arp); + PBVAL vp = MVP(arp); if (b) { if (jp->Prty()) { @@ -764,7 +702,7 @@ bool BDOC::SerializeArray(OFFSET arp, bool b) { } else if (jp->WriteChr('[')) return true; - for (vp; vp; vp = (PBVAL)MakePtr(base, vp->Next)) { + for (vp; vp; vp = MVP(vp->Next)) { if (first) first = false; else if ((!b || jp->Prty()) && jp->WriteChr(',')) @@ -780,7 +718,7 @@ bool BDOC::SerializeArray(OFFSET arp, bool b) { if (SerializeValue(vp)) return true; - } // endfor i + } // endfor vp if (b && jp->Prty() == 1 && jp->WriteStr(EL)) return true; @@ -793,22 +731,22 @@ bool BDOC::SerializeArray(OFFSET arp, bool b) { /***********************************************************************/ bool BDOC::SerializeObject(OFFSET obp) { bool first = true; - PBPR prp = (PBPR)MakePtr(base, obp); + PBPR prp = MPP(obp); if (jp->WriteChr('{')) return true; - for (prp; prp; prp = (PBPR)MakePtr(base, prp->Next)) { + for (prp; prp; prp = MPP(prp->Next)) { if (first) first = false; else if (jp->WriteChr(',')) return true; if (jp->WriteChr('"') || - jp->WriteStr((const char*)MakePtr(base, prp->Key)) || + jp->WriteStr(MZP(prp->Key)) || jp->WriteChr('"') || jp->WriteChr(':') || - SerializeValue((PBVAL)MakePtr(base, prp->Vlp))) + SerializeValue(MVP(prp->Vlp))) return true; } // endfor i @@ -831,18 +769,18 @@ bool BDOC::SerializeValue(PBVAL jvp) { return jp->WriteStr(jvp->B ? 
"true" : "false"); case TYPE_STRG: case TYPE_DTM: - return jp->Escape((const char*)MakePtr(base, jvp->To_Val)); + return jp->Escape(MZP(jvp->To_Val)); case TYPE_INTG: sprintf(buf, "%d", jvp->N); return jp->WriteStr(buf); case TYPE_BINT: - sprintf(buf, "%lld", *(long long*)MakePtr(base, jvp->To_Val)); + sprintf(buf, "%lld", *(longlong*)MakePtr(Base, jvp->To_Val)); return jp->WriteStr(buf); case TYPE_FLOAT: sprintf(buf, "%.*f", jvp->Nd, jvp->F); return jp->WriteStr(buf); case TYPE_DBL: - sprintf(buf, "%.*lf", jvp->Nd, *(double*)MakePtr(base, jvp->To_Val)); + sprintf(buf, "%.*lf", jvp->Nd, *(double*)MakePtr(Base, jvp->To_Val)); return jp->WriteStr(buf); case TYPE_NULL: return jp->WriteStr("null"); @@ -854,84 +792,139 @@ bool BDOC::SerializeValue(PBVAL jvp) { return true; } // end of SerializeValue -#if 0 -/* -------------------------- Class JOBJECT -------------------------- */ +/* --------------------------- Class BJSON --------------------------- */ + +/***********************************************************************/ +/* Program for sub-allocating Bjson structures. */ +/***********************************************************************/ +void* BJSON::BsonSubAlloc(PGLOBAL g, size_t size) +{ + PPOOLHEADER pph; /* Points on area header. */ + void* memp = g->Sarea; + + size = ((size + 3) / 4) * 4; /* Round up size to multiple of 4 */ + pph = (PPOOLHEADER)memp; + + xtrc(16, "SubAlloc in %p size=%zd used=%zd free=%zd\n", + memp, size, pph->To_Free, pph->FreeBlk); + + if (size > pph->FreeBlk) { /* Not enough memory left in pool */ + sprintf(g->Message, + "Not enough memory for request of %zd (used=%zd free=%zd)", + size, pph->To_Free, pph->FreeBlk); + xtrc(1, "BsonSubAlloc: %s\n", g->Message); + throw(1234); + } /* endif size OS32 code */ + + // Do the suballocation the simplest way + memp = MakePtr(memp, pph->To_Free); /* Points to suballocated block */ + pph->To_Free += size; /* New offset of pool free block */ + pph->FreeBlk -= size; /* New size of pool free block */ + xtrc(16, "Done memp=%p used=%zd free=%zd\n", + memp, pph->To_Free, pph->FreeBlk); + return memp; +} /* end of BsonSubAlloc */ + +/* ------------------------ Bobject functions ------------------------ */ + +/***********************************************************************/ +/* Sub-allocate and initialize a BPAIR. */ +/***********************************************************************/ +PBPR BJSON::SubAllocPair(PGLOBAL g, OFFSET key, OFFSET val) +{ + PBPR bpp = (PBPR)BsonSubAlloc(g, sizeof(BPAIR)); + + bpp->Key = key; + bpp->Vlp = val; + bpp->Next = 0; + return bpp; +} // end of SubAllocPair /***********************************************************************/ /* Return the number of pairs in this object. */ /***********************************************************************/ -int JOBJECT::GetSize(bool b) { +int BJSON::GetObjectSize(PBPR bop, bool b) +{ int n = 0; - for (PJPR jpp = First; jpp; jpp = jpp->Next) + for (PBPR brp = bop; brp; brp = MPP(brp->Next)) // If b return only non null pairs - if (!b || jpp->Val && !jpp->Val->IsNull()) + if (!b || (brp->Vlp && (MVP(brp->Vlp))->Type != TYPE_NULL)) n++; return n; -} // end of GetSize +} // end of GetObjectSize /***********************************************************************/ -/* Add a new pair to an Object. */ +/* Add a new pair to an Object and return it. 
*/ /***********************************************************************/ -PJPR JOBJECT::AddPair(PGLOBAL g, PCSZ key) { - PJPR jpp = (PJPR)PlugSubAlloc(g, NULL, sizeof(JPAIR)); +PBPR BJSON::AddPair(PGLOBAL g, PBPR bop, PSZ key, OFFSET val) +{ + PBPR brp, nrp = SubAllocPair(g, MOF(key), val); - jpp->Key = key; - jpp->Next = NULL; - jpp->Val = NULL; + if (bop) { + for (brp = bop; brp->Next; brp = MPP(brp->Next)); - if (Last) - Last->Next = jpp; - else - First = jpp; + brp->Next = MOF(nrp); + } else + bop = nrp; - Last = jpp; - return jpp; + return bop; } // end of AddPair /***********************************************************************/ -/* Return all keys as an array. */ +/* Return all object keys as an array. */ /***********************************************************************/ -PJAR JOBJECT::GetKeyList(PGLOBAL g) { - PJAR jarp = new(g) JARRAY(); +PBVAL BJSON::GetKeyList(PGLOBAL g, PBPR bop) +{ + PBVAL bvp, lvp, fvp = NULL; - for (PJPR jpp = First; jpp; jpp = jpp->Next) - jarp->AddArrayValue(g, new(g) JVALUE(g, jpp->Key)); + for (PBPR brp = bop; brp; brp = MPP(brp->Next)) + if (fvp) { + bvp = SubAllocVal(g, brp->Key, TYPE_STRG); + lvp->Next = MOF(bvp); + lvp = bvp; + } else + lvp = fvp = SubAllocVal(g, brp->Key, TYPE_STRG); - jarp->InitArray(g); - return jarp; + return fvp; } // end of GetKeyList /***********************************************************************/ -/* Return all values as an array. */ +/* Return all object values as an array. */ /***********************************************************************/ -PJAR JOBJECT::GetValList(PGLOBAL g) { - PJAR jarp = new(g) JARRAY(); +PBVAL BJSON::GetObjectValList(PGLOBAL g, PBPR bop) +{ + PBVAL bvp, lvp, fvp = NULL; - for (PJPR jpp = First; jpp; jpp = jpp->Next) - jarp->AddArrayValue(g, jpp->Val); + for (PBPR brp = bop; brp; brp = MPP(brp->Next)) + if (fvp) { + bvp = DupVal(g, MVP(brp->Vlp)); + lvp->Next = MOF(bvp); + lvp = bvp; + } else + lvp = fvp = DupVal(g, MVP(brp->Vlp)); - jarp->InitArray(g); - return jarp; -} // end of GetValList + return fvp; +} // end of GetObjectValList /***********************************************************************/ /* Get the value corresponding to the given key. */ /***********************************************************************/ -PJVAL JOBJECT::GetKeyValue(const char* key) { - for (PJPR jp = First; jp; jp = jp->Next) - if (!strcmp(jp->Key, key)) - return jp->Val; +PBVAL BJSON::GetKeyValue(PBPR bop, PSZ key) +{ + for (PBPR brp = bop; brp; brp = MPP(brp->Next)) + if (!strcmp(MZP(brp->Key), key)) + return MVP(brp->Vlp); return NULL; -} // end of GetValue; +} // end of GetKeyValue; /***********************************************************************/ /* Return the text corresponding to all keys (XML like). 
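A BJSON object is a chain of BPAIR nodes linked by Next offsets, so AddPair appends with a linear walk and GetKeyValue is a linear key scan. A short usage sketch, assuming the same initialized PGLOBAL g as above (names are illustrative):

    PBJSON bsp = new(g) BJSON(g->Sarea);
    PBVAL  qty = bsp->SubAllocVal(g);
    bsp->SetInteger(qty, 44);                          // {"qty": 44}
    PBPR bop = bsp->AddPair(g, NULL, (PSZ)PlugDup(g, "qty"),
                            MakeOff(g->Sarea, qty));
    PBVAL found = bsp->GetKeyValue(bop, (PSZ)"qty");   // linear scan on Key
    int   npair = bsp->GetObjectSize(bop);             // 1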
*/ /***********************************************************************/ -PSZ JOBJECT::GetText(PGLOBAL g, PSTRG text) { - if (First) { +PSZ BJSON::GetObjectText(PGLOBAL g, PBPR bop, PSTRG text) { + if (bop) { bool b; if (!text) { @@ -944,7 +937,8 @@ PSZ JOBJECT::GetText(PGLOBAL g, PSTRG text) { b = false; } // endif text - if (b && !First->Next && !strcmp(First->Key, "$date")) { +#if 0 + if (b && !bop->Next && !strcmp(MZP(bop->Key), "$date")) { int i; PSZ s; @@ -964,228 +958,211 @@ PSZ JOBJECT::GetText(PGLOBAL g, PSTRG text) { } // endif text - } else for (PJPR jp = First; jp; jp = jp->Next) { - jp->Val->GetText(g, text); + } else +#endif // 0 - if (jp->Next) + for (PBPR brp = bop; brp; brp = MPP(brp->Next)) { + GetValueText(g, MVP(brp->Vlp), text); + + if (brp->Next) text->Append(' '); - } // endfor jp + } // endfor brp if (b) { text->Trim(); return text->GetStr(); } // endif b - } // endif First + } // endif bop return NULL; -} // end of GetText; - -/***********************************************************************/ -/* Merge two objects. */ -/***********************************************************************/ -bool JOBJECT::Merge(PGLOBAL g, PJSON jsp) { - if (jsp->GetType() != TYPE_JOB) { - strcpy(g->Message, "Second argument is not an object"); - return true; - } // endif Type - - PJOB jobp = (PJOB)jsp; - - for (PJPR jpp = jobp->First; jpp; jpp = jpp->Next) - SetKeyValue(g, jpp->Val, jpp->Key); - - return false; -} // end of Marge; +} // end of GetObjectText; /***********************************************************************/ /* Set or add a value corresponding to the given key. */ /***********************************************************************/ -void JOBJECT::SetKeyValue(PGLOBAL g, PJVAL jvp, PCSZ key) { - PJPR jp; +PBPR BJSON::SetKeyValue(PGLOBAL g, PBPR bop, OFFSET bvp, PSZ key) +{ + PBPR brp = bop, prp = NULL; - for (jp = First; jp; jp = jp->Next) - if (!strcmp(jp->Key, key)) { - jp->Val = jvp; - break; - } // endif key + if (brp) { + for (brp = bop; brp; brp = MPP(brp->Next)) + if (!strcmp(MZP(brp->Key), key)) { + brp->Vlp = bvp; + break; + } else + prp = brp; - if (!jp) { - jp = AddPair(g, key); - jp->Val = jvp; - } // endif jp + if (!brp) + prp->Vlp = MOF(SubAllocPair(g, MOF(key), bvp)); -} // end of SetValue + } else + bop = SubAllocPair(g, MOF(key), bvp); + + // Return the first pair of this object + return bop; +} // end of SetKeyValue + +/***********************************************************************/ +/* Merge two objects. */ +/***********************************************************************/ +PBPR BJSON::MergeObject(PGLOBAL g, PBPR bop1, PBPR bop2) +{ + if (bop1) + for (PBPR brp = bop2; brp; brp = MPP(brp->Next)) + SetKeyValue(g, bop1, brp->Vlp, MZP(brp->Key)); + + else + bop1 = bop2; + + return bop1; +} // end of MergeObject; /***********************************************************************/ /* Delete a value corresponding to the given key. 
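SetKeyValue either overwrites the value of an existing key or adds a new pair, and it returns the first pair of the object so that an initially empty object can be created by the call itself; MergeObject simply replays one object's pairs onto another with that primitive. A sketch under the same assumptions as the earlier ones (names and values are illustrative):

    PBJSON bsp = new(g) BJSON(g->Sarea);
    PBVAL  v1 = bsp->SubAllocVal(g);
    PBVAL  v2 = bsp->SubAllocVal(g);
    bsp->SetInteger(v1, 1);
    bsp->SetInteger(v2, 2);

    PSZ  key = (PSZ)PlugDup(g, "n");
    PBPR bop = bsp->SetKeyValue(g, NULL, MakeOff(g->Sarea, v1), key); // adds n:1
    bop      = bsp->SetKeyValue(g, bop,  MakeOff(g->Sarea, v2), key); // now n:2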
*/ /***********************************************************************/ -void JOBJECT::DeleteKey(PCSZ key) { - PJPR jp, * pjp = &First; +PBPR BJSON::DeleteKey(PBPR bop, PCSZ key) +{ + PBPR brp, pbrp = NULL; + + for (brp = bop; brp; brp = MPP(brp->Next)) + if (!strcmp(MZP(brp->Key), key)) { + if (pbrp) { + pbrp->Next = brp->Next; + return bop; + } else + return MPP(brp->Next); - for (jp = First; jp; jp = jp->Next) - if (!strcmp(jp->Key, key)) { - *pjp = jp->Next; - break; } else - pjp = &jp->Next; + pbrp = brp; + return bop; } // end of DeleteKey /***********************************************************************/ /* True if void or if all members are nulls. */ /***********************************************************************/ -bool JOBJECT::IsNull(void) { - for (PJPR jp = First; jp; jp = jp->Next) - if (!jp->Val->IsNull()) +bool BJSON::IsObjectNull(PBPR bop) +{ + for (PBPR brp = bop; brp; brp = MPP(brp->Next)) + if (brp->Vlp && (MVP(brp->Vlp))->Type != TYPE_NULL) return false; return true; -} // end of IsNull +} // end of IsObjectNull -/* -------------------------- Class JARRAY --------------------------- */ - -/***********************************************************************/ -/* JARRAY constructor. */ -/***********************************************************************/ -JARRAY::JARRAY(void) : JSON() { - Type = TYPE_JAR; - Size = 0; - Alloc = 0; - First = Last = NULL; - Mvals = NULL; -} // end of JARRAY constructor +/* ------------------------- Barray functions ------------------------ */ /***********************************************************************/ /* Return the number of values in this object. */ /***********************************************************************/ -int JARRAY::GetSize(bool b) { - if (b) { - // Return only non null values - int n = 0; +int BJSON::GetArraySize(PBVAL bap, bool b) +{ + int n = 0; - for (PJVAL jvp = First; jvp; jvp = jvp->Next) - if (!jvp->IsNull()) - n++; + for (PBVAL bvp = bap; bvp; bvp = MVP(bvp->Next)) + // If b, return only non null values + if (!b || bvp->Type != TYPE_NULL) + n++; - return n; - } else - return Size; - -} // end of GetSize - -/***********************************************************************/ -/* Make the array of values from the values list. */ -/***********************************************************************/ -void JARRAY::InitArray(PGLOBAL g) { - int i; - PJVAL jvp, * pjvp = &First; - - for (Size = 0, jvp = First; jvp; jvp = jvp->Next) - if (!jvp->Del) - Size++; - - if (Size > Alloc) { - // No need to realloc after deleting values - Mvals = (PJVAL*)PlugSubAlloc(g, NULL, Size * sizeof(PJVAL)); - Alloc = Size; - } // endif Size - - for (i = 0, jvp = First; jvp; jvp = jvp->Next) - if (!jvp->Del) { - Mvals[i++] = jvp; - pjvp = &jvp->Next; - Last = jvp; - } else - *pjvp = jvp->Next; - -} // end of InitArray + return n; +} // end of GetArraySize /***********************************************************************/ /* Get the Nth value of an Array. */ /***********************************************************************/ -PJVAL JARRAY::GetArrayValue(int i) { - if (Mvals && i >= 0 && i < Size) - return Mvals[i]; - else - return NULL; -} // end of GetValue +PBVAL BJSON::GetArrayValue(PBVAL bap, int n) +{ + int i = 0; + + for (PBVAL bvp = bap; bvp; bvp = MVP(bvp->Next)) + if (i == n) + return bvp; + else + i++; + + return NULL; +} // end of GetArrayValue /***********************************************************************/ /* Add a Value to the Array Value list. 
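Unlike the JARRAY class it replaces, a BJSON array keeps no Mvals pointer table and needs no InitArray step: it is a plain linked list of BVAL nodes, so GetArrayValue(n) walks the Next offsets and costs O(n). A sketch with the same assumptions as before:

    PBJSON bsp = new(g) BJSON(g->Sarea);
    PBVAL  bap = NULL;                         // an empty array

    for (int i = 0; i < 3; i++) {
      PBVAL vp = bsp->SubAllocVal(g);
      bsp->SetInteger(vp, i * 10);             // 0, 10, 20
      bap = bsp->AddArrayValue(g, bap, vp);    // append at the tail
    } // endfor i

    PBVAL second = bsp->GetArrayValue(bap, 1); // walks Next up to index 1
    int   count  = bsp->GetArraySize(bap);     // 3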
*/ /***********************************************************************/ -PJVAL JARRAY::AddArrayValue(PGLOBAL g, PJVAL jvp, int* x) { - if (!jvp) - jvp = new(g) JVALUE; +PBVAL BJSON::AddArrayValue(PGLOBAL g, PBVAL bap, PBVAL nvp, int* x) +{ + if (!nvp) + nvp = SubAllocVal(g); - if (x) { + if (bap) { int i = 0, n = *x; - PJVAL jp, * jpp = &First; + PBVAL bvp; - for (jp = First; jp && i < n; i++, jp = *(jpp = &jp->Next)); + for (bvp = bap; bvp; bvp = MVP(bvp->Next), i++) + if (!bvp->Next || (x && i == n)) { + nvp->Next = bvp->Next; + bvp->Next = MOF(nvp); + break; + } // endif Next - (*jpp) = jvp; + } else + bap = nvp; - if (!(jvp->Next = jp)) - Last = jvp; - - } else { - if (!First) - First = jvp; - else if (Last == First) - First->Next = Last = jvp; - else - Last->Next = jvp; - - Last = jvp; - Last->Next = NULL; - } // endif x - - return jvp; -} // end of AddValue + return bap; +} // end of AddArrayValue /***********************************************************************/ /* Merge two arrays. */ /***********************************************************************/ -bool JARRAY::Merge(PGLOBAL g, PJSON jsp) { - if (jsp->GetType() != TYPE_JAR) { - strcpy(g->Message, "Second argument is not an array"); - return true; - } // endif Type +PBVAL BJSON::MergeArray(PGLOBAL g, PBVAL bap1, PBVAL bap2) +{ + if (bap1) { + for (PBVAL bvp = bap2; bvp; bvp = MVP(bvp->Next)) + AddArrayValue(g, bap1, bvp); - PJAR arp = (PJAR)jsp; + return bap1; + } else + return bap2; - for (int i = 0; i < arp->size(); i++) - AddArrayValue(g, arp->GetArrayValue(i)); - - InitArray(g); - return false; -} // end of Merge +} // end of MergeArray /***********************************************************************/ -/* Set the nth Value of the Array Value list. */ +/* Set the nth Value of the Array Value list or add it. */ /***********************************************************************/ -bool JARRAY::SetArrayValue(PGLOBAL g, PJVAL jvp, int n) { - int i = 0; - PJVAL jp, * jpp = &First; +PBVAL BJSON::SetArrayValue(PGLOBAL g, PBVAL bap, PBVAL nvp, int n) +{ + PBVAL bvp = bap, pvp = NULL; - for (jp = First; i < n; i++, jp = *(jpp = &jp->Next)) - if (!jp) - *jpp = jp = new(g) JVALUE; + if (bvp) { + for (int i = 0; bvp; i++, bvp = MVP(bvp->Next)) + if (i == n) { + bvp->To_Val = nvp->To_Val; + bvp->Nd = nvp->Nd; + bvp->Type = nvp->Type; + return bap; + } else + pvp = bvp; - *jpp = jvp; - jvp->Next = (jp ? jp->Next : NULL); - return false; + } // endif bap + + if (!bvp) { + bvp = DupVal(g, nvp); + + if (pvp) + pvp->Next = MOF(bvp); + else + bap = bvp; + + } // endif bvp + + return bap; } // end of SetValue /***********************************************************************/ /* Return the text corresponding to all values. 
*/ /***********************************************************************/ -PSZ JARRAY::GetText(PGLOBAL g, PSTRG text) { - if (First) { +PSZ BJSON::GetArrayText(PGLOBAL g, PBVAL bap, PSTRG text) { + if (bap) { bool b; - PJVAL jp; if (!text) { text = new(g) STRING(g, 256); @@ -1197,12 +1174,12 @@ PSZ JARRAY::GetText(PGLOBAL g, PSTRG text) { text->Append('('); b = false; - } + } // endif text - for (jp = First; jp; jp = jp->Next) { - jp->GetText(g, text); + for (PBVAL bvp = bap; bvp; bvp = MVP(bvp->Next)) { + GetValueText(g, bvp, text); - if (jp->Next) + if (bvp->Next) text->Append(", "); else if (!b) text->Append(')'); @@ -1222,30 +1199,89 @@ PSZ JARRAY::GetText(PGLOBAL g, PSTRG text) { /***********************************************************************/ /* Delete a Value from the Arrays Value list. */ /***********************************************************************/ -bool JARRAY::DeleteValue(int n) { - PJVAL jvp = GetArrayValue(n); +PBVAL BJSON::DeleteValue(PBVAL bap, int n) +{ + PBVAL bvp = bap, pvp = NULL; - if (jvp) { - jvp->Del = true; - return false; - } else - return true; + if (bvp) + for (int i = 0; bvp; i++, bvp = MVP(bvp->Next)) + if (i == n) { + if (pvp) + pvp->Next = bvp->Next; + else + bap = bvp; + break; + } // endif i + + return bap; } // end of DeleteValue /***********************************************************************/ /* True if void or if all members are nulls. */ /***********************************************************************/ -bool JARRAY::IsNull(void) { - for (int i = 0; i < Size; i++) - if (!Mvals[i]->IsNull()) +bool BJSON::IsArrayNull(PBVAL bap) +{ + for (PBVAL bvp = bap; bvp; bvp = MVP(bvp->Next)) + if (bvp->Type != TYPE_NULL) return false; return true; } // end of IsNull -/* -------------------------- Class JVALUE- -------------------------- */ +/* ------------------------- Bvalue functions ------------------------ */ +/***********************************************************************/ +/* Sub-allocate and clear a BVAL. */ +/***********************************************************************/ +PBVAL BJSON::SubAllocVal(PGLOBAL g) +{ + PBVAL bvp = (PBVAL)BsonSubAlloc(g, sizeof(BVAL)); + + bvp->To_Val = 0; + bvp->Nd = 0; + bvp->Type = TYPE_UNKNOWN; + bvp->Next = 0; + return bvp; +} // end of SubAllocVal + +/***********************************************************************/ +/* Sub-allocate and initialize a BVAL as string. */ +/***********************************************************************/ +PBVAL BJSON::SubAllocVal(PGLOBAL g, OFFSET toval, JTYP type, short nd) +{ + PBVAL bvp = (PBVAL)BsonSubAlloc(g, sizeof(BVAL)); + + bvp->To_Val = toval; + bvp->Nd = nd; + bvp->Type = type; + bvp->Next = 0; + return bvp; +} // end of SubAllocVal + +/***********************************************************************/ +/* Allocate a BVALUE with a given string or numeric value. */ +/***********************************************************************/ +PBVAL BJSON::SubAllocVal(PGLOBAL g, PVAL valp) +{ + PBVAL vlp = SubAllocVal(g); + SetValue(g, vlp, valp); + vlp->Next = NULL; + return vlp; +} // end of SubAllocVal + +/***********************************************************************/ +/* Sub-allocate and initialize a BVAL from another BVAL. 
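The SubAllocVal forms cover the two ways a BVAL carries its data: small scalars (int, float, bool) live inline in the union, while strings, doubles and big integers are stored elsewhere in the same area and referenced through the To_Val offset. A sketch under the usual assumptions (the literal values and names are illustrative):

    PBJSON bsp = new(g) BJSON(g->Sarea);

    PBVAL ivp = bsp->SubAllocVal(g);           // empty node, TYPE_UNKNOWN
    bsp->SetInteger(ivp, 7);                   // inline: ivp->N = 7, TYPE_INTG

    PSZ   s   = (PSZ)PlugDup(g, "hello");      // string kept in the same area
    PBVAL svp = bsp->SubAllocVal(g, MakeOff(g->Sarea, s), TYPE_STRG);
    // svp->To_Val is only an offset; inside the class MZP(svp->To_Val)
    // rebuilds the char pointer from Base.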
*/ +/***********************************************************************/ +PBVAL BJSON::DupVal(PGLOBAL g, PBVAL bvlp) { + PBVAL bvp = (PBVAL)BsonSubAlloc(g, sizeof(BVAL)); + + *bvp = *bvlp; + bvp->Next = 0; + return bvp; +} // end of DupVal + +#if 0 /***********************************************************************/ /* Constructor for a JVALUE. */ /***********************************************************************/ @@ -1276,7 +1312,6 @@ JVALUE::JVALUE(PJSON jsp) : JSON() { Type = TYPE_JVAL; } // end of JVALUE constructor -#if 0 /***********************************************************************/ /* Constructor for a JVALUE with a given string or numeric value. */ /***********************************************************************/ @@ -1289,18 +1324,7 @@ JVALUE::JVALUE(PGLOBAL g, PVL vlp) : JSON() { } // end of JVALUE constructor #endif // 0 -/***********************************************************************/ -/* Constructor for a JVALUE with a given string or numeric value. */ -/***********************************************************************/ -JVALUE::JVALUE(PGLOBAL g, PVAL valp) : JSON() { - Jsp = NULL; - //Val = NULL; - SetValue(g, valp); - Next = NULL; - Del = false; - Type = TYPE_JVAL; -} // end of JVALUE constructor - +#if 0 /***********************************************************************/ /* Constructor for a given string. */ /***********************************************************************/ @@ -1339,13 +1363,31 @@ JTYP JVALUE::GetValType(void) { return DataType; } // end of GetValType +#endif // 0 + +/***********************************************************************/ +/* Return the size of value's value. */ +/***********************************************************************/ +int BJSON::GetSize(PBVAL vlp, bool b) +{ + switch (vlp->Type) { + case TYPE_JAR: + return GetArraySize(MVP(vlp->To_Val)); + case TYPE_JOB: + return GetObjectSize(MPP(vlp->To_Val)); + default: + return 1; + } // enswitch Type + +} // end of GetSize /***********************************************************************/ /* Return the Value's Object value. */ /***********************************************************************/ -PJOB JVALUE::GetObject(void) { - if (DataType == TYPE_JSON && Jsp->GetType() == TYPE_JOB) - return (PJOB)Jsp; +PBPR BJSON::GetObject(PBVAL vlp) +{ + if (vlp->Type == TYPE_JOB) + return MPP(vlp->To_Val); return NULL; } // end of GetObject @@ -1353,24 +1395,41 @@ PJOB JVALUE::GetObject(void) { /***********************************************************************/ /* Return the Value's Array value. */ /***********************************************************************/ -PJAR JVALUE::GetArray(void) { - if (DataType == TYPE_JSON && Jsp->GetType() == TYPE_JAR) - return (PJAR)Jsp; +PBVAL BJSON::GetArray(PBVAL vlp) +{ + if (vlp->Type == TYPE_JAR) + return MVP(vlp->To_Val); return NULL; } // end of GetArray /***********************************************************************/ -/* Return the Value's as a Value class. */ +/* Return the Value's as a Value struct. */ /***********************************************************************/ -PVAL JVALUE::GetValue(PGLOBAL g) { - PVAL valp = NULL; +PVAL BJSON::GetValue(PGLOBAL g, PBVAL vp) +{ + double d; + PVAL valp; + PBVAL vlp = vp->Type == TYPE_JVAL ? 
MVP(vp->To_Val) : vp; - if (DataType != TYPE_JSON) - if (DataType == TYPE_STRG) - valp = AllocateValue(g, Strp, DataType, Nd); - else - valp = AllocateValue(g, &LLn, DataType, Nd); + switch (vlp->Type) { + case TYPE_STRG: + case TYPE_DBL: + case TYPE_BINT: + valp = AllocateValue(g, MP(vlp->To_Val), vlp->Type, vlp->Nd); + break; + case TYPE_INTG: + case TYPE_BOOL: + valp = AllocateValue(g, vlp, vlp->Type); + break; + case TYPE_FLOAT: + d = (double)vlp->F; + valp = AllocateValue(g, &d, TYPE_DOUBLE, vlp->Nd); + break; + default: + valp = NULL; + break; + } // endswitch Type return valp; } // end of GetValue @@ -1378,16 +1437,30 @@ PVAL JVALUE::GetValue(PGLOBAL g) { /***********************************************************************/ /* Return the Value's Integer value. */ /***********************************************************************/ -int JVALUE::GetInteger(void) { - int n; +int BJSON::GetInteger(PBVAL vp) { + int n; + PBVAL vlp = (vp->Type == TYPE_JVAL) ? MVP(vp->To_Val) : vp; - switch (DataType) { - case TYPE_INTG: n = N; break; - case TYPE_DBL: n = (int)F; break; + switch (vlp->Type) { + case TYPE_INTG: + n = vlp->N; + break; + case TYPE_FLOAT: + n = (int)vlp->F; + break; case TYPE_DTM: - case TYPE_STRG: n = atoi(Strp); break; - case TYPE_BOOL: n = (B) ? 1 : 0; break; - case TYPE_BINT: n = (int)LLn; break; + case TYPE_STRG: + n = atoi(MZP(vlp->To_Val)); + break; + case TYPE_BOOL: + n = (vlp->B) ? 1 : 0; + break; + case TYPE_BINT: + n = (int)*(longlong*)MP(vlp->To_Val); + break; + case TYPE_DBL: + n = (int)*(double*)MP(vlp->To_Val); + break; default: n = 0; } // endswitch Type @@ -1398,16 +1471,30 @@ int JVALUE::GetInteger(void) { /***********************************************************************/ /* Return the Value's Big integer value. */ /***********************************************************************/ -long long JVALUE::GetBigint(void) { - long long lln; +longlong BJSON::GetBigint(PBVAL vp) { + longlong lln; + PBVAL vlp = (vp->Type == TYPE_JVAL) ? MVP(vp->To_Val) : vp; - switch (DataType) { - case TYPE_BINT: lln = LLn; break; - case TYPE_INTG: lln = (long long)N; break; - case TYPE_DBL: lln = (long long)F; break; + switch (vlp->Type) { + case TYPE_BINT: + lln = *(longlong*)MP(vlp->To_Val); + break; + case TYPE_INTG: + lln = (longlong)vlp->N; + break; + case TYPE_FLOAT: + lln = (longlong)vlp->F; + break; + case TYPE_DBL: + lln = (longlong)*(double*)MP(vlp->To_Val); + break; case TYPE_DTM: - case TYPE_STRG: lln = atoll(Strp); break; - case TYPE_BOOL: lln = (B) ? 1 : 0; break; + case TYPE_STRG: + lln = atoll(MZP(vlp->To_Val)); + break; + case TYPE_BOOL: + lln = (vlp->B) ? 1 : 0; + break; default: lln = 0; } // endswitch Type @@ -1418,16 +1505,31 @@ long long JVALUE::GetBigint(void) { /***********************************************************************/ /* Return the Value's Double value. */ /***********************************************************************/ -double JVALUE::GetFloat(void) { +double BJSON::GetDouble(PBVAL vp) +{ double d; + PBVAL vlp = (vp->Type == TYPE_JVAL) ? 
MVP(vp->To_Val) : vp; - switch (DataType) { - case TYPE_DBL: d = F; break; - case TYPE_BINT: d = (double)LLn; break; - case TYPE_INTG: d = (double)N; break; + switch (vlp->Type) { + case TYPE_DBL: + d = *(double*)MP(vlp->To_Val); + break; + case TYPE_BINT: + d = (double)*(longlong*)MP(vlp->To_Val); + break; + case TYPE_INTG: + d = (double)vlp->N; + break; + case TYPE_FLOAT: + d = (double)vlp->F; + break; case TYPE_DTM: - case TYPE_STRG: d = atof(Strp); break; - case TYPE_BOOL: d = (B) ? 1.0 : 0.0; break; + case TYPE_STRG: + d = atof(MZP(vlp->To_Val)); + break; + case TYPE_BOOL: + d = (vlp->B) ? 1.0 : 0.0; + break; default: d = 0.0; } // endswitch Type @@ -1438,47 +1540,53 @@ double JVALUE::GetFloat(void) { /***********************************************************************/ /* Return the Value's String value. */ /***********************************************************************/ -PSZ JVALUE::GetString(PGLOBAL g, char* buff) { +PSZ BJSON::GetString(PGLOBAL g, PBVAL vp, char* buff) +{ char buf[32]; char* p = (buff) ? buff : buf; + PBVAL vlp = (vp->Type == TYPE_JVAL) ? MVP(vp->To_Val) : vp; - switch (DataType) { + switch (vlp->Type) { case TYPE_DTM: case TYPE_STRG: - p = Strp; + p = MZP(vlp->To_Val); break; case TYPE_INTG: - sprintf(p, "%d", N); + sprintf(p, "%d", vlp->N); + break; + case TYPE_FLOAT: + sprintf(p, "%.*f", vlp->Nd, vlp->F); break; case TYPE_BINT: - sprintf(p, "%lld", LLn); + sprintf(p, "%lld", *(longlong*)MP(vlp->To_Val)); break; case TYPE_DBL: - sprintf(p, "%.*lf", Nd, F); + sprintf(p, "%.*lf", vlp->Nd, *(double*)MP(vlp->To_Val)); break; case TYPE_BOOL: - p = (char*)((B) ? "true" : "false"); + p = (PSZ)((vlp->B) ? "true" : "false"); break; case TYPE_NULL: - p = (char*)"null"; + p = (PSZ)"null"; break; default: p = NULL; } // endswitch Type - - return (p == buf) ? (char*)PlugDup(g, buf) : p; + return (p == buf) ? (PSZ)PlugDup(g, buf) : p; } // end of GetString /***********************************************************************/ /* Return the Value's String value. */ /***********************************************************************/ -PSZ JVALUE::GetText(PGLOBAL g, PSTRG text) { - if (DataType == TYPE_JSON) - return Jsp->GetText(g, text); +PSZ BJSON::GetValueText(PGLOBAL g, PBVAL vlp, PSTRG text) { + if (vlp->Type == TYPE_JOB) + return GetObjectText(g, MPP(vlp->To_Val), text); + else if (vlp->Type == TYPE_JAR) + return GetArrayText(g, MVP(vlp->To_Val), text); char buff[32]; - PSZ s = (DataType == TYPE_NULL) ? NULL : GetString(g, buff); + PSZ s = (vlp->Type == TYPE_NULL) ? 
NULL : GetString(g, vlp, buff); if (s) text->Append(s); @@ -1488,60 +1596,81 @@ PSZ JVALUE::GetText(PGLOBAL g, PSTRG text) { return NULL; } // end of GetText -void JVALUE::SetValue(PJSON jsp) { - if (DataType == TYPE_JSON && jsp->GetType() == TYPE_JVAL) { - Jsp = jsp->GetJsp(); - Nd = ((PJVAL)jsp)->Nd; - DataType = ((PJVAL)jsp)->DataType; - // Val = ((PJVAL)jsp)->GetVal(); - } else { - Jsp = jsp; - DataType = TYPE_JSON; - } // endif Type +void BJSON::SetValueObj(PBVAL vlp, PBPR bop) +{ + vlp->To_Val = MOF(bop); + vlp->Type = TYPE_JOB; +} // end of SetValueObj; +void BJSON::SetValueArr(PBVAL vlp, PBVAL bap) +{ + vlp->To_Val = MOF(bap); + vlp->Type = TYPE_JAR; } // end of SetValue; -void JVALUE::SetValue(PGLOBAL g, PVAL valp) { - //if (!Val) - // Val = AllocVal(g, TYPE_VAL); +void BJSON::SetValueVal(PBVAL vlp, PBVAL vp) +{ + vlp->To_Val = vp->To_Val; + vlp->Nd = vp->Nd; + vlp->Type = vp->Type; +} // end of SetValue; +void BJSON::SetValue(PGLOBAL g, PBVAL vlp, PVAL valp) +{ if (!valp || valp->IsNull()) { - DataType = TYPE_NULL; + vlp->Type = TYPE_NULL; } else switch (valp->GetType()) { case TYPE_DATE: if (((DTVAL*)valp)->IsFormatted()) - Strp = valp->GetCharValue(); + vlp->To_Val = MOF(valp->GetCharValue()); else { char buf[32]; - Strp = PlugDup(g, valp->GetCharString(buf)); + vlp->To_Val = MOF(PlugDup(g, valp->GetCharString(buf))); } // endif Formatted - DataType = TYPE_DTM; + vlp->Type = TYPE_DTM; break; case TYPE_STRING: - Strp = valp->GetCharValue(); - DataType = TYPE_STRG; + vlp->To_Val = MOF(valp->GetCharValue()); + vlp->Type = TYPE_STRG; break; case TYPE_DOUBLE: case TYPE_DECIM: - F = valp->GetFloatValue(); + vlp->Nd = (IsTypeNum(valp->GetType())) ? valp->GetValPrec() : 0; - if (IsTypeNum(valp->GetType())) - Nd = valp->GetValPrec(); + if (vlp->Nd <= 6) { + vlp->F = (float)valp->GetFloatValue(); + vlp->Type = TYPE_FLOAT; + } else { + double *dp = (double*)PlugSubAlloc(g, NULL, sizeof(double)); + + *dp = valp->GetFloatValue(); + vlp->To_Val = MOF(dp); + vlp->Type = TYPE_DBL; + } // endif Nd - DataType = TYPE_DBL; break; case TYPE_TINY: - B = valp->GetTinyValue() != 0; - DataType = TYPE_BOOL; + vlp->B = valp->GetTinyValue() != 0; + vlp->Type = TYPE_BOOL; case TYPE_INT: - N = valp->GetIntValue(); - DataType = TYPE_INTG; + vlp->N = valp->GetIntValue(); + vlp->Type = TYPE_INTG; break; case TYPE_BIGINT: - LLn = valp->GetBigintValue(); - DataType = TYPE_BINT; + if (valp->GetBigintValue() >= INT_MIN32 && + valp->GetBigintValue() <= INT_MAX32) { + vlp->N = valp->GetIntValue(); + vlp->Type = TYPE_INTG; + } else { + longlong* llp = (longlong*)PlugSubAlloc(g, NULL, sizeof(longlong)); + + *llp = valp->GetBigintValue(); + vlp->To_Val = MOF(llp); + vlp->Type = TYPE_BINT; + } // endif BigintValue + break; default: sprintf(g->Message, "Unsupported typ %d\n", valp->GetType()); @@ -1553,49 +1682,76 @@ void JVALUE::SetValue(PGLOBAL g, PVAL valp) { /***********************************************************************/ /* Set the Value's value as the given integer. */ /***********************************************************************/ -void JVALUE::SetInteger(PGLOBAL g, int n) { - N = n; - DataType = TYPE_INTG; +void BJSON::SetInteger(PBVAL vlp, int n) +{ + vlp->N = n; + vlp->Type = TYPE_INTG; } // end of SetInteger /***********************************************************************/ /* Set the Value's Boolean value as a tiny integer. 
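SetValue compacts numbers on the way in: a DOUBLE or DECIM with six decimals or fewer is kept as an inline float (TYPE_FLOAT), a wider one gets a sub-allocated double (TYPE_DBL), and a BIGINT that fits in 32 bits is demoted to an inline TYPE_INTG. SetBigint below applies the same rule; a sketch of its two outcomes, same assumptions as before:

    PBJSON bsp = new(g) BJSON(g->Sarea);

    PBVAL v1 = bsp->SubAllocVal(g);
    bsp->SetBigint(g, v1, 1000LL);             // fits in 32 bits:
                                               // stored inline, Type = TYPE_INTG
    PBVAL v2 = bsp->SubAllocVal(g);
    bsp->SetBigint(g, v2, 12345678901LL);      // too large: a longlong is
                                               // sub-allocated and referenced
                                               // by offset, Type = TYPE_BINT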
*/ /***********************************************************************/ -void JVALUE::SetBool(PGLOBAL g, bool b) { - B = b; - DataType = TYPE_BOOL; +void BJSON::SetBool(PBVAL vlp, bool b) +{ + vlp->B = b; + vlp->Type = TYPE_BOOL; } // end of SetTiny /***********************************************************************/ /* Set the Value's value as the given big integer. */ /***********************************************************************/ -void JVALUE::SetBigint(PGLOBAL g, long long ll) { - LLn = ll; - DataType = TYPE_BINT; +void BJSON::SetBigint(PGLOBAL g, PBVAL vlp, longlong ll) +{ + if (ll >= INT_MIN32 && ll <= INT_MAX32) { + vlp->N = (int)ll; + vlp->Type = TYPE_INTG; + } else { + longlong* llp = (longlong*)PlugSubAlloc(g, NULL, sizeof(longlong)); + + *llp = ll; + vlp->To_Val = MOF(llp); + vlp->Type = TYPE_BINT; + } // endif ll + } // end of SetBigint /***********************************************************************/ /* Set the Value's value as the given DOUBLE. */ /***********************************************************************/ -void JVALUE::SetFloat(PGLOBAL g, double f) { - F = f; - Nd = 6; - DataType = TYPE_DBL; +void BJSON::SetFloat(PBVAL vlp, double f) { + vlp->F = (float)f; + vlp->Nd = 6; + vlp->Type = TYPE_FLOAT; } // end of SetFloat /***********************************************************************/ /* Set the Value's value as the given string. */ /***********************************************************************/ -void JVALUE::SetString(PGLOBAL g, PSZ s, int ci) { - Strp = s; - Nd = ci; - DataType = TYPE_STRG; +void BJSON::SetString(PBVAL vlp, PSZ s, int ci) { + vlp->To_Val = MOF(s); + vlp->Nd = ci; + vlp->Type = TYPE_STRG; } // end of SetString /***********************************************************************/ /* True when its JSON or normal value is null. */ /***********************************************************************/ -bool JVALUE::IsNull(void) { - return (DataType == TYPE_JSON) ? Jsp->IsNull() : DataType == TYPE_NULL; -} // end of IsNull -#endif // 0 +bool BJSON::IsValueNull(PBVAL vlp) { + bool b; + + switch (vlp->Type) { + case TYPE_NULL: + b = true; + break; + case TYPE_JOB: + b = IsObjectNull(MPP(vlp->To_Val)); + break; + case TYPE_JAR: + b = IsArrayNull(MVP(vlp->To_Val)); + break; + default: + b = false; + } // endswitch Type + + return b; + } // end of IsNull diff --git a/storage/connect/bson.h b/storage/connect/bson.h index 7cf0820dc7a..9f5f44d073c 100644 --- a/storage/connect/bson.h +++ b/storage/connect/bson.h @@ -16,20 +16,25 @@ #define X #endif +#define ARGS MY_MIN(24,(int)len-i),s+MY_MAX(i-3,0) +#define MOF(X) MakeOff(Base, X) +#define MP(X) MakePtr(Base, X) +#define MPP(X) (PBPR)MakePtr(Base, X) +#define MVP(X) (PBVAL)MakePtr(Base, X) +#define MZP(X) (PSZ)MakePtr(Base, X) +#define LLN(X) *(longlong*)MakePtr(Base, X) +#define DBL(X) *(double*)MakePtr(Base, X) + class BDOC; class BOUT; -//class JSON; +class BJSON; typedef class BDOC* PBDOC; -//typedef class BJSON* PBSON; - -// BSON size should be equal on Linux and Windows -#define BMX 255 - -typedef uint OFFSET; +typedef class BJSON* PBJSON; +typedef uint OFFSET; /***********************************************************************/ -/* Structure JVALUE. */ +/* Structure BVAL. Binary representation of a JVALUE. 
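The short macros defined at the top of bson.h are the only address arithmetic the BJSON code uses: every link stored in a node is an OFFSET relative to Base, and the macros rebuild a typed pointer from it (MVP for values, MPP for pairs, MZP for strings, LLN/DBL for boxed numbers) or convert back (MOF). Assuming MakePtr/MakeOff are the base-plus-offset helpers from global.h, the traversal pattern used throughout the new code, for example in GetObjectSize, reads:

    // Inside a BJSON method, Base being the work area the tree lives in:
    for (PBPR brp = bop; brp; brp = MPP(brp->Next))   // follow pair offsets
      htrc("key=%s\n", MZP(brp->Key));                // offset -> string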
*/ /***********************************************************************/ typedef struct _jvalue { union { @@ -39,12 +44,12 @@ typedef struct _jvalue { bool B; // A boolean value True or false (0) }; short Nd; // Number of decimals - JTYP Type; // The value type + short Type; // The value type OFFSET Next; // Offset to the next value in array } BVAL, *PBVAL; // end of struct BVALUE /***********************************************************************/ -/* Structure JPAIR. The pairs of a json Object. */ +/* Structure BPAIR. The pairs of a json Object. */ /***********************************************************************/ typedef struct _jpair { OFFSET Key; // Offset to this pair key name @@ -52,42 +57,91 @@ typedef struct _jpair { OFFSET Next; // Offset to the next pair in object } BPAIR, *PBPR; // end of struct BPAIR -#if 0 -/***********************************************************************/ -/* Structure used to return binary json to Json UDF functions. */ -/* (should be moved to jsonudf.h). */ -/***********************************************************************/ -typedef struct _JsonBin { - char Msg[BMX + 1]; - char *Filename; - PGLOBAL G; - int Pretty; - ulong Reslen; - my_bool Changed; - PBSON Top; - PBSON Jsp; - PBJN Bsp; -} BJSON, *PBJN ; // end of struct BJSON - -PBJN JbinAlloc(PGLOBAL g, UDF_ARGS* args, ulong len, PJSON jsp); -#endif // 0 - char* NextChr(PSZ s, char sep); char* GetJsonNull(void); const char* GetFmt(int type, bool un); DllExport bool IsNum(PSZ s); +/***********************************************************************/ +/* Class BJSON. The class handling all BJSON operations. */ +/***********************************************************************/ +class BJSON : public BLOCK { +public: + // Constructor + BJSON(void* base, PBVAL vp = NULL) { Base = base; Bvp = vp; } + + void* GetBase(void) { return Base; } + + // SubAlloc functions + void* BsonSubAlloc(PGLOBAL g, size_t size); + PBPR SubAllocPair(PGLOBAL g, OFFSET key, OFFSET val = 0); + PBVAL SubAllocVal(PGLOBAL g); + PBVAL SubAllocVal(PGLOBAL g, OFFSET toval, JTYP type = TYPE_UNKNOWN, short nd = 0); + PBVAL SubAllocVal(PGLOBAL g, PVAL valp); + PBVAL DupVal(PGLOBAL g, PBVAL bvp); + + // Array functions + int GetArraySize(PBVAL bap, bool b = false); + PBVAL GetArrayValue(PBVAL bap, int i); + PSZ GetArrayText(PGLOBAL g, PBVAL bap, PSTRG text); + PBVAL MergeArray(PGLOBAL g, PBVAL bap1,PBVAL bap2); + PBVAL DeleteValue(PBVAL bap, int n); + PBVAL AddArrayValue(PGLOBAL g, PBVAL bap, PBVAL nvp = NULL, int* x = NULL); + PBVAL SetArrayValue(PGLOBAL g, PBVAL bap, PBVAL nvp, int n); + bool IsArrayNull(PBVAL bap); + + // Object functions + int GetObjectSize(PBPR bop, bool b = false); + PSZ GetObjectText(PGLOBAL g, PBPR bop, PSTRG text); + PBPR MergeObject(PGLOBAL g, PBPR bop1, PBPR bop2); + PBPR AddPair(PGLOBAL g, PBPR bop, PSZ key, OFFSET val = 0); + PBVAL GetKeyValue(PBPR bop, PSZ key); + PBVAL GetKeyList(PGLOBAL g, PBPR bop); + PBVAL GetObjectValList(PGLOBAL g, PBPR bop); + PBPR SetKeyValue(PGLOBAL g, PBPR bop, OFFSET bvp, PSZ key); + PBPR DeleteKey(PBPR bop, PCSZ k); + bool IsObjectNull(PBPR bop); + + // Value functions + int GetSize(PBVAL vlp, bool b = false); + PBPR GetObject(PBVAL vlp); + PBVAL GetArray(PBVAL vlp); + //PJSON GetJsp(void) { return (DataType == TYPE_JSON ? Jsp : NULL); } + PSZ GetValueText(PGLOBAL g, PBVAL vlp, PSTRG text); + //inline PJSON GetJson(void) { return (DataType == TYPE_JSON ? 
Jsp : this); } + PSZ GetString(PGLOBAL g, PBVAL vp, char* buff = NULL); + int GetInteger(PBVAL vp); + long long GetBigint(PBVAL vp); + double GetDouble(PBVAL vp); + PVAL GetValue(PGLOBAL g, PBVAL vp); + void SetValueObj(PBVAL vlp, PBPR bop); + void SetValueArr(PBVAL vlp, PBVAL bap); + void SetValueVal(PBVAL vlp, PBVAL vp); + void SetValue(PGLOBAL g, PBVAL vlp, PVAL valp); + void SetString(PBVAL vlp, PSZ s, int ci = 0); + void SetInteger(PBVAL vlp, int n); + void SetBigint(PGLOBAL g, PBVAL vlp, longlong ll); + void SetFloat(PBVAL vlp, double f); + void SetBool(PBVAL vlp, bool b); + bool IsValueNull(PBVAL vlp); + + // Members + PBVAL Bvp; + void* Base; + +protected: + // Default constructor not to be used + BJSON(void) {} +}; // end of class BJSON + /***********************************************************************/ /* Class JDOC. The class for parsing and serializing json documents. */ /***********************************************************************/ -class BDOC : public BLOCK { +class BDOC : public BJSON { public: - BDOC(void); + BDOC(void *); - void *BsonSubAlloc(PGLOBAL g, size_t size); - PBPR SubAllocPair(PGLOBAL g, OFFSET key); - PBVAL SubAllocVal(PGLOBAL g); PBVAL ParseJson(PGLOBAL g, char* s, size_t n, int* prty = NULL, bool* b = NULL); PSZ Serialize(PGLOBAL g, PBVAL bvp, char* fn, int pretty); @@ -98,72 +152,21 @@ protected: OFFSET ParseString(PGLOBAL g, int& i); void ParseNumeric(PGLOBAL g, int& i, PBVAL bvp); OFFSET ParseAsArray(PGLOBAL g, int& i, int pretty, int* ptyp); - bool SerializeArray(OFFSET arp, bool b); - bool SerializeObject(OFFSET obp); - bool SerializeValue(PBVAL vp); + bool SerializeArray(OFFSET arp, bool b); + bool SerializeObject(OFFSET obp); + bool SerializeValue(PBVAL vp); // Members used when parsing and serializing -private: JOUT* jp; // Used with serialize - void* base; // The base for making offsets or pointers char* s; // The Json string to parse int len; // The Json string length bool pty[3]; // Used to guess what pretty is + + // Default constructor not to be used + BDOC(void) {} }; // end of class BDOC #if 0 -/***********************************************************************/ -/* Class BJSON. The class handling all BSON operations. */ -/***********************************************************************/ -class BJSON : public BLOCK { -public: - // Constructor - BJSON(PBVAL vp, void* base) { Vlp = vp; Base = base; } - - // Array functions - int GetSize(bool b); - PBVAL GetArrayValue(int i); - PSZ GetText(PGLOBAL g, PSTRG text); - bool Merge(PGLOBAL g, PBVAL jsp); - bool DeleteValue(int n); - PBVAL AddArrayValue(PGLOBAL g, PBVAL jvp = NULL, int* x = NULL); - bool SetArrayValue(PGLOBAL g, PBVAL jvp, int i); - - // Object functions - int GetObjectSize(PBPR prp, bool b); - PSZ GetObjectText(PGLOBAL g, PBPR prp, PSTRG text); - bool MergeObject(PGLOBAL g, PBPR prp); - PJPR AddPair(PGLOBAL g, PCSZ key); - PJVAL GetKeyValue(const char* key); - PJAR GetKeyList(PGLOBAL g); - PJAR GetValList(PGLOBAL g); - void SetKeyValue(PGLOBAL g, PBVAL jvp, PCSZ key); - void DeleteKey(PCSZ k); - - // Value functions - PBPR GetObject(void); - PBVAL GetArray(void); - PJSON GetJsp(void) { return (DataType == TYPE_JSON ? Jsp : NULL); } - PSZ GetValueText(PGLOBAL g, PSTRG text); - inline PJSON GetJson(void) { return (DataType == TYPE_JSON ? 
Jsp : this); } - PSZ GetString(PGLOBAL g, char* buff = NULL); - int GetInteger(void); - long long GetBigint(void); - double GetFloat(void); - PVAL GetValue(PGLOBAL g); - void SetValue(PJSON jsp); - void SetValue(PGLOBAL g, PVAL valp); - void SetString(PGLOBAL g, PSZ s, int ci = 0); - void SetInteger(PGLOBAL g, int n); - void SetBigint(PGLOBAL g, longlong ll); - void SetFloat(PGLOBAL g, double f); - void SetBool(PGLOBAL g, bool b); - - // Members - PBVAL Vlp; - void* Base; -}; // end of class BJSON - /***********************************************************************/ /* Class JOBJECT: contains a list of value pairs. */ /***********************************************************************/ diff --git a/storage/connect/json.h b/storage/connect/json.h index c5251af01a9..5ba4d7b3dbd 100644 --- a/storage/connect/json.h +++ b/storage/connect/json.h @@ -15,10 +15,7 @@ #define X #endif -// Required by some compilers -enum JTYP : short; - -enum JTYP : short { +enum JTYP { TYPE_NULL = TYPE_VOID, TYPE_STRG = TYPE_STRING, TYPE_DBL = TYPE_DOUBLE, @@ -48,9 +45,6 @@ typedef class JVALUE *PJVAL; typedef class JOBJECT *PJOB; typedef class JARRAY *PJAR; -// BSON size should be equal on Linux and Windows -#define BMX 255 -typedef struct BSON *PBSON; typedef struct JPAIR *PJPR; //typedef struct VAL *PVL; @@ -63,39 +57,6 @@ struct JPAIR { PJPR Next; // To the next pair }; // end of struct JPAIR -#if 0 -/***********************************************************************/ -/* Structure VAL (string, int, float, bool or null) */ -/***********************************************************************/ -struct VAL { - union { - char *Strp; // Ptr to a string - int N; // An integer value - long long LLn; // A big integer value - double F; // A float value - bool B; // True or false - }; - int Nd; // Decimal number - JTYP Type; // The value type -}; // end of struct VAL -#endif // 0 - -/***********************************************************************/ -/* Structure used to return binary json to Json UDF functions. */ -/***********************************************************************/ -struct BSON { - char Msg[BMX + 1]; - char *Filename; - PGLOBAL G; - int Pretty; - ulong Reslen; - my_bool Changed; - PJSON Top; - PJSON Jsp; - PBSON Bsp; -}; // end of struct BSON - -PBSON JbinAlloc(PGLOBAL g, UDF_ARGS* args, ulong len, PJSON jsp); //PVL AllocVal(PGLOBAL g, JTYP type); char *NextChr(PSZ s, char sep); char *GetJsonNull(void); diff --git a/storage/connect/jsonudf.cpp b/storage/connect/jsonudf.cpp index c9f0ea9239a..0012b3d6bdd 100644 --- a/storage/connect/jsonudf.cpp +++ b/storage/connect/jsonudf.cpp @@ -63,7 +63,7 @@ static PJSNX JsnxNew(PGLOBAL g, PJSON jsp, int type, int len) return jsx; } /* end of JsnxNew */ - /* ----------------------------------- JSNX ------------------------------------ */ +/* ----------------------------------- JSNX ------------------------------------ */ /*********************************************************************************/ /* JSNX public constructor. */ @@ -1186,6 +1186,7 @@ static my_bool JsonSubSet(PGLOBAL g) pph->To_Free = (g->Saved_Size) ? 
g->Saved_Size : (size_t)sizeof(POOLHEADER); pph->FreeBlk = g->Sarea_Size - pph->To_Free; + g->Saved_Size = 0; return FALSE; } /* end of JsonSubSet */ @@ -6558,3 +6559,1476 @@ long long countin(UDF_INIT *initid, UDF_ARGS *args, char *result, free(str2); return n; } // end of countin + +/* --------------------------- New Testing BJSON Stuff --------------------------*/ + +/*********************************************************************************/ +/* SubAlloc a new BJNX class with protection against memory exhaustion. */ +/*********************************************************************************/ +static PBJNX BjnxNew(PGLOBAL g, PBVAL vlp, int type, int len) { + PBJNX bjnx; + + try { + bjnx = new(g) BJNX(g, vlp, type, len); + } catch (...) { + if (trace(1023)) + htrc("%s\n", g->Message); + + PUSH_WARNING(g->Message); + bjnx = NULL; + } // end try/catch + + return bjnx; +} /* end of BjnxNew */ + +/* ----------------------------------- BSNX ------------------------------------ */ + +/*********************************************************************************/ +/* BSNX public constructor. */ +/*********************************************************************************/ +BJNX::BJNX(PGLOBAL g, PBVAL row, int type, int len, int prec, my_bool wr) + : BDOC(g->Sarea) +{ + Row = row; + Bvalp = NULL; + Jpnp = NULL; + Jp = NULL; + Nodes = NULL; + Value = AllocateValue(g, type, len, prec); + MulVal = NULL; + Jpath = NULL; + Buf_Type = type; + Long = len; + Prec = prec; + Nod = 0; + Xnod = -1; + K = 0; + I = -1; + Imax = 9; + B = 0; + Xpd = false; + Parsed = false; + Found = false; + Wr = wr; + Jb = false; +} // end of BJNX constructor + +/*********************************************************************************/ +/* SetJpath: set and parse the json path. */ +/*********************************************************************************/ +my_bool BJNX::SetJpath(PGLOBAL g, char* path, my_bool jb) { + // Check Value was allocated + if (!Value) + return true; + + Value->SetNullable(true); + Jpath = path; + + // Parse the json path + Parsed = false; + Nod = 0; + Jb = jb; + return ParseJpath(g); +} // end of SetJpath + +/*********************************************************************************/ +/* Analyse array processing options. 
*/ +/*********************************************************************************/ +my_bool BJNX::SetArrayOptions(PGLOBAL g, char* p, int i, PSZ nm) { + int n = (int)strlen(p); + my_bool dg = true, b = false; + PJNODE jnp = &Nodes[i]; + + if (*p) { + if (p[n - 1] == ']') { + p[--n] = 0; + } else if (!IsNum(p)) { + // Wrong array specification + sprintf(g->Message, "Invalid array specification %s", p); + return true; + } // endif p + + } else + b = true; + + // To check whether a numeric Rank was specified + dg = IsNum(p); + + if (!n) { + // Default specifications + if (jnp->Op != OP_EXP) { + if (Wr) { + // Force append + jnp->Rank = INT_MAX32; + jnp->Op = OP_LE; + } else if (Jb) { + // Return a Json item + jnp->Op = OP_XX; + } else if (b) { + // Return 1st value (B is the index base) + jnp->Rank = B; + jnp->Op = OP_LE; + } else if (!Value->IsTypeNum()) { + jnp->CncVal = AllocateValue(g, PlugDup(g, ", "), TYPE_STRING); + jnp->Op = OP_CNC; + } else + jnp->Op = OP_ADD; + + } // endif OP + + } else if (dg) { + // Return nth value + jnp->Rank = atoi(p) - B; + jnp->Op = OP_EQ; + } else if (Wr) { + sprintf(g->Message, "Invalid specification %s in a write path", p); + return true; + } else if (n == 1) { + // Set the Op value; + switch (*p) { + case '+': jnp->Op = OP_ADD; break; + case 'x': jnp->Op = OP_MULT; break; + case '>': jnp->Op = OP_MAX; break; + case '<': jnp->Op = OP_MIN; break; + case '!': jnp->Op = OP_SEP; break; // Average + case '#': jnp->Op = OP_NUM; break; + case '*': // Expand this array + strcpy(g->Message, "Expand not supported by this function"); + return true; + default: + sprintf(g->Message, "Invalid function specification %c", *p); + return true; + } // endswitch *p + + } else if (*p == '"' && p[n - 1] == '"') { + // This is a concat specification + jnp->Op = OP_CNC; + + if (n > 2) { + // Set concat intermediate string + p[n - 1] = 0; + + if (trace(1)) + htrc("Concat string=%s\n", p + 1); + + jnp->CncVal = AllocateValue(g, p + 1, TYPE_STRING); + } // endif n + + } else { + strcpy(g->Message, "Wrong array specification"); + return true; + } // endif's + + // For calculated arrays, a local Value must be used + switch (jnp->Op) { + case OP_NUM: + jnp->Valp = AllocateValue(g, TYPE_INT); + break; + case OP_ADD: + case OP_MULT: + case OP_SEP: + if (!IsTypeChar(Buf_Type)) + jnp->Valp = AllocateValue(g, Buf_Type, 0, GetPrecision()); + else + jnp->Valp = AllocateValue(g, TYPE_DOUBLE, 0, 2); + + break; + case OP_MIN: + case OP_MAX: + jnp->Valp = AllocateValue(g, Buf_Type, Long, GetPrecision()); + break; + case OP_CNC: + if (IsTypeChar(Buf_Type)) + jnp->Valp = AllocateValue(g, TYPE_STRING, Long, GetPrecision()); + else + jnp->Valp = AllocateValue(g, TYPE_STRING, 512); + + break; + default: + break; + } // endswitch Op + + if (jnp->Valp) + MulVal = AllocateValue(g, jnp->Valp); + + return false; +} // end of SetArrayOptions + +/*********************************************************************************/ +/* Parse the eventual passed Jpath information. */ +/* This information can be specified in the Fieldfmt column option when */ +/* creating the table. It permits to indicate the position of the node */ +/* corresponding to that column. 
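SetArrayOptions gives the bracket part of a path its meaning: a number selects the nth item (relative to the index base B), while the single-character operators request a calculation over the whole array. A usage sketch, assuming vlp is a tree already parsed by BDOC::ParseJson and using the BjnxNew helper above (the path and sizes are illustrative):

    PBJNX bnx = BjnxNew(g, vlp, TYPE_STRING, 64);

    if (bnx && !bnx->SetJpath(g, (char*)"$.prices.[1]", false))
      bnx->ReadValue(g);            // second item of the prices array (B = 0)

    // Bracket forms recognized by SetArrayOptions:
    //   [n] nth item   [+] sum   [x] product   [>] max   [<] min
    //   [!] average    [#] count    ["sep"] concatenation with a separator
    // (the calculated forms rely on CalculateArray, still marked NIY here).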
*/ +/*********************************************************************************/ +my_bool BJNX::ParseJpath(PGLOBAL g) { + char* p, * p1 = NULL, * p2 = NULL, * pbuf = NULL; + int i; + my_bool a, mul = false; + + if (Parsed) + return false; // Already done + else if (!Jpath) + // Jpath = Name; + return true; + + if (trace(1)) + htrc("ParseJpath %s\n", SVP(Jpath)); + + if (!(pbuf = PlgDBDup(g, Jpath))) + return true; + + if (*pbuf == '$') pbuf++; + if (*pbuf == '.') pbuf++; + if (*pbuf == '[') p1 = pbuf++; + + // Estimate the required number of nodes + for (i = 0, p = pbuf; (p = NextChr(p, '.')); i++, p++) + Nod++; // One path node found + + if (!(Nodes = (PJNODE)PlgDBSubAlloc(g, NULL, (++Nod) * sizeof(JNODE)))) + return true; + + memset(Nodes, 0, (Nod) * sizeof(JNODE)); + + // Analyze the Jpath for this column + for (i = 0, p = pbuf; p && i < Nod; i++, p = (p2 ? p2 : NULL)) { + a = (p1 != NULL); + p1 = strchr(p, '['); + p2 = strchr(p, '.'); + + if (!p2) + p2 = p1; + else if (p1) { + if (p1 < p2) + p2 = p1; + else if (p1 == p2 + 1) + *p2++ = 0; // Old syntax .[ + else + p1 = NULL; + + } // endif p1 + + if (p2) + *p2++ = 0; + + // Jpath must be explicit + if (a || *p == 0 || *p == '[' || IsNum(p)) { + // Analyse intermediate array processing + if (SetArrayOptions(g, p, i, Nodes[i - 1].Key)) + return true; + + } else if (*p == '*') { + if (Wr) { + sprintf(g->Message, "Invalid specification %c in a write path", *p); + return true; + } else // Return JSON + Nodes[i].Op = OP_XX; + + } else { + Nodes[i].Key = p; + Nodes[i].Op = OP_EXIST; + } // endif's + + } // endfor i, p + + Nod = i; + MulVal = AllocateValue(g, Value); + + if (trace(1)) + for (i = 0; i < Nod; i++) + htrc("Node(%d) Key=%s Op=%d Rank=%d\n", + i, SVP(Nodes[i].Key), Nodes[i].Op, Nodes[i].Rank); + + Parsed = true; + return false; +} // end of ParseJpath + +/*********************************************************************************/ +/* MakeJson: Serialize the json item and set value to it. */ +/*********************************************************************************/ +PVAL BJNX::MakeJson(PGLOBAL g, PBVAL bvp) { + if (Value->IsTypeNum()) { + strcpy(g->Message, "Cannot make Json for a numeric value"); + Value->Reset(); + } else if (bvp->Type != TYPE_JAR && bvp->Type != TYPE_JOB) { + strcpy(g->Message, "Target is not an array or object"); + Value->Reset(); + } else + Value->SetValue_psz(Serialize(g, bvp, NULL, 0)); + + return Value; +} // end of MakeJson + +/*********************************************************************************/ +/* SetValue: Set a value from a JVALUE contains. */ +/*********************************************************************************/ +void BJNX::SetJsonValue(PGLOBAL g, PVAL vp, PBVAL vlp) { + if (vlp) { + vp->SetNull(false); + + if (Jb) { + vp->SetValue_psz(Serialize(g, vlp, NULL, 0)); + } else switch (vlp->Type) { + case TYPE_DTM: + case TYPE_STRG: + vp->SetValue_psz(GetString(g, vlp)); + break; + case TYPE_INTG: + case TYPE_BINT: + vp->SetValue(GetInteger(vlp)); + break; + case TYPE_DBL: + if (vp->IsTypeNum()) + vp->SetValue(GetDouble(vlp)); + else // Get the proper number of decimals + vp->SetValue_psz(GetString(g, vlp)); + + break; + case TYPE_BOOL: + if (vp->IsTypeNum()) + vp->SetValue(GetInteger(vlp) ? 
1 : 0); + else + vp->SetValue_psz(GetString(g, vlp)); + + break; + case TYPE_JAR: + vp->SetValue_psz(GetArrayText(g, MVP(vlp->To_Val), NULL)); + break; + case TYPE_JOB: + vp->SetValue_psz(GetObjectText(g, MPP(vlp->To_Val), NULL)); + break; + case TYPE_NULL: + vp->SetNull(true); + default: + vp->Reset(); + } // endswitch Type + + } else { + vp->SetNull(true); + vp->Reset(); + } // endif val + +} // end of SetJsonValue + +/*********************************************************************************/ +/* GetJson: */ +/*********************************************************************************/ +PBVAL BJNX::GetJson(PGLOBAL g) { + return GetRowValue(g, Row, 0); +} // end of GetJson + +/*********************************************************************************/ +/* ReadValue: */ +/*********************************************************************************/ +void BJNX::ReadValue(PGLOBAL g) { + Value->SetValue_pval(GetColumnValue(g, Row, 0)); +} // end of ReadValue + +/*********************************************************************************/ +/* GetColumnValue: */ +/*********************************************************************************/ +PVAL BJNX::GetColumnValue(PGLOBAL g, PBVAL row, int i) { + PBVAL vlp = GetRowValue(g, row, i); + + SetJsonValue(g, Value, vlp); + return Value; +} // end of GetColumnValue + +/*********************************************************************************/ +/* GetRowValue: */ +/*********************************************************************************/ +PBVAL BJNX::GetRowValue(PGLOBAL g, PBVAL row, int i, my_bool b) { + my_bool expd = false; + PBVAL bap; + PBVAL vlp = NULL; + + for (; i < Nod && row; i++) { + if (Nodes[i].Op == OP_NUM) { + Value->SetValue(row->Type == TYPE_JAR ? GetArraySize(MVP(row->To_Val)) : 1); + vlp = SubAllocVal(g, Value); + return vlp; + } else if (Nodes[i].Op == OP_XX) { + Jb = b; +// return DupVal(g, row); + return row; // or last line ??? + } else switch (row->Type) { + case TYPE_JOB: + if (!Nodes[i].Key) { + // Expected Array was not there + if (Nodes[i].Op == OP_LE) { + if (i < Nod - 1) + continue; + else + vlp = row; // DupVal(g, row) ??? 
+ + } else { + strcpy(g->Message, "Unexpected object"); + vlp = NULL; + } //endif Op + + } else + vlp = GetKeyValue(MPP(row->To_Val), Nodes[i].Key); + + break; + case TYPE_JAR: + bap = MVP(row->To_Val); + + if (!Nodes[i].Key) { + if (Nodes[i].Op == OP_EQ || Nodes[i].Op == OP_LE) + vlp = GetArrayValue(bap, Nodes[i].Rank); + else if (Nodes[i].Op == OP_EXP) + return (PBVAL)ExpandArray(g, bap, i); + else + return SubAllocVal(g, CalculateArray(g, bap, i)); + + } else { + // Unexpected array, unwrap it as [0] + vlp = GetArrayValue(bap, 0); + i--; + } // endif's + + break; + case TYPE_JVAL: + vlp = row; + break; + default: + sprintf(g->Message, "Invalid row JSON type %d", row->Type); + vlp = NULL; + } // endswitch Type + + row = vlp; + } // endfor i + + return vlp; +} // end of GetRowValue + +/*********************************************************************************/ +/* ExpandArray: */ +/*********************************************************************************/ +PVAL BJNX::ExpandArray(PGLOBAL g, PBVAL arp, int n) +{ + strcpy(g->Message, "Expand cannot be done by this function"); + return NULL; +} // end of ExpandArray + +/*********************************************************************************/ +/* CalculateArray: NIY */ +/*********************************************************************************/ +PVAL BJNX::CalculateArray(PGLOBAL g, PBVAL bap, int n) +{ +#if 0 + int i, ars = GetArraySize(bap), nv = 0; + bool err; + OPVAL op = Nodes[n].Op; + PVAL val[2], vp = Nodes[n].Valp; + PBVAL bvrp, bvp; + BVAL bval; + + vp->Reset(); + xtrc(1,"CalculateArray size=%d op=%d\n", ars, op); + + for (i = 0; i < ars; i++) { + bvrp = GetArrayValue(bap, i); + xtrc(1, "i=%d nv=%d\n", i, nv); + + if (!IsValueNull(bvrp) || (op == OP_CNC && GetJsonNull())) { + if (IsValueNull(bvrp)) { + SetString(bvrp, GetJsonNull(), 0); + bvp = bvrp; + } else if (n < Nod - 1 && bvrp->GetJson()) { + bval.SetValue(g, GetColumnValue(g, jvrp->GetJson(), n + 1)); + bvp = &bval; + } else + jvp = jvrp; + + if (trace(1)) + htrc("jvp=%s null=%d\n", + jvp->GetString(g), jvp->IsNull() ? 1 : 0); + + if (!nv++) { + SetJsonValue(g, vp, jvp); + continue; + } else + SetJsonValue(g, MulVal, jvp); + + if (!MulVal->IsNull()) { + switch (op) { + case OP_CNC: + if (Nodes[n].CncVal) { + val[0] = Nodes[n].CncVal; + err = vp->Compute(g, val, 1, op); + } // endif CncVal + + val[0] = MulVal; + err = vp->Compute(g, val, 1, op); + break; + // case OP_NUM: + case OP_SEP: + val[0] = Nodes[n].Valp; + val[1] = MulVal; + err = vp->Compute(g, val, 2, OP_ADD); + break; + default: + val[0] = Nodes[n].Valp; + val[1] = MulVal; + err = vp->Compute(g, val, 2, op); + } // endswitch Op + + if (err) + vp->Reset(); + + if (trace(1)) { + char buf(32); + + htrc("vp='%s' err=%d\n", + vp->GetCharString(&buf), err ? 1 : 0); + } // endif trace + + } // endif Zero + + } // endif jvrp + + } // endfor i + + if (op == OP_SEP) { + // Calculate average + MulVal->SetValue(nv); + val[0] = vp; + val[1] = MulVal; + + if (vp->Compute(g, val, 2, OP_DIV)) + vp->Reset(); + + } // endif Op + + return vp; +#else + strcpy(g->Message, "Calculate array NIY"); + return NULL; +#endif +} // end of CalculateArray + +/*********************************************************************************/ +/* CheckPath: Checks whether the path exists in the document. 
*/ +/*********************************************************************************/ +my_bool BJNX::CheckPath(PGLOBAL g) { + PBVAL val = NULL; + PBVAL row = Row; + + for (int i = 0; i < Nod && row; i++) { + val = NULL; + + if (Nodes[i].Op == OP_NUM || Nodes[i].Op == OP_XX) { + } else switch (row->Type) { + case TYPE_JOB: + if (Nodes[i].Key) + val = GetKeyValue(MPP(row->To_Val), Nodes[i].Key); + + break; + case TYPE_JAR: + if (!Nodes[i].Key) + if (Nodes[i].Op == OP_EQ || Nodes[i].Op == OP_LE) + val = GetArrayValue(MVP(row->To_Val), Nodes[i].Rank); + + break; + case TYPE_JVAL: + val = MVP(row->To_Val); + break; + default: + sprintf(g->Message, "Invalid row JSON type %d", row->Type); + } // endswitch Type + +// if (i < Nod - 1) +// if (!(row = (val) ? val->GetJsp() : NULL)) +// val = NULL; + + row = val; + } // endfor i + + return (val != NULL); +} // end of CheckPath + +/***********************************************************************/ +/* GetRow: Set the complete path of the object to be set. */ +/***********************************************************************/ +PBVAL BJNX::GetRow(PGLOBAL g) { + PBVAL val = NULL; + PBVAL arp; + PBVAL nwr, row = Row; + + for (int i = 0; i < Nod - 1 && row; i++) { + if (Nodes[i].Op == OP_XX) + break; + else switch (row->Type) { + case TYPE_JOB: + if (!Nodes[i].Key) + // Expected Array was not there, wrap the value + continue; + + val = GetKeyValue(MPP(row->To_Val), Nodes[i].Key); + break; + case TYPE_JAR: + arp = MVP(row->To_Val); + + if (!Nodes[i].Key) { + if (Nodes[i].Op == OP_EQ) + val = GetArrayValue(arp, Nodes[i].Rank); + else + val = GetArrayValue(arp, Nodes[i].Rx); + + } else { + // Unexpected array, unwrap it as [0] + val = GetArrayValue(arp, 0); + i--; + } // endif Nodes + + break; + case TYPE_JVAL: + val = MVP(row->To_Val); + break; + default: + sprintf(g->Message, "Invalid row JSON type %d", row->Type); + val = NULL; + } // endswitch Type + + if (val) { + row = val; + } else { + // Construct missing objects + for (i++; row && i < Nod; i++) { + if (Nodes[i].Op == OP_XX) + break; +// else if (!Nodes[i].Key) + // Construct intermediate array +// nwr = SubAllocVal(g); +// else +// nwr = SubAllocPair(g); + + // Construct new row + nwr = SubAllocVal(g); + + if (row->Type == TYPE_JOB) { + SetKeyValue(g, MPP(row->To_Val), MOF(nwr), Nodes[i - 1].Key); + } else if (row->Type == TYPE_JAR) { + AddArrayValue(g, MVP(row->To_Val), nwr); + } else { + strcpy(g->Message, "Wrong type when writing new row"); + nwr = NULL; + } // endif's + + row = nwr; + } // endfor i + + break; + } // endelse + + } // endfor i + + return row; +} // end of GetRow + +/***********************************************************************/ +/* WriteValue: */ +/***********************************************************************/ +my_bool BJNX::WriteValue(PGLOBAL g, PBVAL jvalp) { + PBPR objp = NULL; + PBVAL arp = NULL; + PBVAL jvp = NULL; + PBVAL row = GetRow(g); + + if (!row) + return true; + + switch (row->Type) { + case TYPE_JOB: objp = MPP(row->To_Val); break; + case TYPE_JAR: arp = MVP(row->To_Val); break; + case TYPE_JVAL: jvp = MVP(row->To_Val); break; + default: + strcpy(g->Message, "Invalid target type"); + return true; + } // endswitch Type + + if (arp) { + if (!Nodes[Nod - 1].Key) { + if (Nodes[Nod - 1].Op == OP_EQ) + SetArrayValue(g, arp, jvalp, Nodes[Nod - 1].Rank); + else + AddArrayValue(g, arp, jvalp); + + } // endif Key + + } else if (objp) { + if (Nodes[Nod - 1].Key) + SetKeyValue(g, objp, MOF(jvalp), Nodes[Nod - 1].Key); + + } else if (jvp) + 
SetValueVal(jvp, jvalp); + + return false; +} // end of WriteValue + +/*********************************************************************************/ +/* Locate a value in a JSON tree: */ +/*********************************************************************************/ +PSZ BJNX::Locate(PGLOBAL g, PBVAL jsp, PBVAL jvp, int k) { + PSZ str = NULL; + my_bool b = false, err = true; + + g->Message[0] = 0; + + if (!jsp) { + strcpy(g->Message, "Null json tree"); + return NULL; + } // endif jsp + + try { + // Write to the path string + Jp = new(g) JOUTSTR(g); + Jp->WriteChr('$'); + Bvalp = jvp; + K = k; + + switch (jsp->Type) { + case TYPE_JAR: + err = LocateArray(g, MVP(jsp->To_Val)); + break; + case TYPE_JOB: + err = LocateObject(g, MPP(jsp->To_Val)); + break; + case TYPE_JVAL: + err = LocateValue(g, MVP(jsp->To_Val)); + break; + default: + err = true; + } // endswitch Type + + if (err) { + if (!g->Message[0]) + strcpy(g->Message, "Invalid json tree"); + + } else if (Found) { + Jp->WriteChr('\0'); + PlugSubAlloc(g, NULL, Jp->N); + str = Jp->Strp; + } // endif's + + } catch (int n) { + if (trace(1)) + htrc("Exception %d: %s\n", n, g->Message); + + PUSH_WARNING(g->Message); + } catch (const char* msg) { + strcpy(g->Message, msg); + } // end catch + + return str; +} // end of Locate + +/*********************************************************************************/ +/* Locate in a JSON Array. */ +/*********************************************************************************/ +my_bool BJNX::LocateArray(PGLOBAL g, PBVAL jarp) { + char s[16]; + int n = GetArraySize(jarp); + size_t m = Jp->N; + + for (int i = 0; i < n && !Found; i++) { + Jp->N = m; + sprintf(s, "[%d]", i + B); + + if (Jp->WriteStr(s)) + return true; + + if (LocateValue(g, GetArrayValue(jarp, i))) + return true; + + } // endfor i + + return false; +} // end of LocateArray + +/*********************************************************************************/ +/* Locate in a JSON Object. */ +/*********************************************************************************/ +my_bool BJNX::LocateObject(PGLOBAL g, PBPR jobp) { + size_t m; + + if (Jp->WriteChr('.')) + return true; + + m = Jp->N; + + for (PBPR pair = jobp; pair && !Found; pair = MPP(pair->Next)) { + Jp->N = m; + + if (Jp->WriteStr(MZP(pair->Key))) + return true; + + if (LocateValue(g, MVP(pair->Vlp))) + return true; + + } // endfor i + + return false; +} // end of LocateObject + +/*********************************************************************************/ +/* Locate a JSON Value. 
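A worked example may help: for the document {"a":1,"b":[1,2]} and the item 2, Locate descends through the object and the array and returns the path "$.b[1]" with the default index base B = 0. The two lines below are a minimal sketch, not part of the patch; g, jsp and jvp stand for the global work area and the already parsed document and item:

  PBJNX bnxp = new(g) BJNX(g, jsp, TYPE_STRING);
  PSZ   path = bnxp->Locate(g, jsp, jvp);   // k defaults to 1, the first occurrence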
*/ +/*********************************************************************************/ +my_bool BJNX::LocateValue(PGLOBAL g, PBVAL jvp) +{ + if (CompareTree(g, Bvalp, jvp)) + Found = (--K == 0); + else if (jvp->Type == TYPE_JAR) + return LocateArray(g, GetArray(jvp)); + else if (jvp->Type == TYPE_JOB) + return LocateObject(g, GetObject(jvp)); + + return false; +} // end of LocateValue + +/*********************************************************************************/ +/* Locate all occurrences of a value in a JSON tree: */ +/*********************************************************************************/ +PSZ BJNX::LocateAll(PGLOBAL g, PBVAL jsp, PBVAL bvp, int mx) +{ + PSZ str = NULL; + my_bool b = false, err = true; + PJPN jnp; + + if (!jsp) { + strcpy(g->Message, "Null json tree"); + return NULL; + } // endif jsp + + try { + jnp = (PJPN)PlugSubAlloc(g, NULL, sizeof(JPN) * mx); + memset(jnp, 0, sizeof(JPN) * mx); + g->Message[0] = 0; + + // Write to the path string + Jp = new(g)JOUTSTR(g); + Bvalp = bvp; + Imax = mx - 1; + Jpnp = jnp; + Jp->WriteChr('['); + + switch (jsp->Type) { + case TYPE_JAR: + err = LocateArrayAll(g, MVP(jsp->To_Val)); + break; + case TYPE_JOB: + err = LocateObjectAll(g, MPP(jsp->To_Val)); + break; + case TYPE_JVAL: + err = LocateValueAll(g, MVP(jsp->To_Val)); + break; + default: + err = LocateValueAll(g, jsp); + } // endswitch Type + + if (!err) { + if (Jp->N > 1) + Jp->N--; + + Jp->WriteChr(']'); + Jp->WriteChr('\0'); + PlugSubAlloc(g, NULL, Jp->N); + str = Jp->Strp; + } else if (!g->Message[0]) + strcpy(g->Message, "Invalid json tree"); + + } catch (int n) { + xtrc(1, "Exception %d: %s\n", n, g->Message); + PUSH_WARNING(g->Message); + } catch (const char* msg) { + strcpy(g->Message, msg); + } // end catch + + return str; +} // end of LocateAll + +/*********************************************************************************/ +/* Locate in a JSON Array. */ +/*********************************************************************************/ +my_bool BJNX::LocateArrayAll(PGLOBAL g, PBVAL jarp) +{ + int i = 0; + + if (I < Imax) { + Jpnp[++I].Type = TYPE_JAR; + + for (PBVAL vp = jarp; vp; vp = MVP(vp->Next)) { + Jpnp[I].N = i; + + if (LocateValueAll(g, GetArrayValue(jarp, i))) + return true; + + i++; + } // endfor i + + I--; + } // endif I + + return false; +} // end of LocateArrayAll + +/*********************************************************************************/ +/* Locate in a JSON Object. */ +/*********************************************************************************/ +my_bool BJNX::LocateObjectAll(PGLOBAL g, PBPR jobp) +{ + if (I < Imax) { + Jpnp[++I].Type = TYPE_JOB; + + for (PBPR pair = jobp; pair; pair = MPP(pair->Next)) { + Jpnp[I].Key = MZP(pair->Key); + + if (LocateValueAll(g, MVP(pair->Vlp))) + return true; + + } // endfor i + + I--; + } // endif I + + return false; +} // end of LocateObjectAll + +/*********************************************************************************/ +/* Locate a JSON Value. */ +/*********************************************************************************/ +my_bool BJNX::LocateValueAll(PGLOBAL g, PBVAL jvp) { + if (CompareTree(g, Bvalp, jvp)) + return AddPath(); + else if (jvp->Type == TYPE_JAR) + return LocateArrayAll(g, GetArray(jvp)); + else if (jvp->Type == TYPE_JOB) + return LocateObjectAll(g, GetObject(jvp)); + + return false; +} // end of LocateValueAll + +/*********************************************************************************/ +/* Compare two JSON trees. 
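LocateAll differs from Locate in that it reports every occurrence and returns them as a JSON array of path strings, the mx argument (10 by default) bounding the nesting depth that can be recorded. For the document {"a":1,"b":[1,2]} and the item 1 the expected result is ["$.a","$.b[0]"]. A minimal sketch, not part of the patch, with the same assumed g, jsp and jvp as above:

  PBJNX bnxp = new(g) BJNX(g, jsp, TYPE_STRING);
  PSZ   list = bnxp->LocateAll(g, jsp, jvp, 10);   // 10 = maximum reported depth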
*/ +/*********************************************************************************/ +my_bool BJNX::CompareTree(PGLOBAL g, PBVAL jp1, PBVAL jp2) +{ + if (!jp1 || !jp2 || jp1->Type != jp2->Type || GetSize(jp1) != GetSize(jp2)) + return false; + + my_bool found = true; + + if (jp1->Type == TYPE_JAR) { + for (int i = 0; found && i < GetArraySize(jp1); i++) + found = (CompareValues(g, GetArrayValue(jp1, i), GetArrayValue(jp2, i))); + + } else if (jp1->Type == TYPE_JOB) { + PBPR p1 = MPP(jp1->To_Val), p2 = MPP(jp2->To_Val); + + // Keys can be differently ordered + for (; found && p1 && p2; p1 = MPP(p1->Next)) + found = CompareValues(g, MVP(p1->Vlp), GetKeyValue(p2, MZP(p1->Key))); + + } else if (jp1->Type == TYPE_JVAL) { + found = CompareTree(g, MVP(jp1->To_Val), (MVP(jp2->To_Val))); + } else + found = CompareValues(g, jp1, jp2); + + return found; +} // end of CompareTree + +/*********************************************************************************/ +/* Compare two VAL values and return true if they are equal. */ +/*********************************************************************************/ +my_bool BJNX::CompareValues(PGLOBAL g, PBVAL v1, PBVAL v2) +{ + my_bool b = false; + + if (v1 && v2) + switch (v1->Type) { + case TYPE_JAR: + if (v2->Type == TYPE_JAR) + b = CompareTree(g, MVP(v1->To_Val), MVP(v2->To_Val)); + + break; + case TYPE_STRG: + if (v2->Type == TYPE_STRG) { + if (v1->Nd || v2->Nd) // Case insensitive + b = (!stricmp(MZP(v1->To_Val), MZP(v2->To_Val))); + else + b = (!strcmp(MZP(v1->To_Val), MZP(v2->To_Val))); + + } // endif Type + + break; + case TYPE_DTM: + if (v2->Type == TYPE_DTM) + b = (!strcmp(MZP(v1->To_Val), MZP(v2->To_Val))); + + break; + case TYPE_INTG: + if (v2->Type == TYPE_INTG) + b = (v1->N == v2->N); + else if (v2->Type == TYPE_BINT) + b = ((longlong)v1->N == LLN(v2->To_Val)); + + break; + case TYPE_BINT: + if (v2->Type == TYPE_INTG) + b = (LLN(v1->To_Val) == (longlong)v2->N); + else if (v2->Type == TYPE_BINT) + b = (LLN(v1->To_Val) == LLN(v2->To_Val)); + + break; + case TYPE_FLOAT: + if (v2->Type == TYPE_FLOAT) + b = (v1->F == v2->F); + else if (v2->Type == TYPE_DBL) + b = ((double)v1->F == DBL(v2->To_Val)); + + break; + case TYPE_DBL: + if (v2->Type == TYPE_DBL) + b = (DBL(v1->To_Val) == DBL(v2->To_Val)); + else if (v2->Type == TYPE_FLOAT) + b = (DBL(v1->To_Val) == (double)v2->F); + + break; + case TYPE_BOOL: + if (v2->Type == TYPE_BOOL) + b = (v1->B == v2->B); + + break; + case TYPE_NULL: + b = (v2->Type == TYPE_NULL); + break; + default: + break; + } // endswitch Type + + else + b = (!v1 && !v2); + + return b; +} // end of CompareValues + +/*********************************************************************************/ +/* Add the found path to the list. */ +/*********************************************************************************/ +my_bool BJNX::AddPath(void) { + char s[16]; + + if (Jp->WriteStr("\"$")) + return true; + + for (int i = 0; i <= I; i++) { + if (Jpnp[i].Type == TYPE_JAR) { + sprintf(s, "[%d]", Jpnp[i].N + B); + + if (Jp->WriteStr(s)) + return true; + + } else { + if (Jp->WriteChr('.')) + return true; + + if (Jp->WriteStr(Jpnp[i].Key)) + return true; + + } // endif's + + } // endfor i + + if (Jp->WriteStr("\",")) + return true; + + return false; +} // end of AddPath + +/*********************************************************************************/ +/* Make a BVAL value from the passed argument. 
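For reference, the scalar rules implemented by CompareValues above can be summed up as follows (illustration only, not part of the patch):

  //  strings   : compared with strcmp, or stricmp when either side carries the
  //              case-insensitive flag in Nd (set by MakeBinValue for string
  //              arguments whose name starts with "ci")
  //  integers  : TYPE_INTG and TYPE_BINT are cross-compared as longlong
  //  reals     : TYPE_FLOAT and TYPE_DBL are cross-compared as double
  //  booleans  : equal when both B flags match
  //  date/time : TYPE_DTM compares as a case-sensitive string
  //  nulls     : TYPE_NULL only equals TYPE_NULL; two missing values compare equal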
*/ +/*********************************************************************************/ +static PBVAL MakeBinValue(PGLOBAL g, UDF_ARGS* args, uint i) { + char* sap = (args->arg_count > i) ? args->args[i] : NULL; + int n, len; + int ci; + longlong bigint; + void* Base = g->Sarea; // Required by MOF + BDOC doc(Base); + PBVAL bp; + PBVAL bvp = doc.SubAllocVal(g); + + if (sap) switch (args->arg_type[i]) { + case STRING_RESULT: + if ((len = args->lengths[i])) { + if ((n = IsJson(args, i)) < 3) + sap = MakePSZ(g, args, i); + + if (n) { + if (n == 2) { + if (!(sap = GetJsonFile(g, sap))) { + PUSH_WARNING(g->Message); + return NULL; + } // endif sap + + len = strlen(sap); + } // endif 2 + + if (!(bp = doc.ParseJson(g, sap, strlen(sap)))) + PUSH_WARNING(g->Message); + + bvp = bp; + } else { + // Check whether this string is a valid json string + JsonMemSave(g); + + if (!(bvp = doc.ParseJson(g, sap, strlen(sap)))) { + // Recover suballocated memory + JsonSubSet(g); + ci = (strnicmp(args->attributes[i], "ci", 2)) ? 0 : 1; + bvp = doc.SubAllocVal(g, MOF(sap), TYPE_STRG, ci); + } else + g->Saved_Size = 0; + + } // endif n + + } // endif len + + break; + case INT_RESULT: + bigint = *(longlong*)sap; + + if ((bigint == 0LL && !strcmp(args->attributes[i], "FALSE")) || + (bigint == 1LL && !strcmp(args->attributes[i], "TRUE"))) + doc.SetBool(bvp, (bool)bigint); + else + doc.SetBigint(g, bvp, bigint); + + break; + case REAL_RESULT: + doc.SetFloat(bvp, *(double*)sap); + break; + case DECIMAL_RESULT: + doc.SetFloat(bvp, atof(MakePSZ(g, args, i))); + break; + case TIME_RESULT: + case ROW_RESULT: + default: + bvp = NULL; + break; + } // endswitch arg_type + + return bvp; +} // end of MakeBinValue + +/*********************************************************************************/ +/* Test BJSON parse and serialize. 
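The mapping applied by MakeBinValue to the SQL argument types can be summed up as follows (illustration only, not part of the patch):

  //  string arguments    : parsed as json text (a json file argument is read first);
  //                        a string that does not parse is kept as a plain TYPE_STRG value
  //  integer arguments   : the literals TRUE and FALSE become booleans, others big integers
  //  real / decimal      : stored as floating point values
  //  time / row results  : not supported, NULL is returned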
*/ +/*********************************************************************************/ +my_bool json_test_bson_init(UDF_INIT* initid, UDF_ARGS* args, char* message) { + unsigned long reslen, memlen, more = 1000; + + if (args->arg_count == 0) { + strcpy(message, "At least 1 argument required (json)"); + return true; + } else if (!IsJson(args, 0) && args->arg_type[0] != STRING_RESULT) { + strcpy(message, "First argument must be a json item"); + return true; + } else + CalcLen(args, false, reslen, memlen); + + return JsonInit(initid, args, message, true, reslen, memlen, more); +} // end of json_test_bson_init + +char* json_test_bson(UDF_INIT* initid, UDF_ARGS* args, char* result, + unsigned long* res_length, char* is_null, char* error) { + char* str = NULL, * sap = NULL, * fn = NULL; + int pretty = 1; + PBVAL bvp; + PGLOBAL g = (PGLOBAL)initid->ptr; + BDOC doc(g); + + if (g->N) { + str = (char*)g->Activityp; + goto err; + } else if (initid->const_item) + g->N = 1; + + try { + if (!g->Xchk) { + if (CheckMemory(g, initid, args, 1, !g->Xchk)) { + PUSH_WARNING("CheckMemory error"); + *error = 1; + goto err; + } else if (!(bvp = MakeBinValue(g, args, 0))) { + PUSH_WARNING(g->Message); + goto err; + } // endif bvp + + if (g->Mrr) { // First argument is a constant + g->Xchk = bvp; + JsonMemSave(g); + } // endif Mrr + + } else + bvp = (PBVAL)g->Xchk; + + for (uint i = 1; i < args->arg_count; i++) + if (args->arg_type[i] == STRING_RESULT) + fn = args->args[i]; + else if (args->arg_type[i] == INT_RESULT) + pretty = (int)*(longlong*)args->args[i]; + + // Serialize the parse tree + str = doc.Serialize(g, bvp, fn, pretty); + + if (initid->const_item) + // Keep result of constant function + g->Activityp = (PACTIVITY)str; + + } catch (int n) { + xtrc(1, "json_test_bson: error %d: %s\n", n, g->Message); + PUSH_WARNING(g->Message); + *error = 1; + str = NULL; + } catch (const char* msg) { + strcpy(g->Message, msg); + PUSH_WARNING(g->Message); + *error = 1; + str = NULL; + } // end catch + +err: + if (!str) { + *res_length = 0; + *is_null = 1; + } else + *res_length = strlen(str); + + return str; +} // end of json_test_bson + +void json_test_bson_deinit(UDF_INIT* initid) { + JsonFreeMem((PGLOBAL)initid->ptr); +} // end of json_test_bson_deinit + +/*********************************************************************************/ +/* Locate a value in a Json tree. 
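In practice json_test_bson parses its first argument into the binary representation and serializes it back, which makes it convenient for exercising the parser. Assuming the function has been registered from the CONNECT library with CREATE FUNCTION ... SONAME, a call such as SELECT json_test_bson('[5, "a", {"b": 1}]') should return the re-serialized text, while an additional string argument selects an output file name and an additional integer argument the pretty level passed to Serialize.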
*/ +/*********************************************************************************/ +my_bool jsonlocate_bson_init(UDF_INIT* initid, UDF_ARGS* args, char* message) { + unsigned long reslen, memlen, more = 1000; + + if (args->arg_count < 2) { + strcpy(message, "At least 2 arguments required"); + return true; + } else if (!IsJson(args, 0) && args->arg_type[0] != STRING_RESULT) { + strcpy(message, "First argument must be a json item"); + return true; + } else if (args->arg_count > 2 && args->arg_type[2] != INT_RESULT) { + strcpy(message, "Third argument is not an integer (rank)"); + return true; + } // endifs args + + CalcLen(args, false, reslen, memlen); + + // TODO: calculate this + if (IsJson(args, 0) == 3) + more = 0; + + return JsonInit(initid, args, message, true, reslen, memlen, more); +} // end of jsonlocate_bson_init + +char* jsonlocate_bson(UDF_INIT* initid, UDF_ARGS* args, char* result, + unsigned long* res_length, char* is_null, char* error) { + char* path = NULL; + int k; + PBVAL bvp, bvp2; + PBJNX bnxp; + PGLOBAL g = (PGLOBAL)initid->ptr; + + if (g->N) { + if (g->Activityp) { + path = (char*)g->Activityp; + *res_length = strlen(path); + return path; + } else { + *res_length = 0; + *is_null = 1; + return NULL; + } // endif Activityp + + } else if (initid->const_item) + g->N = 1; + + try { + if (!g->Xchk) { + if (CheckMemory(g, initid, args, 1, !g->Xchk)) { + PUSH_WARNING("CheckMemory error"); + *error = 1; + goto err; + } else + bvp = MakeBinValue(g, args, 0); + + if (!bvp) { + PUSH_WARNING("First argument is not a valid JSON item"); + goto err; + } // endif bvp + + if (g->Mrr) { // First argument is a constant + g->Xchk = bvp; + JsonMemSave(g); + } // endif Mrr + + } else + bvp = (PBVAL)g->Xchk; + + // The item to locate + bvp2 = MakeBinValue(g, args, 1); + + k = (args->arg_count > 2) ? (int)*(long long*)args->args[2] : 1; + + bnxp = new(g) BJNX(g, bvp, TYPE_STRING); + path = bnxp->Locate(g, bvp, bvp2, k); + + if (initid->const_item) + // Keep result of constant function + g->Activityp = (PACTIVITY)path; + + } catch (int n) { + xtrc(1, "Exception %d: %s\n", n, g->Message); + PUSH_WARNING(g->Message); + *error = 1; + path = NULL; + } catch (const char* msg) { + strcpy(g->Message, msg); + PUSH_WARNING(g->Message); + *error = 1; + path = NULL; + } // end catch + +err: + if (!path) { + *res_length = 0; + *is_null = 1; + } else + *res_length = strlen(path); + + return path; +} // end of jsonlocate_bson + +void jsonlocate_bson_deinit(UDF_INIT* initid) { + JsonFreeMem((PGLOBAL)initid->ptr); +} // end of jsonlocate_bson_deinit + +/*********************************************************************************/ +/* Locate all occurences of a value in a Json tree. 
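jsonlocate_bson wraps the Locate method above: the first argument is the document, the second the item to find, and the optional third argument the rank of the wanted occurrence (1 by default). With the sample document used earlier, SELECT jsonlocate_bson('{"a":1,"b":[1,2]}', 2) is expected to return '$.b[1]', and NULL when the item is not found.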
*/ +/*********************************************************************************/ +my_bool json_locate_all_bson_init(UDF_INIT* initid, UDF_ARGS* args, char* message) +{ + unsigned long reslen, memlen, more = 1000; + + if (args->arg_count < 2) { + strcpy(message, "At least 2 arguments required"); + return true; + } else if (!IsJson(args, 0) && args->arg_type[0] != STRING_RESULT) { + strcpy(message, "First argument must be a json item"); + return true; + } else if (args->arg_count > 2 && args->arg_type[2] != INT_RESULT) { + strcpy(message, "Third argument is not an integer (Depth)"); + return true; + } // endifs + + CalcLen(args, false, reslen, memlen); + + // TODO: calculate this + if (IsJson(args, 0) == 3) + more = 0; + + return JsonInit(initid, args, message, true, reslen, memlen, more); +} // end of json_locate_all_bson_init + +char* json_locate_all_bson(UDF_INIT* initid, UDF_ARGS* args, char* result, + unsigned long* res_length, char* is_null, char* error) +{ + char *path = NULL; + int mx = 10; + PBVAL bvp, bvp2; + PBJNX bnxp; + PGLOBAL g = (PGLOBAL)initid->ptr; + + if (g->N) { + if (g->Activityp) { + path = (char*)g->Activityp; + *res_length = strlen(path); + return path; + } else { + *error = 1; + *res_length = 0; + *is_null = 1; + return NULL; + } // endif Activityp + + } else if (initid->const_item) + g->N = 1; + + try { + if (!g->Xchk) { + if (CheckMemory(g, initid, args, 1, true)) { + PUSH_WARNING("CheckMemory error"); + *error = 1; + goto err; + } else + bvp = MakeBinValue(g, args, 0); + + if (!bvp) { + PUSH_WARNING("First argument is not a valid JSON item"); + goto err; + } // endif bvp + + if (g->Mrr) { // First argument is a constant + g->Xchk = bvp; + JsonMemSave(g); + } // endif Mrr + + } else + bvp = (PBVAL)g->Xchk; + + // The item to locate + bvp2 = MakeBinValue(g, args, 1); + + if (args->arg_count > 2) + mx = (int)*(long long*)args->args[2]; + + bnxp = new(g) BJNX(g, bvp, TYPE_STRING); + path = bnxp->LocateAll(g, bvp, bvp2, mx); + + if (initid->const_item) + // Keep result of constant function + g->Activityp = (PACTIVITY)path; + + } catch (int n) { + xtrc(1, "Exception %d: %s\n", n, g->Message); + PUSH_WARNING(g->Message); + *error = 1; + path = NULL; + } catch (const char* msg) { + strcpy(g->Message, msg); + PUSH_WARNING(g->Message); + *error = 1; + path = NULL; + } // end catch + +err: + if (!path) { + *res_length = 0; + *is_null = 1; + } else + *res_length = strlen(path); + + return path; +} // end of json_locate_all_bson + +void json_locate_all_bson_deinit(UDF_INIT* initid) { + JsonFreeMem((PGLOBAL)initid->ptr); +} // end of json_locate_all_bson_deinit + diff --git a/storage/connect/jsonudf.h b/storage/connect/jsonudf.h index 46bac66b607..886f380d426 100644 --- a/storage/connect/jsonudf.h +++ b/storage/connect/jsonudf.h @@ -1,7 +1,7 @@ /******************** tabjson H Declares Source Code File (.H) *******************/ -/* Name: jsonudf.h Version 1.3 */ +/* Name: jsonudf.h Version 1.4 */ /* */ -/* (C) Copyright to the author Olivier BERTRAND 2015-2017 */ +/* (C) Copyright to the author Olivier BERTRAND 2015-2020 */ /* */ /* This file contains the JSON UDF function and class declares. 
*/ /*********************************************************************************/ @@ -15,6 +15,27 @@ #define UDF_EXEC_ARGS \ UDF_INIT*, UDF_ARGS*, char*, unsigned long*, char*, char* +// BSON size should be equal on Linux and Windows +#define BMX 255 +typedef struct BSON* PBSON; + +/***********************************************************************/ +/* Structure used to return binary json to Json UDF functions. */ +/***********************************************************************/ +struct BSON { + char Msg[BMX + 1]; + char *Filename; + PGLOBAL G; + int Pretty; + ulong Reslen; + my_bool Changed; + PJSON Top; + PJSON Jsp; + PBSON Bsp; +}; // end of struct BSON + +PBSON JbinAlloc(PGLOBAL g, UDF_ARGS* args, ulong len, PJSON jsp); + /*********************************************************************************/ /* The JSON tree node. Can be an Object or an Array. */ /*********************************************************************************/ @@ -29,8 +50,6 @@ typedef struct _jnode { } JNODE, *PJNODE; typedef class JSNX *PJSNX; -typedef class JOUTPATH *PJTP; -typedef class JOUTALL *PJTA; extern "C" { DllExport my_bool jsonvalue_init(UDF_INIT*, UDF_ARGS*, char*); @@ -368,3 +387,90 @@ public: int k, recl; }; // end of class JUP + +/* --------------------------- New Testing BJSON Stuff --------------------------*/ + +typedef class BJNX* PBJNX; + +/*********************************************************************************/ +/* Class BJNX: BJSON access methods. */ +/*********************************************************************************/ +class BJNX : public BDOC { +public: + // Constructors + BJNX(PGLOBAL g, PBVAL row, int type, int len = 64, int prec = 0, my_bool wr = false); + + // Implementation + int GetPrecision(void) { return Prec; } + PVAL GetValue(void) { return Value; } + + // Methods + my_bool SetJpath(PGLOBAL g, char* path, my_bool jb = false); + my_bool ParseJpath(PGLOBAL g); + void ReadValue(PGLOBAL g); + PBVAL GetRowValue(PGLOBAL g, PBVAL row, int i, my_bool b = true); + PBVAL GetJson(PGLOBAL g); + my_bool CheckPath(PGLOBAL g); + my_bool WriteValue(PGLOBAL g, PBVAL jvalp); + char* Locate(PGLOBAL g, PBVAL jsp, PBVAL jvp, int k = 1); + char* LocateAll(PGLOBAL g, PBVAL jsp, PBVAL jvp, int mx = 10); + +protected: + my_bool SetArrayOptions(PGLOBAL g, char* p, int i, PSZ nm); + PVAL GetColumnValue(PGLOBAL g, PBVAL row, int i); + PVAL ExpandArray(PGLOBAL g, PBVAL arp, int n); + PVAL CalculateArray(PGLOBAL g, PBVAL arp, int n); + PVAL MakeJson(PGLOBAL g, PBVAL bvp); + void SetJsonValue(PGLOBAL g, PVAL vp, PBVAL vlp); + PBVAL GetRow(PGLOBAL g); + my_bool CompareValues(PGLOBAL g, PBVAL v1, PBVAL v2); + my_bool LocateArray(PGLOBAL g, PBVAL jarp); + my_bool LocateObject(PGLOBAL g, PBPR jobp); + my_bool LocateValue(PGLOBAL g, PBVAL jvp); + my_bool LocateArrayAll(PGLOBAL g, PBVAL jarp); + my_bool LocateObjectAll(PGLOBAL g, PBPR jobp); + my_bool LocateValueAll(PGLOBAL g, PBVAL jvp); + my_bool CompareTree(PGLOBAL g, PBVAL jp1, PBVAL jp2); + my_bool AddPath(void); + + // Default constructor not to be used + BJNX(void) {} + + // Members + PBVAL Row; + PBVAL Bvalp; + PJPN Jpnp; + JOUTSTR* Jp; + JNODE* Nodes; // The intermediate objects + PVAL Value; + PVAL MulVal; // To value used by multiple column + char* Jpath; // The json path + int Buf_Type; + int Long; + int Prec; + int Nod; // The number of intermediate objects + int Xnod; // Index of multiple values + int K; // Kth item to locate + int I; // Index of JPN + int Imax; // Max number of JPN's + int B; // 
Index base + my_bool Xpd; // True for expandable column + my_bool Parsed; // True when parsed + my_bool Found; // Item found by locate + my_bool Wr; // Write mode + my_bool Jb; // Must return json item +}; // end of class BJNX + +extern "C" { +DllExport my_bool json_test_bson_init(UDF_INIT*, UDF_ARGS*, char*); +DllExport char* json_test_bson(UDF_EXEC_ARGS); +DllExport void json_test_bson_deinit(UDF_INIT*); + +DllExport my_bool jsonlocate_bson_init(UDF_INIT*, UDF_ARGS*, char*); +DllExport char* jsonlocate_bson(UDF_EXEC_ARGS); +DllExport void jsonlocate_bson_deinit(UDF_INIT*); + +DllExport my_bool json_locate_all_bson_init(UDF_INIT*, UDF_ARGS*, char*); +DllExport char* json_locate_all_bson(UDF_EXEC_ARGS); +DllExport void json_locate_all_bson_deinit(UDF_INIT*); +} // extern "C" diff --git a/storage/connect/tabjson.cpp b/storage/connect/tabjson.cpp index a9aeadd7bf4..336b0f371ca 100644 --- a/storage/connect/tabjson.cpp +++ b/storage/connect/tabjson.cpp @@ -1596,6 +1596,7 @@ PVAL JSONCOL::MakeJson(PGLOBAL g, PJSON jsp) if (Value->IsTypeNum()) { strcpy(g->Message, "Cannot make Json for a numeric column"); Value->Reset(); +#if 0 } else if (Value->GetType() == TYPE_BIN) { if ((unsigned)Value->GetClen() >= sizeof(BSON)) { ulong len = Tjp->Lrecl ? Tjp->Lrecl : 500; @@ -1607,6 +1608,7 @@ PVAL JSONCOL::MakeJson(PGLOBAL g, PJSON jsp) strcpy(g->Message, "Column size too small"); Value->SetValue_char(NULL, 0); } // endif Clen +#endif 0 } else Value->SetValue_psz(Serialize(g, jsp, NULL, 0)); diff --git a/storage/connect/value.cpp b/storage/connect/value.cpp index bca6d26d1e9..e710fefc624 100644 --- a/storage/connect/value.cpp +++ b/storage/connect/value.cpp @@ -197,7 +197,7 @@ const char *GetFormatType(int type) case TYPE_DOUBLE: c = "F"; break; case TYPE_DATE: c = "D"; break; case TYPE_TINY: c = "T"; break; - case TYPE_DECIM: c = "M"; break; + case TYPE_DECIM: c = "F"; break; case TYPE_BIN: c = "B"; break; case TYPE_PCHAR: c = "P"; break; } // endswitch type From b656d3d333f6d8a28407e5e4b636cd142d757595 Mon Sep 17 00:00:00 2001 From: Olivier Bertrand Date: Wed, 25 Nov 2020 17:42:01 +0100 Subject: [PATCH 030/150] Desesperatly trying to stop compiling failures --- storage/connect/bson.cpp | 20 +- storage/connect/bson.h | 2 +- storage/connect/bsonudf.cpp | 1507 +++++++++++++++++++++++++++++++++++ storage/connect/bsonudf.h | 98 +++ storage/connect/json.h | 1 + storage/connect/jsonudf.cpp | 1502 +--------------------------------- storage/connect/jsonudf.h | 121 +-- 7 files changed, 1655 insertions(+), 1596 deletions(-) create mode 100644 storage/connect/bsonudf.cpp create mode 100644 storage/connect/bsonudf.h diff --git a/storage/connect/bson.cpp b/storage/connect/bson.cpp index 2f380752c0d..61e5eb9fe16 100644 --- a/storage/connect/bson.cpp +++ b/storage/connect/bson.cpp @@ -135,10 +135,10 @@ PBVAL BDOC::ParseJson(PGLOBAL g, char* js, size_t lng, int* ptyp, bool* comma) { if (Bvp->Type != TYPE_UNKNOWN) { Bvp->To_Val = ParseAsArray(g, i, pretty, ptyp); Bvp->Type = TYPE_JAR; - } else if ((Bvp->To_Val = ParseObject(g, ++i))) + } else { + Bvp->To_Val = ParseObject(g, ++i); Bvp->Type = TYPE_JOB; - else - throw 2; + } // endif Type break; case ' ': @@ -300,7 +300,7 @@ OFFSET BDOC::ParseObject(PGLOBAL g, int& i) { } else firstbpp = lastbpp = bpp; - level = 1; + level = 2; } else { sprintf(g->Message, "misplaced string near %.*s", ARGS); throw 2; @@ -308,9 +308,9 @@ OFFSET BDOC::ParseObject(PGLOBAL g, int& i) { break; case ':': - if (level == 1) { + if (level == 2) { lastbpp->Vlp = MOF(ParseValue(g, ++i)); - level = 
2; + level = 3; } else { sprintf(g->Message, "Unexpected ':' near %.*s", ARGS); throw 2; @@ -318,15 +318,15 @@ OFFSET BDOC::ParseObject(PGLOBAL g, int& i) { break; case ',': - if (level < 2) { + if (level < 3) { sprintf(g->Message, "Unexpected ',' near %.*s", ARGS); throw 2; } else - level = 0; + level = 1; break; case '}': - if (level < 2) { + if (!(level == 0 || level == 3)) { sprintf(g->Message, "Unexpected '}' near %.*s", ARGS); throw 2; } // endif level @@ -1248,7 +1248,7 @@ PBVAL BJSON::SubAllocVal(PGLOBAL g) /***********************************************************************/ /* Sub-allocate and initialize a BVAL as string. */ /***********************************************************************/ -PBVAL BJSON::SubAllocVal(PGLOBAL g, OFFSET toval, JTYP type, short nd) +PBVAL BJSON::SubAllocVal(PGLOBAL g, OFFSET toval, int type, short nd) { PBVAL bvp = (PBVAL)BsonSubAlloc(g, sizeof(BVAL)); diff --git a/storage/connect/bson.h b/storage/connect/bson.h index 9f5f44d073c..bffda8ea316 100644 --- a/storage/connect/bson.h +++ b/storage/connect/bson.h @@ -77,7 +77,7 @@ public: void* BsonSubAlloc(PGLOBAL g, size_t size); PBPR SubAllocPair(PGLOBAL g, OFFSET key, OFFSET val = 0); PBVAL SubAllocVal(PGLOBAL g); - PBVAL SubAllocVal(PGLOBAL g, OFFSET toval, JTYP type = TYPE_UNKNOWN, short nd = 0); + PBVAL SubAllocVal(PGLOBAL g, OFFSET toval, int type = TYPE_UNKNOWN, short nd = 0); PBVAL SubAllocVal(PGLOBAL g, PVAL valp); PBVAL DupVal(PGLOBAL g, PBVAL bvp); diff --git a/storage/connect/bsonudf.cpp b/storage/connect/bsonudf.cpp new file mode 100644 index 00000000000..bbb279ce6ce --- /dev/null +++ b/storage/connect/bsonudf.cpp @@ -0,0 +1,1507 @@ +/****************** bsonudf C++ Program Source Code File (.CPP) ******************/ +/* PROGRAM NAME: bsonudf Version 1.0 */ +/* (C) Copyright to the author Olivier BERTRAND 2020 */ +/* This program are the BSON User Defined Functions . */ +/*********************************************************************************/ + +/*********************************************************************************/ +/* Include relevant sections of the MariaDB header file. */ +/*********************************************************************************/ +#include +#include +#include +#include +#include + +#include "bsonudf.h" + +#if defined(UNIX) || defined(UNIV_LINUX) +#define _O_RDONLY O_RDONLY +#endif + +#define MEMFIX 4096 +#if defined(connect_EXPORTS) +#define PUSH_WARNING(M) push_warning(current_thd, Sql_condition::WARN_LEVEL_WARN, 0, M) +#else +#define PUSH_WARNING(M) htrc(M) +#endif +#define M 9 + +/* --------------------------------- JSON UDF ---------------------------------- */ + +/*********************************************************************************/ +/* Program for saving the status of the memory pools. */ +/*********************************************************************************/ +inline void JsonMemSave(PGLOBAL g) { + g->Saved_Size = ((PPOOLHEADER)g->Sarea)->To_Free; +} /* end of JsonMemSave */ + +/*********************************************************************************/ +/* Program for freeing the memory pools. 
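The bson.cpp hunk above also renumbers the small state machine of ParseObject: level 0 now means nothing parsed yet, 1 a key expected after a comma, 2 a key read and waiting for its ':', and 3 a completed key/value pair, with '}' accepted only at levels 0 or 3; ParseJson now stores the returned offset unconditionally, presumably so that an empty object (offset 0) is no longer mistaken for a failure. The expected outcome, shown here as an illustration only (not part of the patch):

  //  "{}"          ->  accepted  (level 0 at the closing brace)
  //  "{\"a\":1}"   ->  accepted  (level 3 at the closing brace)
  //  "{\"a\":1,}"  ->  rejected  (level 1, another key is still expected)
  //  "{\"a\"}"     ->  rejected  (level 2, ':' is still expected)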
*/ +/*********************************************************************************/ +inline void JsonFreeMem(PGLOBAL g) { + g->Activityp = NULL; + PlugExit(g); +} /* end of JsonFreeMem */ + +/* --------------------------- New Testing BJSON Stuff --------------------------*/ + +/*********************************************************************************/ +/* SubAlloc a new BJNX class with protection against memory exhaustion. */ +/*********************************************************************************/ +static PBJNX BjnxNew(PGLOBAL g, PBVAL vlp, int type, int len) { + PBJNX bjnx; + + try { + bjnx = new(g) BJNX(g, vlp, type, len); + } catch (...) { + if (trace(1023)) + htrc("%s\n", g->Message); + + PUSH_WARNING(g->Message); + bjnx = NULL; + } // end try/catch + + return bjnx; +} /* end of BjnxNew */ + +/* ----------------------------------- BSNX ------------------------------------ */ + +/*********************************************************************************/ +/* BSNX public constructor. */ +/*********************************************************************************/ +BJNX::BJNX(PGLOBAL g, PBVAL row, int type, int len, int prec, my_bool wr) + : BDOC(g->Sarea) { + Row = row; + Bvalp = NULL; + Jpnp = NULL; + Jp = NULL; + Nodes = NULL; + Value = AllocateValue(g, type, len, prec); + MulVal = NULL; + Jpath = NULL; + Buf_Type = type; + Long = len; + Prec = prec; + Nod = 0; + Xnod = -1; + K = 0; + I = -1; + Imax = 9; + B = 0; + Xpd = false; + Parsed = false; + Found = false; + Wr = wr; + Jb = false; +} // end of BJNX constructor + +/*********************************************************************************/ +/* SetJpath: set and parse the json path. */ +/*********************************************************************************/ +my_bool BJNX::SetJpath(PGLOBAL g, char* path, my_bool jb) { + // Check Value was allocated + if (!Value) + return true; + + Value->SetNullable(true); + Jpath = path; + + // Parse the json path + Parsed = false; + Nod = 0; + Jb = jb; + return ParseJpath(g); +} // end of SetJpath + +/*********************************************************************************/ +/* Analyse array processing options. 
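Taken together, the constructor, SetJpath and the read methods are used by the UDFs roughly as in the minimal sketch below (illustration only, not part of the patch; g, the already parsed document jsp, the path and the length are assumptions made for the example):

  PBJNX bxp = BjnxNew(g, jsp, TYPE_STRING, 512);    // allocation protected against OOM

  if (bxp && !bxp->SetJpath(g, (char*)"$.names[0]")) {
    bxp->ReadValue(g);                              // walks the path into Value
    PVAL vp = bxp->GetValue();                      // first element of "names" (B = 0)
  } // endif SetJpath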
*/ +/*********************************************************************************/ +my_bool BJNX::SetArrayOptions(PGLOBAL g, char* p, int i, PSZ nm) { + int n = (int)strlen(p); + my_bool dg = true, b = false; + PJNODE jnp = &Nodes[i]; + + if (*p) { + if (p[n - 1] == ']') { + p[--n] = 0; + } else if (!IsNum(p)) { + // Wrong array specification + sprintf(g->Message, "Invalid array specification %s", p); + return true; + } // endif p + + } else + b = true; + + // To check whether a numeric Rank was specified + dg = IsNum(p); + + if (!n) { + // Default specifications + if (jnp->Op != OP_EXP) { + if (Wr) { + // Force append + jnp->Rank = INT_MAX32; + jnp->Op = OP_LE; + } else if (Jb) { + // Return a Json item + jnp->Op = OP_XX; + } else if (b) { + // Return 1st value (B is the index base) + jnp->Rank = B; + jnp->Op = OP_LE; + } else if (!Value->IsTypeNum()) { + jnp->CncVal = AllocateValue(g, PlugDup(g, ", "), TYPE_STRING); + jnp->Op = OP_CNC; + } else + jnp->Op = OP_ADD; + + } // endif OP + + } else if (dg) { + // Return nth value + jnp->Rank = atoi(p) - B; + jnp->Op = OP_EQ; + } else if (Wr) { + sprintf(g->Message, "Invalid specification %s in a write path", p); + return true; + } else if (n == 1) { + // Set the Op value; + switch (*p) { + case '+': jnp->Op = OP_ADD; break; + case 'x': jnp->Op = OP_MULT; break; + case '>': jnp->Op = OP_MAX; break; + case '<': jnp->Op = OP_MIN; break; + case '!': jnp->Op = OP_SEP; break; // Average + case '#': jnp->Op = OP_NUM; break; + case '*': // Expand this array + strcpy(g->Message, "Expand not supported by this function"); + return true; + default: + sprintf(g->Message, "Invalid function specification %c", *p); + return true; + } // endswitch *p + + } else if (*p == '"' && p[n - 1] == '"') { + // This is a concat specification + jnp->Op = OP_CNC; + + if (n > 2) { + // Set concat intermediate string + p[n - 1] = 0; + + if (trace(1)) + htrc("Concat string=%s\n", p + 1); + + jnp->CncVal = AllocateValue(g, p + 1, TYPE_STRING); + } // endif n + + } else { + strcpy(g->Message, "Wrong array specification"); + return true; + } // endif's + + // For calculated arrays, a local Value must be used + switch (jnp->Op) { + case OP_NUM: + jnp->Valp = AllocateValue(g, TYPE_INT); + break; + case OP_ADD: + case OP_MULT: + case OP_SEP: + if (!IsTypeChar(Buf_Type)) + jnp->Valp = AllocateValue(g, Buf_Type, 0, GetPrecision()); + else + jnp->Valp = AllocateValue(g, TYPE_DOUBLE, 0, 2); + + break; + case OP_MIN: + case OP_MAX: + jnp->Valp = AllocateValue(g, Buf_Type, Long, GetPrecision()); + break; + case OP_CNC: + if (IsTypeChar(Buf_Type)) + jnp->Valp = AllocateValue(g, TYPE_STRING, Long, GetPrecision()); + else + jnp->Valp = AllocateValue(g, TYPE_STRING, 512); + + break; + default: + break; + } // endswitch Op + + if (jnp->Valp) + MulVal = AllocateValue(g, jnp->Valp); + + return false; +} // end of SetArrayOptions + +/*********************************************************************************/ +/* Parse the eventual passed Jpath information. */ +/* This information can be specified in the Fieldfmt column option when */ +/* creating the table. It permits to indicate the position of the node */ +/* corresponding to that column. 
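For reference, the array specifications recognized just above translate as follows, assuming the default index base B = 0 (illustration only, not part of the patch):

  //  "$.size[3]"       ->  the value at rank 3 (OP_EQ)
  //  "$.price[+]"      ->  sum of the array; "x", ">" and "<" give product, max and min
  //  "$.price[!]"      ->  average of the array (OP_SEP)
  //  "$.price[#]"      ->  number of elements (OP_NUM)
  //  "$.name[\", \"]"  ->  elements concatenated with the given separator (OP_CNC)
  //  "$.name[*]"       ->  expand, rejected by these UDF functions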
*/ +/*********************************************************************************/ +my_bool BJNX::ParseJpath(PGLOBAL g) { + char* p, * p1 = NULL, * p2 = NULL, * pbuf = NULL; + int i; + my_bool a, mul = false; + + if (Parsed) + return false; // Already done + else if (!Jpath) + // Jpath = Name; + return true; + + if (trace(1)) + htrc("ParseJpath %s\n", SVP(Jpath)); + + if (!(pbuf = PlgDBDup(g, Jpath))) + return true; + + if (*pbuf == '$') pbuf++; + if (*pbuf == '.') pbuf++; + if (*pbuf == '[') p1 = pbuf++; + + // Estimate the required number of nodes + for (i = 0, p = pbuf; (p = NextChr(p, '.')); i++, p++) + Nod++; // One path node found + + if (!(Nodes = (PJNODE)PlgDBSubAlloc(g, NULL, (++Nod) * sizeof(JNODE)))) + return true; + + memset(Nodes, 0, (Nod) * sizeof(JNODE)); + + // Analyze the Jpath for this column + for (i = 0, p = pbuf; p && i < Nod; i++, p = (p2 ? p2 : NULL)) { + a = (p1 != NULL); + p1 = strchr(p, '['); + p2 = strchr(p, '.'); + + if (!p2) + p2 = p1; + else if (p1) { + if (p1 < p2) + p2 = p1; + else if (p1 == p2 + 1) + *p2++ = 0; // Old syntax .[ + else + p1 = NULL; + + } // endif p1 + + if (p2) + *p2++ = 0; + + // Jpath must be explicit + if (a || *p == 0 || *p == '[' || IsNum(p)) { + // Analyse intermediate array processing + if (SetArrayOptions(g, p, i, Nodes[i - 1].Key)) + return true; + + } else if (*p == '*') { + if (Wr) { + sprintf(g->Message, "Invalid specification %c in a write path", *p); + return true; + } else // Return JSON + Nodes[i].Op = OP_XX; + + } else { + Nodes[i].Key = p; + Nodes[i].Op = OP_EXIST; + } // endif's + + } // endfor i, p + + Nod = i; + MulVal = AllocateValue(g, Value); + + if (trace(1)) + for (i = 0; i < Nod; i++) + htrc("Node(%d) Key=%s Op=%d Rank=%d\n", + i, SVP(Nodes[i].Key), Nodes[i].Op, Nodes[i].Rank); + + Parsed = true; + return false; +} // end of ParseJpath + +/*********************************************************************************/ +/* MakeJson: Serialize the json item and set value to it. */ +/*********************************************************************************/ +PVAL BJNX::MakeJson(PGLOBAL g, PBVAL bvp) { + if (Value->IsTypeNum()) { + strcpy(g->Message, "Cannot make Json for a numeric value"); + Value->Reset(); + } else if (bvp->Type != TYPE_JAR && bvp->Type != TYPE_JOB) { + strcpy(g->Message, "Target is not an array or object"); + Value->Reset(); + } else + Value->SetValue_psz(Serialize(g, bvp, NULL, 0)); + + return Value; +} // end of MakeJson + +/*********************************************************************************/ +/* SetValue: Set a value from a JVALUE contains. */ +/*********************************************************************************/ +void BJNX::SetJsonValue(PGLOBAL g, PVAL vp, PBVAL vlp) { + if (vlp) { + vp->SetNull(false); + + if (Jb) { + vp->SetValue_psz(Serialize(g, vlp, NULL, 0)); + } else switch (vlp->Type) { + case TYPE_DTM: + case TYPE_STRG: + vp->SetValue_psz(GetString(g, vlp)); + break; + case TYPE_INTG: + case TYPE_BINT: + vp->SetValue(GetInteger(vlp)); + break; + case TYPE_DBL: + if (vp->IsTypeNum()) + vp->SetValue(GetDouble(vlp)); + else // Get the proper number of decimals + vp->SetValue_psz(GetString(g, vlp)); + + break; + case TYPE_BOOL: + if (vp->IsTypeNum()) + vp->SetValue(GetInteger(vlp) ? 
1 : 0); + else + vp->SetValue_psz(GetString(g, vlp)); + + break; + case TYPE_JAR: + vp->SetValue_psz(GetArrayText(g, MVP(vlp->To_Val), NULL)); + break; + case TYPE_JOB: + vp->SetValue_psz(GetObjectText(g, MPP(vlp->To_Val), NULL)); + break; + case TYPE_NULL: + vp->SetNull(true); + default: + vp->Reset(); + } // endswitch Type + + } else { + vp->SetNull(true); + vp->Reset(); + } // endif val + +} // end of SetJsonValue + +/*********************************************************************************/ +/* GetJson: */ +/*********************************************************************************/ +PBVAL BJNX::GetJson(PGLOBAL g) { + return GetRowValue(g, Row, 0); +} // end of GetJson + +/*********************************************************************************/ +/* ReadValue: */ +/*********************************************************************************/ +void BJNX::ReadValue(PGLOBAL g) { + Value->SetValue_pval(GetColumnValue(g, Row, 0)); +} // end of ReadValue + +/*********************************************************************************/ +/* GetColumnValue: */ +/*********************************************************************************/ +PVAL BJNX::GetColumnValue(PGLOBAL g, PBVAL row, int i) { + PBVAL vlp = GetRowValue(g, row, i); + + SetJsonValue(g, Value, vlp); + return Value; +} // end of GetColumnValue + +/*********************************************************************************/ +/* GetRowValue: */ +/*********************************************************************************/ +PBVAL BJNX::GetRowValue(PGLOBAL g, PBVAL row, int i, my_bool b) { + my_bool expd = false; + PBVAL bap; + PBVAL vlp = NULL; + + for (; i < Nod && row; i++) { + if (Nodes[i].Op == OP_NUM) { + Value->SetValue(row->Type == TYPE_JAR ? GetArraySize(MVP(row->To_Val)) : 1); + vlp = SubAllocVal(g, Value); + return vlp; + } else if (Nodes[i].Op == OP_XX) { + Jb = b; + // return DupVal(g, row); + return row; // or last line ??? + } else switch (row->Type) { + case TYPE_JOB: + if (!Nodes[i].Key) { + // Expected Array was not there + if (Nodes[i].Op == OP_LE) { + if (i < Nod - 1) + continue; + else + vlp = row; // DupVal(g, row) ??? 
+ + } else { + strcpy(g->Message, "Unexpected object"); + vlp = NULL; + } //endif Op + + } else + vlp = GetKeyValue(MPP(row->To_Val), Nodes[i].Key); + + break; + case TYPE_JAR: + bap = MVP(row->To_Val); + + if (!Nodes[i].Key) { + if (Nodes[i].Op == OP_EQ || Nodes[i].Op == OP_LE) + vlp = GetArrayValue(bap, Nodes[i].Rank); + else if (Nodes[i].Op == OP_EXP) + return (PBVAL)ExpandArray(g, bap, i); + else + return SubAllocVal(g, CalculateArray(g, bap, i)); + + } else { + // Unexpected array, unwrap it as [0] + vlp = GetArrayValue(bap, 0); + i--; + } // endif's + + break; + case TYPE_JVAL: + vlp = row; + break; + default: + sprintf(g->Message, "Invalid row JSON type %d", row->Type); + vlp = NULL; + } // endswitch Type + + row = vlp; + } // endfor i + + return vlp; +} // end of GetRowValue + +/*********************************************************************************/ +/* ExpandArray: */ +/*********************************************************************************/ +PVAL BJNX::ExpandArray(PGLOBAL g, PBVAL arp, int n) { + strcpy(g->Message, "Expand cannot be done by this function"); + return NULL; +} // end of ExpandArray + +/*********************************************************************************/ +/* CalculateArray: NIY */ +/*********************************************************************************/ +PVAL BJNX::CalculateArray(PGLOBAL g, PBVAL bap, int n) { +#if 0 + int i, ars = GetArraySize(bap), nv = 0; + bool err; + OPVAL op = Nodes[n].Op; + PVAL val[2], vp = Nodes[n].Valp; + PBVAL bvrp, bvp; + BVAL bval; + + vp->Reset(); + xtrc(1, "CalculateArray size=%d op=%d\n", ars, op); + + for (i = 0; i < ars; i++) { + bvrp = GetArrayValue(bap, i); + xtrc(1, "i=%d nv=%d\n", i, nv); + + if (!IsValueNull(bvrp) || (op == OP_CNC && GetJsonNull())) { + if (IsValueNull(bvrp)) { + SetString(bvrp, GetJsonNull(), 0); + bvp = bvrp; + } else if (n < Nod - 1 && bvrp->GetJson()) { + bval.SetValue(g, GetColumnValue(g, jvrp->GetJson(), n + 1)); + bvp = &bval; + } else + jvp = jvrp; + + if (trace(1)) + htrc("jvp=%s null=%d\n", + jvp->GetString(g), jvp->IsNull() ? 1 : 0); + + if (!nv++) { + SetJsonValue(g, vp, jvp); + continue; + } else + SetJsonValue(g, MulVal, jvp); + + if (!MulVal->IsNull()) { + switch (op) { + case OP_CNC: + if (Nodes[n].CncVal) { + val[0] = Nodes[n].CncVal; + err = vp->Compute(g, val, 1, op); + } // endif CncVal + + val[0] = MulVal; + err = vp->Compute(g, val, 1, op); + break; + // case OP_NUM: + case OP_SEP: + val[0] = Nodes[n].Valp; + val[1] = MulVal; + err = vp->Compute(g, val, 2, OP_ADD); + break; + default: + val[0] = Nodes[n].Valp; + val[1] = MulVal; + err = vp->Compute(g, val, 2, op); + } // endswitch Op + + if (err) + vp->Reset(); + + if (trace(1)) { + char buf(32); + + htrc("vp='%s' err=%d\n", + vp->GetCharString(&buf), err ? 1 : 0); + } // endif trace + + } // endif Zero + + } // endif jvrp + + } // endfor i + + if (op == OP_SEP) { + // Calculate average + MulVal->SetValue(nv); + val[0] = vp; + val[1] = MulVal; + + if (vp->Compute(g, val, 2, OP_DIV)) + vp->Reset(); + + } // endif Op + + return vp; +#else + strcpy(g->Message, "Calculate array NIY"); + return NULL; +#endif +} // end of CalculateArray + +/*********************************************************************************/ +/* CheckPath: Checks whether the path exists in the document. 
*/ +/*********************************************************************************/ +my_bool BJNX::CheckPath(PGLOBAL g) { + PBVAL val = NULL; + PBVAL row = Row; + + for (int i = 0; i < Nod && row; i++) { + val = NULL; + + if (Nodes[i].Op == OP_NUM || Nodes[i].Op == OP_XX) { + } else switch (row->Type) { + case TYPE_JOB: + if (Nodes[i].Key) + val = GetKeyValue(MPP(row->To_Val), Nodes[i].Key); + + break; + case TYPE_JAR: + if (!Nodes[i].Key) + if (Nodes[i].Op == OP_EQ || Nodes[i].Op == OP_LE) + val = GetArrayValue(MVP(row->To_Val), Nodes[i].Rank); + + break; + case TYPE_JVAL: + val = MVP(row->To_Val); + break; + default: + sprintf(g->Message, "Invalid row JSON type %d", row->Type); + } // endswitch Type + +// if (i < Nod - 1) +// if (!(row = (val) ? val->GetJsp() : NULL)) +// val = NULL; + + row = val; + } // endfor i + + return (val != NULL); +} // end of CheckPath + +/***********************************************************************/ +/* GetRow: Set the complete path of the object to be set. */ +/***********************************************************************/ +PBVAL BJNX::GetRow(PGLOBAL g) { + PBVAL val = NULL; + PBVAL arp; + PBVAL nwr, row = Row; + + for (int i = 0; i < Nod - 1 && row; i++) { + if (Nodes[i].Op == OP_XX) + break; + else switch (row->Type) { + case TYPE_JOB: + if (!Nodes[i].Key) + // Expected Array was not there, wrap the value + continue; + + val = GetKeyValue(MPP(row->To_Val), Nodes[i].Key); + break; + case TYPE_JAR: + arp = MVP(row->To_Val); + + if (!Nodes[i].Key) { + if (Nodes[i].Op == OP_EQ) + val = GetArrayValue(arp, Nodes[i].Rank); + else + val = GetArrayValue(arp, Nodes[i].Rx); + + } else { + // Unexpected array, unwrap it as [0] + val = GetArrayValue(arp, 0); + i--; + } // endif Nodes + + break; + case TYPE_JVAL: + val = MVP(row->To_Val); + break; + default: + sprintf(g->Message, "Invalid row JSON type %d", row->Type); + val = NULL; + } // endswitch Type + + if (val) { + row = val; + } else { + // Construct missing objects + for (i++; row && i < Nod; i++) { + if (Nodes[i].Op == OP_XX) + break; + // else if (!Nodes[i].Key) + // Construct intermediate array + // nwr = SubAllocVal(g); + // else + // nwr = SubAllocPair(g); + + // Construct new row + nwr = SubAllocVal(g); + + if (row->Type == TYPE_JOB) { + SetKeyValue(g, MPP(row->To_Val), MOF(nwr), Nodes[i - 1].Key); + } else if (row->Type == TYPE_JAR) { + AddArrayValue(g, MVP(row->To_Val), nwr); + } else { + strcpy(g->Message, "Wrong type when writing new row"); + nwr = NULL; + } // endif's + + row = nwr; + } // endfor i + + break; + } // endelse + + } // endfor i + + return row; +} // end of GetRow + +/***********************************************************************/ +/* WriteValue: */ +/***********************************************************************/ +my_bool BJNX::WriteValue(PGLOBAL g, PBVAL jvalp) { + PBPR objp = NULL; + PBVAL arp = NULL; + PBVAL jvp = NULL; + PBVAL row = GetRow(g); + + if (!row) + return true; + + switch (row->Type) { + case TYPE_JOB: objp = MPP(row->To_Val); break; + case TYPE_JAR: arp = MVP(row->To_Val); break; + case TYPE_JVAL: jvp = MVP(row->To_Val); break; + default: + strcpy(g->Message, "Invalid target type"); + return true; + } // endswitch Type + + if (arp) { + if (!Nodes[Nod - 1].Key) { + if (Nodes[Nod - 1].Op == OP_EQ) + SetArrayValue(g, arp, jvalp, Nodes[Nod - 1].Rank); + else + AddArrayValue(g, arp, jvalp); + + } // endif Key + + } else if (objp) { + if (Nodes[Nod - 1].Key) + SetKeyValue(g, objp, MOF(jvalp), Nodes[Nod - 1].Key); + + } else if (jvp) + 
SetValueVal(jvp, jvalp); + + return false; +} // end of WriteValue + +/*********************************************************************************/ +/* Locate a value in a JSON tree: */ +/*********************************************************************************/ +PSZ BJNX::Locate(PGLOBAL g, PBVAL jsp, PBVAL jvp, int k) { + PSZ str = NULL; + my_bool b = false, err = true; + + g->Message[0] = 0; + + if (!jsp) { + strcpy(g->Message, "Null json tree"); + return NULL; + } // endif jsp + + try { + // Write to the path string + Jp = new(g) JOUTSTR(g); + Jp->WriteChr('$'); + Bvalp = jvp; + K = k; + + switch (jsp->Type) { + case TYPE_JAR: + err = LocateArray(g, MVP(jsp->To_Val)); + break; + case TYPE_JOB: + err = LocateObject(g, MPP(jsp->To_Val)); + break; + case TYPE_JVAL: + err = LocateValue(g, MVP(jsp->To_Val)); + break; + default: + err = true; + } // endswitch Type + + if (err) { + if (!g->Message[0]) + strcpy(g->Message, "Invalid json tree"); + + } else if (Found) { + Jp->WriteChr('\0'); + PlugSubAlloc(g, NULL, Jp->N); + str = Jp->Strp; + } // endif's + + } catch (int n) { + if (trace(1)) + htrc("Exception %d: %s\n", n, g->Message); + + PUSH_WARNING(g->Message); + } catch (const char* msg) { + strcpy(g->Message, msg); + } // end catch + + return str; +} // end of Locate + +/*********************************************************************************/ +/* Locate in a JSON Array. */ +/*********************************************************************************/ +my_bool BJNX::LocateArray(PGLOBAL g, PBVAL jarp) { + char s[16]; + int n = GetArraySize(jarp); + size_t m = Jp->N; + + for (int i = 0; i < n && !Found; i++) { + Jp->N = m; + sprintf(s, "[%d]", i + B); + + if (Jp->WriteStr(s)) + return true; + + if (LocateValue(g, GetArrayValue(jarp, i))) + return true; + + } // endfor i + + return false; +} // end of LocateArray + +/*********************************************************************************/ +/* Locate in a JSON Object. */ +/*********************************************************************************/ +my_bool BJNX::LocateObject(PGLOBAL g, PBPR jobp) { + size_t m; + + if (Jp->WriteChr('.')) + return true; + + m = Jp->N; + + for (PBPR pair = jobp; pair && !Found; pair = MPP(pair->Next)) { + Jp->N = m; + + if (Jp->WriteStr(MZP(pair->Key))) + return true; + + if (LocateValue(g, MVP(pair->Vlp))) + return true; + + } // endfor i + + return false; +} // end of LocateObject + +/*********************************************************************************/ +/* Locate a JSON Value. 
*/ +/*********************************************************************************/ +my_bool BJNX::LocateValue(PGLOBAL g, PBVAL jvp) { + if (CompareTree(g, Bvalp, jvp)) + Found = (--K == 0); + else if (jvp->Type == TYPE_JAR) + return LocateArray(g, GetArray(jvp)); + else if (jvp->Type == TYPE_JOB) + return LocateObject(g, GetObject(jvp)); + + return false; +} // end of LocateValue + +/*********************************************************************************/ +/* Locate all occurrences of a value in a JSON tree: */ +/*********************************************************************************/ +PSZ BJNX::LocateAll(PGLOBAL g, PBVAL jsp, PBVAL bvp, int mx) { + PSZ str = NULL; + my_bool b = false, err = true; + PJPN jnp; + + if (!jsp) { + strcpy(g->Message, "Null json tree"); + return NULL; + } // endif jsp + + try { + jnp = (PJPN)PlugSubAlloc(g, NULL, sizeof(JPN) * mx); + memset(jnp, 0, sizeof(JPN) * mx); + g->Message[0] = 0; + + // Write to the path string + Jp = new(g)JOUTSTR(g); + Bvalp = bvp; + Imax = mx - 1; + Jpnp = jnp; + Jp->WriteChr('['); + + switch (jsp->Type) { + case TYPE_JAR: + err = LocateArrayAll(g, MVP(jsp->To_Val)); + break; + case TYPE_JOB: + err = LocateObjectAll(g, MPP(jsp->To_Val)); + break; + case TYPE_JVAL: + err = LocateValueAll(g, MVP(jsp->To_Val)); + break; + default: + err = LocateValueAll(g, jsp); + } // endswitch Type + + if (!err) { + if (Jp->N > 1) + Jp->N--; + + Jp->WriteChr(']'); + Jp->WriteChr('\0'); + PlugSubAlloc(g, NULL, Jp->N); + str = Jp->Strp; + } else if (!g->Message[0]) + strcpy(g->Message, "Invalid json tree"); + + } catch (int n) { + xtrc(1, "Exception %d: %s\n", n, g->Message); + PUSH_WARNING(g->Message); + } catch (const char* msg) { + strcpy(g->Message, msg); + } // end catch + + return str; +} // end of LocateAll + +/*********************************************************************************/ +/* Locate in a JSON Array. */ +/*********************************************************************************/ +my_bool BJNX::LocateArrayAll(PGLOBAL g, PBVAL jarp) { + int i = 0; + + if (I < Imax) { + Jpnp[++I].Type = TYPE_JAR; + + for (PBVAL vp = jarp; vp; vp = MVP(vp->Next)) { + Jpnp[I].N = i; + + if (LocateValueAll(g, GetArrayValue(jarp, i))) + return true; + + i++; + } // endfor i + + I--; + } // endif I + + return false; +} // end of LocateArrayAll + +/*********************************************************************************/ +/* Locate in a JSON Object. */ +/*********************************************************************************/ +my_bool BJNX::LocateObjectAll(PGLOBAL g, PBPR jobp) { + if (I < Imax) { + Jpnp[++I].Type = TYPE_JOB; + + for (PBPR pair = jobp; pair; pair = MPP(pair->Next)) { + Jpnp[I].Key = MZP(pair->Key); + + if (LocateValueAll(g, MVP(pair->Vlp))) + return true; + + } // endfor i + + I--; + } // endif I + + return false; +} // end of LocateObjectAll + +/*********************************************************************************/ +/* Locate a JSON Value. */ +/*********************************************************************************/ +my_bool BJNX::LocateValueAll(PGLOBAL g, PBVAL jvp) { + if (CompareTree(g, Bvalp, jvp)) + return AddPath(); + else if (jvp->Type == TYPE_JAR) + return LocateArrayAll(g, GetArray(jvp)); + else if (jvp->Type == TYPE_JOB) + return LocateObjectAll(g, GetObject(jvp)); + + return false; +} // end of LocateValueAll + +/*********************************************************************************/ +/* Compare two JSON trees. 
*/ +/*********************************************************************************/ +my_bool BJNX::CompareTree(PGLOBAL g, PBVAL jp1, PBVAL jp2) { + if (!jp1 || !jp2 || jp1->Type != jp2->Type || GetSize(jp1) != GetSize(jp2)) + return false; + + my_bool found = true; + + if (jp1->Type == TYPE_JAR) { + for (int i = 0; found && i < GetArraySize(jp1); i++) + found = (CompareValues(g, GetArrayValue(jp1, i), GetArrayValue(jp2, i))); + + } else if (jp1->Type == TYPE_JOB) { + PBPR p1 = MPP(jp1->To_Val), p2 = MPP(jp2->To_Val); + + // Keys can be differently ordered + for (; found && p1 && p2; p1 = MPP(p1->Next)) + found = CompareValues(g, MVP(p1->Vlp), GetKeyValue(p2, MZP(p1->Key))); + + } else if (jp1->Type == TYPE_JVAL) { + found = CompareTree(g, MVP(jp1->To_Val), (MVP(jp2->To_Val))); + } else + found = CompareValues(g, jp1, jp2); + + return found; +} // end of CompareTree + +/*********************************************************************************/ +/* Compare two VAL values and return true if they are equal. */ +/*********************************************************************************/ +my_bool BJNX::CompareValues(PGLOBAL g, PBVAL v1, PBVAL v2) { + my_bool b = false; + + if (v1 && v2) + switch (v1->Type) { + case TYPE_JAR: + if (v2->Type == TYPE_JAR) + b = CompareTree(g, MVP(v1->To_Val), MVP(v2->To_Val)); + + break; + case TYPE_STRG: + if (v2->Type == TYPE_STRG) { + if (v1->Nd || v2->Nd) // Case insensitive + b = (!stricmp(MZP(v1->To_Val), MZP(v2->To_Val))); + else + b = (!strcmp(MZP(v1->To_Val), MZP(v2->To_Val))); + + } // endif Type + + break; + case TYPE_DTM: + if (v2->Type == TYPE_DTM) + b = (!strcmp(MZP(v1->To_Val), MZP(v2->To_Val))); + + break; + case TYPE_INTG: + if (v2->Type == TYPE_INTG) + b = (v1->N == v2->N); + else if (v2->Type == TYPE_BINT) + b = ((longlong)v1->N == LLN(v2->To_Val)); + + break; + case TYPE_BINT: + if (v2->Type == TYPE_INTG) + b = (LLN(v1->To_Val) == (longlong)v2->N); + else if (v2->Type == TYPE_BINT) + b = (LLN(v1->To_Val) == LLN(v2->To_Val)); + + break; + case TYPE_FLOAT: + if (v2->Type == TYPE_FLOAT) + b = (v1->F == v2->F); + else if (v2->Type == TYPE_DBL) + b = ((double)v1->F == DBL(v2->To_Val)); + + break; + case TYPE_DBL: + if (v2->Type == TYPE_DBL) + b = (DBL(v1->To_Val) == DBL(v2->To_Val)); + else if (v2->Type == TYPE_FLOAT) + b = (DBL(v1->To_Val) == (double)v2->F); + + break; + case TYPE_BOOL: + if (v2->Type == TYPE_BOOL) + b = (v1->B == v2->B); + + break; + case TYPE_NULL: + b = (v2->Type == TYPE_NULL); + break; + default: + break; + } // endswitch Type + + else + b = (!v1 && !v2); + + return b; +} // end of CompareValues + +/*********************************************************************************/ +/* Add the found path to the list. */ +/*********************************************************************************/ +my_bool BJNX::AddPath(void) { + char s[16]; + + if (Jp->WriteStr("\"$")) + return true; + + for (int i = 0; i <= I; i++) { + if (Jpnp[i].Type == TYPE_JAR) { + sprintf(s, "[%d]", Jpnp[i].N + B); + + if (Jp->WriteStr(s)) + return true; + + } else { + if (Jp->WriteChr('.')) + return true; + + if (Jp->WriteStr(Jpnp[i].Key)) + return true; + + } // endif's + + } // endfor i + + if (Jp->WriteStr("\",")) + return true; + + return false; +} // end of AddPath + +/*********************************************************************************/ +/* Make a BVAL value from the passed argument. 
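Note that CompareTree is structural: both sides must have the same type and the same size, arrays are compared rank by rank, and object members are looked up by key, so their order does not matter. For instance (illustration only, not part of the patch):

  //  {"a":1,"b":[1,2]}  equals        {"b":[1,2],"a":1}
  //  [1,2]              differs from  [2,1]      (ranks are compared in order)
  //  [1,2]              differs from  [1,2,3]    (sizes differ)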
*/ +/*********************************************************************************/ +static PBVAL MakeBinValue(PGLOBAL g, UDF_ARGS* args, uint i) { + char* sap = (args->arg_count > i) ? args->args[i] : NULL; + int n, len; + int ci; + longlong bigint; + void* Base = g->Sarea; // Required by MOF + BDOC doc(Base); + PBVAL bp; + PBVAL bvp = doc.SubAllocVal(g); + + if (sap) switch (args->arg_type[i]) { + case STRING_RESULT: + if ((len = args->lengths[i])) { + if ((n = IsJson(args, i)) < 3) + sap = MakePSZ(g, args, i); + + if (n) { + if (n == 2) { + if (!(sap = GetJsonFile(g, sap))) { + PUSH_WARNING(g->Message); + return NULL; + } // endif sap + + len = strlen(sap); + } // endif 2 + + if (!(bp = doc.ParseJson(g, sap, strlen(sap)))) + PUSH_WARNING(g->Message); + + bvp = bp; + } else { + // Check whether this string is a valid json string + JsonMemSave(g); + + if (!(bvp = doc.ParseJson(g, sap, strlen(sap)))) { + // Recover suballocated memory + JsonSubSet(g); + ci = (strnicmp(args->attributes[i], "ci", 2)) ? 0 : 1; + bvp = doc.SubAllocVal(g, MOF(sap), TYPE_STRG, ci); + } else + g->Saved_Size = 0; + + } // endif n + + } // endif len + + break; + case INT_RESULT: + bigint = *(longlong*)sap; + + if ((bigint == 0LL && !strcmp(args->attributes[i], "FALSE")) || + (bigint == 1LL && !strcmp(args->attributes[i], "TRUE"))) + doc.SetBool(bvp, (bool)bigint); + else + doc.SetBigint(g, bvp, bigint); + + break; + case REAL_RESULT: + doc.SetFloat(bvp, *(double*)sap); + break; + case DECIMAL_RESULT: + doc.SetFloat(bvp, atof(MakePSZ(g, args, i))); + break; + case TIME_RESULT: + case ROW_RESULT: + default: + bvp = NULL; + break; + } // endswitch arg_type + + return bvp; +} // end of MakeBinValue + +/*********************************************************************************/ +/* Test BJSON parse and serialize. 
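Before the test UDF below, a minimal round trip through the two BDOC methods it relies on; the sample text is an assumption and g is the PGLOBAL work area (error checks omitted):

  char   text[] = "[56, {\"a\":1, \"b\":null}]";
  BDOC   doc(g->Sarea);
  PBVAL  bvp = doc.ParseJson(g, text, strlen(text)); // build the binary tree
  char  *str = doc.Serialize(g, bvp, NULL, 0);       // back to a JSON string
  // json_test_bson passes a file name and a pretty level instead of NULL, 0
  // when such arguments are supplied.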
*/ +/*********************************************************************************/ +my_bool json_test_bson_init(UDF_INIT* initid, UDF_ARGS* args, char* message) { + unsigned long reslen, memlen, more = 1000; + + if (args->arg_count == 0) { + strcpy(message, "At least 1 argument required (json)"); + return true; + } else if (!IsJson(args, 0) && args->arg_type[0] != STRING_RESULT) { + strcpy(message, "First argument must be a json item"); + return true; + } else + CalcLen(args, false, reslen, memlen); + + return JsonInit(initid, args, message, true, reslen, memlen, more); +} // end of json_test_bson_init + +char* json_test_bson(UDF_INIT* initid, UDF_ARGS* args, char* result, + unsigned long* res_length, char* is_null, char* error) { + char* str = NULL, * sap = NULL, * fn = NULL; + int pretty = 1; + PBVAL bvp; + PGLOBAL g = (PGLOBAL)initid->ptr; + BDOC doc(g); + + if (g->N) { + str = (char*)g->Activityp; + goto err; + } else if (initid->const_item) + g->N = 1; + + try { + if (!g->Xchk) { + if (CheckMemory(g, initid, args, 1, !g->Xchk)) { + PUSH_WARNING("CheckMemory error"); + *error = 1; + goto err; + } else if (!(bvp = MakeBinValue(g, args, 0))) { + PUSH_WARNING(g->Message); + goto err; + } // endif bvp + + if (g->Mrr) { // First argument is a constant + g->Xchk = bvp; + JsonMemSave(g); + } // endif Mrr + + } else + bvp = (PBVAL)g->Xchk; + + for (uint i = 1; i < args->arg_count; i++) + if (args->arg_type[i] == STRING_RESULT) + fn = args->args[i]; + else if (args->arg_type[i] == INT_RESULT) + pretty = (int)*(longlong*)args->args[i]; + + // Serialize the parse tree + str = doc.Serialize(g, bvp, fn, pretty); + + if (initid->const_item) + // Keep result of constant function + g->Activityp = (PACTIVITY)str; + + } catch (int n) { + xtrc(1, "json_test_bson: error %d: %s\n", n, g->Message); + PUSH_WARNING(g->Message); + *error = 1; + str = NULL; + } catch (const char* msg) { + strcpy(g->Message, msg); + PUSH_WARNING(g->Message); + *error = 1; + str = NULL; + } // end catch + +err: + if (!str) { + *res_length = 0; + *is_null = 1; + } else + *res_length = strlen(str); + + return str; +} // end of json_test_bson + +void json_test_bson_deinit(UDF_INIT* initid) { + JsonFreeMem((PGLOBAL)initid->ptr); +} // end of json_test_bson_deinit + +/*********************************************************************************/ +/* Locate a value in a Json tree. 
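The locating UDFs that follow reuse the caching convention visible in json_test_bson above; in outline (as far as this file shows):

  // g->N         set for constant functions so the result cached in
  //              g->Activityp can be returned directly on later calls;
  // g->Xchk      keeps the parsed first argument when g->Mrr indicates it is
  //              a constant, so it is parsed only once;
  // JsonFreeMem  called from the *_deinit functions, releases the work area.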
*/ +/*********************************************************************************/ +my_bool jsonlocate_bson_init(UDF_INIT* initid, UDF_ARGS* args, char* message) { + unsigned long reslen, memlen, more = 1000; + + if (args->arg_count < 2) { + strcpy(message, "At least 2 arguments required"); + return true; + } else if (!IsJson(args, 0) && args->arg_type[0] != STRING_RESULT) { + strcpy(message, "First argument must be a json item"); + return true; + } else if (args->arg_count > 2 && args->arg_type[2] != INT_RESULT) { + strcpy(message, "Third argument is not an integer (rank)"); + return true; + } // endifs args + + CalcLen(args, false, reslen, memlen); + + // TODO: calculate this + if (IsJson(args, 0) == 3) + more = 0; + + return JsonInit(initid, args, message, true, reslen, memlen, more); +} // end of jsonlocate_bson_init + +char* jsonlocate_bson(UDF_INIT* initid, UDF_ARGS* args, char* result, + unsigned long* res_length, char* is_null, char* error) { + char* path = NULL; + int k; + PBVAL bvp, bvp2; + PBJNX bnxp; + PGLOBAL g = (PGLOBAL)initid->ptr; + + if (g->N) { + if (g->Activityp) { + path = (char*)g->Activityp; + *res_length = strlen(path); + return path; + } else { + *res_length = 0; + *is_null = 1; + return NULL; + } // endif Activityp + + } else if (initid->const_item) + g->N = 1; + + try { + if (!g->Xchk) { + if (CheckMemory(g, initid, args, 1, !g->Xchk)) { + PUSH_WARNING("CheckMemory error"); + *error = 1; + goto err; + } else + bvp = MakeBinValue(g, args, 0); + + if (!bvp) { + PUSH_WARNING("First argument is not a valid JSON item"); + goto err; + } // endif bvp + + if (g->Mrr) { // First argument is a constant + g->Xchk = bvp; + JsonMemSave(g); + } // endif Mrr + + } else + bvp = (PBVAL)g->Xchk; + + // The item to locate + bvp2 = MakeBinValue(g, args, 1); + + k = (args->arg_count > 2) ? (int)*(long long*)args->args[2] : 1; + + bnxp = new(g) BJNX(g, bvp, TYPE_STRING); + path = bnxp->Locate(g, bvp, bvp2, k); + + if (initid->const_item) + // Keep result of constant function + g->Activityp = (PACTIVITY)path; + + } catch (int n) { + xtrc(1, "Exception %d: %s\n", n, g->Message); + PUSH_WARNING(g->Message); + *error = 1; + path = NULL; + } catch (const char* msg) { + strcpy(g->Message, msg); + PUSH_WARNING(g->Message); + *error = 1; + path = NULL; + } // end catch + +err: + if (!path) { + *res_length = 0; + *is_null = 1; + } else + *res_length = strlen(path); + + return path; +} // end of jsonlocate_bson + +void jsonlocate_bson_deinit(UDF_INIT* initid) { + JsonFreeMem((PGLOBAL)initid->ptr); +} // end of jsonlocate_bson_deinit + +/*********************************************************************************/ +/* Locate all occurences of a value in a Json tree. 
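The rank argument of jsonlocate_bson above selects which match is reported; a sketch with made-up values (g is the PGLOBAL work area, error handling omitted):

  char  text[] = "[[1,2],[3],[1,2]]";
  char  what[] = "[1,2]";
  BDOC  bdoc(g->Sarea);
  PBVAL jsp = bdoc.ParseJson(g, text, strlen(text));
  PBVAL bvp = bdoc.ParseJson(g, what, strlen(what));
  PBJNX bnxp = new(g) BJNX(g, jsp, TYPE_STRING);
  PSZ   path = bnxp->Locate(g, jsp, bvp, 2);  // second match, expected "$[2]"
                                              // (k defaults to 1, index base B
                                              // defaults to 0)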
*/ +/*********************************************************************************/ +my_bool json_locate_all_bson_init(UDF_INIT* initid, UDF_ARGS* args, char* message) { + unsigned long reslen, memlen, more = 1000; + + if (args->arg_count < 2) { + strcpy(message, "At least 2 arguments required"); + return true; + } else if (!IsJson(args, 0) && args->arg_type[0] != STRING_RESULT) { + strcpy(message, "First argument must be a json item"); + return true; + } else if (args->arg_count > 2 && args->arg_type[2] != INT_RESULT) { + strcpy(message, "Third argument is not an integer (Depth)"); + return true; + } // endifs + + CalcLen(args, false, reslen, memlen); + + // TODO: calculate this + if (IsJson(args, 0) == 3) + more = 0; + + return JsonInit(initid, args, message, true, reslen, memlen, more); +} // end of json_locate_all_bson_init + +char* json_locate_all_bson(UDF_INIT* initid, UDF_ARGS* args, char* result, + unsigned long* res_length, char* is_null, char* error) { + char* path = NULL; + int mx = 10; + PBVAL bvp, bvp2; + PBJNX bnxp; + PGLOBAL g = (PGLOBAL)initid->ptr; + + if (g->N) { + if (g->Activityp) { + path = (char*)g->Activityp; + *res_length = strlen(path); + return path; + } else { + *error = 1; + *res_length = 0; + *is_null = 1; + return NULL; + } // endif Activityp + + } else if (initid->const_item) + g->N = 1; + + try { + if (!g->Xchk) { + if (CheckMemory(g, initid, args, 1, true)) { + PUSH_WARNING("CheckMemory error"); + *error = 1; + goto err; + } else + bvp = MakeBinValue(g, args, 0); + + if (!bvp) { + PUSH_WARNING("First argument is not a valid JSON item"); + goto err; + } // endif bvp + + if (g->Mrr) { // First argument is a constant + g->Xchk = bvp; + JsonMemSave(g); + } // endif Mrr + + } else + bvp = (PBVAL)g->Xchk; + + // The item to locate + bvp2 = MakeBinValue(g, args, 1); + + if (args->arg_count > 2) + mx = (int)*(long long*)args->args[2]; + + bnxp = new(g) BJNX(g, bvp, TYPE_STRING); + path = bnxp->LocateAll(g, bvp, bvp2, mx); + + if (initid->const_item) + // Keep result of constant function + g->Activityp = (PACTIVITY)path; + + } catch (int n) { + xtrc(1, "Exception %d: %s\n", n, g->Message); + PUSH_WARNING(g->Message); + *error = 1; + path = NULL; + } catch (const char* msg) { + strcpy(g->Message, msg); + PUSH_WARNING(g->Message); + *error = 1; + path = NULL; + } // end catch + +err: + if (!path) { + *res_length = 0; + *is_null = 1; + } else + *res_length = strlen(path); + + return path; +} // end of json_locate_all_bson + +void json_locate_all_bson_deinit(UDF_INIT* initid) { + JsonFreeMem((PGLOBAL)initid->ptr); +} // end of json_locate_all_bson_deinit + diff --git a/storage/connect/bsonudf.h b/storage/connect/bsonudf.h new file mode 100644 index 00000000000..6687d4c1640 --- /dev/null +++ b/storage/connect/bsonudf.h @@ -0,0 +1,98 @@ +/******************** tabjson H Declares Source Code File (.H) *******************/ +/* Name: bsonudf.h Version 1.0 */ +/* */ +/* (C) Copyright to the author Olivier BERTRAND 2020 */ +/* */ +/* This file contains the BSON UDF function and class declares. */ +/*********************************************************************************/ +#pragma once +#include "jsonudf.h" +#include "bson.h" + +/* --------------------------- New Testing BJSON Stuff --------------------------*/ + +typedef class BJNX* PBJNX; + +/*********************************************************************************/ +/* Class BJNX: BJSON access methods. 
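A short usage note, taken from the way bsonudf.cpp drives the class declared below: a BJNX is suballocated in the work area for each call,

  // PBJNX bnxp = new(g) BJNX(g, bvp, TYPE_STRING);
  // char *path = bnxp->Locate(g, bvp, bvp2, k);   // or LocateAll(g, ...)

and is never deleted explicitly; the area is released later by JsonFreeMem() in the UDF deinit functions.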
*/ +/*********************************************************************************/ +class BJNX : public BDOC { +public: + // Constructors + BJNX(PGLOBAL g, PBVAL row, int type, int len = 64, int prec = 0, my_bool wr = false); + + // Implementation + int GetPrecision(void) { return Prec; } + PVAL GetValue(void) { return Value; } + + // Methods + my_bool SetJpath(PGLOBAL g, char* path, my_bool jb = false); + my_bool ParseJpath(PGLOBAL g); + void ReadValue(PGLOBAL g); + PBVAL GetRowValue(PGLOBAL g, PBVAL row, int i, my_bool b = true); + PBVAL GetJson(PGLOBAL g); + my_bool CheckPath(PGLOBAL g); + my_bool WriteValue(PGLOBAL g, PBVAL jvalp); + char* Locate(PGLOBAL g, PBVAL jsp, PBVAL jvp, int k = 1); + char* LocateAll(PGLOBAL g, PBVAL jsp, PBVAL jvp, int mx = 10); + +protected: + my_bool SetArrayOptions(PGLOBAL g, char* p, int i, PSZ nm); + PVAL GetColumnValue(PGLOBAL g, PBVAL row, int i); + PVAL ExpandArray(PGLOBAL g, PBVAL arp, int n); + PVAL CalculateArray(PGLOBAL g, PBVAL arp, int n); + PVAL MakeJson(PGLOBAL g, PBVAL bvp); + void SetJsonValue(PGLOBAL g, PVAL vp, PBVAL vlp); + PBVAL GetRow(PGLOBAL g); + my_bool CompareValues(PGLOBAL g, PBVAL v1, PBVAL v2); + my_bool LocateArray(PGLOBAL g, PBVAL jarp); + my_bool LocateObject(PGLOBAL g, PBPR jobp); + my_bool LocateValue(PGLOBAL g, PBVAL jvp); + my_bool LocateArrayAll(PGLOBAL g, PBVAL jarp); + my_bool LocateObjectAll(PGLOBAL g, PBPR jobp); + my_bool LocateValueAll(PGLOBAL g, PBVAL jvp); + my_bool CompareTree(PGLOBAL g, PBVAL jp1, PBVAL jp2); + my_bool AddPath(void); + + // Default constructor not to be used + BJNX(void) {} + + // Members + PBVAL Row; + PBVAL Bvalp; + PJPN Jpnp; + JOUTSTR *Jp; + JNODE *Nodes; // The intermediate objects + PVAL Value; + PVAL MulVal; // To value used by multiple column + char *Jpath; // The json path + int Buf_Type; + int Long; + int Prec; + int Nod; // The number of intermediate objects + int Xnod; // Index of multiple values + int K; // Kth item to locate + int I; // Index of JPN + int Imax; // Max number of JPN's + int B; // Index base + my_bool Xpd; // True for expandable column + my_bool Parsed; // True when parsed + my_bool Found; // Item found by locate + my_bool Wr; // Write mode + my_bool Jb; // Must return json item +}; // end of class BJNX + +extern "C" { + DllExport my_bool json_test_bson_init(UDF_INIT*, UDF_ARGS*, char*); + DllExport char* json_test_bson(UDF_EXEC_ARGS); + DllExport void json_test_bson_deinit(UDF_INIT*); + + DllExport my_bool jsonlocate_bson_init(UDF_INIT*, UDF_ARGS*, char*); + DllExport char* jsonlocate_bson(UDF_EXEC_ARGS); + DllExport void jsonlocate_bson_deinit(UDF_INIT*); + + DllExport my_bool json_locate_all_bson_init(UDF_INIT*, UDF_ARGS*, char*); + DllExport char* json_locate_all_bson(UDF_EXEC_ARGS); + DllExport void json_locate_all_bson_deinit(UDF_INIT*); +} // extern "C" + diff --git a/storage/connect/json.h b/storage/connect/json.h index 5ba4d7b3dbd..3a026f5df22 100644 --- a/storage/connect/json.h +++ b/storage/connect/json.h @@ -5,6 +5,7 @@ /* */ /* This file contains the JSON classes declares. 
*/ /***********************************************************************/ +#pragma once #include #include "value.h" #include "xobject.h" diff --git a/storage/connect/jsonudf.cpp b/storage/connect/jsonudf.cpp index 0012b3d6bdd..cb29b9f5d6c 100644 --- a/storage/connect/jsonudf.cpp +++ b/storage/connect/jsonudf.cpp @@ -27,12 +27,6 @@ #endif #define M 9 -bool IsNum(PSZ s); -char *NextChr(PSZ s, char sep); -char *GetJsonNull(void); -uint GetJsonGrpSize(void); -static int IsJson(UDF_ARGS *args, uint i, bool b = false); -static PSZ MakePSZ(PGLOBAL g, UDF_ARGS *args, int i); static char *handle_item(UDF_INIT *initid, UDF_ARGS *args, char *result, unsigned long *res_length, char *is_null, char *error); static char *bin_handle_item(UDF_INIT *initid, UDF_ARGS *args, char *result, @@ -1180,7 +1174,7 @@ static uint GetJsonGroupSize(void) /*********************************************************************************/ /* Program for SubSet re-initialization of the memory pool. */ /*********************************************************************************/ -static my_bool JsonSubSet(PGLOBAL g) +my_bool JsonSubSet(PGLOBAL g) { PPOOLHEADER pph = (PPOOLHEADER)g->Sarea; @@ -1277,10 +1271,8 @@ static PJVAL JvalNew(PGLOBAL g, JTYP type, void *vp) /*********************************************************************************/ /* Allocate and initialise the memory area. */ /*********************************************************************************/ -static my_bool JsonInit(UDF_INIT *initid, UDF_ARGS *args, - char *message, my_bool mbn, - unsigned long reslen, unsigned long memlen, - unsigned long more = 0) +my_bool JsonInit(UDF_INIT *initid, UDF_ARGS *args, char *message, my_bool mbn, + unsigned long reslen, unsigned long memlen, unsigned long more) { PGLOBAL g = PlugInit(NULL, (size_t)memlen + more + 500); // +500 to avoid CheckMem @@ -1443,7 +1435,7 @@ static int *GetIntArgPtr(PGLOBAL g, UDF_ARGS *args, uint& n) /*********************************************************************************/ /* Returns not 0 if the argument is a JSON item or file name. */ /*********************************************************************************/ -static int IsJson(UDF_ARGS *args, uint i, bool b) +int IsJson(UDF_ARGS *args, uint i, bool b) { int n = 0; @@ -1510,9 +1502,8 @@ static long GetFileLength(char *fn) /*********************************************************************************/ /* Calculate the reslen and memlen needed by a function. */ /*********************************************************************************/ -static my_bool CalcLen(UDF_ARGS *args, my_bool obj, - unsigned long& reslen, unsigned long& memlen, - my_bool mod = false) +my_bool CalcLen(UDF_ARGS *args, my_bool obj, unsigned long& reslen, + unsigned long& memlen, my_bool mod) { char fn[_MAX_PATH]; unsigned long i, k, m, n; @@ -1629,8 +1620,8 @@ static my_bool CalcLen(UDF_ARGS *args, my_bool obj, /*********************************************************************************/ /* Check if the calculated memory is enough. 
*/ /*********************************************************************************/ -static my_bool CheckMemory(PGLOBAL g, UDF_INIT *initid, UDF_ARGS *args, uint n, - my_bool m, my_bool obj = false, my_bool mod = false) +my_bool CheckMemory(PGLOBAL g, UDF_INIT *initid, UDF_ARGS *args, uint n, + my_bool m, my_bool obj, my_bool mod) { unsigned long rl, ml; my_bool b = false; @@ -1682,7 +1673,7 @@ static my_bool CheckMemory(PGLOBAL g, UDF_INIT *initid, UDF_ARGS *args, uint n, /*********************************************************************************/ /* Make a zero terminated string from the passed argument. */ /*********************************************************************************/ -static PSZ MakePSZ(PGLOBAL g, UDF_ARGS *args, int i) +PSZ MakePSZ(PGLOBAL g, UDF_ARGS *args, int i) { if (args->arg_count > (unsigned)i && args->args[i]) { int n = args->lengths[i]; @@ -1807,7 +1798,7 @@ static PJSON ParseJsonFile(PGLOBAL g, char *fn, int *pretty, size_t& len) /*********************************************************************************/ /* Return a json file contains. */ /*********************************************************************************/ -static char *GetJsonFile(PGLOBAL g, char *fn) +char *GetJsonFile(PGLOBAL g, char *fn) { char *str; int h, n, len; @@ -6559,1476 +6550,3 @@ long long countin(UDF_INIT *initid, UDF_ARGS *args, char *result, free(str2); return n; } // end of countin - -/* --------------------------- New Testing BJSON Stuff --------------------------*/ - -/*********************************************************************************/ -/* SubAlloc a new BJNX class with protection against memory exhaustion. */ -/*********************************************************************************/ -static PBJNX BjnxNew(PGLOBAL g, PBVAL vlp, int type, int len) { - PBJNX bjnx; - - try { - bjnx = new(g) BJNX(g, vlp, type, len); - } catch (...) { - if (trace(1023)) - htrc("%s\n", g->Message); - - PUSH_WARNING(g->Message); - bjnx = NULL; - } // end try/catch - - return bjnx; -} /* end of BjnxNew */ - -/* ----------------------------------- BSNX ------------------------------------ */ - -/*********************************************************************************/ -/* BSNX public constructor. */ -/*********************************************************************************/ -BJNX::BJNX(PGLOBAL g, PBVAL row, int type, int len, int prec, my_bool wr) - : BDOC(g->Sarea) -{ - Row = row; - Bvalp = NULL; - Jpnp = NULL; - Jp = NULL; - Nodes = NULL; - Value = AllocateValue(g, type, len, prec); - MulVal = NULL; - Jpath = NULL; - Buf_Type = type; - Long = len; - Prec = prec; - Nod = 0; - Xnod = -1; - K = 0; - I = -1; - Imax = 9; - B = 0; - Xpd = false; - Parsed = false; - Found = false; - Wr = wr; - Jb = false; -} // end of BJNX constructor - -/*********************************************************************************/ -/* SetJpath: set and parse the json path. */ -/*********************************************************************************/ -my_bool BJNX::SetJpath(PGLOBAL g, char* path, my_bool jb) { - // Check Value was allocated - if (!Value) - return true; - - Value->SetNullable(true); - Jpath = path; - - // Parse the json path - Parsed = false; - Nod = 0; - Jb = jb; - return ParseJpath(g); -} // end of SetJpath - -/*********************************************************************************/ -/* Analyse array processing options. 
*/ -/*********************************************************************************/ -my_bool BJNX::SetArrayOptions(PGLOBAL g, char* p, int i, PSZ nm) { - int n = (int)strlen(p); - my_bool dg = true, b = false; - PJNODE jnp = &Nodes[i]; - - if (*p) { - if (p[n - 1] == ']') { - p[--n] = 0; - } else if (!IsNum(p)) { - // Wrong array specification - sprintf(g->Message, "Invalid array specification %s", p); - return true; - } // endif p - - } else - b = true; - - // To check whether a numeric Rank was specified - dg = IsNum(p); - - if (!n) { - // Default specifications - if (jnp->Op != OP_EXP) { - if (Wr) { - // Force append - jnp->Rank = INT_MAX32; - jnp->Op = OP_LE; - } else if (Jb) { - // Return a Json item - jnp->Op = OP_XX; - } else if (b) { - // Return 1st value (B is the index base) - jnp->Rank = B; - jnp->Op = OP_LE; - } else if (!Value->IsTypeNum()) { - jnp->CncVal = AllocateValue(g, PlugDup(g, ", "), TYPE_STRING); - jnp->Op = OP_CNC; - } else - jnp->Op = OP_ADD; - - } // endif OP - - } else if (dg) { - // Return nth value - jnp->Rank = atoi(p) - B; - jnp->Op = OP_EQ; - } else if (Wr) { - sprintf(g->Message, "Invalid specification %s in a write path", p); - return true; - } else if (n == 1) { - // Set the Op value; - switch (*p) { - case '+': jnp->Op = OP_ADD; break; - case 'x': jnp->Op = OP_MULT; break; - case '>': jnp->Op = OP_MAX; break; - case '<': jnp->Op = OP_MIN; break; - case '!': jnp->Op = OP_SEP; break; // Average - case '#': jnp->Op = OP_NUM; break; - case '*': // Expand this array - strcpy(g->Message, "Expand not supported by this function"); - return true; - default: - sprintf(g->Message, "Invalid function specification %c", *p); - return true; - } // endswitch *p - - } else if (*p == '"' && p[n - 1] == '"') { - // This is a concat specification - jnp->Op = OP_CNC; - - if (n > 2) { - // Set concat intermediate string - p[n - 1] = 0; - - if (trace(1)) - htrc("Concat string=%s\n", p + 1); - - jnp->CncVal = AllocateValue(g, p + 1, TYPE_STRING); - } // endif n - - } else { - strcpy(g->Message, "Wrong array specification"); - return true; - } // endif's - - // For calculated arrays, a local Value must be used - switch (jnp->Op) { - case OP_NUM: - jnp->Valp = AllocateValue(g, TYPE_INT); - break; - case OP_ADD: - case OP_MULT: - case OP_SEP: - if (!IsTypeChar(Buf_Type)) - jnp->Valp = AllocateValue(g, Buf_Type, 0, GetPrecision()); - else - jnp->Valp = AllocateValue(g, TYPE_DOUBLE, 0, 2); - - break; - case OP_MIN: - case OP_MAX: - jnp->Valp = AllocateValue(g, Buf_Type, Long, GetPrecision()); - break; - case OP_CNC: - if (IsTypeChar(Buf_Type)) - jnp->Valp = AllocateValue(g, TYPE_STRING, Long, GetPrecision()); - else - jnp->Valp = AllocateValue(g, TYPE_STRING, 512); - - break; - default: - break; - } // endswitch Op - - if (jnp->Valp) - MulVal = AllocateValue(g, jnp->Valp); - - return false; -} // end of SetArrayOptions - -/*********************************************************************************/ -/* Parse the eventual passed Jpath information. */ -/* This information can be specified in the Fieldfmt column option when */ -/* creating the table. It permits to indicate the position of the node */ -/* corresponding to that column. 
*/ -/*********************************************************************************/ -my_bool BJNX::ParseJpath(PGLOBAL g) { - char* p, * p1 = NULL, * p2 = NULL, * pbuf = NULL; - int i; - my_bool a, mul = false; - - if (Parsed) - return false; // Already done - else if (!Jpath) - // Jpath = Name; - return true; - - if (trace(1)) - htrc("ParseJpath %s\n", SVP(Jpath)); - - if (!(pbuf = PlgDBDup(g, Jpath))) - return true; - - if (*pbuf == '$') pbuf++; - if (*pbuf == '.') pbuf++; - if (*pbuf == '[') p1 = pbuf++; - - // Estimate the required number of nodes - for (i = 0, p = pbuf; (p = NextChr(p, '.')); i++, p++) - Nod++; // One path node found - - if (!(Nodes = (PJNODE)PlgDBSubAlloc(g, NULL, (++Nod) * sizeof(JNODE)))) - return true; - - memset(Nodes, 0, (Nod) * sizeof(JNODE)); - - // Analyze the Jpath for this column - for (i = 0, p = pbuf; p && i < Nod; i++, p = (p2 ? p2 : NULL)) { - a = (p1 != NULL); - p1 = strchr(p, '['); - p2 = strchr(p, '.'); - - if (!p2) - p2 = p1; - else if (p1) { - if (p1 < p2) - p2 = p1; - else if (p1 == p2 + 1) - *p2++ = 0; // Old syntax .[ - else - p1 = NULL; - - } // endif p1 - - if (p2) - *p2++ = 0; - - // Jpath must be explicit - if (a || *p == 0 || *p == '[' || IsNum(p)) { - // Analyse intermediate array processing - if (SetArrayOptions(g, p, i, Nodes[i - 1].Key)) - return true; - - } else if (*p == '*') { - if (Wr) { - sprintf(g->Message, "Invalid specification %c in a write path", *p); - return true; - } else // Return JSON - Nodes[i].Op = OP_XX; - - } else { - Nodes[i].Key = p; - Nodes[i].Op = OP_EXIST; - } // endif's - - } // endfor i, p - - Nod = i; - MulVal = AllocateValue(g, Value); - - if (trace(1)) - for (i = 0; i < Nod; i++) - htrc("Node(%d) Key=%s Op=%d Rank=%d\n", - i, SVP(Nodes[i].Key), Nodes[i].Op, Nodes[i].Rank); - - Parsed = true; - return false; -} // end of ParseJpath - -/*********************************************************************************/ -/* MakeJson: Serialize the json item and set value to it. */ -/*********************************************************************************/ -PVAL BJNX::MakeJson(PGLOBAL g, PBVAL bvp) { - if (Value->IsTypeNum()) { - strcpy(g->Message, "Cannot make Json for a numeric value"); - Value->Reset(); - } else if (bvp->Type != TYPE_JAR && bvp->Type != TYPE_JOB) { - strcpy(g->Message, "Target is not an array or object"); - Value->Reset(); - } else - Value->SetValue_psz(Serialize(g, bvp, NULL, 0)); - - return Value; -} // end of MakeJson - -/*********************************************************************************/ -/* SetValue: Set a value from a JVALUE contains. */ -/*********************************************************************************/ -void BJNX::SetJsonValue(PGLOBAL g, PVAL vp, PBVAL vlp) { - if (vlp) { - vp->SetNull(false); - - if (Jb) { - vp->SetValue_psz(Serialize(g, vlp, NULL, 0)); - } else switch (vlp->Type) { - case TYPE_DTM: - case TYPE_STRG: - vp->SetValue_psz(GetString(g, vlp)); - break; - case TYPE_INTG: - case TYPE_BINT: - vp->SetValue(GetInteger(vlp)); - break; - case TYPE_DBL: - if (vp->IsTypeNum()) - vp->SetValue(GetDouble(vlp)); - else // Get the proper number of decimals - vp->SetValue_psz(GetString(g, vlp)); - - break; - case TYPE_BOOL: - if (vp->IsTypeNum()) - vp->SetValue(GetInteger(vlp) ? 
1 : 0); - else - vp->SetValue_psz(GetString(g, vlp)); - - break; - case TYPE_JAR: - vp->SetValue_psz(GetArrayText(g, MVP(vlp->To_Val), NULL)); - break; - case TYPE_JOB: - vp->SetValue_psz(GetObjectText(g, MPP(vlp->To_Val), NULL)); - break; - case TYPE_NULL: - vp->SetNull(true); - default: - vp->Reset(); - } // endswitch Type - - } else { - vp->SetNull(true); - vp->Reset(); - } // endif val - -} // end of SetJsonValue - -/*********************************************************************************/ -/* GetJson: */ -/*********************************************************************************/ -PBVAL BJNX::GetJson(PGLOBAL g) { - return GetRowValue(g, Row, 0); -} // end of GetJson - -/*********************************************************************************/ -/* ReadValue: */ -/*********************************************************************************/ -void BJNX::ReadValue(PGLOBAL g) { - Value->SetValue_pval(GetColumnValue(g, Row, 0)); -} // end of ReadValue - -/*********************************************************************************/ -/* GetColumnValue: */ -/*********************************************************************************/ -PVAL BJNX::GetColumnValue(PGLOBAL g, PBVAL row, int i) { - PBVAL vlp = GetRowValue(g, row, i); - - SetJsonValue(g, Value, vlp); - return Value; -} // end of GetColumnValue - -/*********************************************************************************/ -/* GetRowValue: */ -/*********************************************************************************/ -PBVAL BJNX::GetRowValue(PGLOBAL g, PBVAL row, int i, my_bool b) { - my_bool expd = false; - PBVAL bap; - PBVAL vlp = NULL; - - for (; i < Nod && row; i++) { - if (Nodes[i].Op == OP_NUM) { - Value->SetValue(row->Type == TYPE_JAR ? GetArraySize(MVP(row->To_Val)) : 1); - vlp = SubAllocVal(g, Value); - return vlp; - } else if (Nodes[i].Op == OP_XX) { - Jb = b; -// return DupVal(g, row); - return row; // or last line ??? - } else switch (row->Type) { - case TYPE_JOB: - if (!Nodes[i].Key) { - // Expected Array was not there - if (Nodes[i].Op == OP_LE) { - if (i < Nod - 1) - continue; - else - vlp = row; // DupVal(g, row) ??? 
- - } else { - strcpy(g->Message, "Unexpected object"); - vlp = NULL; - } //endif Op - - } else - vlp = GetKeyValue(MPP(row->To_Val), Nodes[i].Key); - - break; - case TYPE_JAR: - bap = MVP(row->To_Val); - - if (!Nodes[i].Key) { - if (Nodes[i].Op == OP_EQ || Nodes[i].Op == OP_LE) - vlp = GetArrayValue(bap, Nodes[i].Rank); - else if (Nodes[i].Op == OP_EXP) - return (PBVAL)ExpandArray(g, bap, i); - else - return SubAllocVal(g, CalculateArray(g, bap, i)); - - } else { - // Unexpected array, unwrap it as [0] - vlp = GetArrayValue(bap, 0); - i--; - } // endif's - - break; - case TYPE_JVAL: - vlp = row; - break; - default: - sprintf(g->Message, "Invalid row JSON type %d", row->Type); - vlp = NULL; - } // endswitch Type - - row = vlp; - } // endfor i - - return vlp; -} // end of GetRowValue - -/*********************************************************************************/ -/* ExpandArray: */ -/*********************************************************************************/ -PVAL BJNX::ExpandArray(PGLOBAL g, PBVAL arp, int n) -{ - strcpy(g->Message, "Expand cannot be done by this function"); - return NULL; -} // end of ExpandArray - -/*********************************************************************************/ -/* CalculateArray: NIY */ -/*********************************************************************************/ -PVAL BJNX::CalculateArray(PGLOBAL g, PBVAL bap, int n) -{ -#if 0 - int i, ars = GetArraySize(bap), nv = 0; - bool err; - OPVAL op = Nodes[n].Op; - PVAL val[2], vp = Nodes[n].Valp; - PBVAL bvrp, bvp; - BVAL bval; - - vp->Reset(); - xtrc(1,"CalculateArray size=%d op=%d\n", ars, op); - - for (i = 0; i < ars; i++) { - bvrp = GetArrayValue(bap, i); - xtrc(1, "i=%d nv=%d\n", i, nv); - - if (!IsValueNull(bvrp) || (op == OP_CNC && GetJsonNull())) { - if (IsValueNull(bvrp)) { - SetString(bvrp, GetJsonNull(), 0); - bvp = bvrp; - } else if (n < Nod - 1 && bvrp->GetJson()) { - bval.SetValue(g, GetColumnValue(g, jvrp->GetJson(), n + 1)); - bvp = &bval; - } else - jvp = jvrp; - - if (trace(1)) - htrc("jvp=%s null=%d\n", - jvp->GetString(g), jvp->IsNull() ? 1 : 0); - - if (!nv++) { - SetJsonValue(g, vp, jvp); - continue; - } else - SetJsonValue(g, MulVal, jvp); - - if (!MulVal->IsNull()) { - switch (op) { - case OP_CNC: - if (Nodes[n].CncVal) { - val[0] = Nodes[n].CncVal; - err = vp->Compute(g, val, 1, op); - } // endif CncVal - - val[0] = MulVal; - err = vp->Compute(g, val, 1, op); - break; - // case OP_NUM: - case OP_SEP: - val[0] = Nodes[n].Valp; - val[1] = MulVal; - err = vp->Compute(g, val, 2, OP_ADD); - break; - default: - val[0] = Nodes[n].Valp; - val[1] = MulVal; - err = vp->Compute(g, val, 2, op); - } // endswitch Op - - if (err) - vp->Reset(); - - if (trace(1)) { - char buf(32); - - htrc("vp='%s' err=%d\n", - vp->GetCharString(&buf), err ? 1 : 0); - } // endif trace - - } // endif Zero - - } // endif jvrp - - } // endfor i - - if (op == OP_SEP) { - // Calculate average - MulVal->SetValue(nv); - val[0] = vp; - val[1] = MulVal; - - if (vp->Compute(g, val, 2, OP_DIV)) - vp->Reset(); - - } // endif Op - - return vp; -#else - strcpy(g->Message, "Calculate array NIY"); - return NULL; -#endif -} // end of CalculateArray - -/*********************************************************************************/ -/* CheckPath: Checks whether the path exists in the document. 
*/ -/*********************************************************************************/ -my_bool BJNX::CheckPath(PGLOBAL g) { - PBVAL val = NULL; - PBVAL row = Row; - - for (int i = 0; i < Nod && row; i++) { - val = NULL; - - if (Nodes[i].Op == OP_NUM || Nodes[i].Op == OP_XX) { - } else switch (row->Type) { - case TYPE_JOB: - if (Nodes[i].Key) - val = GetKeyValue(MPP(row->To_Val), Nodes[i].Key); - - break; - case TYPE_JAR: - if (!Nodes[i].Key) - if (Nodes[i].Op == OP_EQ || Nodes[i].Op == OP_LE) - val = GetArrayValue(MVP(row->To_Val), Nodes[i].Rank); - - break; - case TYPE_JVAL: - val = MVP(row->To_Val); - break; - default: - sprintf(g->Message, "Invalid row JSON type %d", row->Type); - } // endswitch Type - -// if (i < Nod - 1) -// if (!(row = (val) ? val->GetJsp() : NULL)) -// val = NULL; - - row = val; - } // endfor i - - return (val != NULL); -} // end of CheckPath - -/***********************************************************************/ -/* GetRow: Set the complete path of the object to be set. */ -/***********************************************************************/ -PBVAL BJNX::GetRow(PGLOBAL g) { - PBVAL val = NULL; - PBVAL arp; - PBVAL nwr, row = Row; - - for (int i = 0; i < Nod - 1 && row; i++) { - if (Nodes[i].Op == OP_XX) - break; - else switch (row->Type) { - case TYPE_JOB: - if (!Nodes[i].Key) - // Expected Array was not there, wrap the value - continue; - - val = GetKeyValue(MPP(row->To_Val), Nodes[i].Key); - break; - case TYPE_JAR: - arp = MVP(row->To_Val); - - if (!Nodes[i].Key) { - if (Nodes[i].Op == OP_EQ) - val = GetArrayValue(arp, Nodes[i].Rank); - else - val = GetArrayValue(arp, Nodes[i].Rx); - - } else { - // Unexpected array, unwrap it as [0] - val = GetArrayValue(arp, 0); - i--; - } // endif Nodes - - break; - case TYPE_JVAL: - val = MVP(row->To_Val); - break; - default: - sprintf(g->Message, "Invalid row JSON type %d", row->Type); - val = NULL; - } // endswitch Type - - if (val) { - row = val; - } else { - // Construct missing objects - for (i++; row && i < Nod; i++) { - if (Nodes[i].Op == OP_XX) - break; -// else if (!Nodes[i].Key) - // Construct intermediate array -// nwr = SubAllocVal(g); -// else -// nwr = SubAllocPair(g); - - // Construct new row - nwr = SubAllocVal(g); - - if (row->Type == TYPE_JOB) { - SetKeyValue(g, MPP(row->To_Val), MOF(nwr), Nodes[i - 1].Key); - } else if (row->Type == TYPE_JAR) { - AddArrayValue(g, MVP(row->To_Val), nwr); - } else { - strcpy(g->Message, "Wrong type when writing new row"); - nwr = NULL; - } // endif's - - row = nwr; - } // endfor i - - break; - } // endelse - - } // endfor i - - return row; -} // end of GetRow - -/***********************************************************************/ -/* WriteValue: */ -/***********************************************************************/ -my_bool BJNX::WriteValue(PGLOBAL g, PBVAL jvalp) { - PBPR objp = NULL; - PBVAL arp = NULL; - PBVAL jvp = NULL; - PBVAL row = GetRow(g); - - if (!row) - return true; - - switch (row->Type) { - case TYPE_JOB: objp = MPP(row->To_Val); break; - case TYPE_JAR: arp = MVP(row->To_Val); break; - case TYPE_JVAL: jvp = MVP(row->To_Val); break; - default: - strcpy(g->Message, "Invalid target type"); - return true; - } // endswitch Type - - if (arp) { - if (!Nodes[Nod - 1].Key) { - if (Nodes[Nod - 1].Op == OP_EQ) - SetArrayValue(g, arp, jvalp, Nodes[Nod - 1].Rank); - else - AddArrayValue(g, arp, jvalp); - - } // endif Key - - } else if (objp) { - if (Nodes[Nod - 1].Key) - SetKeyValue(g, objp, MOF(jvalp), Nodes[Nod - 1].Key); - - } else if (jvp) - 
SetValueVal(jvp, jvalp); - - return false; -} // end of WriteValue - -/*********************************************************************************/ -/* Locate a value in a JSON tree: */ -/*********************************************************************************/ -PSZ BJNX::Locate(PGLOBAL g, PBVAL jsp, PBVAL jvp, int k) { - PSZ str = NULL; - my_bool b = false, err = true; - - g->Message[0] = 0; - - if (!jsp) { - strcpy(g->Message, "Null json tree"); - return NULL; - } // endif jsp - - try { - // Write to the path string - Jp = new(g) JOUTSTR(g); - Jp->WriteChr('$'); - Bvalp = jvp; - K = k; - - switch (jsp->Type) { - case TYPE_JAR: - err = LocateArray(g, MVP(jsp->To_Val)); - break; - case TYPE_JOB: - err = LocateObject(g, MPP(jsp->To_Val)); - break; - case TYPE_JVAL: - err = LocateValue(g, MVP(jsp->To_Val)); - break; - default: - err = true; - } // endswitch Type - - if (err) { - if (!g->Message[0]) - strcpy(g->Message, "Invalid json tree"); - - } else if (Found) { - Jp->WriteChr('\0'); - PlugSubAlloc(g, NULL, Jp->N); - str = Jp->Strp; - } // endif's - - } catch (int n) { - if (trace(1)) - htrc("Exception %d: %s\n", n, g->Message); - - PUSH_WARNING(g->Message); - } catch (const char* msg) { - strcpy(g->Message, msg); - } // end catch - - return str; -} // end of Locate - -/*********************************************************************************/ -/* Locate in a JSON Array. */ -/*********************************************************************************/ -my_bool BJNX::LocateArray(PGLOBAL g, PBVAL jarp) { - char s[16]; - int n = GetArraySize(jarp); - size_t m = Jp->N; - - for (int i = 0; i < n && !Found; i++) { - Jp->N = m; - sprintf(s, "[%d]", i + B); - - if (Jp->WriteStr(s)) - return true; - - if (LocateValue(g, GetArrayValue(jarp, i))) - return true; - - } // endfor i - - return false; -} // end of LocateArray - -/*********************************************************************************/ -/* Locate in a JSON Object. */ -/*********************************************************************************/ -my_bool BJNX::LocateObject(PGLOBAL g, PBPR jobp) { - size_t m; - - if (Jp->WriteChr('.')) - return true; - - m = Jp->N; - - for (PBPR pair = jobp; pair && !Found; pair = MPP(pair->Next)) { - Jp->N = m; - - if (Jp->WriteStr(MZP(pair->Key))) - return true; - - if (LocateValue(g, MVP(pair->Vlp))) - return true; - - } // endfor i - - return false; -} // end of LocateObject - -/*********************************************************************************/ -/* Locate a JSON Value. 
*/ -/*********************************************************************************/ -my_bool BJNX::LocateValue(PGLOBAL g, PBVAL jvp) -{ - if (CompareTree(g, Bvalp, jvp)) - Found = (--K == 0); - else if (jvp->Type == TYPE_JAR) - return LocateArray(g, GetArray(jvp)); - else if (jvp->Type == TYPE_JOB) - return LocateObject(g, GetObject(jvp)); - - return false; -} // end of LocateValue - -/*********************************************************************************/ -/* Locate all occurrences of a value in a JSON tree: */ -/*********************************************************************************/ -PSZ BJNX::LocateAll(PGLOBAL g, PBVAL jsp, PBVAL bvp, int mx) -{ - PSZ str = NULL; - my_bool b = false, err = true; - PJPN jnp; - - if (!jsp) { - strcpy(g->Message, "Null json tree"); - return NULL; - } // endif jsp - - try { - jnp = (PJPN)PlugSubAlloc(g, NULL, sizeof(JPN) * mx); - memset(jnp, 0, sizeof(JPN) * mx); - g->Message[0] = 0; - - // Write to the path string - Jp = new(g)JOUTSTR(g); - Bvalp = bvp; - Imax = mx - 1; - Jpnp = jnp; - Jp->WriteChr('['); - - switch (jsp->Type) { - case TYPE_JAR: - err = LocateArrayAll(g, MVP(jsp->To_Val)); - break; - case TYPE_JOB: - err = LocateObjectAll(g, MPP(jsp->To_Val)); - break; - case TYPE_JVAL: - err = LocateValueAll(g, MVP(jsp->To_Val)); - break; - default: - err = LocateValueAll(g, jsp); - } // endswitch Type - - if (!err) { - if (Jp->N > 1) - Jp->N--; - - Jp->WriteChr(']'); - Jp->WriteChr('\0'); - PlugSubAlloc(g, NULL, Jp->N); - str = Jp->Strp; - } else if (!g->Message[0]) - strcpy(g->Message, "Invalid json tree"); - - } catch (int n) { - xtrc(1, "Exception %d: %s\n", n, g->Message); - PUSH_WARNING(g->Message); - } catch (const char* msg) { - strcpy(g->Message, msg); - } // end catch - - return str; -} // end of LocateAll - -/*********************************************************************************/ -/* Locate in a JSON Array. */ -/*********************************************************************************/ -my_bool BJNX::LocateArrayAll(PGLOBAL g, PBVAL jarp) -{ - int i = 0; - - if (I < Imax) { - Jpnp[++I].Type = TYPE_JAR; - - for (PBVAL vp = jarp; vp; vp = MVP(vp->Next)) { - Jpnp[I].N = i; - - if (LocateValueAll(g, GetArrayValue(jarp, i))) - return true; - - i++; - } // endfor i - - I--; - } // endif I - - return false; -} // end of LocateArrayAll - -/*********************************************************************************/ -/* Locate in a JSON Object. */ -/*********************************************************************************/ -my_bool BJNX::LocateObjectAll(PGLOBAL g, PBPR jobp) -{ - if (I < Imax) { - Jpnp[++I].Type = TYPE_JOB; - - for (PBPR pair = jobp; pair; pair = MPP(pair->Next)) { - Jpnp[I].Key = MZP(pair->Key); - - if (LocateValueAll(g, MVP(pair->Vlp))) - return true; - - } // endfor i - - I--; - } // endif I - - return false; -} // end of LocateObjectAll - -/*********************************************************************************/ -/* Locate a JSON Value. */ -/*********************************************************************************/ -my_bool BJNX::LocateValueAll(PGLOBAL g, PBVAL jvp) { - if (CompareTree(g, Bvalp, jvp)) - return AddPath(); - else if (jvp->Type == TYPE_JAR) - return LocateArrayAll(g, GetArray(jvp)); - else if (jvp->Type == TYPE_JOB) - return LocateObjectAll(g, GetObject(jvp)); - - return false; -} // end of LocateValueAll - -/*********************************************************************************/ -/* Compare two JSON trees. 
*/ -/*********************************************************************************/ -my_bool BJNX::CompareTree(PGLOBAL g, PBVAL jp1, PBVAL jp2) -{ - if (!jp1 || !jp2 || jp1->Type != jp2->Type || GetSize(jp1) != GetSize(jp2)) - return false; - - my_bool found = true; - - if (jp1->Type == TYPE_JAR) { - for (int i = 0; found && i < GetArraySize(jp1); i++) - found = (CompareValues(g, GetArrayValue(jp1, i), GetArrayValue(jp2, i))); - - } else if (jp1->Type == TYPE_JOB) { - PBPR p1 = MPP(jp1->To_Val), p2 = MPP(jp2->To_Val); - - // Keys can be differently ordered - for (; found && p1 && p2; p1 = MPP(p1->Next)) - found = CompareValues(g, MVP(p1->Vlp), GetKeyValue(p2, MZP(p1->Key))); - - } else if (jp1->Type == TYPE_JVAL) { - found = CompareTree(g, MVP(jp1->To_Val), (MVP(jp2->To_Val))); - } else - found = CompareValues(g, jp1, jp2); - - return found; -} // end of CompareTree - -/*********************************************************************************/ -/* Compare two VAL values and return true if they are equal. */ -/*********************************************************************************/ -my_bool BJNX::CompareValues(PGLOBAL g, PBVAL v1, PBVAL v2) -{ - my_bool b = false; - - if (v1 && v2) - switch (v1->Type) { - case TYPE_JAR: - if (v2->Type == TYPE_JAR) - b = CompareTree(g, MVP(v1->To_Val), MVP(v2->To_Val)); - - break; - case TYPE_STRG: - if (v2->Type == TYPE_STRG) { - if (v1->Nd || v2->Nd) // Case insensitive - b = (!stricmp(MZP(v1->To_Val), MZP(v2->To_Val))); - else - b = (!strcmp(MZP(v1->To_Val), MZP(v2->To_Val))); - - } // endif Type - - break; - case TYPE_DTM: - if (v2->Type == TYPE_DTM) - b = (!strcmp(MZP(v1->To_Val), MZP(v2->To_Val))); - - break; - case TYPE_INTG: - if (v2->Type == TYPE_INTG) - b = (v1->N == v2->N); - else if (v2->Type == TYPE_BINT) - b = ((longlong)v1->N == LLN(v2->To_Val)); - - break; - case TYPE_BINT: - if (v2->Type == TYPE_INTG) - b = (LLN(v1->To_Val) == (longlong)v2->N); - else if (v2->Type == TYPE_BINT) - b = (LLN(v1->To_Val) == LLN(v2->To_Val)); - - break; - case TYPE_FLOAT: - if (v2->Type == TYPE_FLOAT) - b = (v1->F == v2->F); - else if (v2->Type == TYPE_DBL) - b = ((double)v1->F == DBL(v2->To_Val)); - - break; - case TYPE_DBL: - if (v2->Type == TYPE_DBL) - b = (DBL(v1->To_Val) == DBL(v2->To_Val)); - else if (v2->Type == TYPE_FLOAT) - b = (DBL(v1->To_Val) == (double)v2->F); - - break; - case TYPE_BOOL: - if (v2->Type == TYPE_BOOL) - b = (v1->B == v2->B); - - break; - case TYPE_NULL: - b = (v2->Type == TYPE_NULL); - break; - default: - break; - } // endswitch Type - - else - b = (!v1 && !v2); - - return b; -} // end of CompareValues - -/*********************************************************************************/ -/* Add the found path to the list. */ -/*********************************************************************************/ -my_bool BJNX::AddPath(void) { - char s[16]; - - if (Jp->WriteStr("\"$")) - return true; - - for (int i = 0; i <= I; i++) { - if (Jpnp[i].Type == TYPE_JAR) { - sprintf(s, "[%d]", Jpnp[i].N + B); - - if (Jp->WriteStr(s)) - return true; - - } else { - if (Jp->WriteChr('.')) - return true; - - if (Jp->WriteStr(Jpnp[i].Key)) - return true; - - } // endif's - - } // endfor i - - if (Jp->WriteStr("\",")) - return true; - - return false; -} // end of AddPath - -/*********************************************************************************/ -/* Make a BVAL value from the passed argument. 
*/ -/*********************************************************************************/ -static PBVAL MakeBinValue(PGLOBAL g, UDF_ARGS* args, uint i) { - char* sap = (args->arg_count > i) ? args->args[i] : NULL; - int n, len; - int ci; - longlong bigint; - void* Base = g->Sarea; // Required by MOF - BDOC doc(Base); - PBVAL bp; - PBVAL bvp = doc.SubAllocVal(g); - - if (sap) switch (args->arg_type[i]) { - case STRING_RESULT: - if ((len = args->lengths[i])) { - if ((n = IsJson(args, i)) < 3) - sap = MakePSZ(g, args, i); - - if (n) { - if (n == 2) { - if (!(sap = GetJsonFile(g, sap))) { - PUSH_WARNING(g->Message); - return NULL; - } // endif sap - - len = strlen(sap); - } // endif 2 - - if (!(bp = doc.ParseJson(g, sap, strlen(sap)))) - PUSH_WARNING(g->Message); - - bvp = bp; - } else { - // Check whether this string is a valid json string - JsonMemSave(g); - - if (!(bvp = doc.ParseJson(g, sap, strlen(sap)))) { - // Recover suballocated memory - JsonSubSet(g); - ci = (strnicmp(args->attributes[i], "ci", 2)) ? 0 : 1; - bvp = doc.SubAllocVal(g, MOF(sap), TYPE_STRG, ci); - } else - g->Saved_Size = 0; - - } // endif n - - } // endif len - - break; - case INT_RESULT: - bigint = *(longlong*)sap; - - if ((bigint == 0LL && !strcmp(args->attributes[i], "FALSE")) || - (bigint == 1LL && !strcmp(args->attributes[i], "TRUE"))) - doc.SetBool(bvp, (bool)bigint); - else - doc.SetBigint(g, bvp, bigint); - - break; - case REAL_RESULT: - doc.SetFloat(bvp, *(double*)sap); - break; - case DECIMAL_RESULT: - doc.SetFloat(bvp, atof(MakePSZ(g, args, i))); - break; - case TIME_RESULT: - case ROW_RESULT: - default: - bvp = NULL; - break; - } // endswitch arg_type - - return bvp; -} // end of MakeBinValue - -/*********************************************************************************/ -/* Test BJSON parse and serialize. 
*/ -/*********************************************************************************/ -my_bool json_test_bson_init(UDF_INIT* initid, UDF_ARGS* args, char* message) { - unsigned long reslen, memlen, more = 1000; - - if (args->arg_count == 0) { - strcpy(message, "At least 1 argument required (json)"); - return true; - } else if (!IsJson(args, 0) && args->arg_type[0] != STRING_RESULT) { - strcpy(message, "First argument must be a json item"); - return true; - } else - CalcLen(args, false, reslen, memlen); - - return JsonInit(initid, args, message, true, reslen, memlen, more); -} // end of json_test_bson_init - -char* json_test_bson(UDF_INIT* initid, UDF_ARGS* args, char* result, - unsigned long* res_length, char* is_null, char* error) { - char* str = NULL, * sap = NULL, * fn = NULL; - int pretty = 1; - PBVAL bvp; - PGLOBAL g = (PGLOBAL)initid->ptr; - BDOC doc(g); - - if (g->N) { - str = (char*)g->Activityp; - goto err; - } else if (initid->const_item) - g->N = 1; - - try { - if (!g->Xchk) { - if (CheckMemory(g, initid, args, 1, !g->Xchk)) { - PUSH_WARNING("CheckMemory error"); - *error = 1; - goto err; - } else if (!(bvp = MakeBinValue(g, args, 0))) { - PUSH_WARNING(g->Message); - goto err; - } // endif bvp - - if (g->Mrr) { // First argument is a constant - g->Xchk = bvp; - JsonMemSave(g); - } // endif Mrr - - } else - bvp = (PBVAL)g->Xchk; - - for (uint i = 1; i < args->arg_count; i++) - if (args->arg_type[i] == STRING_RESULT) - fn = args->args[i]; - else if (args->arg_type[i] == INT_RESULT) - pretty = (int)*(longlong*)args->args[i]; - - // Serialize the parse tree - str = doc.Serialize(g, bvp, fn, pretty); - - if (initid->const_item) - // Keep result of constant function - g->Activityp = (PACTIVITY)str; - - } catch (int n) { - xtrc(1, "json_test_bson: error %d: %s\n", n, g->Message); - PUSH_WARNING(g->Message); - *error = 1; - str = NULL; - } catch (const char* msg) { - strcpy(g->Message, msg); - PUSH_WARNING(g->Message); - *error = 1; - str = NULL; - } // end catch - -err: - if (!str) { - *res_length = 0; - *is_null = 1; - } else - *res_length = strlen(str); - - return str; -} // end of json_test_bson - -void json_test_bson_deinit(UDF_INIT* initid) { - JsonFreeMem((PGLOBAL)initid->ptr); -} // end of json_test_bson_deinit - -/*********************************************************************************/ -/* Locate a value in a Json tree. 
*/ -/*********************************************************************************/ -my_bool jsonlocate_bson_init(UDF_INIT* initid, UDF_ARGS* args, char* message) { - unsigned long reslen, memlen, more = 1000; - - if (args->arg_count < 2) { - strcpy(message, "At least 2 arguments required"); - return true; - } else if (!IsJson(args, 0) && args->arg_type[0] != STRING_RESULT) { - strcpy(message, "First argument must be a json item"); - return true; - } else if (args->arg_count > 2 && args->arg_type[2] != INT_RESULT) { - strcpy(message, "Third argument is not an integer (rank)"); - return true; - } // endifs args - - CalcLen(args, false, reslen, memlen); - - // TODO: calculate this - if (IsJson(args, 0) == 3) - more = 0; - - return JsonInit(initid, args, message, true, reslen, memlen, more); -} // end of jsonlocate_bson_init - -char* jsonlocate_bson(UDF_INIT* initid, UDF_ARGS* args, char* result, - unsigned long* res_length, char* is_null, char* error) { - char* path = NULL; - int k; - PBVAL bvp, bvp2; - PBJNX bnxp; - PGLOBAL g = (PGLOBAL)initid->ptr; - - if (g->N) { - if (g->Activityp) { - path = (char*)g->Activityp; - *res_length = strlen(path); - return path; - } else { - *res_length = 0; - *is_null = 1; - return NULL; - } // endif Activityp - - } else if (initid->const_item) - g->N = 1; - - try { - if (!g->Xchk) { - if (CheckMemory(g, initid, args, 1, !g->Xchk)) { - PUSH_WARNING("CheckMemory error"); - *error = 1; - goto err; - } else - bvp = MakeBinValue(g, args, 0); - - if (!bvp) { - PUSH_WARNING("First argument is not a valid JSON item"); - goto err; - } // endif bvp - - if (g->Mrr) { // First argument is a constant - g->Xchk = bvp; - JsonMemSave(g); - } // endif Mrr - - } else - bvp = (PBVAL)g->Xchk; - - // The item to locate - bvp2 = MakeBinValue(g, args, 1); - - k = (args->arg_count > 2) ? (int)*(long long*)args->args[2] : 1; - - bnxp = new(g) BJNX(g, bvp, TYPE_STRING); - path = bnxp->Locate(g, bvp, bvp2, k); - - if (initid->const_item) - // Keep result of constant function - g->Activityp = (PACTIVITY)path; - - } catch (int n) { - xtrc(1, "Exception %d: %s\n", n, g->Message); - PUSH_WARNING(g->Message); - *error = 1; - path = NULL; - } catch (const char* msg) { - strcpy(g->Message, msg); - PUSH_WARNING(g->Message); - *error = 1; - path = NULL; - } // end catch - -err: - if (!path) { - *res_length = 0; - *is_null = 1; - } else - *res_length = strlen(path); - - return path; -} // end of jsonlocate_bson - -void jsonlocate_bson_deinit(UDF_INIT* initid) { - JsonFreeMem((PGLOBAL)initid->ptr); -} // end of jsonlocate_bson_deinit - -/*********************************************************************************/ -/* Locate all occurences of a value in a Json tree. 
*/ -/*********************************************************************************/ -my_bool json_locate_all_bson_init(UDF_INIT* initid, UDF_ARGS* args, char* message) -{ - unsigned long reslen, memlen, more = 1000; - - if (args->arg_count < 2) { - strcpy(message, "At least 2 arguments required"); - return true; - } else if (!IsJson(args, 0) && args->arg_type[0] != STRING_RESULT) { - strcpy(message, "First argument must be a json item"); - return true; - } else if (args->arg_count > 2 && args->arg_type[2] != INT_RESULT) { - strcpy(message, "Third argument is not an integer (Depth)"); - return true; - } // endifs - - CalcLen(args, false, reslen, memlen); - - // TODO: calculate this - if (IsJson(args, 0) == 3) - more = 0; - - return JsonInit(initid, args, message, true, reslen, memlen, more); -} // end of json_locate_all_bson_init - -char* json_locate_all_bson(UDF_INIT* initid, UDF_ARGS* args, char* result, - unsigned long* res_length, char* is_null, char* error) -{ - char *path = NULL; - int mx = 10; - PBVAL bvp, bvp2; - PBJNX bnxp; - PGLOBAL g = (PGLOBAL)initid->ptr; - - if (g->N) { - if (g->Activityp) { - path = (char*)g->Activityp; - *res_length = strlen(path); - return path; - } else { - *error = 1; - *res_length = 0; - *is_null = 1; - return NULL; - } // endif Activityp - - } else if (initid->const_item) - g->N = 1; - - try { - if (!g->Xchk) { - if (CheckMemory(g, initid, args, 1, true)) { - PUSH_WARNING("CheckMemory error"); - *error = 1; - goto err; - } else - bvp = MakeBinValue(g, args, 0); - - if (!bvp) { - PUSH_WARNING("First argument is not a valid JSON item"); - goto err; - } // endif bvp - - if (g->Mrr) { // First argument is a constant - g->Xchk = bvp; - JsonMemSave(g); - } // endif Mrr - - } else - bvp = (PBVAL)g->Xchk; - - // The item to locate - bvp2 = MakeBinValue(g, args, 1); - - if (args->arg_count > 2) - mx = (int)*(long long*)args->args[2]; - - bnxp = new(g) BJNX(g, bvp, TYPE_STRING); - path = bnxp->LocateAll(g, bvp, bvp2, mx); - - if (initid->const_item) - // Keep result of constant function - g->Activityp = (PACTIVITY)path; - - } catch (int n) { - xtrc(1, "Exception %d: %s\n", n, g->Message); - PUSH_WARNING(g->Message); - *error = 1; - path = NULL; - } catch (const char* msg) { - strcpy(g->Message, msg); - PUSH_WARNING(g->Message); - *error = 1; - path = NULL; - } // end catch - -err: - if (!path) { - *res_length = 0; - *is_null = 1; - } else - *res_length = strlen(path); - - return path; -} // end of json_locate_all_bson - -void json_locate_all_bson_deinit(UDF_INIT* initid) { - JsonFreeMem((PGLOBAL)initid->ptr); -} // end of json_locate_all_bson_deinit - diff --git a/storage/connect/jsonudf.h b/storage/connect/jsonudf.h index 886f380d426..2a2b2cac20e 100644 --- a/storage/connect/jsonudf.h +++ b/storage/connect/jsonudf.h @@ -5,12 +5,13 @@ /* */ /* This file contains the JSON UDF function and class declares. */ /*********************************************************************************/ +#pragma once #include "global.h" #include "plgdbsem.h" #include "block.h" #include "osutil.h" #include "maputil.h" -#include "bson.h" +#include "json.h" #define UDF_EXEC_ARGS \ UDF_INIT*, UDF_ARGS*, char*, unsigned long*, char*, char* @@ -51,6 +52,28 @@ typedef struct _jnode { typedef class JSNX *PJSNX; +/*********************************************************************************/ +/* The JSON tree node. Can be an Object or an Array. 
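These helpers were previously private to jsonudf.cpp (most of them static); they are declared here so the new BSON UDFs in bsonudf.cpp can share them. The typical init-time sequence, as used by the *_bson_init functions (sketch):

  // unsigned long reslen, memlen, more = 1000;
  // CalcLen(args, false, reslen, memlen);
  // return JsonInit(initid, args, message, true, reslen, memlen, more);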
*/ +/*********************************************************************************/ +bool IsNum(PSZ s); +char *NextChr(PSZ s, char sep); +char *GetJsonNull(void); +uint GetJsonGrpSize(void); +my_bool JsonSubSet(PGLOBAL g); +my_bool CalcLen(UDF_ARGS* args, my_bool obj, unsigned long& reslen, + unsigned long& memlen, my_bool mod = false); +my_bool JsonInit(UDF_INIT* initid, UDF_ARGS* args, char* message, my_bool mbn, + unsigned long reslen, unsigned long memlen, + unsigned long more = 0); +my_bool CheckMemory(PGLOBAL g, UDF_INIT* initid, UDF_ARGS* args, uint n, + my_bool m, my_bool obj = false, my_bool mod = false); +PSZ MakePSZ(PGLOBAL g, UDF_ARGS* args, int i); +int IsJson(UDF_ARGS* args, uint i, bool b = false); +char *GetJsonFile(PGLOBAL g, char* fn); + +/*********************************************************************************/ +/* The JSON UDF functions. */ +/*********************************************************************************/ extern "C" { DllExport my_bool jsonvalue_init(UDF_INIT*, UDF_ARGS*, char*); DllExport char *jsonvalue(UDF_EXEC_ARGS); @@ -272,16 +295,16 @@ extern "C" { DllExport my_bool countin_init(UDF_INIT*, UDF_ARGS*, char*); DllExport long long countin(UDF_EXEC_ARGS); -} // extern "C" +} // extern "C" /*********************************************************************************/ /* Structure JPN. Used to make the locate path. */ /*********************************************************************************/ typedef struct _jpn { - enum JTYP Type; - PCSZ Key; - int N; + int Type; + PCSZ Key; + int N; } JPN, *PJPN; /*********************************************************************************/ @@ -386,91 +409,3 @@ public: uint i; int k, recl; }; // end of class JUP - - -/* --------------------------- New Testing BJSON Stuff --------------------------*/ - -typedef class BJNX* PBJNX; - -/*********************************************************************************/ -/* Class BJNX: BJSON access methods. 
*/ -/*********************************************************************************/ -class BJNX : public BDOC { -public: - // Constructors - BJNX(PGLOBAL g, PBVAL row, int type, int len = 64, int prec = 0, my_bool wr = false); - - // Implementation - int GetPrecision(void) { return Prec; } - PVAL GetValue(void) { return Value; } - - // Methods - my_bool SetJpath(PGLOBAL g, char* path, my_bool jb = false); - my_bool ParseJpath(PGLOBAL g); - void ReadValue(PGLOBAL g); - PBVAL GetRowValue(PGLOBAL g, PBVAL row, int i, my_bool b = true); - PBVAL GetJson(PGLOBAL g); - my_bool CheckPath(PGLOBAL g); - my_bool WriteValue(PGLOBAL g, PBVAL jvalp); - char* Locate(PGLOBAL g, PBVAL jsp, PBVAL jvp, int k = 1); - char* LocateAll(PGLOBAL g, PBVAL jsp, PBVAL jvp, int mx = 10); - -protected: - my_bool SetArrayOptions(PGLOBAL g, char* p, int i, PSZ nm); - PVAL GetColumnValue(PGLOBAL g, PBVAL row, int i); - PVAL ExpandArray(PGLOBAL g, PBVAL arp, int n); - PVAL CalculateArray(PGLOBAL g, PBVAL arp, int n); - PVAL MakeJson(PGLOBAL g, PBVAL bvp); - void SetJsonValue(PGLOBAL g, PVAL vp, PBVAL vlp); - PBVAL GetRow(PGLOBAL g); - my_bool CompareValues(PGLOBAL g, PBVAL v1, PBVAL v2); - my_bool LocateArray(PGLOBAL g, PBVAL jarp); - my_bool LocateObject(PGLOBAL g, PBPR jobp); - my_bool LocateValue(PGLOBAL g, PBVAL jvp); - my_bool LocateArrayAll(PGLOBAL g, PBVAL jarp); - my_bool LocateObjectAll(PGLOBAL g, PBPR jobp); - my_bool LocateValueAll(PGLOBAL g, PBVAL jvp); - my_bool CompareTree(PGLOBAL g, PBVAL jp1, PBVAL jp2); - my_bool AddPath(void); - - // Default constructor not to be used - BJNX(void) {} - - // Members - PBVAL Row; - PBVAL Bvalp; - PJPN Jpnp; - JOUTSTR* Jp; - JNODE* Nodes; // The intermediate objects - PVAL Value; - PVAL MulVal; // To value used by multiple column - char* Jpath; // The json path - int Buf_Type; - int Long; - int Prec; - int Nod; // The number of intermediate objects - int Xnod; // Index of multiple values - int K; // Kth item to locate - int I; // Index of JPN - int Imax; // Max number of JPN's - int B; // Index base - my_bool Xpd; // True for expandable column - my_bool Parsed; // True when parsed - my_bool Found; // Item found by locate - my_bool Wr; // Write mode - my_bool Jb; // Must return json item -}; // end of class BJNX - -extern "C" { -DllExport my_bool json_test_bson_init(UDF_INIT*, UDF_ARGS*, char*); -DllExport char* json_test_bson(UDF_EXEC_ARGS); -DllExport void json_test_bson_deinit(UDF_INIT*); - -DllExport my_bool jsonlocate_bson_init(UDF_INIT*, UDF_ARGS*, char*); -DllExport char* jsonlocate_bson(UDF_EXEC_ARGS); -DllExport void jsonlocate_bson_deinit(UDF_INIT*); - -DllExport my_bool json_locate_all_bson_init(UDF_INIT*, UDF_ARGS*, char*); -DllExport char* json_locate_all_bson(UDF_EXEC_ARGS); -DllExport void json_locate_all_bson_deinit(UDF_INIT*); -} // extern "C" From 950bf6ab53d7b6d5db0e7d986b81fbd5709e98f6 Mon Sep 17 00:00:00 2001 From: Olivier Bertrand Date: Fri, 27 Nov 2020 10:25:47 +0100 Subject: [PATCH 031/150] - Begin implementation of BSON modified: storage/connect/bson.cpp modified: storage/connect/bson.h modified: storage/connect/bsonudf.cpp modified: storage/connect/bsonudf.h modified: storage/connect/jsonudf.cpp --- storage/connect/bson.cpp | 17 +- storage/connect/bson.h | 8 +- storage/connect/bsonudf.cpp | 421 +++++++++++++++++++++++++++--------- storage/connect/bsonudf.h | 30 ++- storage/connect/jsonudf.cpp | 2 + 5 files changed, 357 insertions(+), 121 deletions(-) diff --git a/storage/connect/bson.cpp b/storage/connect/bson.cpp index 61e5eb9fe16..4725b67c06b 
100644 --- a/storage/connect/bson.cpp +++ b/storage/connect/bson.cpp @@ -653,7 +653,7 @@ PSZ BDOC::Serialize(PGLOBAL g, PBVAL bvp, char* fn, int pretty) { err = SerializeValue(MVP(bvp->To_Val)); break; default: - strcpy(g->Message, "Invalid json tree"); + err = SerializeValue(bvp); } // endswitch Type if (fs) { @@ -760,7 +760,7 @@ bool BDOC::SerializeObject(OFFSET obp) { bool BDOC::SerializeValue(PBVAL jvp) { char buf[64]; - switch (jvp->Type) { + if (jvp) switch (jvp->Type) { case TYPE_JAR: return SerializeArray(jvp->To_Val, false); case TYPE_JOB: @@ -788,8 +788,7 @@ bool BDOC::SerializeValue(PBVAL jvp) { return jp->WriteStr("???"); // TODO } // endswitch Type - strcpy(jp->g->Message, "Unrecognized value"); - return true; + return jp->WriteStr("null"); } // end of SerializeValue /* --------------------------- Class BJSON --------------------------- */ @@ -860,7 +859,7 @@ int BJSON::GetObjectSize(PBPR bop, bool b) /***********************************************************************/ PBPR BJSON::AddPair(PGLOBAL g, PBPR bop, PSZ key, OFFSET val) { - PBPR brp, nrp = SubAllocPair(g, MOF(key), val); + PBPR brp, nrp = SubAllocPair(g, key, val); if (bop) { for (brp = bop; brp->Next; brp = MPP(brp->Next)); @@ -995,10 +994,10 @@ PBPR BJSON::SetKeyValue(PGLOBAL g, PBPR bop, OFFSET bvp, PSZ key) prp = brp; if (!brp) - prp->Vlp = MOF(SubAllocPair(g, MOF(key), bvp)); + prp->Vlp = MOF(SubAllocPair(g, key, bvp)); } else - bop = SubAllocPair(g, MOF(key), bvp); + bop = SubAllocPair(g, key, bvp); // Return the first pair of this object return bop; @@ -1094,7 +1093,7 @@ PBVAL BJSON::AddArrayValue(PGLOBAL g, PBVAL bap, PBVAL nvp, int* x) nvp = SubAllocVal(g); if (bap) { - int i = 0, n = *x; + int i = 0, n = (x) ? *x : INT_MAX32; PBVAL bvp; for (bvp = bap; bvp; bvp = MVP(bvp->Next), i++) @@ -1240,7 +1239,7 @@ PBVAL BJSON::SubAllocVal(PGLOBAL g) bvp->To_Val = 0; bvp->Nd = 0; - bvp->Type = TYPE_UNKNOWN; + bvp->Type = TYPE_NULL; bvp->Next = 0; return bvp; } // end of SubAllocVal diff --git a/storage/connect/bson.h b/storage/connect/bson.h index bffda8ea316..284bee1da48 100644 --- a/storage/connect/bson.h +++ b/storage/connect/bson.h @@ -76,8 +76,14 @@ public: // SubAlloc functions void* BsonSubAlloc(PGLOBAL g, size_t size); PBPR SubAllocPair(PGLOBAL g, OFFSET key, OFFSET val = 0); + PBPR SubAllocPair(PGLOBAL g, PSZ key, OFFSET val = 0) + {return SubAllocPair(g, MOF(key), val);} PBVAL SubAllocVal(PGLOBAL g); - PBVAL SubAllocVal(PGLOBAL g, OFFSET toval, int type = TYPE_UNKNOWN, short nd = 0); + PBVAL SubAllocVal(PGLOBAL g, OFFSET toval, int type = TYPE_NULL, short nd = 0); + PBVAL SubAllocVal(PGLOBAL g, PBVAL toval, int type = TYPE_NULL, short nd = 0) + {return SubAllocVal(g, MOF(toval), type, nd);} + PBVAL SubAllocVal(PGLOBAL g, PSZ str, int type = TYPE_STRG, short nd = 0) + {return SubAllocVal(g, MOF(str), type, nd);} PBVAL SubAllocVal(PGLOBAL g, PVAL valp); PBVAL DupVal(PGLOBAL g, PBVAL bvp); diff --git a/storage/connect/bsonudf.cpp b/storage/connect/bsonudf.cpp index bbb279ce6ce..95cc8aa7da8 100644 --- a/storage/connect/bsonudf.cpp +++ b/storage/connect/bsonudf.cpp @@ -1,7 +1,7 @@ /****************** bsonudf C++ Program Source Code File (.CPP) ******************/ /* PROGRAM NAME: bsonudf Version 1.0 */ /* (C) Copyright to the author Olivier BERTRAND 2020 */ -/* This program are the BSON User Defined Functions . */ +/* This program are the BSON User Defined Functions. 
*/ /*********************************************************************************/ /*********************************************************************************/ @@ -25,7 +25,7 @@ #else #define PUSH_WARNING(M) htrc(M) #endif -#define M 9 +#define M 6 /* --------------------------------- JSON UDF ---------------------------------- */ @@ -49,7 +49,8 @@ inline void JsonFreeMem(PGLOBAL g) { /*********************************************************************************/ /* SubAlloc a new BJNX class with protection against memory exhaustion. */ /*********************************************************************************/ -static PBJNX BjnxNew(PGLOBAL g, PBVAL vlp, int type, int len) { +static PBJNX BjnxNew(PGLOBAL g, PBVAL vlp, int type, int len) +{ PBJNX bjnx; try { @@ -71,7 +72,8 @@ static PBJNX BjnxNew(PGLOBAL g, PBVAL vlp, int type, int len) { /* BSNX public constructor. */ /*********************************************************************************/ BJNX::BJNX(PGLOBAL g, PBVAL row, int type, int len, int prec, my_bool wr) - : BDOC(g->Sarea) { + : BDOC(g->Sarea) +{ Row = row; Bvalp = NULL; Jpnp = NULL; @@ -99,7 +101,8 @@ BJNX::BJNX(PGLOBAL g, PBVAL row, int type, int len, int prec, my_bool wr) /*********************************************************************************/ /* SetJpath: set and parse the json path. */ /*********************************************************************************/ -my_bool BJNX::SetJpath(PGLOBAL g, char* path, my_bool jb) { +my_bool BJNX::SetJpath(PGLOBAL g, char* path, my_bool jb) +{ // Check Value was allocated if (!Value) return true; @@ -117,7 +120,8 @@ my_bool BJNX::SetJpath(PGLOBAL g, char* path, my_bool jb) { /*********************************************************************************/ /* Analyse array processing options. */ /*********************************************************************************/ -my_bool BJNX::SetArrayOptions(PGLOBAL g, char* p, int i, PSZ nm) { +my_bool BJNX::SetArrayOptions(PGLOBAL g, char* p, int i, PSZ nm) +{ int n = (int)strlen(p); my_bool dg = true, b = false; PJNODE jnp = &Nodes[i]; @@ -243,7 +247,8 @@ my_bool BJNX::SetArrayOptions(PGLOBAL g, char* p, int i, PSZ nm) { /* creating the table. It permits to indicate the position of the node */ /* corresponding to that column. */ /*********************************************************************************/ -my_bool BJNX::ParseJpath(PGLOBAL g) { +my_bool BJNX::ParseJpath(PGLOBAL g) +{ char* p, * p1 = NULL, * p2 = NULL, * pbuf = NULL; int i; my_bool a, mul = false; @@ -329,7 +334,8 @@ my_bool BJNX::ParseJpath(PGLOBAL g) { /*********************************************************************************/ /* MakeJson: Serialize the json item and set value to it. */ /*********************************************************************************/ -PVAL BJNX::MakeJson(PGLOBAL g, PBVAL bvp) { +PVAL BJNX::MakeJson(PGLOBAL g, PBVAL bvp) +{ if (Value->IsTypeNum()) { strcpy(g->Message, "Cannot make Json for a numeric value"); Value->Reset(); @@ -345,7 +351,8 @@ PVAL BJNX::MakeJson(PGLOBAL g, PBVAL bvp) { /*********************************************************************************/ /* SetValue: Set a value from a JVALUE contains. 
*/ /*********************************************************************************/ -void BJNX::SetJsonValue(PGLOBAL g, PVAL vp, PBVAL vlp) { +void BJNX::SetJsonValue(PGLOBAL g, PVAL vp, PBVAL vlp) +{ if (vlp) { vp->SetNull(false); @@ -396,21 +403,24 @@ void BJNX::SetJsonValue(PGLOBAL g, PVAL vp, PBVAL vlp) { /*********************************************************************************/ /* GetJson: */ /*********************************************************************************/ -PBVAL BJNX::GetJson(PGLOBAL g) { +PBVAL BJNX::GetJson(PGLOBAL g) +{ return GetRowValue(g, Row, 0); } // end of GetJson /*********************************************************************************/ /* ReadValue: */ /*********************************************************************************/ -void BJNX::ReadValue(PGLOBAL g) { +void BJNX::ReadValue(PGLOBAL g) +{ Value->SetValue_pval(GetColumnValue(g, Row, 0)); } // end of ReadValue /*********************************************************************************/ /* GetColumnValue: */ /*********************************************************************************/ -PVAL BJNX::GetColumnValue(PGLOBAL g, PBVAL row, int i) { +PVAL BJNX::GetColumnValue(PGLOBAL g, PBVAL row, int i) +{ PBVAL vlp = GetRowValue(g, row, i); SetJsonValue(g, Value, vlp); @@ -420,7 +430,8 @@ PVAL BJNX::GetColumnValue(PGLOBAL g, PBVAL row, int i) { /*********************************************************************************/ /* GetRowValue: */ /*********************************************************************************/ -PBVAL BJNX::GetRowValue(PGLOBAL g, PBVAL row, int i, my_bool b) { +PBVAL BJNX::GetRowValue(PGLOBAL g, PBVAL row, int i, my_bool b) +{ my_bool expd = false; PBVAL bap; PBVAL vlp = NULL; @@ -488,7 +499,8 @@ PBVAL BJNX::GetRowValue(PGLOBAL g, PBVAL row, int i, my_bool b) { /*********************************************************************************/ /* ExpandArray: */ /*********************************************************************************/ -PVAL BJNX::ExpandArray(PGLOBAL g, PBVAL arp, int n) { +PVAL BJNX::ExpandArray(PGLOBAL g, PBVAL arp, int n) +{ strcpy(g->Message, "Expand cannot be done by this function"); return NULL; } // end of ExpandArray @@ -496,7 +508,8 @@ PVAL BJNX::ExpandArray(PGLOBAL g, PBVAL arp, int n) { /*********************************************************************************/ /* CalculateArray: NIY */ /*********************************************************************************/ -PVAL BJNX::CalculateArray(PGLOBAL g, PBVAL bap, int n) { +PVAL BJNX::CalculateArray(PGLOBAL g, PBVAL bap, int n) +{ #if 0 int i, ars = GetArraySize(bap), nv = 0; bool err; @@ -592,7 +605,8 @@ PVAL BJNX::CalculateArray(PGLOBAL g, PBVAL bap, int n) { /*********************************************************************************/ /* CheckPath: Checks whether the path exists in the document. */ /*********************************************************************************/ -my_bool BJNX::CheckPath(PGLOBAL g) { +my_bool BJNX::CheckPath(PGLOBAL g) +{ PBVAL val = NULL; PBVAL row = Row; @@ -632,7 +646,8 @@ my_bool BJNX::CheckPath(PGLOBAL g) { /***********************************************************************/ /* GetRow: Set the complete path of the object to be set. 
*/ /***********************************************************************/ -PBVAL BJNX::GetRow(PGLOBAL g) { +PBVAL BJNX::GetRow(PGLOBAL g) +{ PBVAL val = NULL; PBVAL arp; PBVAL nwr, row = Row; @@ -711,7 +726,8 @@ PBVAL BJNX::GetRow(PGLOBAL g) { /***********************************************************************/ /* WriteValue: */ /***********************************************************************/ -my_bool BJNX::WriteValue(PGLOBAL g, PBVAL jvalp) { +my_bool BJNX::WriteValue(PGLOBAL g, PBVAL jvalp) +{ PBPR objp = NULL; PBVAL arp = NULL; PBVAL jvp = NULL; @@ -751,7 +767,8 @@ my_bool BJNX::WriteValue(PGLOBAL g, PBVAL jvalp) { /*********************************************************************************/ /* Locate a value in a JSON tree: */ /*********************************************************************************/ -PSZ BJNX::Locate(PGLOBAL g, PBVAL jsp, PBVAL jvp, int k) { +PSZ BJNX::Locate(PGLOBAL g, PBVAL jsp, PBVAL jvp, int k) +{ PSZ str = NULL; my_bool b = false, err = true; @@ -808,7 +825,8 @@ PSZ BJNX::Locate(PGLOBAL g, PBVAL jsp, PBVAL jvp, int k) { /*********************************************************************************/ /* Locate in a JSON Array. */ /*********************************************************************************/ -my_bool BJNX::LocateArray(PGLOBAL g, PBVAL jarp) { +my_bool BJNX::LocateArray(PGLOBAL g, PBVAL jarp) +{ char s[16]; int n = GetArraySize(jarp); size_t m = Jp->N; @@ -831,7 +849,8 @@ my_bool BJNX::LocateArray(PGLOBAL g, PBVAL jarp) { /*********************************************************************************/ /* Locate in a JSON Object. */ /*********************************************************************************/ -my_bool BJNX::LocateObject(PGLOBAL g, PBPR jobp) { +my_bool BJNX::LocateObject(PGLOBAL g, PBPR jobp) +{ size_t m; if (Jp->WriteChr('.')) @@ -856,7 +875,8 @@ my_bool BJNX::LocateObject(PGLOBAL g, PBPR jobp) { /*********************************************************************************/ /* Locate a JSON Value. */ /*********************************************************************************/ -my_bool BJNX::LocateValue(PGLOBAL g, PBVAL jvp) { +my_bool BJNX::LocateValue(PGLOBAL g, PBVAL jvp) +{ if (CompareTree(g, Bvalp, jvp)) Found = (--K == 0); else if (jvp->Type == TYPE_JAR) @@ -870,7 +890,8 @@ my_bool BJNX::LocateValue(PGLOBAL g, PBVAL jvp) { /*********************************************************************************/ /* Locate all occurrences of a value in a JSON tree: */ /*********************************************************************************/ -PSZ BJNX::LocateAll(PGLOBAL g, PBVAL jsp, PBVAL bvp, int mx) { +PSZ BJNX::LocateAll(PGLOBAL g, PBVAL jsp, PBVAL bvp, int mx) +{ PSZ str = NULL; my_bool b = false, err = true; PJPN jnp; @@ -930,7 +951,8 @@ PSZ BJNX::LocateAll(PGLOBAL g, PBVAL jsp, PBVAL bvp, int mx) { /*********************************************************************************/ /* Locate in a JSON Array. */ /*********************************************************************************/ -my_bool BJNX::LocateArrayAll(PGLOBAL g, PBVAL jarp) { +my_bool BJNX::LocateArrayAll(PGLOBAL g, PBVAL jarp) +{ int i = 0; if (I < Imax) { @@ -954,7 +976,8 @@ my_bool BJNX::LocateArrayAll(PGLOBAL g, PBVAL jarp) { /*********************************************************************************/ /* Locate in a JSON Object. 
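Locate and LocateAll are what the locating UDFs further down build on: the caller parses both the document and the searched value into PBVAL trees (MakeBinValue does that for UDF arguments), wraps the document in a BJNX, and gets back a path string such as "$[1].name", or NULL when nothing matches or an error was raised (the reason is then in g->Message). A minimal sketch; the wrapper name FindValue is illustrative only.

// Sketch only: jsp is the parsed document, jvp the value to search for.
static PSZ FindValue(PGLOBAL g, PBVAL jsp, PBVAL jvp, bool all)
{
  PBJNX bnxp = new(g) BJNX(g, jsp, TYPE_STRING);

  return (all) ? bnxp->LocateAll(g, jsp, jvp, 10)  // every match, down to 10 nested levels
               : bnxp->Locate(g, jsp, jvp);        // first match only (k defaults to 1)
} // end of FindValue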
*/ /*********************************************************************************/ -my_bool BJNX::LocateObjectAll(PGLOBAL g, PBPR jobp) { +my_bool BJNX::LocateObjectAll(PGLOBAL g, PBPR jobp) +{ if (I < Imax) { Jpnp[++I].Type = TYPE_JOB; @@ -975,7 +998,8 @@ my_bool BJNX::LocateObjectAll(PGLOBAL g, PBPR jobp) { /*********************************************************************************/ /* Locate a JSON Value. */ /*********************************************************************************/ -my_bool BJNX::LocateValueAll(PGLOBAL g, PBVAL jvp) { +my_bool BJNX::LocateValueAll(PGLOBAL g, PBVAL jvp) +{ if (CompareTree(g, Bvalp, jvp)) return AddPath(); else if (jvp->Type == TYPE_JAR) @@ -989,7 +1013,8 @@ my_bool BJNX::LocateValueAll(PGLOBAL g, PBVAL jvp) { /*********************************************************************************/ /* Compare two JSON trees. */ /*********************************************************************************/ -my_bool BJNX::CompareTree(PGLOBAL g, PBVAL jp1, PBVAL jp2) { +my_bool BJNX::CompareTree(PGLOBAL g, PBVAL jp1, PBVAL jp2) +{ if (!jp1 || !jp2 || jp1->Type != jp2->Type || GetSize(jp1) != GetSize(jp2)) return false; @@ -1017,7 +1042,8 @@ my_bool BJNX::CompareTree(PGLOBAL g, PBVAL jp1, PBVAL jp2) { /*********************************************************************************/ /* Compare two VAL values and return true if they are equal. */ /*********************************************************************************/ -my_bool BJNX::CompareValues(PGLOBAL g, PBVAL v1, PBVAL v2) { +my_bool BJNX::CompareValues(PGLOBAL g, PBVAL v1, PBVAL v2) +{ my_bool b = false; if (v1 && v2) @@ -1091,7 +1117,8 @@ my_bool BJNX::CompareValues(PGLOBAL g, PBVAL v1, PBVAL v2) { /*********************************************************************************/ /* Add the found path to the list. */ /*********************************************************************************/ -my_bool BJNX::AddPath(void) { +my_bool BJNX::AddPath(void) +{ char s[16]; if (Jp->WriteStr("\"$")) @@ -1121,86 +1148,270 @@ my_bool BJNX::AddPath(void) { return false; } // end of AddPath +/* -----------------------------Utility functions ------------------------------ */ + /*********************************************************************************/ /* Make a BVAL value from the passed argument. */ /*********************************************************************************/ -static PBVAL MakeBinValue(PGLOBAL g, UDF_ARGS* args, uint i) { +static PBVAL MakeBinValue(PGLOBAL g, UDF_ARGS* args, uint i) +{ char* sap = (args->arg_count > i) ? 
args->args[i] : NULL; int n, len; int ci; longlong bigint; - void* Base = g->Sarea; // Required by MOF - BDOC doc(Base); - PBVAL bp; - PBVAL bvp = doc.SubAllocVal(g); + BDOC doc(g->Sarea); + PBVAL bp, bvp = doc.SubAllocVal(g); - if (sap) switch (args->arg_type[i]) { - case STRING_RESULT: - if ((len = args->lengths[i])) { - if ((n = IsJson(args, i)) < 3) - sap = MakePSZ(g, args, i); + if (sap) { + if (args->arg_type[i] == STRING_RESULT) { + if ((len = args->lengths[i])) { + if ((n = IsJson(args, i)) < 3) + sap = MakePSZ(g, args, i); - if (n) { - if (n == 2) { - if (!(sap = GetJsonFile(g, sap))) { + if (n) { + if (n == 2) { + if (!(sap = GetJsonFile(g, sap))) { + PUSH_WARNING(g->Message); + return NULL; + } // endif sap + + len = strlen(sap); + } // endif 2 + + if (!(bp = doc.ParseJson(g, sap, strlen(sap)))) { PUSH_WARNING(g->Message); return NULL; - } // endif sap + } else + bvp = bp; - len = strlen(sap); - } // endif 2 + } else { + // Check whether this string is a valid json string + JsonMemSave(g); - if (!(bp = doc.ParseJson(g, sap, strlen(sap)))) - PUSH_WARNING(g->Message); + if (!(bp = doc.ParseJson(g, sap, strlen(sap)))) { + // Recover suballocated memory + JsonSubSet(g); + ci = (strnicmp(args->attributes[i], "ci", 2)) ? 0 : 1; + doc.SetString(bvp, sap, ci); + } else + bvp = bp; - bvp = bp; - } else { - // Check whether this string is a valid json string - JsonMemSave(g); - - if (!(bvp = doc.ParseJson(g, sap, strlen(sap)))) { - // Recover suballocated memory - JsonSubSet(g); - ci = (strnicmp(args->attributes[i], "ci", 2)) ? 0 : 1; - bvp = doc.SubAllocVal(g, MOF(sap), TYPE_STRG, ci); - } else g->Saved_Size = 0; + } // endif n - } // endif n + } // endif len - } // endif len + } else switch (args->arg_type[i]) { + case INT_RESULT: + bigint = *(longlong*)sap; - break; - case INT_RESULT: - bigint = *(longlong*)sap; + if ((bigint == 0LL && !strcmp(args->attributes[i], "FALSE")) || + (bigint == 1LL && !strcmp(args->attributes[i], "TRUE"))) + doc.SetBool(bvp, (bool)bigint); + else + doc.SetBigint(g, bvp, bigint); - if ((bigint == 0LL && !strcmp(args->attributes[i], "FALSE")) || - (bigint == 1LL && !strcmp(args->attributes[i], "TRUE"))) - doc.SetBool(bvp, (bool)bigint); - else - doc.SetBigint(g, bvp, bigint); + break; + case REAL_RESULT: + doc.SetFloat(bvp, *(double*)sap); + break; + case DECIMAL_RESULT: + doc.SetFloat(bvp, atof(MakePSZ(g, args, i))); + break; + case TIME_RESULT: + case ROW_RESULT: + default: + bvp->Type = TYPE_UNKNOWN; + break; + } // endswitch arg_type - break; - case REAL_RESULT: - doc.SetFloat(bvp, *(double*)sap); - break; - case DECIMAL_RESULT: - doc.SetFloat(bvp, atof(MakePSZ(g, args, i))); - break; - case TIME_RESULT: - case ROW_RESULT: - default: - bvp = NULL; - break; - } // endswitch arg_type + } // endif sap return bvp; } // end of MakeBinValue +/* ------------------------- Now the new Bin UDF's ----------------------------- */ + +/*********************************************************************************/ +/* Make a Json value containing the parameter. 
*/ +/*********************************************************************************/ +my_bool bsonvalue_init(UDF_INIT* initid, UDF_ARGS* args, char* message) +{ + unsigned long reslen, memlen; + + if (args->arg_count > 1) { + strcpy(message, "Cannot accept more than 1 argument"); + return true; + } else + CalcLen(args, false, reslen, memlen); + + return JsonInit(initid, args, message, false, reslen, memlen); +} // end of bsonvalue_init + +char* bsonvalue(UDF_INIT* initid, UDF_ARGS* args, char* result, + unsigned long* res_length, char*, char*) +{ + char *str; + PGLOBAL g = (PGLOBAL)initid->ptr; + + if (!g->Xchk) { + if (!CheckMemory(g, initid, args, 1, false)) { + BDOC doc(g->Sarea); + PBVAL bvp = MakeBinValue(g, args, 0); + + if (!(str = doc.Serialize(g, bvp, NULL, 0))) + str = strcpy(result, g->Message); + + } else + str = strcpy(result, g->Message); + + // Keep result of constant function + g->Xchk = (initid->const_item) ? str : NULL; + } else + str = (char*)g->Xchk; + + *res_length = strlen(str); + return str; +} // end of bsonValue + +void bsonvalue_deinit(UDF_INIT* initid) { + JsonFreeMem((PGLOBAL)initid->ptr); +} // end of bsonvalue_deinit + +/*********************************************************************************/ +/* Make a Bson array containing all the parameters. */ +/*********************************************************************************/ +my_bool bson_make_array_init(UDF_INIT* initid, UDF_ARGS* args, char* message) +{ + unsigned long reslen, memlen; + + CalcLen(args, false, reslen, memlen); + return JsonInit(initid, args, message, false, reslen, memlen); +} // end of bson_make_array_init + +char* bson_make_array(UDF_INIT* initid, UDF_ARGS* args, char* result, + unsigned long* res_length, char*, char*) +{ + char* str = NULL; + PGLOBAL g = (PGLOBAL)initid->ptr; + + if (!g->Xchk) { + if (!CheckMemory(g, initid, args, args->arg_count, false)) { + BDOC doc(g->Sarea); + PBVAL bvp = NULL, arp = NULL; + + for (uint i = 0; i < args->arg_count; i++) + bvp = doc.AddArrayValue(g, bvp, MakeBinValue(g, args, i)); + + arp = doc.SubAllocVal(g, bvp, TYPE_JAR); + + if (!(str = doc.Serialize(g, arp, NULL, 0))) + str = strcpy(result, g->Message); + + } else + str = strcpy(result, g->Message); + + // Keep result of constant function + g->Xchk = (initid->const_item) ? str : NULL; + } else + str = (char*)g->Xchk; + + *res_length = strlen(str); + return str; +} // end of bson_make_array + +void bson_make_array_deinit(UDF_INIT* initid) { + JsonFreeMem((PGLOBAL)initid->ptr); +} // end of bson_make_array_deinit + +/*********************************************************************************/ +/* Add one or several values to a Bson array. */ +/*********************************************************************************/ +my_bool bson_array_add_values_init(UDF_INIT* initid, UDF_ARGS* args, char* message) { + unsigned long reslen, memlen; + + if (args->arg_count < 2) { + strcpy(message, "This function must have at least 2 arguments"); + return true; + //} else if (!IsJson(args, 0, true)) { + // strcpy(message, "First argument must be a valid json string or item"); + // return true; + } else + CalcLen(args, false, reslen, memlen); + + if (!JsonInit(initid, args, message, true, reslen, memlen)) { + PGLOBAL g = (PGLOBAL)initid->ptr; + + // This is a constant function + g->N = (initid->const_item) ? 
1 : 0; + + // This is to avoid double execution when using prepared statements + if (IsJson(args, 0) > 1) + initid->const_item = 0; + + return false; + } else + return true; + +} // end of bson_array_add_values_init + +char* bson_array_add_values(UDF_INIT* initid, UDF_ARGS* args, char* result, + unsigned long* res_length, char* is_null, char*) { + char* str = NULL; + PGLOBAL g = (PGLOBAL)initid->ptr; + + if (!g->Xchk) { + if (!CheckMemory(g, initid, args, args->arg_count, true)) { + uint n = 1; + bool b = false; + BDOC doc(g->Sarea); + PBVAL bvp = NULL, arp = MakeBinValue(g, args, 0); + + if (arp->Type == TYPE_JAR) { + bvp = doc.GetArray(arp); + b = !bvp; + } else + n = 0; + + for (uint i = n; i < args->arg_count; i++) + bvp = doc.AddArrayValue(g, bvp, MakeBinValue(g, args, i)); + + if (!n) + arp = doc.SubAllocVal(g, bvp, TYPE_JAR); + else if (b) + doc.SetValueArr(arp, bvp); + +// str = MakeResult(g, args, top, args->arg_count); + str = doc.Serialize(g, arp, NULL, 0); + } // endif CheckMemory + + if (!str) { + PUSH_WARNING(g->Message); + str = args->args[0]; + } // endif str + + // Keep result of constant function + g->Xchk = (g->N) ? str : NULL; + } else + str = (char*)g->Xchk; + + if (!str) { + *res_length = 0; + *is_null = 1; + } else + *res_length = strlen(str); + + return str; +} // end of bson_array_add_values + +void bson_array_add_values_deinit(UDF_INIT* initid) { + JsonFreeMem((PGLOBAL)initid->ptr); +} // end of bson_array_add_values_deinit + /*********************************************************************************/ /* Test BJSON parse and serialize. */ /*********************************************************************************/ -my_bool json_test_bson_init(UDF_INIT* initid, UDF_ARGS* args, char* message) { +my_bool bson_test_init(UDF_INIT* initid, UDF_ARGS* args, char* message) { unsigned long reslen, memlen, more = 1000; if (args->arg_count == 0) { @@ -1213,9 +1424,9 @@ my_bool json_test_bson_init(UDF_INIT* initid, UDF_ARGS* args, char* message) { CalcLen(args, false, reslen, memlen); return JsonInit(initid, args, message, true, reslen, memlen, more); -} // end of json_test_bson_init +} // end of bson_test_init -char* json_test_bson(UDF_INIT* initid, UDF_ARGS* args, char* result, +char* bson_test(UDF_INIT* initid, UDF_ARGS* args, char* result, unsigned long* res_length, char* is_null, char* error) { char* str = NULL, * sap = NULL, * fn = NULL; int pretty = 1; @@ -1281,16 +1492,16 @@ err: *res_length = strlen(str); return str; -} // end of json_test_bson +} // end of bson_test -void json_test_bson_deinit(UDF_INIT* initid) { +void bson_test_deinit(UDF_INIT* initid) { JsonFreeMem((PGLOBAL)initid->ptr); -} // end of json_test_bson_deinit +} // end of bson_test_deinit /*********************************************************************************/ /* Locate a value in a Json tree. 
*/ /*********************************************************************************/ -my_bool jsonlocate_bson_init(UDF_INIT* initid, UDF_ARGS* args, char* message) { +my_bool bsonlocate_init(UDF_INIT* initid, UDF_ARGS* args, char* message) { unsigned long reslen, memlen, more = 1000; if (args->arg_count < 2) { @@ -1311,11 +1522,11 @@ my_bool jsonlocate_bson_init(UDF_INIT* initid, UDF_ARGS* args, char* message) { more = 0; return JsonInit(initid, args, message, true, reslen, memlen, more); -} // end of jsonlocate_bson_init +} // end of bsonlocate_init -char* jsonlocate_bson(UDF_INIT* initid, UDF_ARGS* args, char* result, +char* bsonlocate(UDF_INIT* initid, UDF_ARGS* args, char* result, unsigned long* res_length, char* is_null, char* error) { - char* path = NULL; + char *path = NULL; int k; PBVAL bvp, bvp2; PBJNX bnxp; @@ -1358,7 +1569,10 @@ char* jsonlocate_bson(UDF_INIT* initid, UDF_ARGS* args, char* result, bvp = (PBVAL)g->Xchk; // The item to locate - bvp2 = MakeBinValue(g, args, 1); + if (!(bvp2 = MakeBinValue(g, args, 1))) { + PUSH_WARNING("Invalid second argument"); + goto err; + } // endif bvp k = (args->arg_count > 2) ? (int)*(long long*)args->args[2] : 1; @@ -1389,16 +1603,16 @@ err: *res_length = strlen(path); return path; -} // end of jsonlocate_bson +} // end of bsonlocate -void jsonlocate_bson_deinit(UDF_INIT* initid) { +void bsonlocate_deinit(UDF_INIT* initid) { JsonFreeMem((PGLOBAL)initid->ptr); -} // end of jsonlocate_bson_deinit +} // end of bsonlocate_deinit /*********************************************************************************/ /* Locate all occurences of a value in a Json tree. */ /*********************************************************************************/ -my_bool json_locate_all_bson_init(UDF_INIT* initid, UDF_ARGS* args, char* message) { +my_bool bson_locate_all_init(UDF_INIT* initid, UDF_ARGS* args, char* message) { unsigned long reslen, memlen, more = 1000; if (args->arg_count < 2) { @@ -1419,9 +1633,9 @@ my_bool json_locate_all_bson_init(UDF_INIT* initid, UDF_ARGS* args, char* messag more = 0; return JsonInit(initid, args, message, true, reslen, memlen, more); -} // end of json_locate_all_bson_init +} // end of bson_locate_all_init -char* json_locate_all_bson(UDF_INIT* initid, UDF_ARGS* args, char* result, +char* bson_locate_all(UDF_INIT* initid, UDF_ARGS* args, char* result, unsigned long* res_length, char* is_null, char* error) { char* path = NULL; int mx = 10; @@ -1467,7 +1681,10 @@ char* json_locate_all_bson(UDF_INIT* initid, UDF_ARGS* args, char* result, bvp = (PBVAL)g->Xchk; // The item to locate - bvp2 = MakeBinValue(g, args, 1); + if (!(bvp2 = MakeBinValue(g, args, 1))) { + PUSH_WARNING("Invalid second argument"); + goto err; + } // endif bvp if (args->arg_count > 2) mx = (int)*(long long*)args->args[2]; @@ -1499,9 +1716,9 @@ err: *res_length = strlen(path); return path; -} // end of json_locate_all_bson +} // end of bson_locate_all -void json_locate_all_bson_deinit(UDF_INIT* initid) { +void bson_locate_all_deinit(UDF_INIT* initid) { JsonFreeMem((PGLOBAL)initid->ptr); -} // end of json_locate_all_bson_deinit +} // end of bson_locate_all_deinit diff --git a/storage/connect/bsonudf.h b/storage/connect/bsonudf.h index 6687d4c1640..b310aa1827b 100644 --- a/storage/connect/bsonudf.h +++ b/storage/connect/bsonudf.h @@ -83,16 +83,28 @@ protected: }; // end of class BJNX extern "C" { - DllExport my_bool json_test_bson_init(UDF_INIT*, UDF_ARGS*, char*); - DllExport char* json_test_bson(UDF_EXEC_ARGS); - DllExport void 
json_test_bson_deinit(UDF_INIT*); + DllExport my_bool bson_test_init(UDF_INIT*, UDF_ARGS*, char*); + DllExport char* bson_test(UDF_EXEC_ARGS); + DllExport void bson_test_deinit(UDF_INIT*); - DllExport my_bool jsonlocate_bson_init(UDF_INIT*, UDF_ARGS*, char*); - DllExport char* jsonlocate_bson(UDF_EXEC_ARGS); - DllExport void jsonlocate_bson_deinit(UDF_INIT*); + DllExport my_bool bsonvalue_init(UDF_INIT*, UDF_ARGS*, char*); + DllExport char* bsonvalue(UDF_EXEC_ARGS); + DllExport void bsonvalue_deinit(UDF_INIT*); - DllExport my_bool json_locate_all_bson_init(UDF_INIT*, UDF_ARGS*, char*); - DllExport char* json_locate_all_bson(UDF_EXEC_ARGS); - DllExport void json_locate_all_bson_deinit(UDF_INIT*); + DllExport my_bool bson_make_array_init(UDF_INIT*, UDF_ARGS*, char*); + DllExport char* bson_make_array(UDF_EXEC_ARGS); + DllExport void bson_make_array_deinit(UDF_INIT*); + + DllExport my_bool bson_array_add_values_init(UDF_INIT*, UDF_ARGS*, char*); + DllExport char* bson_array_add_values(UDF_EXEC_ARGS); + DllExport void bson_array_add_values_deinit(UDF_INIT*); + + DllExport my_bool bsonlocate_init(UDF_INIT*, UDF_ARGS*, char*); + DllExport char* bsonlocate(UDF_EXEC_ARGS); + DllExport void bsonlocate_deinit(UDF_INIT*); + + DllExport my_bool bson_locate_all_init(UDF_INIT*, UDF_ARGS*, char*); + DllExport char* bson_locate_all(UDF_EXEC_ARGS); + DllExport void bson_locate_all_deinit(UDF_INIT*); } // extern "C" diff --git a/storage/connect/jsonudf.cpp b/storage/connect/jsonudf.cpp index cb29b9f5d6c..d993947589f 100644 --- a/storage/connect/jsonudf.cpp +++ b/storage/connect/jsonudf.cpp @@ -1950,6 +1950,8 @@ static PJVAL MakeTypedValue(PGLOBAL g, UDF_ARGS *args, uint i, return jvp; } // end of MakeTypedValue +/* ------------------------------ The JSON UDF's ------------------------------- */ + /*********************************************************************************/ /* Make a Json value containing the parameter. 
*/ /*********************************************************************************/ From 4e8af8a6645136be34649abacb5b31ef64264584 Mon Sep 17 00:00:00 2001 From: Olivier Bertrand Date: Tue, 1 Dec 2020 19:30:56 +0100 Subject: [PATCH 032/150] - Fix memory leak for the JSON table type (and continue BSON implementatio) modified: storage/connect/bson.cpp modified: storage/connect/bson.h modified: storage/connect/bsonudf.cpp modified: storage/connect/connect.cc modified: storage/connect/global.h modified: storage/connect/ha_connect.cc modified: storage/connect/jsonudf.cpp modified: storage/connect/mycat.cc modified: storage/connect/plgdbsem.h modified: storage/connect/plugutil.cpp modified: storage/connect/tabjson.cpp modified: storage/connect/tabjson.h modified: storage/connect/user_connect.cc - Desesperatly trying to fix xml.test failure modified: storage/connect/mysql-test/connect/r/xml.result --- storage/connect/bson.cpp | 236 +- storage/connect/bson.h | 83 +- storage/connect/bsonudf.cpp | 47 +- storage/connect/connect.cc | 3 +- storage/connect/global.h | 2 +- storage/connect/ha_connect.cc | 23 +- storage/connect/jsonudf.cpp | 6 +- storage/connect/mycat.cc | 27 +- .../connect/mysql-test/connect/r/xml.result | 4 +- storage/connect/plgdbsem.h | 3 +- storage/connect/plugutil.cpp | 9 +- storage/connect/tabbson.cpp | 2599 +++++++++++++++++ storage/connect/tabbson.h | 342 +++ storage/connect/tabjson.cpp | 24 +- storage/connect/tabjson.h | 2 + storage/connect/user_connect.cc | 3 +- 16 files changed, 3223 insertions(+), 190 deletions(-) create mode 100644 storage/connect/tabbson.cpp create mode 100644 storage/connect/tabbson.h diff --git a/storage/connect/bson.cpp b/storage/connect/bson.cpp index 4725b67c06b..e395bd8988d 100644 --- a/storage/connect/bson.cpp +++ b/storage/connect/bson.cpp @@ -87,7 +87,7 @@ char* NextChr(PSZ s, char sep) { /***********************************************************************/ /* BDOC constructor. 
*/ /***********************************************************************/ -BDOC::BDOC(void *base) : BJSON(base, NULL) +BDOC::BDOC(PGLOBAL G) : BJSON(G, NULL) { jp = NULL; s = NULL; @@ -118,25 +118,25 @@ PBVAL BDOC::ParseJson(PGLOBAL g, char* js, size_t lng, int* ptyp, bool* comma) { pty[0] = false; try { - Bvp = SubAllocVal(g); + Bvp = NewVal(); Bvp->Type = TYPE_UNKNOWN; for (i = 0; i < len; i++) switch (s[i]) { case '[': if (Bvp->Type != TYPE_UNKNOWN) - Bvp->To_Val = ParseAsArray(g, i, pretty, ptyp); + Bvp->To_Val = ParseAsArray(i, pretty, ptyp); else - Bvp->To_Val = ParseArray(g, ++i); + Bvp->To_Val = ParseArray(++i); Bvp->Type = TYPE_JAR; break; case '{': if (Bvp->Type != TYPE_UNKNOWN) { - Bvp->To_Val = ParseAsArray(g, i, pretty, ptyp); + Bvp->To_Val = ParseAsArray(i, pretty, ptyp); Bvp->Type = TYPE_JAR; } else { - Bvp->To_Val = ParseObject(g, ++i); + Bvp->To_Val = ParseObject(++i); Bvp->Type = TYPE_JOB; } // endif Type @@ -168,9 +168,9 @@ PBVAL BDOC::ParseJson(PGLOBAL g, char* js, size_t lng, int* ptyp, bool* comma) { default: if (Bvp->Type != TYPE_UNKNOWN) { - Bvp->To_Val = ParseAsArray(g, i, pretty, ptyp); + Bvp->To_Val = ParseAsArray(i, pretty, ptyp); Bvp->Type = TYPE_JAR; - } else if ((Bvp->To_Val = MOF(ParseValue(g, i)))) + } else if ((Bvp->To_Val = MOF(ParseValue(i)))) Bvp->Type = TYPE_JVAL; else throw 4; @@ -193,7 +193,8 @@ PBVAL BDOC::ParseJson(PGLOBAL g, char* js, size_t lng, int* ptyp, bool* comma) { } catch (int n) { if (trace(1)) - htrc("Exception %d: %s\n", n, g->Message); + htrc("Exception %d: %s\n", n, G->Message); + GetMsg(g); Bvp = NULL; } catch (const char* msg) { strcpy(g->Message, msg); @@ -206,16 +207,16 @@ PBVAL BDOC::ParseJson(PGLOBAL g, char* js, size_t lng, int* ptyp, bool* comma) { /***********************************************************************/ /* Parse several items as being in an array. */ /***********************************************************************/ -OFFSET BDOC::ParseAsArray(PGLOBAL g, int& i, int pretty, int* ptyp) { +OFFSET BDOC::ParseAsArray(int& i, int pretty, int* ptyp) { if (pty[0] && (!pretty || pretty > 2)) { OFFSET jsp; - if ((jsp = ParseArray(g, (i = 0))) && ptyp && pretty == 3) + if ((jsp = ParseArray((i = 0))) && ptyp && pretty == 3) *ptyp = (pty[0]) ? 0 : 3; return jsp; } else - strcpy(g->Message, "More than one item in file"); + strcpy(G->Message, "More than one item in file"); return 0; } // end of ParseAsArray @@ -223,7 +224,7 @@ OFFSET BDOC::ParseAsArray(PGLOBAL g, int& i, int pretty, int* ptyp) { /***********************************************************************/ /* Parse a JSON Array. 
*/ /***********************************************************************/ -OFFSET BDOC::ParseArray(PGLOBAL g, int& i) { +OFFSET BDOC::ParseArray(int& i) { int level = 0; bool b = (!i); PBVAL vlp, firstvlp, lastvlp; @@ -234,7 +235,7 @@ OFFSET BDOC::ParseArray(PGLOBAL g, int& i) { switch (s[i]) { case ',': if (level < 2) { - sprintf(g->Message, "Unexpected ',' near %.*s", ARGS); + sprintf(G->Message, "Unexpected ',' near %.*s", ARGS); throw 1; } else level = 1; @@ -242,7 +243,7 @@ OFFSET BDOC::ParseArray(PGLOBAL g, int& i) { break; case ']': if (level == 1) { - sprintf(g->Message, "Unexpected ',]' near %.*s", ARGS); + sprintf(G->Message, "Unexpected ',]' near %.*s", ARGS); throw 1; } // endif level @@ -256,14 +257,14 @@ OFFSET BDOC::ParseArray(PGLOBAL g, int& i) { break; default: if (level == 2) { - sprintf(g->Message, "Unexpected value near %.*s", ARGS); + sprintf(G->Message, "Unexpected value near %.*s", ARGS); throw 1; } else if (lastvlp) { - vlp = ParseValue(g, i); + vlp = ParseValue(i); lastvlp->Next = MOF(vlp); lastvlp = vlp; } else - firstvlp = lastvlp = ParseValue(g, i); + firstvlp = lastvlp = ParseValue(i); level = (b) ? 1 : 2; break; @@ -280,7 +281,7 @@ OFFSET BDOC::ParseArray(PGLOBAL g, int& i) { /***********************************************************************/ /* Parse a JSON Object. */ /***********************************************************************/ -OFFSET BDOC::ParseObject(PGLOBAL g, int& i) { +OFFSET BDOC::ParseObject(int& i) { OFFSET key; int level = 0; PBPR bpp, firstbpp, lastbpp; @@ -291,8 +292,8 @@ OFFSET BDOC::ParseObject(PGLOBAL g, int& i) { switch (s[i]) { case '"': if (level < 2) { - key = ParseString(g, ++i); - bpp = SubAllocPair(g, key); + key = ParseString(++i); + bpp = SubAllocPair(key); if (lastbpp) { lastbpp->Next = MOF(bpp); @@ -302,24 +303,24 @@ OFFSET BDOC::ParseObject(PGLOBAL g, int& i) { level = 2; } else { - sprintf(g->Message, "misplaced string near %.*s", ARGS); + sprintf(G->Message, "misplaced string near %.*s", ARGS); throw 2; } // endif level break; case ':': if (level == 2) { - lastbpp->Vlp = MOF(ParseValue(g, ++i)); + lastbpp->Vlp = MOF(ParseValue(++i)); level = 3; } else { - sprintf(g->Message, "Unexpected ':' near %.*s", ARGS); + sprintf(G->Message, "Unexpected ':' near %.*s", ARGS); throw 2; } // endif level break; case ',': if (level < 3) { - sprintf(g->Message, "Unexpected ',' near %.*s", ARGS); + sprintf(G->Message, "Unexpected ',' near %.*s", ARGS); throw 2; } else level = 1; @@ -327,7 +328,7 @@ OFFSET BDOC::ParseObject(PGLOBAL g, int& i) { break; case '}': if (!(level == 0 || level == 3)) { - sprintf(g->Message, "Unexpected '}' near %.*s", ARGS); + sprintf(G->Message, "Unexpected '}' near %.*s", ARGS); throw 2; } // endif level @@ -339,20 +340,21 @@ OFFSET BDOC::ParseObject(PGLOBAL g, int& i) { case '\t': break; default: - sprintf(g->Message, "Unexpected character '%c' near %.*s", + sprintf(G->Message, "Unexpected character '%c' near %.*s", s[i], ARGS); throw 2; }; // endswitch s[i] - strcpy(g->Message, "Unexpected EOF in Object"); + strcpy(G->Message, "Unexpected EOF in Object"); throw 2; } // end of ParseObject /***********************************************************************/ /* Parse a JSON Value. 
*/ /***********************************************************************/ -PBVAL BDOC::ParseValue(PGLOBAL g, int& i) { - PBVAL bvp = SubAllocVal(g); +PBVAL BDOC::ParseValue(int& i) +{ + PBVAL bvp = NewVal(); for (; i < len; i++) switch (s[i]) { @@ -369,16 +371,16 @@ PBVAL BDOC::ParseValue(PGLOBAL g, int& i) { suite: switch (s[i]) { case '[': - bvp->To_Val = ParseArray(g, ++i); + bvp->To_Val = ParseArray(++i); bvp->Type = TYPE_JAR; break; case '{': - bvp->To_Val = ParseObject(g, ++i); + bvp->To_Val = ParseObject(++i); bvp->Type = TYPE_JOB; break; case '"': // jvp->Val = AllocVal(g, TYPE_STRG); - bvp->To_Val = ParseString(g, ++i); + bvp->To_Val = ParseString(++i); bvp->Type = TYPE_STRG; break; case 't': @@ -412,7 +414,7 @@ suite: case '-': default: if (s[i] == '-' || isdigit(s[i])) - ParseNumeric(g, i, bvp); + ParseNumeric(i, bvp); else goto err; @@ -421,29 +423,29 @@ suite: return bvp; err: - sprintf(g->Message, "Unexpected character '%c' near %.*s", s[i], ARGS); + sprintf(G->Message, "Unexpected character '%c' near %.*s", s[i], ARGS); throw 3; } // end of ParseValue /***********************************************************************/ /* Unescape and parse a JSON string. */ /***********************************************************************/ -OFFSET BDOC::ParseString(PGLOBAL g, int& i) { +OFFSET BDOC::ParseString(int& i) { uchar* p; int n = 0; // Be sure of memory availability - if (((size_t)len + 1 - i) > ((PPOOLHEADER)g->Sarea)->FreeBlk) + if (((size_t)len + 1 - i) > ((PPOOLHEADER)G->Sarea)->FreeBlk) throw("ParseString: Out of memory"); // The size to allocate is not known yet - p = (uchar*)PlugSubAlloc(g, NULL, 0); + p = (uchar*)PlugSubAlloc(G, NULL, 0); for (; i < len; i++) switch (s[i]) { case '"': p[n++] = 0; - PlugSubAlloc(g, NULL, n); + PlugSubAlloc(G, NULL, n); return MOF(p); case '\\': if (++i < len) { @@ -514,7 +516,7 @@ throw("Unexpected EOF in String"); /***********************************************************************/ /* Parse a JSON numeric value. 
*/ /***********************************************************************/ -void BDOC::ParseNumeric(PGLOBAL g, int& i, PBVAL vlp) { +void BDOC::ParseNumeric(int& i, PBVAL vlp) { char buf[50]; int n = 0; short nd = 0; @@ -570,7 +572,7 @@ fin: double dv = strtod(buf, NULL); if (nd > 6) { - double* dvp = (double*)PlugSubAlloc(g, NULL, sizeof(double)); + double* dvp = (double*)PlugSubAlloc(G, NULL, sizeof(double)); *dvp = dv; vlp->To_Val = MOF(dvp); @@ -585,7 +587,7 @@ fin: longlong iv = strtoll(buf, NULL, 10); if (iv > INT_MAX32 || iv < INT_MIN32) { - longlong *llp = (longlong*)PlugSubAlloc(g, NULL, sizeof(longlong)); + longlong *llp = (longlong*)PlugSubAlloc(G, NULL, sizeof(longlong)); *llp = iv; vlp->To_Val = MOF(llp); @@ -614,7 +616,7 @@ PSZ BDOC::Serialize(PGLOBAL g, PBVAL bvp, char* fn, int pretty) { bool b = false, err = true; FILE* fs = NULL; - g->Message[0] = 0; + G->Message[0] = 0; try { if (!bvp) { @@ -664,15 +666,15 @@ PSZ BDOC::Serialize(PGLOBAL g, PBVAL bvp, char* fn, int pretty) { str = ((JOUTSTR*)jp)->Strp; jp->WriteChr('\0'); PlugSubAlloc(g, NULL, ((JOUTSTR*)jp)->N); - } else { - if (!g->Message[0]) + } else if (G->Message[0]) strcpy(g->Message, "Error in Serialize"); - - } // endif's + else + GetMsg(g); } catch (int n) { if (trace(1)) - htrc("Exception %d: %s\n", n, g->Message); + htrc("Exception %d: %s\n", n, G->Message); + GetMsg(g); str = NULL; } catch (const char* msg) { strcpy(g->Message, msg); @@ -796,10 +798,10 @@ bool BDOC::SerializeValue(PBVAL jvp) { /***********************************************************************/ /* Program for sub-allocating Bjson structures. */ /***********************************************************************/ -void* BJSON::BsonSubAlloc(PGLOBAL g, size_t size) +void* BJSON::BsonSubAlloc(size_t size) { PPOOLHEADER pph; /* Points on area header. */ - void* memp = g->Sarea; + void* memp = G->Sarea; size = ((size + 3) / 4) * 4; /* Round up size to multiple of 4 */ pph = (PPOOLHEADER)memp; @@ -808,10 +810,10 @@ void* BJSON::BsonSubAlloc(PGLOBAL g, size_t size) memp, size, pph->To_Free, pph->FreeBlk); if (size > pph->FreeBlk) { /* Not enough memory left in pool */ - sprintf(g->Message, + sprintf(G->Message, "Not enough memory for request of %zd (used=%zd free=%zd)", size, pph->To_Free, pph->FreeBlk); - xtrc(1, "BsonSubAlloc: %s\n", g->Message); + xtrc(1, "BsonSubAlloc: %s\n", G->Message); throw(1234); } /* endif size OS32 code */ @@ -824,14 +826,29 @@ void* BJSON::BsonSubAlloc(PGLOBAL g, size_t size) return memp; } /* end of BsonSubAlloc */ +/*********************************************************************************/ +/* Program for SubSet re-initialization of the memory pool. */ +/*********************************************************************************/ +void BJSON::SubSet(bool b) +{ + PPOOLHEADER pph = (PPOOLHEADER)G->Sarea; + + pph->To_Free = (G->Saved_Size) ? G->Saved_Size : sizeof(POOLHEADER); + pph->FreeBlk = G->Sarea_Size - pph->To_Free; + + if (b) + G->Saved_Size = 0; + +} /* end of JsonSubSet */ + /* ------------------------ Bobject functions ------------------------ */ /***********************************************************************/ /* Sub-allocate and initialize a BPAIR. 
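Since the BDOC constructor now receives the PGLOBAL directly, a caller no longer threads g through every parsing step; the G member supplies the sub-allocation area and collects the error text that GetMsg later copies back. A minimal round trip under that scheme; the function name RoundTrip and the variable js are illustrative only.

// Sketch only: parse a JSON string into a BVAL tree and serialize it back.
PSZ RoundTrip(PGLOBAL g, char* js)
{
  BDOC  doc(g);                                    // work area and messages come from g
  PBVAL bvp = doc.ParseJson(g, js, strlen(js));    // offset-based tree built in g->Sarea

  return (bvp) ? doc.Serialize(g, bvp, NULL, 0)    // NULL file name: serialize to a string
               : NULL;                             // the failure reason is in g->Message
} // end of RoundTrip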
*/ /***********************************************************************/ -PBPR BJSON::SubAllocPair(PGLOBAL g, OFFSET key, OFFSET val) +PBPR BJSON::SubAllocPair(OFFSET key, OFFSET val) { - PBPR bpp = (PBPR)BsonSubAlloc(g, sizeof(BPAIR)); + PBPR bpp = (PBPR)BsonSubAlloc(sizeof(BPAIR)); bpp->Key = key; bpp->Vlp = val; @@ -857,9 +874,9 @@ int BJSON::GetObjectSize(PBPR bop, bool b) /***********************************************************************/ /* Add a new pair to an Object and return it. */ /***********************************************************************/ -PBPR BJSON::AddPair(PGLOBAL g, PBPR bop, PSZ key, OFFSET val) +PBPR BJSON::AddPair(PBPR bop, PSZ key, OFFSET val) { - PBPR brp, nrp = SubAllocPair(g, key, val); + PBPR brp, nrp = SubAllocPair(key, val); if (bop) { for (brp = bop; brp->Next; brp = MPP(brp->Next)); @@ -874,17 +891,17 @@ PBPR BJSON::AddPair(PGLOBAL g, PBPR bop, PSZ key, OFFSET val) /***********************************************************************/ /* Return all object keys as an array. */ /***********************************************************************/ -PBVAL BJSON::GetKeyList(PGLOBAL g, PBPR bop) +PBVAL BJSON::GetKeyList(PBPR bop) { PBVAL bvp, lvp, fvp = NULL; for (PBPR brp = bop; brp; brp = MPP(brp->Next)) if (fvp) { - bvp = SubAllocVal(g, brp->Key, TYPE_STRG); + bvp = SubAllocVal(brp->Key, TYPE_STRG); lvp->Next = MOF(bvp); lvp = bvp; } else - lvp = fvp = SubAllocVal(g, brp->Key, TYPE_STRG); + lvp = fvp = SubAllocVal(brp->Key, TYPE_STRG); return fvp; } // end of GetKeyList @@ -892,17 +909,17 @@ PBVAL BJSON::GetKeyList(PGLOBAL g, PBPR bop) /***********************************************************************/ /* Return all object values as an array. */ /***********************************************************************/ -PBVAL BJSON::GetObjectValList(PGLOBAL g, PBPR bop) +PBVAL BJSON::GetObjectValList(PBPR bop) { PBVAL bvp, lvp, fvp = NULL; for (PBPR brp = bop; brp; brp = MPP(brp->Next)) if (fvp) { - bvp = DupVal(g, MVP(brp->Vlp)); + bvp = DupVal(MVP(brp->Vlp)); lvp->Next = MOF(bvp); lvp = bvp; } else - lvp = fvp = DupVal(g, MVP(brp->Vlp)); + lvp = fvp = DupVal(MVP(brp->Vlp)); return fvp; } // end of GetObjectValList @@ -981,7 +998,7 @@ PSZ BJSON::GetObjectText(PGLOBAL g, PBPR bop, PSTRG text) { /***********************************************************************/ /* Set or add a value corresponding to the given key. */ /***********************************************************************/ -PBPR BJSON::SetKeyValue(PGLOBAL g, PBPR bop, OFFSET bvp, PSZ key) +PBPR BJSON::SetKeyValue(PBPR bop, OFFSET bvp, PSZ key) { PBPR brp = bop, prp = NULL; @@ -994,10 +1011,10 @@ PBPR BJSON::SetKeyValue(PGLOBAL g, PBPR bop, OFFSET bvp, PSZ key) prp = brp; if (!brp) - prp->Vlp = MOF(SubAllocPair(g, key, bvp)); + prp->Vlp = MOF(SubAllocPair(key, bvp)); } else - bop = SubAllocPair(g, key, bvp); + bop = SubAllocPair(key, bvp); // Return the first pair of this object return bop; @@ -1006,11 +1023,11 @@ PBPR BJSON::SetKeyValue(PGLOBAL g, PBPR bop, OFFSET bvp, PSZ key) /***********************************************************************/ /* Merge two objects. 
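None of the helpers above keep raw pointers in the tree: Key, Vlp, Next and To_Val hold offsets into the sub-allocated area, and the MOF/MVP/MPP/MZP macros convert between the two forms relative to Base, with an offset of 0 standing for a null pointer (which is why the loops test fields such as Next directly). The stand-ins below, OffOf and PtrOf, are hypothetical equivalents shown only to make that convention explicit; the real macros are defined in bson.h.

// Hypothetical stand-ins for the offset/pointer conversion macros used above.
inline OFFSET OffOf(void* Base, void* p)
{ return (p) ? (OFFSET)((char*)p - (char*)Base) : 0; }

inline void* PtrOf(void* Base, OFFSET o)
{ return (o) ? (void*)((char*)Base + o) : NULL; }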
*/ /***********************************************************************/ -PBPR BJSON::MergeObject(PGLOBAL g, PBPR bop1, PBPR bop2) +PBPR BJSON::MergeObject(PBPR bop1, PBPR bop2) { if (bop1) for (PBPR brp = bop2; brp; brp = MPP(brp->Next)) - SetKeyValue(g, bop1, brp->Vlp, MZP(brp->Key)); + SetKeyValue(bop1, brp->Vlp, MZP(brp->Key)); else bop1 = bop2; @@ -1087,10 +1104,10 @@ PBVAL BJSON::GetArrayValue(PBVAL bap, int n) /***********************************************************************/ /* Add a Value to the Array Value list. */ /***********************************************************************/ -PBVAL BJSON::AddArrayValue(PGLOBAL g, PBVAL bap, PBVAL nvp, int* x) +PBVAL BJSON::AddArrayValue(PBVAL bap, PBVAL nvp, int* x) { if (!nvp) - nvp = SubAllocVal(g); + nvp = NewVal(); if (bap) { int i = 0, n = (x) ? *x : INT_MAX32; @@ -1112,11 +1129,11 @@ PBVAL BJSON::AddArrayValue(PGLOBAL g, PBVAL bap, PBVAL nvp, int* x) /***********************************************************************/ /* Merge two arrays. */ /***********************************************************************/ -PBVAL BJSON::MergeArray(PGLOBAL g, PBVAL bap1, PBVAL bap2) +PBVAL BJSON::MergeArray(PBVAL bap1, PBVAL bap2) { if (bap1) { for (PBVAL bvp = bap2; bvp; bvp = MVP(bvp->Next)) - AddArrayValue(g, bap1, bvp); + AddArrayValue(bap1, bvp); return bap1; } else @@ -1127,7 +1144,7 @@ PBVAL BJSON::MergeArray(PGLOBAL g, PBVAL bap1, PBVAL bap2) /***********************************************************************/ /* Set the nth Value of the Array Value list or add it. */ /***********************************************************************/ -PBVAL BJSON::SetArrayValue(PGLOBAL g, PBVAL bap, PBVAL nvp, int n) +PBVAL BJSON::SetArrayValue(PBVAL bap, PBVAL nvp, int n) { PBVAL bvp = bap, pvp = NULL; @@ -1144,7 +1161,7 @@ PBVAL BJSON::SetArrayValue(PGLOBAL g, PBVAL bap, PBVAL nvp, int n) } // endif bap if (!bvp) { - bvp = DupVal(g, nvp); + bvp = DupVal(nvp); if (pvp) pvp->Next = MOF(bvp); @@ -1233,47 +1250,56 @@ bool BJSON::IsArrayNull(PBVAL bap) /***********************************************************************/ /* Sub-allocate and clear a BVAL. */ /***********************************************************************/ -PBVAL BJSON::SubAllocVal(PGLOBAL g) +PBVAL BJSON::NewVal(int type) { - PBVAL bvp = (PBVAL)BsonSubAlloc(g, sizeof(BVAL)); + PBVAL bvp = (PBVAL)BsonSubAlloc(sizeof(BVAL)); bvp->To_Val = 0; bvp->Nd = 0; - bvp->Type = TYPE_NULL; - bvp->Next = 0; - return bvp; -} // end of SubAllocVal - -/***********************************************************************/ -/* Sub-allocate and initialize a BVAL as string. */ -/***********************************************************************/ -PBVAL BJSON::SubAllocVal(PGLOBAL g, OFFSET toval, int type, short nd) -{ - PBVAL bvp = (PBVAL)BsonSubAlloc(g, sizeof(BVAL)); - - bvp->To_Val = toval; - bvp->Nd = nd; bvp->Type = type; bvp->Next = 0; return bvp; } // end of SubAllocVal +/***********************************************************************/ +/* Sub-allocate and initialize a BVAL as type. */ +/***********************************************************************/ +PBVAL BJSON::SubAllocVal(OFFSET toval, int type, short nd) +{ + PBVAL bvp = NewVal(type); + + bvp->To_Val = toval; + bvp->Nd = nd; + return bvp; +} // end of SubAllocVal + +/***********************************************************************/ +/* Sub-allocate and initialize a BVAL as string. 
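With the reworked allocation scheme, a small document can be built by chaining these helpers, mirroring what bson_make_array does on the UDF side. A sketch follows; the function name MakeSample is illustrative only, and a string handed to SubAllocStr must already live in the work area (hence the PlugDup), since only its offset is stored.

// Sketch only: build the array ["hi", 7] and return its serialized form.
PSZ MakeSample(PGLOBAL g)
{
  BDOC  doc(g);
  PBVAL bap = NULL;                                // the value list, empty so far

  bap = doc.AddArrayValue(bap, doc.SubAllocStr((PSZ)PlugDup(g, "hi")));

  PBVAL nvp = doc.NewVal();                        // starts as TYPE_NULL
  doc.SetBigint(nvp, 7);                           // small values end up as TYPE_INTG
  bap = doc.AddArrayValue(bap, nvp);

  PBVAL arp = doc.SubAllocVal(bap, TYPE_JAR);      // wrap the list as an array value
  return doc.Serialize(g, arp, NULL, 0);           // should yield ["hi",7]
} // end of MakeSample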
*/ +/***********************************************************************/ +PBVAL BJSON::SubAllocStr(OFFSET toval, short nd) +{ + PBVAL bvp = NewVal(TYPE_STRG); + + bvp->To_Val = toval; + bvp->Nd = nd; + return bvp; +} // end of SubAllocVal + /***********************************************************************/ /* Allocate a BVALUE with a given string or numeric value. */ /***********************************************************************/ -PBVAL BJSON::SubAllocVal(PGLOBAL g, PVAL valp) +PBVAL BJSON::SubAllocVal(PVAL valp) { - PBVAL vlp = SubAllocVal(g); - SetValue(g, vlp, valp); - vlp->Next = NULL; + PBVAL vlp = NewVal(); + SetValue(vlp, valp); return vlp; } // end of SubAllocVal /***********************************************************************/ /* Sub-allocate and initialize a BVAL from another BVAL. */ /***********************************************************************/ -PBVAL BJSON::DupVal(PGLOBAL g, PBVAL bvlp) { - PBVAL bvp = (PBVAL)BsonSubAlloc(g, sizeof(BVAL)); +PBVAL BJSON::DupVal(PBVAL bvlp) { + PBVAL bvp = NewVal(); *bvp = *bvlp; bvp->Next = 0; @@ -1539,7 +1565,7 @@ double BJSON::GetDouble(PBVAL vp) /***********************************************************************/ /* Return the Value's String value. */ /***********************************************************************/ -PSZ BJSON::GetString(PGLOBAL g, PBVAL vp, char* buff) +PSZ BJSON::GetString(PBVAL vp, char* buff) { char buf[32]; char* p = (buff) ? buff : buf; @@ -1572,7 +1598,7 @@ PSZ BJSON::GetString(PGLOBAL g, PBVAL vp, char* buff) p = NULL; } // endswitch Type - return (p == buf) ? (PSZ)PlugDup(g, buf) : p; + return (p == buf) ? (PSZ)PlugDup(G, buf) : p; } // end of GetString /***********************************************************************/ @@ -1585,7 +1611,7 @@ PSZ BJSON::GetValueText(PGLOBAL g, PBVAL vlp, PSTRG text) { return GetArrayText(g, MVP(vlp->To_Val), text); char buff[32]; - PSZ s = (vlp->Type == TYPE_NULL) ? NULL : GetString(g, vlp, buff); + PSZ s = (vlp->Type == TYPE_NULL) ? 
NULL : GetString(vlp, buff); if (s) text->Append(s); @@ -1614,7 +1640,7 @@ void BJSON::SetValueVal(PBVAL vlp, PBVAL vp) vlp->Type = vp->Type; } // end of SetValue; -void BJSON::SetValue(PGLOBAL g, PBVAL vlp, PVAL valp) +void BJSON::SetValue(PBVAL vlp, PVAL valp) { if (!valp || valp->IsNull()) { vlp->Type = TYPE_NULL; @@ -1625,7 +1651,7 @@ void BJSON::SetValue(PGLOBAL g, PBVAL vlp, PVAL valp) else { char buf[32]; - vlp->To_Val = MOF(PlugDup(g, valp->GetCharString(buf))); + vlp->To_Val = MOF(PlugDup(G, valp->GetCharString(buf))); } // endif Formatted vlp->Type = TYPE_DTM; @@ -1642,7 +1668,7 @@ void BJSON::SetValue(PGLOBAL g, PBVAL vlp, PVAL valp) vlp->F = (float)valp->GetFloatValue(); vlp->Type = TYPE_FLOAT; } else { - double *dp = (double*)PlugSubAlloc(g, NULL, sizeof(double)); + double *dp = (double*)PlugSubAlloc(G, NULL, sizeof(double)); *dp = valp->GetFloatValue(); vlp->To_Val = MOF(dp); @@ -1663,7 +1689,7 @@ void BJSON::SetValue(PGLOBAL g, PBVAL vlp, PVAL valp) vlp->N = valp->GetIntValue(); vlp->Type = TYPE_INTG; } else { - longlong* llp = (longlong*)PlugSubAlloc(g, NULL, sizeof(longlong)); + longlong* llp = (longlong*)PlugSubAlloc(G, NULL, sizeof(longlong)); *llp = valp->GetBigintValue(); vlp->To_Val = MOF(llp); @@ -1672,7 +1698,7 @@ void BJSON::SetValue(PGLOBAL g, PBVAL vlp, PVAL valp) break; default: - sprintf(g->Message, "Unsupported typ %d\n", valp->GetType()); + sprintf(G->Message, "Unsupported typ %d\n", valp->GetType()); throw(777); } // endswitch Type @@ -1699,13 +1725,13 @@ void BJSON::SetBool(PBVAL vlp, bool b) /***********************************************************************/ /* Set the Value's value as the given big integer. */ /***********************************************************************/ -void BJSON::SetBigint(PGLOBAL g, PBVAL vlp, longlong ll) +void BJSON::SetBigint(PBVAL vlp, longlong ll) { if (ll >= INT_MIN32 && ll <= INT_MAX32) { vlp->N = (int)ll; vlp->Type = TYPE_INTG; } else { - longlong* llp = (longlong*)PlugSubAlloc(g, NULL, sizeof(longlong)); + longlong* llp = (longlong*)PlugSubAlloc(G, NULL, sizeof(longlong)); *llp = ll; vlp->To_Val = MOF(llp); diff --git a/storage/connect/bson.h b/storage/connect/bson.h index 284bee1da48..077e71b1413 100644 --- a/storage/connect/bson.h +++ b/storage/connect/bson.h @@ -69,54 +69,64 @@ DllExport bool IsNum(PSZ s); class BJSON : public BLOCK { public: // Constructor - BJSON(void* base, PBVAL vp = NULL) { Base = base; Bvp = vp; } + BJSON(PGLOBAL g, PBVAL vp = NULL) { G = g, Base = G->Sarea; Bvp = vp; } void* GetBase(void) { return Base; } + void SubSet(bool b = false); + void MemSave(void) {G->Saved_Size = ((PPOOLHEADER)G->Sarea)->To_Free;} + void GetMsg(PGLOBAL g) { if (g != G) strcpy(g->Message, G->Message); } // SubAlloc functions - void* BsonSubAlloc(PGLOBAL g, size_t size); - PBPR SubAllocPair(PGLOBAL g, OFFSET key, OFFSET val = 0); - PBPR SubAllocPair(PGLOBAL g, PSZ key, OFFSET val = 0) - {return SubAllocPair(g, MOF(key), val);} - PBVAL SubAllocVal(PGLOBAL g); - PBVAL SubAllocVal(PGLOBAL g, OFFSET toval, int type = TYPE_NULL, short nd = 0); - PBVAL SubAllocVal(PGLOBAL g, PBVAL toval, int type = TYPE_NULL, short nd = 0) - {return SubAllocVal(g, MOF(toval), type, nd);} - PBVAL SubAllocVal(PGLOBAL g, PSZ str, int type = TYPE_STRG, short nd = 0) - {return SubAllocVal(g, MOF(str), type, nd);} - PBVAL SubAllocVal(PGLOBAL g, PVAL valp); - PBVAL DupVal(PGLOBAL g, PBVAL bvp); + void* BsonSubAlloc(size_t size); + PBPR SubAllocPair(OFFSET key, OFFSET val = 0); + PBPR SubAllocPair(PSZ key, OFFSET val = 0) + {return 
SubAllocPair(MOF(key), val);} + PBVAL NewVal(int type = TYPE_NULL); + PBVAL SubAllocVal(OFFSET toval, int type = TYPE_NULL, short nd = 0); + PBVAL SubAllocVal(PBVAL toval, int type = TYPE_NULL, short nd = 0) + {return SubAllocVal(MOF(toval), type, nd);} + PBVAL SubAllocStr(OFFSET str, short nd = 0); + PBVAL SubAllocStr(PSZ str, short nd = 0) + {return SubAllocStr(MOF(str), nd);} + PBVAL SubAllocVal(PVAL valp); + PBVAL DupVal(PBVAL bvp); // Array functions int GetArraySize(PBVAL bap, bool b = false); PBVAL GetArrayValue(PBVAL bap, int i); PSZ GetArrayText(PGLOBAL g, PBVAL bap, PSTRG text); - PBVAL MergeArray(PGLOBAL g, PBVAL bap1,PBVAL bap2); + PBVAL MergeArray(PBVAL bap1,PBVAL bap2); PBVAL DeleteValue(PBVAL bap, int n); - PBVAL AddArrayValue(PGLOBAL g, PBVAL bap, PBVAL nvp = NULL, int* x = NULL); - PBVAL SetArrayValue(PGLOBAL g, PBVAL bap, PBVAL nvp, int n); + PBVAL AddArrayValue(PBVAL bap, PBVAL nvp = NULL, int* x = NULL); + PBVAL SetArrayValue(PBVAL bap, PBVAL nvp, int n); bool IsArrayNull(PBVAL bap); // Object functions int GetObjectSize(PBPR bop, bool b = false); + PBPR GetNext(PBPR prp) {return MPP(prp->Next);} PSZ GetObjectText(PGLOBAL g, PBPR bop, PSTRG text); - PBPR MergeObject(PGLOBAL g, PBPR bop1, PBPR bop2); - PBPR AddPair(PGLOBAL g, PBPR bop, PSZ key, OFFSET val = 0); + PBPR MergeObject(PBPR bop1, PBPR bop2); + PBPR AddPair(PBPR bop, PSZ key, OFFSET val = 0); + PSZ GetKey(PBPR prp) {return MZP(prp->Key);} + PBVAL GetVal(PBPR prp) {return MVP(prp->Vlp);} PBVAL GetKeyValue(PBPR bop, PSZ key); - PBVAL GetKeyList(PGLOBAL g, PBPR bop); - PBVAL GetObjectValList(PGLOBAL g, PBPR bop); - PBPR SetKeyValue(PGLOBAL g, PBPR bop, OFFSET bvp, PSZ key); + PBVAL GetKeyList(PBPR bop); + PBVAL GetObjectValList(PBPR bop); + PBPR SetKeyValue(PBPR bop, OFFSET bvp, PSZ key); + inline PBPR SetKeyValue(PBPR bop, PBVAL vlp, PSZ key) + {return SetKeyValue(bop, MOF(vlp), key);} PBPR DeleteKey(PBPR bop, PCSZ k); bool IsObjectNull(PBPR bop); // Value functions int GetSize(PBVAL vlp, bool b = false); + PBVAL GetNext(PBVAL vlp) {return MVP(vlp->Next);} PBPR GetObject(PBVAL vlp); PBVAL GetArray(PBVAL vlp); //PJSON GetJsp(void) { return (DataType == TYPE_JSON ? Jsp : NULL); } PSZ GetValueText(PGLOBAL g, PBVAL vlp, PSTRG text); - //inline PJSON GetJson(void) { return (DataType == TYPE_JSON ? Jsp : this); } - PSZ GetString(PGLOBAL g, PBVAL vp, char* buff = NULL); + inline PBVAL GetBson(PBVAL bvp) { return IsJson(bvp) ? 
MVP(bvp->To_Val) : bvp; } + PSZ GetString(PBVAL vp, char* buff = NULL); int GetInteger(PBVAL vp); long long GetBigint(PBVAL vp); double GetDouble(PBVAL vp); @@ -124,17 +134,20 @@ public: void SetValueObj(PBVAL vlp, PBPR bop); void SetValueArr(PBVAL vlp, PBVAL bap); void SetValueVal(PBVAL vlp, PBVAL vp); - void SetValue(PGLOBAL g, PBVAL vlp, PVAL valp); + void SetValue(PBVAL vlp, PVAL valp); void SetString(PBVAL vlp, PSZ s, int ci = 0); void SetInteger(PBVAL vlp, int n); - void SetBigint(PGLOBAL g, PBVAL vlp, longlong ll); + void SetBigint(PBVAL vlp, longlong ll); void SetFloat(PBVAL vlp, double f); void SetBool(PBVAL vlp, bool b); + void Clear(PBVAL vlp) { vlp->N = 0; vlp->Nd = 0; vlp->Next = 0; vlp->Type = TYPE_NULL; } bool IsValueNull(PBVAL vlp); + bool IsJson(PBVAL vlp) {return (vlp->Type == TYPE_JAR || vlp->Type == TYPE_JOB);} // Members - PBVAL Bvp; - void* Base; + PGLOBAL G; + PBVAL Bvp; + void *Base; protected: // Default constructor not to be used @@ -146,18 +159,18 @@ protected: /***********************************************************************/ class BDOC : public BJSON { public: - BDOC(void *); + BDOC(PGLOBAL G); PBVAL ParseJson(PGLOBAL g, char* s, size_t n, int* prty = NULL, bool* b = NULL); PSZ Serialize(PGLOBAL g, PBVAL bvp, char* fn, int pretty); protected: - OFFSET ParseArray(PGLOBAL g, int& i); - OFFSET ParseObject(PGLOBAL g, int& i); - PBVAL ParseValue(PGLOBAL g, int& i); - OFFSET ParseString(PGLOBAL g, int& i); - void ParseNumeric(PGLOBAL g, int& i, PBVAL bvp); - OFFSET ParseAsArray(PGLOBAL g, int& i, int pretty, int* ptyp); + OFFSET ParseArray(int& i); + OFFSET ParseObject(int& i); + PBVAL ParseValue(int& i); + OFFSET ParseString(int& i); + void ParseNumeric(int& i, PBVAL bvp); + OFFSET ParseAsArray(int& i, int pretty, int* ptyp); bool SerializeArray(OFFSET arp, bool b); bool SerializeObject(OFFSET obp); bool SerializeValue(PBVAL vp); @@ -166,7 +179,7 @@ protected: JOUT* jp; // Used with serialize char* s; // The Json string to parse int len; // The Json string length - bool pty[3]; // Used to guess what pretty is + bool pty[3]; // Used to guess what pretty is // Default constructor not to be used BDOC(void) {} diff --git a/storage/connect/bsonudf.cpp b/storage/connect/bsonudf.cpp index 95cc8aa7da8..36bec919ffd 100644 --- a/storage/connect/bsonudf.cpp +++ b/storage/connect/bsonudf.cpp @@ -41,7 +41,7 @@ inline void JsonMemSave(PGLOBAL g) { /*********************************************************************************/ inline void JsonFreeMem(PGLOBAL g) { g->Activityp = NULL; - PlugExit(g); + g = PlugExit(g); } /* end of JsonFreeMem */ /* --------------------------- New Testing BJSON Stuff --------------------------*/ @@ -71,8 +71,7 @@ static PBJNX BjnxNew(PGLOBAL g, PBVAL vlp, int type, int len) /*********************************************************************************/ /* BSNX public constructor. 
*/ /*********************************************************************************/ -BJNX::BJNX(PGLOBAL g, PBVAL row, int type, int len, int prec, my_bool wr) - : BDOC(g->Sarea) +BJNX::BJNX(PGLOBAL g, PBVAL row, int type, int len, int prec, my_bool wr) : BDOC(g) { Row = row; Bvalp = NULL; @@ -361,7 +360,7 @@ void BJNX::SetJsonValue(PGLOBAL g, PVAL vp, PBVAL vlp) } else switch (vlp->Type) { case TYPE_DTM: case TYPE_STRG: - vp->SetValue_psz(GetString(g, vlp)); + vp->SetValue_psz(GetString(vlp)); break; case TYPE_INTG: case TYPE_BINT: @@ -371,14 +370,14 @@ void BJNX::SetJsonValue(PGLOBAL g, PVAL vp, PBVAL vlp) if (vp->IsTypeNum()) vp->SetValue(GetDouble(vlp)); else // Get the proper number of decimals - vp->SetValue_psz(GetString(g, vlp)); + vp->SetValue_psz(GetString(vlp)); break; case TYPE_BOOL: if (vp->IsTypeNum()) vp->SetValue(GetInteger(vlp) ? 1 : 0); else - vp->SetValue_psz(GetString(g, vlp)); + vp->SetValue_psz(GetString(vlp)); break; case TYPE_JAR: @@ -439,7 +438,7 @@ PBVAL BJNX::GetRowValue(PGLOBAL g, PBVAL row, int i, my_bool b) for (; i < Nod && row; i++) { if (Nodes[i].Op == OP_NUM) { Value->SetValue(row->Type == TYPE_JAR ? GetArraySize(MVP(row->To_Val)) : 1); - vlp = SubAllocVal(g, Value); + vlp = SubAllocVal(Value); return vlp; } else if (Nodes[i].Op == OP_XX) { Jb = b; @@ -473,7 +472,7 @@ PBVAL BJNX::GetRowValue(PGLOBAL g, PBVAL row, int i, my_bool b) else if (Nodes[i].Op == OP_EXP) return (PBVAL)ExpandArray(g, bap, i); else - return SubAllocVal(g, CalculateArray(g, bap, i)); + return SubAllocVal(CalculateArray(g, bap, i)); } else { // Unexpected array, unwrap it as [0] @@ -701,12 +700,12 @@ PBVAL BJNX::GetRow(PGLOBAL g) // nwr = SubAllocPair(g); // Construct new row - nwr = SubAllocVal(g); + nwr = NewVal(); if (row->Type == TYPE_JOB) { - SetKeyValue(g, MPP(row->To_Val), MOF(nwr), Nodes[i - 1].Key); + SetKeyValue(MPP(row->To_Val), MOF(nwr), Nodes[i - 1].Key); } else if (row->Type == TYPE_JAR) { - AddArrayValue(g, MVP(row->To_Val), nwr); + AddArrayValue(MVP(row->To_Val), nwr); } else { strcpy(g->Message, "Wrong type when writing new row"); nwr = NULL; @@ -748,15 +747,15 @@ my_bool BJNX::WriteValue(PGLOBAL g, PBVAL jvalp) if (arp) { if (!Nodes[Nod - 1].Key) { if (Nodes[Nod - 1].Op == OP_EQ) - SetArrayValue(g, arp, jvalp, Nodes[Nod - 1].Rank); + SetArrayValue(arp, jvalp, Nodes[Nod - 1].Rank); else - AddArrayValue(g, arp, jvalp); + AddArrayValue(arp, jvalp); } // endif Key } else if (objp) { if (Nodes[Nod - 1].Key) - SetKeyValue(g, objp, MOF(jvalp), Nodes[Nod - 1].Key); + SetKeyValue(objp, MOF(jvalp), Nodes[Nod - 1].Key); } else if (jvp) SetValueVal(jvp, jvalp); @@ -1159,8 +1158,8 @@ static PBVAL MakeBinValue(PGLOBAL g, UDF_ARGS* args, uint i) int n, len; int ci; longlong bigint; - BDOC doc(g->Sarea); - PBVAL bp, bvp = doc.SubAllocVal(g); + BDOC doc(g); + PBVAL bp, bvp = doc.NewVal(); if (sap) { if (args->arg_type[i] == STRING_RESULT) { @@ -1209,7 +1208,7 @@ static PBVAL MakeBinValue(PGLOBAL g, UDF_ARGS* args, uint i) (bigint == 1LL && !strcmp(args->attributes[i], "TRUE"))) doc.SetBool(bvp, (bool)bigint); else - doc.SetBigint(g, bvp, bigint); + doc.SetBigint(bvp, bigint); break; case REAL_RESULT: @@ -1256,7 +1255,7 @@ char* bsonvalue(UDF_INIT* initid, UDF_ARGS* args, char* result, if (!g->Xchk) { if (!CheckMemory(g, initid, args, 1, false)) { - BDOC doc(g->Sarea); + BDOC doc(g); PBVAL bvp = MakeBinValue(g, args, 0); if (!(str = doc.Serialize(g, bvp, NULL, 0))) @@ -1297,13 +1296,13 @@ char* bson_make_array(UDF_INIT* initid, UDF_ARGS* args, char* result, if (!g->Xchk) { if 
(!CheckMemory(g, initid, args, args->arg_count, false)) { - BDOC doc(g->Sarea); + BDOC doc(g); PBVAL bvp = NULL, arp = NULL; for (uint i = 0; i < args->arg_count; i++) - bvp = doc.AddArrayValue(g, bvp, MakeBinValue(g, args, i)); + bvp = doc.AddArrayValue(bvp, MakeBinValue(g, args, i)); - arp = doc.SubAllocVal(g, bvp, TYPE_JAR); + arp = doc.SubAllocVal(bvp, TYPE_JAR); if (!(str = doc.Serialize(g, arp, NULL, 0))) str = strcpy(result, g->Message); @@ -1364,7 +1363,7 @@ char* bson_array_add_values(UDF_INIT* initid, UDF_ARGS* args, char* result, if (!CheckMemory(g, initid, args, args->arg_count, true)) { uint n = 1; bool b = false; - BDOC doc(g->Sarea); + BDOC doc(g); PBVAL bvp = NULL, arp = MakeBinValue(g, args, 0); if (arp->Type == TYPE_JAR) { @@ -1374,10 +1373,10 @@ char* bson_array_add_values(UDF_INIT* initid, UDF_ARGS* args, char* result, n = 0; for (uint i = n; i < args->arg_count; i++) - bvp = doc.AddArrayValue(g, bvp, MakeBinValue(g, args, i)); + bvp = doc.AddArrayValue(bvp, MakeBinValue(g, args, i)); if (!n) - arp = doc.SubAllocVal(g, bvp, TYPE_JAR); + arp = doc.SubAllocVal(bvp, TYPE_JAR); else if (b) doc.SetValueArr(arp, bvp); diff --git a/storage/connect/connect.cc b/storage/connect/connect.cc index 2a0f2ed037f..250ff7fa62f 100644 --- a/storage/connect/connect.cc +++ b/storage/connect/connect.cc @@ -73,8 +73,7 @@ PGLOBAL CntExit(PGLOBAL g) g->Activityp = NULL; } // endif Activityp - PlugExit(g); - g= NULL; + g= PlugExit(g); } // endif g return g; diff --git a/storage/connect/global.h b/storage/connect/global.h index 294ad0e1d7b..f09d5250124 100644 --- a/storage/connect/global.h +++ b/storage/connect/global.h @@ -208,7 +208,7 @@ DllExport char *PlugGetMessage(PGLOBAL, int); DllExport short GetLineLength(PGLOBAL); // Console line length #endif // __WIN__ DllExport PGLOBAL PlugInit(LPCSTR, size_t); // Plug global initialization -DllExport int PlugExit(PGLOBAL); // Plug global termination +DllExport PGLOBAL PlugExit(PGLOBAL); // Plug global termination DllExport LPSTR PlugRemoveType(LPSTR, LPCSTR); DllExport LPCSTR PlugSetPath(LPSTR to, LPCSTR prefix, LPCSTR name, LPCSTR dir); DllExport BOOL PlugIsAbsolutePath(LPCSTR path); diff --git a/storage/connect/ha_connect.cc b/storage/connect/ha_connect.cc index e9b9e5c24aa..4e7dd3ff394 100644 --- a/storage/connect/ha_connect.cc +++ b/storage/connect/ha_connect.cc @@ -170,7 +170,7 @@ #define JSONMAX 10 // JSON Default max grp size extern "C" { - char version[]= "Version 1.07.0002 November 13, 2020"; + char version[]= "Version 1.07.0002 November 30, 2020"; #if defined(__WIN__) char compver[]= "Version 1.07.0002 " __DATE__ " " __TIME__; char slash= '\\'; @@ -230,6 +230,9 @@ char *GetUserVariable(PGLOBAL g, const uchar *varname) PQRYRES OEMColumns(PGLOBAL g, PTOS topt, char *tab, char *db, bool info); PQRYRES VirColumns(PGLOBAL g, bool info); PQRYRES JSONColumns(PGLOBAL g, PCSZ db, PCSZ dsn, PTOS topt, bool info); +#ifdef DEVELOPMENT +PQRYRES BSONColumns(PGLOBAL g, PCSZ db, PCSZ dsn, PTOS topt, bool info); +#endif // DEVEOPMENT PQRYRES XMLColumns(PGLOBAL g, char *db, char *tab, PTOS topt, bool info); #if defined(REST_SUPPORT) PQRYRES RESTColumns(PGLOBAL g, PTOS topt, char *tab, char *db, bool info); @@ -4513,7 +4516,10 @@ bool ha_connect::check_privileges(THD *thd, PTOS options, char *dbn, bool quick) case TAB_VEC: case TAB_REST: case TAB_JSON: - if (options->filename && *options->filename) { +#if defined DEVELOPMENT + case TAB_BSON: +#endif // DEVELOPMENT + if (options->filename && *options->filename) { if (!quick) { char path[FN_REFLEN], 
dbpath[FN_REFLEN]; @@ -5679,7 +5685,10 @@ static int connect_assisted_discovery(handlerton *, THD* thd, } else if (topt->http) { switch (ttp) { case TAB_JSON: - case TAB_XML: +#ifdef DEVELOPMENT + case TAB_BSON: +#endif // DEVELOPMENT + case TAB_XML: case TAB_CSV: ttp = TAB_REST; break; @@ -5863,6 +5872,9 @@ static int connect_assisted_discovery(handlerton *, THD* thd, case TAB_XML: #endif // LIBXML2_SUPPORT || DOMDOC_SUPPORT case TAB_JSON: +#ifdef DEVELOPMENT + case TAB_BSON: +#endif // DEVELOPMENT dsn= strz(g, create_info->connect_string); if (!fn && !zfn && !mul && !dsn) @@ -6029,6 +6041,11 @@ static int connect_assisted_discovery(handlerton *, THD* thd, case TAB_JSON: qrp= JSONColumns(g, db, dsn, topt, fnc == FNC_COL); break; +#ifdef DEVELOPMENT + case TAB_BSON: + qrp= BSONColumns(g, db, dsn, topt, fnc == FNC_COL); + break; +#endif // DEVELOPMENT #if defined(JAVA_SUPPORT) case TAB_MONGO: url= strz(g, create_info->connect_string); diff --git a/storage/connect/jsonudf.cpp b/storage/connect/jsonudf.cpp index d993947589f..773828a96dd 100644 --- a/storage/connect/jsonudf.cpp +++ b/storage/connect/jsonudf.cpp @@ -1178,7 +1178,7 @@ my_bool JsonSubSet(PGLOBAL g) { PPOOLHEADER pph = (PPOOLHEADER)g->Sarea; - pph->To_Free = (g->Saved_Size) ? g->Saved_Size : (size_t)sizeof(POOLHEADER); + pph->To_Free = (g->Saved_Size) ? g->Saved_Size : sizeof(POOLHEADER); pph->FreeBlk = g->Sarea_Size - pph->To_Free; g->Saved_Size = 0; return FALSE; @@ -1198,7 +1198,7 @@ inline void JsonMemSave(PGLOBAL g) inline void JsonFreeMem(PGLOBAL g) { g->Activityp = NULL; - PlugExit(g); + g = PlugExit(g); } /* end of JsonFreeMem */ /*********************************************************************************/ @@ -1281,7 +1281,7 @@ my_bool JsonInit(UDF_INIT *initid, UDF_ARGS *args, char *message, my_bool mbn, return true; } else if (g->Sarea_Size == 0) { strcpy(message, g->Message); - PlugExit(g); + g = PlugExit(g); return true; } // endif g diff --git a/storage/connect/mycat.cc b/storage/connect/mycat.cc index f8b3dc03aa5..395e1192b45 100644 --- a/storage/connect/mycat.cc +++ b/storage/connect/mycat.cc @@ -16,9 +16,9 @@ /*************** Mycat CC Program Source Code File (.CC) ***************/ /* PROGRAM NAME: MYCAT */ /* ------------- */ -/* Version 1.7 */ +/* Version 1.8 */ /* */ -/* Author: Olivier Bertrand 2012 - 2019 */ +/* Author: Olivier Bertrand 2012 - 2020 */ /* */ /* WHAT THIS PROGRAM DOES: */ /* ----------------------- */ @@ -82,7 +82,11 @@ #endif // JAVA_SUPPORT #include "tabpivot.h" #include "tabvir.h" +#if defined(DEVELOPMENT) +#include "tabbson.h" +#else #include "tabjson.h" +#endif // DEVELOPMENT #include "ha_connect.h" #if defined(XML_SUPPORT) #include "tabxml.h" @@ -157,6 +161,9 @@ TABTYPE GetTypeID(const char *type) : (!stricmp(type, "PIVOT")) ? TAB_PIVOT : (!stricmp(type, "VIR")) ? TAB_VIR : (!stricmp(type, "JSON")) ? TAB_JSON +#if defined(DEVELOPMENT) + : (!stricmp(type, "BSON")) ? TAB_BSON +#endif #if defined(ZIP_SUPPORT) : (!stricmp(type, "ZIP")) ? 
TAB_ZIP #endif @@ -181,6 +188,9 @@ bool IsFileType(TABTYPE type) case TAB_INI: case TAB_VEC: case TAB_JSON: +#if defined(DEVELOPMENT) + case TAB_BSON: +#endif case TAB_REST: // case TAB_ZIP: isfile= true; @@ -276,6 +286,9 @@ bool IsTypeIndexable(TABTYPE type) case TAB_VEC: case TAB_DBF: case TAB_JSON: +#if defined(DEVELOPMENT) + case TAB_BSON: +#endif idx= true; break; default: @@ -302,6 +315,9 @@ int GetIndexType(TABTYPE type) case TAB_VEC: case TAB_DBF: case TAB_JSON: +#if defined(DEVELOPMENT) + case TAB_BSON: +#endif xtyp= 1; break; case TAB_MYSQL: @@ -445,7 +461,7 @@ PTABDEF MYCAT::MakeTableDesc(PGLOBAL g, PTABLE tablep, LPCSTR am) case TAB_XML: tdp= new(g) XMLDEF; break; #endif // XML_SUPPORT #if defined(VCT_SUPPORT) - case TAB_VEC: tdp = new(g) VCTDEF; break; + case TAB_VEC: tdp= new(g) VCTDEF; break; #endif // VCT_SUPPORT #if defined(ODBC_SUPPORT) case TAB_ODBC: tdp= new(g) ODBCDEF; break; @@ -466,8 +482,11 @@ PTABDEF MYCAT::MakeTableDesc(PGLOBAL g, PTABLE tablep, LPCSTR am) case TAB_PIVOT: tdp= new(g) PIVOTDEF; break; case TAB_VIR: tdp= new(g) VIRDEF; break; case TAB_JSON: tdp= new(g) JSONDEF; break; +#if defined(DEVELOPMENT) + case TAB_BSON: tdp= new(g) BSONDEF; break; +#endif #if defined(ZIP_SUPPORT) - case TAB_ZIP: tdp = new(g) ZIPDEF; break; + case TAB_ZIP: tdp= new(g) ZIPDEF; break; #endif // ZIP_SUPPORT #if defined(REST_SUPPORT) case TAB_REST: tdp= new (g) RESTDEF; break; diff --git a/storage/connect/mysql-test/connect/r/xml.result b/storage/connect/mysql-test/connect/r/xml.result index 99739b1ec10..599c35cb1ed 100644 --- a/storage/connect/mysql-test/connect/r/xml.result +++ b/storage/connect/mysql-test/connect/r/xml.result @@ -323,7 +323,7 @@ HEX(c) 3F3F3F3F3F3F3F Warnings: Level Warning Code 1366 -Message Incorrect string value: '\xC3\x81\xC3\x82\xC3\x83...' for column 'c' at row 1 +Message Incorrect string value: '\xC3\x81\xC3\x82\xC3\x83...' for column `test`.`t1`.`c` at row 1 Level Warning Code 1105 Message Out of range value ÁÂÃÄÅÆÇ for column 'c' at row 1 @@ -374,7 +374,7 @@ INSERT INTO t1 VALUES (_cp1251 0xC0C1C2C3); Warnings: Level Warning Code 1105 -Message Com error: Unable to save character to 'iso-8859-1' encoding. +Message Com error: Impossible d'enregistrer le caractre dans le codage iso-8859-1. INSERT INTO t1 VALUES ('&<>"\''); SELECT node, hex(node) FROM t1; diff --git a/storage/connect/plgdbsem.h b/storage/connect/plgdbsem.h index 1d644cb75c2..dd204d065ed 100644 --- a/storage/connect/plgdbsem.h +++ b/storage/connect/plgdbsem.h @@ -83,7 +83,8 @@ enum TABTYPE {TAB_UNDEF = 0, /* Table of undefined type */ TAB_ZIP = 27, /* ZIP file info table */ TAB_MONGO = 28, /* Table retrieved from MongoDB */ TAB_REST = 29, /* Table retrieved from Rest */ - TAB_NIY = 30}; /* Table not implemented yet */ + TAB_BSON = 30, /* BSON Table (development) */ + TAB_NIY = 31}; /* Table not implemented yet */ enum AMT {TYPE_AM_ERROR = 0, /* Type not defined */ TYPE_AM_ROWID = 1, /* ROWID type (special column) */ diff --git a/storage/connect/plugutil.cpp b/storage/connect/plugutil.cpp index 0ab594f5533..479310703eb 100644 --- a/storage/connect/plugutil.cpp +++ b/storage/connect/plugutil.cpp @@ -184,7 +184,7 @@ PGLOBAL PlugInit(LPCSTR Language, size_t worksize) /***********************************************************************/ /* PlugExit: Terminate Plug operations. 
*/ /***********************************************************************/ -int PlugExit(PGLOBAL g) +PGLOBAL PlugExit(PGLOBAL g) { if (g) { PDBUSER dup = PlgGetUser(g); @@ -196,7 +196,7 @@ int PlugExit(PGLOBAL g) delete g; } // endif g - return 0; + return NULL; } // end of PlugExit /***********************************************************************/ @@ -483,9 +483,10 @@ bool AllocSarea(PGLOBAL g, size_t size) #else if (trace(8)) { #endif - if (g->Sarea) + if (g->Sarea) { htrc("Work area of %zd allocated at %p\n", size, g->Sarea); - else + PlugSubSet(g->Sarea, size); + } else htrc("SareaAlloc: %s\n", g->Message); } // endif trace diff --git a/storage/connect/tabbson.cpp b/storage/connect/tabbson.cpp new file mode 100644 index 00000000000..aa2f5957911 --- /dev/null +++ b/storage/connect/tabbson.cpp @@ -0,0 +1,2599 @@ +/************* tabbson C++ Program Source Code File (.CPP) *************/ +/* PROGRAM NAME: tabbson Version 1.0 */ +/* (C) Copyright to the author Olivier BERTRAND 2020 */ +/* This file contains the BSON class DB execution routines. */ +/***********************************************************************/ + +/***********************************************************************/ +/* Include relevant sections of the MariaDB header file. */ +/***********************************************************************/ +#include <my_global.h> + +/***********************************************************************/ +/* Include application header files: */ +/* global.h is header containing all global declarations. */ +/* plgdbsem.h is header containing the DB application declarations. */ +/* tdbdos.h is header containing the TDBDOS declarations. */ +/* json.h is header containing the JSON classes declarations. */ +/***********************************************************************/ +#include "global.h" +#include "plgdbsem.h" +#include "maputil.h" +#include "filamtxt.h" +#include "tabdos.h" +#include "tabbson.h" +#include "filamap.h" +#if defined(GZ_SUPPORT) +#include "filamgz.h" +#endif // GZ_SUPPORT +#if defined(ZIP_SUPPORT) +#include "filamzip.h" +#endif // ZIP_SUPPORT +#if 0 +#if defined(JAVA_SUPPORT) +#include "jmgfam.h" +#endif // JAVA_SUPPORT +#if defined(CMGO_SUPPORT) +#include "cmgfam.h" +#endif // CMGO_SUPPORT +#endif // 0 +#include "tabmul.h" +#include "checklvl.h" +#include "resource.h" +#include "mycat.h" // for FNC_COL + +/***********************************************************************/ +/* This should be an option. */ +/***********************************************************************/ +#define MAXCOL 200 /* Default max column nb in result */ +//#define TYPE_UNKNOWN 12 /* Must be greater than other types */ + +/***********************************************************************/ +/* External functions. */ +/***********************************************************************/ +USETEMP UseTemp(void); +bool JsonAllPath(void); +int GetDefaultDepth(void); +char *GetJsonNull(void); + +/***********************************************************************/ +/* BSONColumns: construct the result blocks containing the description */ +/* of all the columns of a table contained inside a JSON file.
*/ +/***********************************************************************/ +PQRYRES BSONColumns(PGLOBAL g, PCSZ db, PCSZ dsn, PTOS topt, bool info) +{ + static int buftyp[] = { TYPE_STRING, TYPE_SHORT, TYPE_STRING, TYPE_INT, + TYPE_INT, TYPE_SHORT, TYPE_SHORT, TYPE_STRING }; + static XFLD fldtyp[] = { FLD_NAME, FLD_TYPE, FLD_TYPENAME, FLD_PREC, + FLD_LENGTH, FLD_SCALE, FLD_NULL, FLD_FORMAT }; + static unsigned int length[] = { 0, 6, 8, 10, 10, 6, 6, 0 }; + int i, n = 0; + int ncol = sizeof(buftyp) / sizeof(int); + PJCL jcp; + BSONDISC* pjdc = NULL; + PQRYRES qrp; + PCOLRES crp; + + if (info) { + length[0] = 128; + length[7] = 256; + goto skipit; + } // endif info + + if (GetIntegerTableOption(g, topt, "Multiple", 0)) { + strcpy(g->Message, "Cannot find column definition for multiple table"); + return NULL; + } // endif Multiple + + pjdc = new(g) BSONDISC(g, length); + + if (!(n = pjdc->GetColumns(g, db, dsn, topt))) + return NULL; + +skipit: + if (trace(1)) + htrc("BSONColumns: n=%d len=%d\n", n, length[0]); + + /*********************************************************************/ + /* Allocate the structures used to refer to the result set. */ + /*********************************************************************/ + qrp = PlgAllocResult(g, ncol, n, IDS_COLUMNS + 3, + buftyp, fldtyp, length, false, false); + + crp = qrp->Colresp->Next->Next->Next->Next->Next->Next; + crp->Name = PlugDup(g, "Nullable"); + crp->Next->Name = PlugDup(g, "Jpath"); + + if (info || !qrp) + return qrp; + + qrp->Nblin = n; + + /*********************************************************************/ + /* Now get the results into blocks. */ + /*********************************************************************/ + for (i = 0, jcp = pjdc->fjcp; jcp; i++, jcp = jcp->Next) { + if (jcp->Type == TYPE_UNKNOWN) + jcp->Type = TYPE_STRG; // Void column + + crp = qrp->Colresp; // Column Name + crp->Kdata->SetValue(jcp->Name, i); + crp = crp->Next; // Data Type + crp->Kdata->SetValue(jcp->Type, i); + crp = crp->Next; // Type Name + crp->Kdata->SetValue(GetTypeName(jcp->Type), i); + crp = crp->Next; // Precision + crp->Kdata->SetValue(jcp->Len, i); + crp = crp->Next; // Length + crp->Kdata->SetValue(jcp->Len, i); + crp = crp->Next; // Scale (precision) + crp->Kdata->SetValue(jcp->Scale, i); + crp = crp->Next; // Nullable + crp->Kdata->SetValue(jcp->Cbn ? 1 : 0, i); + crp = crp->Next; // Field format + + if (crp->Kdata) + crp->Kdata->SetValue(jcp->Fmt, i); + + } // endfor i + +/*********************************************************************/ +/* Return the result pointer. */ +/*********************************************************************/ + return qrp; +} // end of BSONColumns + +/* -------------------------- Class BSONDISC ------------------------- */ + +/***********************************************************************/ +/* Class used to get the columns of a JSON table. 
*/ +/***********************************************************************/ +BSONDISC::BSONDISC(PGLOBAL g, uint* lg) +{ + length = lg; + jcp = fjcp = pjcp = NULL; + tdp = NULL; + tjnp = NULL; + jpp = NULL; + tjsp = NULL; + jsp = NULL; + bp = NULL; + row = NULL; + sep = NULL; + i = n = bf = ncol = lvl = sz = limit = 0; + all = strfy = false; +} // end of BSONDISC constructor + +int BSONDISC::GetColumns(PGLOBAL g, PCSZ db, PCSZ dsn, PTOS topt) +{ + char filename[_MAX_PATH]; + bool mgo = (GetTypeID(topt->type) == TAB_MONGO); + PBVAL bdp = NULL; + + lvl = GetIntegerTableOption(g, topt, "Level", GetDefaultDepth()); + lvl = GetIntegerTableOption(g, topt, "Depth", lvl); + sep = GetStringTableOption(g, topt, "Separator", "."); + sz = GetIntegerTableOption(g, topt, "Jsize", 1024); + limit = GetIntegerTableOption(g, topt, "Limit", 10); + strfy = GetBooleanTableOption(g, topt, "Stringify", false); + + /*********************************************************************/ + /* Open the input file. */ + /*********************************************************************/ + tdp = new(g) BSONDEF; + tdp->G = NULL; +#if defined(ZIP_SUPPORT) + tdp->Entry = GetStringTableOption(g, topt, "Entry", NULL); + tdp->Zipped = GetBooleanTableOption(g, topt, "Zipped", false); +#endif // ZIP_SUPPORT + tdp->Fn = GetStringTableOption(g, topt, "Filename", NULL); + + if (!(tdp->Database = SetPath(g, db))) + return 0; + + tdp->Objname = GetStringTableOption(g, topt, "Object", NULL); + tdp->Base = GetIntegerTableOption(g, topt, "Base", 0) ? 1 : 0; + tdp->Pretty = GetIntegerTableOption(g, topt, "Pretty", 2); + tdp->Xcol = GetStringTableOption(g, topt, "Expand", NULL); + tdp->Accept = GetBooleanTableOption(g, topt, "Accept", false); + tdp->Uri = (dsn && *dsn ? dsn : NULL); + + if (!tdp->Fn && !tdp->Uri) { + strcpy(g->Message, MSG(MISSING_FNAME)); + return 0; + } // endif Fn + + if (tdp->Fn) { + // We used the file name relative to recorded datapath + PlugSetPath(filename, tdp->Fn, tdp->GetPath()); + tdp->Fn = PlugDup(g, filename); + } // endif Fn + + if (trace(1)) + htrc("File %s objname=%s pretty=%d lvl=%d\n", + tdp->Fn, tdp->Objname, tdp->Pretty, lvl); + + if (tdp->Uri) { +#if defined(JAVA_SUPPORT) || defined(CMGO_SUPPORT) + tdp->Collname = GetStringTableOption(g, topt, "Name", NULL); + tdp->Collname = GetStringTableOption(g, topt, "Tabname", tdp->Collname); + tdp->Schema = GetStringTableOption(g, topt, "Dbname", "test"); + tdp->Options = (PSZ)GetStringTableOption(g, topt, "Colist", "all"); + tdp->Pipe = GetBooleanTableOption(g, topt, "Pipeline", false); + tdp->Driver = (PSZ)GetStringTableOption(g, topt, "Driver", NULL); + tdp->Version = GetIntegerTableOption(g, topt, "Version", 3); + tdp->Wrapname = (PSZ)GetStringTableOption(g, topt, "Wrapper", + (tdp->Version == 2) ? "Mongo2Interface" : "Mongo3Interface"); + tdp->Pretty = 0; +#else // !MONGO_SUPPORT + sprintf(g->Message, MSG(NO_FEAT_SUPPORT), "MONGO"); + return 0; +#endif // !MONGO_SUPPORT + } // endif Uri + + if (tdp->Pretty == 2) { + tdp->G = g; + + if (tdp->Zipped) { +#if defined(ZIP_SUPPORT) + tjsp = new(g) TDBBSON(g, tdp, new(g) UNZFAM(tdp)); +#else // !ZIP_SUPPORT + sprintf(g->Message, MSG(NO_FEAT_SUPPORT), "ZIP"); + return 0; +#endif // !ZIP_SUPPORT + } else + tjsp = new(g) TDBBSON(g, tdp, new(g) MAPFAM(tdp)); + + if (tjsp->MakeDocument(g)) + return 0; + + bp = tjsp->Bp; + bdp = tjsp->GetDoc() ? bp->GetBson(tjsp->GetDoc()) : NULL; + jsp = bdp ? 
bp->GetArrayValue(bdp, 0) : NULL; + } else { + if (!((tdp->Lrecl = GetIntegerTableOption(g, topt, "Lrecl", 0)))) { + if (!mgo) { + sprintf(g->Message, "LRECL must be specified for pretty=%d", tdp->Pretty); + return 0; + } else + tdp->Lrecl = 8192; // Should be enough + + } // endif Lrecl + + // Allocate the parse work memory + tdp->G = PlugInit(NULL, (size_t)tdp->Lrecl * (tdp->Pretty >= 0 ? 6 : 2)); + tdp->Ending = GetIntegerTableOption(g, topt, "Ending", CRLF); + + if (tdp->Zipped) { +#if defined(ZIP_SUPPORT) + tjnp = new(g)TDBBSN(tdp->G, tdp, new(g) UNZFAM(tdp)); +#else // !ZIP_SUPPORT + sprintf(g->Message, MSG(NO_FEAT_SUPPORT), "ZIP"); + return NULL; +#endif // !ZIP_SUPPORT +#if 0 + } else if (tdp->Uri) { + if (tdp->Driver && toupper(*tdp->Driver) == 'C') { +#if defined(CMGO_SUPPORT) + tjnp = new(g) TDBBSN(G, tdp, new(g) CMGFAM(tdp)); +#else + sprintf(g->Message, "Mongo %s Driver not available", "C"); + return 0; +#endif + } else if (tdp->Driver && toupper(*tdp->Driver) == 'J') { +#if defined(JAVA_SUPPORT) + tjnp = new(g) TDBBSN(G, tdp, new(g) JMGFAM(tdp)); +#else + sprintf(g->Message, "Mongo %s Driver not available", "Java"); + return 0; +#endif + } else { // Driver not specified +#if defined(CMGO_SUPPORT) + tjnp = new(g) TDBBSN(G, tdp, new(g) CMGFAM(tdp)); +#elif defined(JAVA_SUPPORT) + tjnp = new(g) TDBBSN(G, tdp, new(g) JMGFAM(tdp)); +#else + sprintf(g->Message, MSG(NO_FEAT_SUPPORT), "MONGO"); + return 0; +#endif + } // endif Driver +#endif // 0 + + } else if (tdp->Pretty >= 0) + tjnp = new(g) TDBBSN(g, tdp, new(g) DOSFAM(tdp)); + else + tjnp = new(g) TDBBSN(g, tdp, new(g) BINFAM(tdp)); + + tjnp->SetMode(MODE_READ); + bp = tjnp->Bp; + + if (tjnp->OpenDB(g)) + return 0; + + switch (tjnp->ReadDB(g)) { + case RC_EF: + strcpy(g->Message, "Void json table"); + case RC_FX: + goto err; + default: + jsp = bp->FindRow(g); + } // endswitch ReadDB + + } // endif pretty + + if (!(row = (jsp) ? bp->GetObject(jsp) : NULL)) { + strcpy(g->Message, "Can only retrieve columns from object rows"); + goto err; + } // endif row + + all = GetBooleanTableOption(g, topt, "Fullarray", false); + jcol.Name = jcol.Fmt = NULL; + jcol.Next = NULL; + jcol.Found = true; + colname[0] = 0; + + if (!tdp->Uri) { + fmt[0] = '$'; + fmt[1] = '.'; + bf = 2; + } // endif Uri + + /*********************************************************************/ + /* Analyse the JSON tree and define columns. */ + /*********************************************************************/ + for (i = 1; ; i++) { + for (jpp = row; jpp; jpp = bp->GetNext(jpp)) { + strncpy(colname, bp->GetKey(jpp), 64); + fmt[bf] = 0; + + if (Find(g, bp->GetVal(jpp), colname, MY_MIN(lvl, 0))) + goto err; + + } // endfor jpp + + // Missing column can be null + for (jcp = fjcp; jcp; jcp = jcp->Next) { + jcp->Cbn |= !jcp->Found; + jcp->Found = false; + } // endfor jcp + + if (tdp->Pretty != 2) { + // Read next record + switch (tjnp->ReadDB(g)) { + case RC_EF: + jsp = NULL; + break; + case RC_FX: + goto err; + default: + jsp = bp->FindRow(g); + } // endswitch ReadDB + + } else + jsp = bp->GetArrayValue(bdp, i); + + if (!(row = (jsp) ? 
bp->GetObject(jsp) : NULL)) + break; + + } // endfor i + + if (tdp->Pretty != 2) + tjnp->CloseDB(g); + + return n; + +err: + if (tdp->Pretty != 2) + tjnp->CloseDB(g); + + return 0; +} // end of GetColumns + +bool BSONDISC::Find(PGLOBAL g, PBVAL jvp, PCSZ key, int j) +{ + char *p, *pc = colname + strlen(colname), buf[32]; + int ars; + size_t n; + PBPR job; + PBVAL jar; + + if (jvp && !bp->IsJson(jvp)) { + if (JsonAllPath() && !fmt[bf]) + strcat(fmt, colname); + + jcol.Type = (JTYP)jvp->Type; + + switch (jvp->Type) { + case TYPE_STRG: + case TYPE_DTM: + jcol.Len = (int)strlen(bp->GetString(jvp)); + break; + case TYPE_INTG: + case TYPE_BINT: + case TYPE_DBL: + jcol.Len = (int)strlen(bp->GetString(jvp, buf)); + break; + case TYPE_BOOL: + jcol.Len = 1; + break; + default: + jcol.Len = 0; + break; + } // endswitch Type + + jcol.Scale = jvp->Nd; + jcol.Cbn = jvp->Type == TYPE_NULL; + } else if (!jvp || bp->IsValueNull(jvp)) { + jcol.Type = TYPE_UNKNOWN; + jcol.Len = jcol.Scale = 0; + jcol.Cbn = true; + } else if (j < lvl) { + if (!fmt[bf]) + strcat(fmt, colname); + + p = fmt + strlen(fmt); + jsp = jvp; + + switch (jsp->Type) { + case TYPE_JOB: + job = bp->GetObject(jsp); + + for (PBPR jrp = job; jrp; jrp = bp->GetNext(jrp)) { + PCSZ k = bp->GetKey(jrp); + + if (*k != '$') { + n = sizeof(fmt) - strlen(fmt) - 1; + strncat(strncat(fmt, sep, n), k, n - strlen(sep)); + n = sizeof(colname) - strlen(colname) - 1; + strncat(strncat(colname, "_", n), k, n - 1); + } // endif Key + + if (Find(g, bp->GetVal(jrp), k, j + 1)) + return true; + + *p = *pc = 0; + } // endfor jrp + + return false; + case TYPE_JAR: + jar = bp->GetArray(jsp); + + if (all || (tdp->Xcol && !stricmp(tdp->Xcol, key))) + ars = MY_MIN(bp->GetArraySize(jar), limit); + else + ars = MY_MIN(bp->GetArraySize(jar), 1); + + for (int k = 0; k < ars; k++) { + n = sizeof(fmt) - (strlen(fmt) + 1); + + if (!tdp->Xcol || stricmp(tdp->Xcol, key)) { + sprintf(buf, "%d", k); + + if (tdp->Uri) { + strncat(strncat(fmt, sep, n), buf, n - strlen(sep)); + } else { + strncat(strncat(fmt, "[", n), buf, n - 1); + strncat(fmt, "]", n - (strlen(buf) + 1)); + } // endif uri + + if (all) { + n = sizeof(colname) - (strlen(colname) + 1); + strncat(strncat(colname, "_", n), buf, n - 1); + } // endif all + + } else { + strncat(fmt, (tdp->Uri ? 
sep : "[*]"), n); + } + + if (Find(g, bp->GetArrayValue(jar, k), "", j)) + return true; + + *p = *pc = 0; + } // endfor k + + return false; + default: + sprintf(g->Message, "Logical error after %s", fmt); + return true; + } // endswitch Type + + } else if (lvl >= 0) { + if (strfy) { + if (!fmt[bf]) + strcat(fmt, colname); + + strcat(fmt, ".*"); + } else if (JsonAllPath() && !fmt[bf]) + strcat(fmt, colname); + + jcol.Type = TYPE_STRG; + jcol.Len = sz; + jcol.Scale = 0; + jcol.Cbn = true; + } else + return false; + + AddColumn(g); + return false; +} // end of Find + +void BSONDISC::AddColumn(PGLOBAL g) { + bool b = fmt[bf] != 0; // True if formatted + + // Check whether this column was already found + for (jcp = fjcp; jcp; jcp = jcp->Next) + if (!strcmp(colname, jcp->Name)) + break; + + if (jcp) { + if (jcp->Type != jcol.Type) { + if (jcp->Type == TYPE_UNKNOWN || jcp->Type == TYPE_NULL) + jcp->Type = jcol.Type; + // else if (jcol.Type != TYPE_UNKNOWN && jcol.Type != TYPE_VOID) + // jcp->Type = TYPE_STRING; + else if (jcp->Type != TYPE_STRG) + switch (jcol.Type) { + case TYPE_STRG: + case TYPE_DBL: + jcp->Type = jcol.Type; + break; + case TYPE_BINT: + if (jcp->Type == TYPE_INTG || jcp->Type == TYPE_BOOL) + jcp->Type = jcol.Type; + + break; + case TYPE_INTG: + if (jcp->Type == TYPE_BOOL) + jcp->Type = jcol.Type; + + break; + default: + break; + } // endswith Type + + } // endif Type + + if (b && (!jcp->Fmt || strlen(jcp->Fmt) < strlen(fmt))) { + jcp->Fmt = PlugDup(g, fmt); + length[7] = MY_MAX(length[7], strlen(fmt)); + } // endif fmt + + jcp->Len = MY_MAX(jcp->Len, jcol.Len); + jcp->Scale = MY_MAX(jcp->Scale, jcol.Scale); + jcp->Cbn |= jcol.Cbn; + jcp->Found = true; + } else if (jcol.Type != TYPE_UNKNOWN || tdp->Accept) { + // New column + jcp = (PJCL)PlugSubAlloc(g, NULL, sizeof(JCOL)); + *jcp = jcol; + jcp->Cbn |= (i > 1); + jcp->Name = PlugDup(g, colname); + length[0] = MY_MAX(length[0], strlen(colname)); + + if (b) { + jcp->Fmt = PlugDup(g, fmt); + length[7] = MY_MAX(length[7], strlen(fmt)); + } else + jcp->Fmt = NULL; + + if (pjcp) { + jcp->Next = pjcp->Next; + pjcp->Next = jcp; + } else + fjcp = jcp; + + n++; + } // endif jcp + + if (jcp) + pjcp = jcp; + +} // end of AddColumn + +/* -------------------------- Class BTUTIL --------------------------- */ + +/***********************************************************************/ +/* Find the row in the tree structure. */ +/***********************************************************************/ +PBVAL BTUTIL::FindRow(PGLOBAL g) +{ + char *p, *objpath; + PBVAL jsp = Tp->Row; + PBVAL val = NULL; + + for (objpath = PlugDup(g, Tp->Objname); jsp && objpath; objpath = p) { + if ((p = strchr(objpath, Tp->Sep))) + *p++ = 0; + + if (*objpath != '[' && !IsNum(objpath)) { // objpass is a key + val = (jsp->Type == TYPE_JOB) ? + GetKeyValue(GetObject(jsp), objpath) : NULL; + } else { + if (*objpath == '[') { + if (objpath[strlen(objpath) - 1] == ']') + objpath++; + else + return NULL; + } // endif [ + + val = (jsp->Type == TYPE_JAR) ? + GetArrayValue(GetArray(jsp), atoi(objpath) - Tp->B) : NULL; + } // endif objpath + + // jsp = (val) ? val->GetJson() : NULL; + jsp = val; + } // endfor objpath + + return jsp; +} // end of FindRow + +/***********************************************************************/ +/* Parse the read line. 
*/ +/***********************************************************************/ +PBVAL BTUTIL::ParseLine(PGLOBAL g, int *pretty, bool *comma) +{ + return ParseJson(g, Tp->To_Line, strlen(Tp->To_Line), pretty, comma); +} // end of ParseLine + +/***********************************************************************/ +/* Make the top tree from the object path. */ +/***********************************************************************/ +PBVAL BTUTIL::MakeTopTree(PGLOBAL g, PBVAL jsp) +{ + PBVAL top = NULL; + + if (Tp->Objname) { + if (!Tp->Val) { + // Parse and allocate Objname item(s) + char* p; + char* objpath = PlugDup(g, Tp->Objname); + int i; + PBPR objp = NULL; + PBVAL arp = NULL; + PBVAL val = NULL; + + for (; objpath; objpath = p) { + if ((p = strchr(objpath, Tp->Sep))) + *p++ = 0; + + if (*objpath != '[' && !IsNum(objpath)) { + // objp = new(g) JOBJECT; + + if (!top) + top = NewVal(TYPE_JOB); + + if (val) + SetValueObj(val, objp); + + val = NewVal(); + SetKeyValue(objp, MOF(val), objpath); + } else { + if (*objpath == '[') { + // Old style + if (objpath[strlen(objpath) - 1] != ']') { + sprintf(g->Message, "Invalid Table path %s", Tp->Objname); + return NULL; + } else + objpath++; + + } // endif objpath + + if (!top) + top = NewVal(TYPE_JAR); + + if (val) + SetValueArr(val, arp); + + val = NewVal(); + i = atoi(objpath) - Tp->B; + SetArrayValue(arp, val, i); + } // endif objpath + + } // endfor p + + Tp->Val = val; + } // endif Val + + SetValueVal(Tp->Val, jsp); + } else + top = jsp; + + return top; +} // end of MakeTopTree + +PSZ BTUTIL::SerialVal(PGLOBAL g, PBVAL vlp, int pretty) +{ + return Serialize(g, vlp, NULL, pretty); +} // en of SerialTop + +/* -------------------------- Class BCUTIL --------------------------- */ + +/***********************************************************************/ +/* SetValue: Set a value from a BVALUE contains. */ +/***********************************************************************/ +void BCUTIL::SetJsonValue(PGLOBAL g, PVAL vp, PBVAL jvp) +{ + if (jvp) { + vp->SetNull(false); + + switch (jvp->Type) { + case TYPE_STRG: + case TYPE_INTG: + case TYPE_BINT: + case TYPE_DBL: + case TYPE_DTM: + switch (vp->GetType()) { + case TYPE_STRING: + case TYPE_DATE: + vp->SetValue_psz(GetString(jvp)); + break; + case TYPE_INT: + case TYPE_SHORT: + case TYPE_TINY: + vp->SetValue(GetInteger(jvp)); + break; + case TYPE_BIGINT: + vp->SetValue(GetBigint(jvp)); + break; + case TYPE_DOUBLE: + vp->SetValue(GetDouble(jvp)); + + if (jvp->Type == TYPE_DBL) + vp->SetPrec(jvp->Nd); + + break; + default: + sprintf(G->Message, "Unsupported column type %d", vp->GetType()); + throw 888; + } // endswitch Type + + break; + case TYPE_BOOL: + if (vp->IsTypeNum()) + vp->SetValue(GetInteger(jvp) ? 1 : 0); + else + vp->SetValue_psz((PSZ)(GetInteger(jvp) ? "true" : "false")); + + break; + case TYPE_JAR: + case TYPE_JOB: + // SetJsonValue(g, vp, val->GetArray()->GetValue(0)); + vp->SetValue_psz(GetValueText(g, jvp, NULL)); + break; + default: + vp->Reset(); + vp->SetNull(true); + } // endswitch Type + + } else { + vp->Reset(); + vp->SetNull(true); + } // endif val + +} // end of SetJsonValue + +/***********************************************************************/ +/* MakeJson: Serialize the json item and set value to it. 
*/ +/***********************************************************************/ +PVAL BCUTIL::MakeBson(PGLOBAL g, PBVAL jsp) +{ + if (Cp->Value->IsTypeNum()) { + strcpy(g->Message, "Cannot make Json for a numeric column"); + Cp->Value->Reset(); +#if 0 + } else if (Value->GetType() == TYPE_BIN) { + if ((unsigned)Value->GetClen() >= sizeof(BSON)) { + ulong len = Tjp->Lrecl ? Tjp->Lrecl : 500; + PBSON bsp = JbinAlloc(g, NULL, len, jsp); + + strcat(bsp->Msg, " column"); + ((BINVAL*)Value)->SetBinValue(bsp, sizeof(BSON)); + } else { + strcpy(g->Message, "Column size too small"); + Value->SetValue_char(NULL, 0); + } // endif Clen +#endif // 0 + } else + Cp->Value->SetValue_psz(Serialize(g, jsp, NULL, 0)); + + return Cp->Value; +} // end of MakeJson + + /***********************************************************************/ +/* GetColumnValue: */ +/***********************************************************************/ +PVAL BCUTIL::GetColumnValue(PGLOBAL g, PBVAL row, int i) +{ + int nod = Cp->Nod, n = nod - 1; + JNODE *nodes = Cp->Nodes; + PVAL value = Cp->Value; + PBVAL arp; + PBVAL bvp = NULL; + + for (; i < nod && row; i++) { + if (nodes[i].Op == OP_NUM) { + value->SetValue(row->Type == TYPE_JAR ? GetSize(row) : 1); + return(value); + } else if (nodes[i].Op == OP_XX) { + return MakeBson(g, row); + } else switch (row->Type) { + case TYPE_JOB: + if (!nodes[i].Key) { + // Expected Array was not there, wrap the value + if (i < nod - 1) + continue; + else + bvp = row; + + } else + bvp = GetKeyValue(MPP(row->To_Val), nodes[i].Key); + + break; + case TYPE_JAR: + arp = MVP(row->To_Val); + + if (!nodes[i].Key) { + if (nodes[i].Op == OP_EQ) + bvp = GetArrayValue(arp, nodes[i].Rank); + else if (nodes[i].Op == OP_EXP) + return ExpandArray(g, arp, i); + else + return CalculateArray(arp, i); + + } else { + // Unexpected array, unwrap it as [0] + bvp = GetArrayValue(arp, 0); + i--; + } // endif's + + break; + case TYPE_JVAL: + bvp = row; + break; + default: + sprintf(g->Message, "Invalid row JSON type %d", row->Type); + bvp = NULL; + } // endswitch Type + + if (i < nod - 1) + row = bvp; + + } // endfor i + + SetJsonValue(g, value, bvp); + return value; +} // end of GetColumnValue + +/***********************************************************************/ +/* ExpandArray: */ +/***********************************************************************/ +PVAL BCUTIL::ExpandArray(PGLOBAL g, PBVAL arp, int n) +{ + int nod = Cp->Nod, ars = MY_MIN(Tp->Limit, GetArraySize(arp)); + JNODE *nodes = Cp->Nodes; + PVAL value = Cp->Value; + PBVAL bvp; + BVAL bval; + + if (!ars) { + value->Reset(); + value->SetNull(true); + Tp->NextSame = 0; + return value; + } // endif ars + + if (!(bvp = GetArrayValue(arp, (nodes[n].Rx = nodes[n].Nx)))) { + strcpy(g->Message, "Logical error expanding array"); + throw 666; + } // endif jvp + + if (n < nod - 1 && GetBson(bvp)) { + SetValue(&bval, GetColumnValue(g, GetBson(bvp), n + 1)); + bvp = &bval; + } // endif n + + if (n >= Tp->NextSame) { + if (++nodes[n].Nx == ars) { + nodes[n].Nx = 0; + Cp->Xnod = 0; + } else + Cp->Xnod = n; + + Tp->NextSame = Cp->Xnod; + } // endif NextSame + + SetJsonValue(g, value, bvp); + return value; +} // end of ExpandArray + + /***********************************************************************/ + /* CalculateArray: */ + /***********************************************************************/ +PVAL BCUTIL::CalculateArray(PBVAL arp, int n) +{ + throw("CalculateArray NIY"); +#if 0 + int i, ars, nv = 0, nextsame = Tjp->NextSame; + bool err; + OPVAL op = 
Nodes[n].Op; + PVAL val[2], vp = Nodes[n].Valp; + PJVAL jvrp, jvp; + JVALUE jval; + + vp->Reset(); + ars = MY_MIN(Tjp->Limit, arp->size()); + + if (trace(1)) + htrc("CalculateArray: size=%d op=%d nextsame=%d\n", + ars, op, nextsame); + + for (i = 0; i < ars; i++) { + jvrp = arp->GetArrayValue(i); + + if (trace(1)) + htrc("i=%d nv=%d\n", i, nv); + + if (!jvrp->IsNull() || (op == OP_CNC && GetJsonNull())) do { + if (jvrp->IsNull()) { + jvrp->Strp = PlugDup(g, GetJsonNull()); + jvrp->DataType = TYPE_STRG; + jvp = jvrp; + } else if (n < Nod - 1 && jvrp->GetJson()) { + Tjp->NextSame = nextsame; + jval.SetValue(g, GetColumnValue(g, jvrp->GetJson(), n + 1)); + jvp = &jval; + } else + jvp = jvrp; + + if (trace(1)) + htrc("jvp=%s null=%d\n", + jvp->GetString(g), jvp->IsNull() ? 1 : 0); + + if (!nv++) { + SetJsonValue(g, vp, jvp); + continue; + } else + SetJsonValue(g, MulVal, jvp); + + if (!MulVal->IsNull()) { + switch (op) { + case OP_CNC: + if (Nodes[n].CncVal) { + val[0] = Nodes[n].CncVal; + err = vp->Compute(g, val, 1, op); + } // endif CncVal + + val[0] = MulVal; + err = vp->Compute(g, val, 1, op); + break; + // case OP_NUM: + case OP_SEP: + val[0] = Nodes[n].Valp; + val[1] = MulVal; + err = vp->Compute(g, val, 2, OP_ADD); + break; + default: + val[0] = Nodes[n].Valp; + val[1] = MulVal; + err = vp->Compute(g, val, 2, op); + } // endswitch Op + + if (err) + vp->Reset(); + + if (trace(1)) { + char buf(32); + + htrc("vp='%s' err=%d\n", + vp->GetCharString(&buf), err ? 1 : 0); + + } // endif trace + + } // endif Null + + } while (Tjp->NextSame > nextsame); + + } // endfor i + + if (op == OP_SEP) { + // Calculate average + MulVal->SetValue(nv); + val[0] = vp; + val[1] = MulVal; + + if (vp->Compute(g, val, 2, OP_DIV)) + vp->Reset(); + + } // endif Op + + Tjp->NextSame = nextsame; + return vp; +#endif // 0 +} // end of CalculateArray + +/***********************************************************************/ +/* GetRow: Get the object containing this column. 
*/ +/***********************************************************************/ +PBVAL BCUTIL::GetRow(PGLOBAL g) +{ + int nod = Cp->Nod; + JNODE *nodes = Cp->Nodes; + PBVAL val = NULL; + PBVAL arp; + PBVAL nwr, row = Tp->Row; + + for (int i = 0; i < nod && row; i++) { + if (nodes[i + 1].Op == OP_XX) + break; + else switch (row->Type) { + case TYPE_JOB: + if (!nodes[i].Key) + // Expected Array was not there, wrap the value + continue; + + val = GetKeyValue(MPP(row->To_Val), nodes[i].Key); + break; + case TYPE_JAR: + arp = row; + + if (!nodes[i].Key) { + if (nodes[i].Op == OP_EQ) + val = GetArrayValue(arp, nodes[i].Rank); + else + val = GetArrayValue(arp, nodes[i].Rx); + + } else { + // Unexpected array, unwrap it as [0] + val = GetArrayValue(arp, 0); + i--; + } // endif Nodes + + break; + case TYPE_JVAL: + val = row; + break; + default: + sprintf(g->Message, "Invalid row JSON type %d", row->Type); + val = NULL; + } // endswitch Type + + if (val) { + row = GetBson(val); + } else { + // Construct missing objects + for (i++; row && i < nod; i++) { + if (nodes[i].Op == OP_XX) + break; + else if (!nodes[i].Key) + // Construct intermediate array + nwr = NewVal(TYPE_JAR); + else + nwr = NewVal(TYPE_JOB); + + if (row->Type == TYPE_JOB) { + SetKeyValue(MPP(row->To_Val), MOF(nwr), nodes[i - 1].Key); + } else if (row->Type == TYPE_JAR) { + AddArrayValue(MVP(row->To_Val), nwr); + } else { + strcpy(g->Message, "Wrong type when writing new row"); + nwr = NULL; + } // endif's + + row = nwr; + } // endfor i + + break; + } // endelse + + } // endfor i + + return row; +} // end of GetRow + + +/* -------------------------- Class BSONDEF -------------------------- */ + +BSONDEF::BSONDEF(void) +{ + Jmode = MODE_OBJECT; + Objname = NULL; + Xcol = NULL; + Pretty = 2; + Limit = 1; + Base = 0; + Strict = false; + Sep = '.'; + Uri = NULL; + Collname = Options = Filter = NULL; + Pipe = false; + Driver = NULL; + Version = 0; + Wrapname = NULL; +} // end of BSONDEF constructor + +/***********************************************************************/ +/* DefineAM: define specific AM block values. */ +/***********************************************************************/ +bool BSONDEF::DefineAM(PGLOBAL g, LPCSTR am, int poff) +{ + G = g; + Schema = GetStringCatInfo(g, "DBname", Schema); + Jmode = (JMODE)GetIntCatInfo("Jmode", MODE_OBJECT); + Objname = GetStringCatInfo(g, "Object", NULL); + Xcol = GetStringCatInfo(g, "Expand", NULL); + Pretty = GetIntCatInfo("Pretty", 2); + Limit = GetIntCatInfo("Limit", 10); + Base = GetIntCatInfo("Base", 0) ? 1 : 0; + Sep = *GetStringCatInfo(g, "Separator", "."); + Accept = GetBoolCatInfo("Accept", false); + + // Don't use url as MONGO uri when called from REST + if (stricmp(am, "REST") && (Uri = GetStringCatInfo(g, "Connect", NULL))) { +#if defined(JAVA_SUPPORT) || defined(CMGO_SUPPORT) + Collname = GetStringCatInfo(g, "Name", + (Catfunc & (FNC_TABLE | FNC_COL)) ? 
NULL : Name); + Collname = GetStringCatInfo(g, "Tabname", Collname); + Options = GetStringCatInfo(g, "Colist", NULL); + Filter = GetStringCatInfo(g, "Filter", NULL); + Pipe = GetBoolCatInfo("Pipeline", false); + Driver = GetStringCatInfo(g, "Driver", NULL); + Version = GetIntCatInfo("Version", 3); + Pretty = 0; +#if defined(JAVA_SUPPORT) + if (Version == 2) + Wrapname = GetStringCatInfo(g, "Wrapper", "Mongo2Interface"); + else + Wrapname = GetStringCatInfo(g, "Wrapper", "Mongo3Interface"); +#endif // JAVA_SUPPORT +#else // !MONGO_SUPPORT + sprintf(g->Message, MSG(NO_FEAT_SUPPORT), "MONGO"); + return true; +#endif // !MONGO_SUPPORT + } // endif Uri + + return DOSDEF::DefineAM(g, (Uri ? "XMGO" : "DOS"), poff); +} // end of DefineAM + +/***********************************************************************/ +/* GetTable: makes a new Table Description Block. */ +/***********************************************************************/ +PTDB BSONDEF::GetTable(PGLOBAL g, MODE m) { + if (trace(1)) + htrc("BSON GetTable Pretty=%d Uri=%s\n", Pretty, SVP(Uri)); + + if (Catfunc == FNC_COL) + return new(g)TDBBCL(this); + + PTDBASE tdbp; + PTXF txfp = NULL; + + // JSN not used for pretty=1 for insert or delete + if (Pretty <= 0 || (Pretty == 1 && (m == MODE_READ || m == MODE_UPDATE))) { + PGLOBAL G; + USETEMP tmp = UseTemp(); + bool map = Mapped && Pretty >= 0 && m != MODE_INSERT && + !(tmp != TMP_NO && m == MODE_UPDATE) && + !(tmp == TMP_FORCE && + (m == MODE_UPDATE || m == MODE_DELETE)); + + if (Lrecl) { + // Allocate the parse work memory + G = (PGLOBAL)PlugSubAlloc(g, NULL, sizeof(GLOBAL)); + memset(G, 0, sizeof(GLOBAL)); + G->Sarea_Size = (size_t)Lrecl * 6; + G->Sarea = PlugSubAlloc(g, NULL, G->Sarea_Size); + PlugSubSet(G->Sarea, G->Sarea_Size); + G->jump_level = 0; +// ((TDBBSN*)tdbp)->G = G; +// ((TDBBSN*)tdbp)->Docp = new(g) BDOC(G->Sarea); + } else { + strcpy(g->Message, "LRECL is not defined"); + return NULL; + } // endif Lrecl + +#if 0 + if (Uri) { + if (Driver && toupper(*Driver) == 'C') { +#if defined(CMGO_SUPPORT) + txfp = new(g) CMGFAM(this); +#else + sprintf(g->Message, "Mongo %s Driver not available", "C"); + return NULL; +#endif + } else if (Driver && toupper(*Driver) == 'J') { +#if defined(JAVA_SUPPORT) + txfp = new(g) JMGFAM(this); +#else + sprintf(g->Message, "Mongo %s Driver not available", "Java"); + return NULL; +#endif + } else { // Driver not specified +#if defined(CMGO_SUPPORT) + txfp = new(g) CMGFAM(this); +#elif defined(JAVA_SUPPORT) + txfp = new(g) JMGFAM(this); +#else // !MONGO_SUPPORT + sprintf(g->Message, MSG(NO_FEAT_SUPPORT), "MONGO"); + return NULL; +#endif // !MONGO_SUPPORT + } // endif Driver + + } else if (Zipped) { +#endif // 0 + if (Zipped) { +#if defined(ZIP_SUPPORT) + if (m == MODE_READ || m == MODE_ANY || m == MODE_ALTER) { + txfp = new(g) UNZFAM(this); + } else if (m == MODE_INSERT) { + txfp = new(g) ZIPFAM(this); + } else { + strcpy(g->Message, "UPDATE/DELETE not supported for ZIP"); + return NULL; + } // endif's m +#else // !ZIP_SUPPORT + sprintf(g->Message, MSG(NO_FEAT_SUPPORT), "ZIP"); + return NULL; +#endif // !ZIP_SUPPORT + } else if (Compressed) { +#if defined(GZ_SUPPORT) + if (Compressed == 1) + txfp = new(g) GZFAM(this); + else + txfp = new(g) ZLBFAM(this); +#else // !GZ_SUPPORT + sprintf(g->Message, MSG(NO_FEAT_SUPPORT), "GZ"); + return NULL; +#endif // !GZ_SUPPORT + } else if (map) + txfp = new(g) MAPFAM(this); + else if (Pretty < 0) // BJsonfile + txfp = new(g) BINFAM(this); + else + txfp = new(g) DOSFAM(this); + + // Txfp must be set for TDBBSN 
+ tdbp = new(g) TDBBSN(G, this, txfp); + } else { + if (Zipped) { +#if defined(ZIP_SUPPORT) + if (m == MODE_READ || m == MODE_ANY || m == MODE_ALTER) { + txfp = new(g) UNZFAM(this); + } else if (m == MODE_INSERT) { + strcpy(g->Message, "INSERT supported only for zipped JSON when pretty=0"); + return NULL; + } else { + strcpy(g->Message, "UPDATE/DELETE not supported for ZIP"); + return NULL; + } // endif's m +#else // !ZIP_SUPPORT + sprintf(g->Message, MSG(NO_FEAT_SUPPORT), "ZIP"); + return NULL; +#endif // !ZIP_SUPPORT + } else + txfp = new(g) MAPFAM(this); + + tdbp = new(g) TDBBSON(g, this, txfp); + } // endif Pretty + + if (Multiple) + tdbp = new(g) TDBMUL(tdbp); + + return tdbp; +} // end of GetTable + +/* --------------------------- Class TDBBSN -------------------------- */ + +/***********************************************************************/ +/* Implementation of the TDBBSN class (Pretty < 2) */ +/***********************************************************************/ +TDBBSN::TDBBSN(PGLOBAL g, PBDEF tdp, PTXF txfp) : TDBDOS(tdp, txfp) +{ + Bp = new(g) BTUTIL(tdp->G, this); + Top = NULL; + Row = NULL; + Val = NULL; + Colp = NULL; + + if (tdp) { + Jmode = tdp->Jmode; + Objname = tdp->Objname; + Xcol = tdp->Xcol; + Limit = tdp->Limit; + Pretty = tdp->Pretty; + B = tdp->Base ? 1 : 0; + Sep = tdp->Sep; + Strict = tdp->Strict; + } else { + Jmode = MODE_OBJECT; + Objname = NULL; + Xcol = NULL; + Limit = 1; + Pretty = 0; + B = 0; + Sep = '.'; + Strict = false; + } // endif tdp + + Fpos = -1; + N = M = 0; + NextSame = 0; + SameRow = 0; + Xval = -1; + Comma = false; +} // end of TDBBSN standard constructor + +TDBBSN::TDBBSN(TDBBSN* tdbp) : TDBDOS(NULL, tdbp) +{ + Bp = tdbp->Bp; + Top = tdbp->Top; + Row = tdbp->Row; + Val = tdbp->Val; + Colp = tdbp->Colp; + Jmode = tdbp->Jmode; + Objname = tdbp->Objname; + Xcol = tdbp->Xcol; + Fpos = tdbp->Fpos; + N = tdbp->N; + M = tdbp->M; + Limit = tdbp->Limit; + NextSame = tdbp->NextSame; + SameRow = tdbp->SameRow; + Xval = tdbp->Xval; + B = tdbp->B; + Sep = tdbp->Sep; + Pretty = tdbp->Pretty; + Strict = tdbp->Strict; + Comma = tdbp->Comma; +} // end of TDBBSN copy constructor + +// Used for update +PTDB TDBBSN::Clone(PTABS t) +{ + PTDB tp; + PBSCOL cp1, cp2; + PGLOBAL g = t->G; + + tp = new(g) TDBBSN(this); + + for (cp1 = (PBSCOL)Columns; cp1; cp1 = (PBSCOL)cp1->GetNext()) { + cp2 = new(g) BSONCOL(cp1, tp); // Make a copy + NewPointer(t, cp1, cp2); + } // endfor cp1 + + return tp; +} // end of Clone + +/***********************************************************************/ +/* Allocate JSN column description block. */ +/***********************************************************************/ +PCOL TDBBSN::MakeCol(PGLOBAL g, PCOLDEF cdp, PCOL cprec, int n) { + PBSCOL colp = new(g) BSONCOL(g, cdp, this, cprec, n); + + return (colp->ParseJpath(g)) ? NULL : colp; +} // end of MakeCol + +/***********************************************************************/ +/* InsertSpecialColumn: Put a special column ahead of the column list.*/ +/***********************************************************************/ +PCOL TDBBSN::InsertSpecialColumn(PCOL colp) { + if (!colp->IsSpecial()) + return NULL; + + //if (Xcol && ((SPCBLK*)colp)->GetRnm()) + // colp->SetKey(0); // Rownum is no more a key + + colp->SetNext(Columns); + Columns = colp; + return colp; +} // end of InsertSpecialColumn + +/***********************************************************************/ +/* JSON Cardinality: returns table size in number of rows. 
*/ +/***********************************************************************/ +int TDBBSN::Cardinality(PGLOBAL g) { + if (!g) + return 0; + else if (Cardinal < 0) { + Cardinal = TDBDOS::Cardinality(g); + + } // endif Cardinal + + return Cardinal; +} // end of Cardinality + +/***********************************************************************/ +/* JSON GetMaxSize: returns file size estimate in number of lines. */ +/***********************************************************************/ +int TDBBSN::GetMaxSize(PGLOBAL g) { + if (MaxSize < 0) + MaxSize = TDBDOS::GetMaxSize(g) * ((Xcol) ? Limit : 1); + + return MaxSize; +} // end of GetMaxSize + +/***********************************************************************/ +/* JSON EstimatedLength. Returns an estimated minimum line length. */ +/***********************************************************************/ +int TDBBSN::EstimatedLength(void) { + if (AvgLen <= 0) + return (Lrecl ? Lrecl : 1024) / 8; // TODO: make it better + else + return AvgLen; + +} // end of Estimated Length + +/***********************************************************************/ +/* OpenDB: Data Base open routine for JSN access method. */ +/***********************************************************************/ +bool TDBBSN::OpenDB(PGLOBAL g) { + if (Use == USE_OPEN) { + /*******************************************************************/ + /* Table already open replace it at its beginning. */ + /*******************************************************************/ + Fpos = -1; + NextSame = 0; + SameRow = 0; + } else { + /*******************************************************************/ + /* First opening. */ + /*******************************************************************/ +// Docp = new(g) BDOC(g->Sarea); + + if (Mode == MODE_INSERT) + switch (Jmode) { +// case MODE_OBJECT: Row = new(g) JOBJECT; break; +// case MODE_ARRAY: Row = new(g) JARRAY; break; +// case MODE_VALUE: Row = new(g) JVALUE; break; + default: + sprintf(g->Message, "Invalid Jmode %d", Jmode); + return true; + } // endswitch Jmode + + } // endif Use + + if (Pretty < 0) { +#if 0 + /*******************************************************************/ + /* Binary BJSON table. */ + /*******************************************************************/ + xtrc(1, "JSN OpenDB: tdbp=%p tdb=R%d use=%d mode=%d\n", + this, Tdb_No, Use, Mode); + + if (Use == USE_OPEN) { + /*******************************************************************/ + /* Table already open, just replace it at its beginning. */ + /*******************************************************************/ + if (!To_Kindex) { + Txfp->Rewind(); // see comment in Work.log + } else // Table is to be accessed through a sorted index table + To_Kindex->Reset(); + + return false; + } // endif use + + /*********************************************************************/ + /* Open according to logical input/output mode required. */ + /* Use conventionnal input/output functions. */ + /*********************************************************************/ + if (Txfp->OpenTableFile(g)) + return true; + + Use = USE_OPEN; // Do it now in case we are recursively called + + /*********************************************************************/ + /* Lrecl is Ok. 
*/ + /*********************************************************************/ + size_t linelen = Lrecl; + + //To_Line = (char*)PlugSubAlloc(g, NULL, linelen); + //memset(To_Line, 0, linelen); + To_Line = Txfp->GetBuf(); + xtrc(1, "OpenJSN: R%hd mode=%d To_Line=%p\n", Tdb_No, Mode, To_Line); + return false; +#endif // 0 + strcpy(g->Message, "TDBBSN: Binary NIY"); + return true; + } else if (TDBDOS::OpenDB(g)) + return true; + + if (Xcol) + To_Filter = NULL; // Imcompatible + + return false; +} // end of OpenDB + +/***********************************************************************/ +/* SkipHeader: Physically skip first header line if applicable. */ +/* This is called from TDBDOS::OpenDB and must be executed before */ +/* Kindex construction if the file is accessed using an index. */ +/***********************************************************************/ +bool TDBBSN::SkipHeader(PGLOBAL g) { + int len = GetFileLength(g); + bool rc = false; + +#if defined(_DEBUG) + if (len < 0) + return true; +#endif // _DEBUG + + if (Pretty == 1) { + if (Mode == MODE_INSERT || Mode == MODE_DELETE) { + // Mode Insert and delete are no more handled here + DBUG_ASSERT(false); + } else if (len > 0) // !Insert && !Delete + rc = (Txfp->SkipRecord(g, false) == RC_FX || Txfp->RecordPos(g)); + + } // endif Pretty + + return rc; +} // end of SkipHeader + +/***********************************************************************/ +/* ReadDB: Data Base read routine for JSN access method. */ +/***********************************************************************/ +int TDBBSN::ReadDB(PGLOBAL g) { + int rc; + + N++; + + if (NextSame) { + SameRow = NextSame; + NextSame = 0; + M++; + return RC_OK; + } else if ((rc = TDBDOS::ReadDB(g)) == RC_OK) { + if (!IsRead() && ((rc = ReadBuffer(g)) != RC_OK)) + return rc; // Deferred reading failed + + if (Pretty >= 0) { + // Recover the memory used for parsing + Bp->SubSet(); + + if ((Row = Bp->ParseLine(g, &Pretty, &Comma))) { + Row = Bp->FindRow(g); + SameRow = 0; + Fpos++; + M = 1; + rc = RC_OK; + } else if (Pretty != 1 || strcmp(To_Line, "]")) { + Bp->GetMsg(g); + rc = RC_FX; + } else + rc = RC_EF; + + } else { +#if 0 + // Here we get a movable Json binary tree + PJSON jsp; + SWAP* swp; + + jsp = (PJSON)To_Line; + swp = new(g) SWAP(G, jsp); + swp->SwapJson(jsp, false); // Restore pointers from offsets + Row = jsp; + Row = FindRow(g); + SameRow = 0; + Fpos++; + M = 1; + rc = RC_OK; +#endif // 0 + strcpy(g->Message, "TDBBSN: Binary NIY"); + rc = RC_FX; + } // endif Pretty + + } // endif ReadDB + + return rc; +} // end of ReadDB + +/***********************************************************************/ +/* PrepareWriting: Prepare the line for WriteDB. */ +/***********************************************************************/ +bool TDBBSN::PrepareWriting(PGLOBAL g) { + PSZ s; + + if (!(Top = Bp->MakeTopTree(g, Row))) + return true; + + if ((s = Bp->SerialVal(g, Top, Pretty))) { + if (Comma) + strcat(s, ","); + + if ((signed)strlen(s) > Lrecl) { + strncpy(To_Line, s, Lrecl); + sprintf(g->Message, "Line truncated (lrecl=%d)", Lrecl); + return PushWarning(g, this); + } else + strcpy(To_Line, s); + + return false; + } else + return true; + +} // end of PrepareWriting + +/***********************************************************************/ +/* WriteDB: Data Base write routine for JSON access method. 
*/ +/***********************************************************************/ +int TDBBSN::WriteDB(PGLOBAL g) { + int rc = TDBDOS::WriteDB(g); + + Bp->SubSet(); + Bp->Clear(Row); + return rc; +} // end of WriteDB + +/***********************************************************************/ +/* Data Base close routine for JSON access method. */ +/***********************************************************************/ +void TDBBSN::CloseDB(PGLOBAL g) +{ + TDBDOS::CloseDB(g); + ((PBDEF)To_Def)->G = PlugExit(((PBDEF)To_Def)->G); +} // end of CloseDB + + /* ---------------------------- BSONCOL ------------------------------ */ + +/***********************************************************************/ +/* BSONCOL public constructor. */ +/***********************************************************************/ +BSONCOL::BSONCOL(PGLOBAL g, PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i) + : DOSCOL(g, cdp, tdbp, cprec, i, "DOS") +{ + Tbp = (TDBBSN*)(tdbp->GetOrig() ? tdbp->GetOrig() : tdbp); + Cp = new(g) BCUTIL(((PBDEF)Tbp->To_Def)->G, this, Tbp); + Jpath = cdp->GetFmt(); + MulVal = NULL; + Nodes = NULL; + Nod = 0; + Sep = Tbp->Sep; + Xnod = -1; + Xpd = false; + Parsed = false; +} // end of BSONCOL constructor + +/***********************************************************************/ +/* BSONCOL constructor used for copying columns. */ +/* tdbp is the pointer to the new table descriptor. */ +/***********************************************************************/ +BSONCOL::BSONCOL(BSONCOL* col1, PTDB tdbp) : DOSCOL(col1, tdbp) +{ + Tbp = col1->Tbp; + Cp = col1->Cp; + Jpath = col1->Jpath; + MulVal = col1->MulVal; + Nodes = col1->Nodes; + Nod = col1->Nod; + Sep = col1->Sep; + Xnod = col1->Xnod; + Xpd = col1->Xpd; + Parsed = col1->Parsed; +} // end of BSONCOL copy constructor + +/***********************************************************************/ +/* SetBuffer: prepare a column block for write operation. */ +/***********************************************************************/ +bool BSONCOL::SetBuffer(PGLOBAL g, PVAL value, bool ok, bool check) +{ + if (DOSCOL::SetBuffer(g, value, ok, check)) + return true; + + // Parse the json path + if (ParseJpath(g)) + return true; + + Tbp = (TDBBSN*)To_Tdb; + return false; +} // end of SetBuffer + +/***********************************************************************/ +/* Check whether this object is expanded. */ +/***********************************************************************/ +bool BSONCOL::CheckExpand(PGLOBAL g, int i, PSZ nm, bool b) { + if ((Tbp->Xcol && nm && !strcmp(nm, Tbp->Xcol) && + (Tbp->Xval < 0 || Tbp->Xval == i)) || Xpd) { + Xpd = true; // Expandable object + Nodes[i].Op = OP_EXP; + } else if (b) { + strcpy(g->Message, "Cannot expand more than one branch"); + return true; + } // endif Xcol + + return false; +} // end of CheckExpand + +/***********************************************************************/ +/* Analyse array processing options. 
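To make the expandable-column (Xcol) logic concrete, a tiny standalone illustration with invented data of what expanding one array column means for the result set; the other columns simply repeat on every produced row, and CheckExpand above is what refuses a second expandable branch:

  #include <cstdio>
  #include <string>
  #include <vector>

  int main() {
    // one input record:  { "who": "Joe", "prices": [3.2, 4.1] }
    std::string who = "Joe";
    std::vector<double> prices = {3.2, 4.1};

    for (double p : prices)                        // expansion of the array column
      std::printf("%s\t%.1f\n", who.c_str(), p);   // -> Joe 3.2  and  Joe 4.1
    return 0;
  }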
*/ +/***********************************************************************/ +bool BSONCOL::SetArrayOptions(PGLOBAL g, char* p, int i, PSZ nm) { + int n; + bool dg = true, b = false; + PJNODE jnp = &Nodes[i]; + + //if (*p == '[') p++; // Old syntax .[ or :[ + n = (int)strlen(p); + + if (*p) { + if (p[n - 1] == ']') { + p[--n] = 0; + } else if (!IsNum(p)) { + // Wrong array specification + sprintf(g->Message, "Invalid array specification %s for %s", p, Name); + return true; + } // endif p + + } else + b = true; + + // To check whether a numeric Rank was specified + dg = IsNum(p); + + if (!n) { + // Default specifications + if (CheckExpand(g, i, nm, false)) + return true; + else if (jnp->Op != OP_EXP) { + if (b) { + // Return 1st value (B is the index base) + jnp->Rank = Tbp->B; + jnp->Op = OP_EQ; + } else if (!Value->IsTypeNum()) { + jnp->CncVal = AllocateValue(g, (void*)", ", TYPE_STRING); + jnp->Op = OP_CNC; + } else + jnp->Op = OP_ADD; + + } // endif OP + + } else if (dg) { + // Return nth value + jnp->Rank = atoi(p) - Tbp->B; + jnp->Op = OP_EQ; + } else if (n == 1) { + // Set the Op value; + if (Sep == ':') + switch (*p) { + case '*': *p = 'x'; break; + case 'x': + case 'X': *p = '*'; break; // Expand this array + default: break; + } // endswitch p + + switch (*p) { + case '+': jnp->Op = OP_ADD; break; + case 'x': jnp->Op = OP_MULT; break; + case '>': jnp->Op = OP_MAX; break; + case '<': jnp->Op = OP_MIN; break; + case '!': jnp->Op = OP_SEP; break; // Average + case '#': jnp->Op = OP_NUM; break; + case '*': // Expand this array + if (!Tbp->Xcol && nm) { + Xpd = true; + jnp->Op = OP_EXP; + Tbp->Xval = i; + Tbp->Xcol = nm; + } else if (CheckExpand(g, i, nm, true)) + return true; + + break; + default: + sprintf(g->Message, + "Invalid function specification %c for %s", *p, Name); + return true; + } // endswitch *p + + } else if (*p == '"' && p[n - 1] == '"') { + // This is a concat specification + jnp->Op = OP_CNC; + + if (n > 2) { + // Set concat intermediate string + p[n - 1] = 0; + jnp->CncVal = AllocateValue(g, p + 1, TYPE_STRING); + } // endif n + + } else { + sprintf(g->Message, "Wrong array specification for %s", Name); + return true; + } // endif's + + // For calculated arrays, a local Value must be used + switch (jnp->Op) { + case OP_NUM: + jnp->Valp = AllocateValue(g, TYPE_INT); + break; + case OP_ADD: + case OP_MULT: + case OP_SEP: + if (!IsTypeChar(Buf_Type)) + jnp->Valp = AllocateValue(g, Buf_Type, 0, GetPrecision()); + else + jnp->Valp = AllocateValue(g, TYPE_DOUBLE, 0, 2); + + break; + case OP_MIN: + case OP_MAX: + jnp->Valp = AllocateValue(g, Buf_Type, Long, GetPrecision()); + break; + case OP_CNC: + if (IsTypeChar(Buf_Type)) + jnp->Valp = AllocateValue(g, TYPE_STRING, Long, GetPrecision()); + else + jnp->Valp = AllocateValue(g, TYPE_STRING, 512); + + break; + default: + break; + } // endswitch Op + + if (jnp->Valp) + MulVal = AllocateValue(g, jnp->Valp); + + return false; +} // end of SetArrayOptions + +/***********************************************************************/ +/* Parse the eventual passed Jpath information. */ +/* This information can be specified in the Fieldfmt column option */ +/* when creating the table. It permits to indicate the position of */ +/* the node corresponding to that column. 
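A compact restatement of the one-character array specifications handled by the switch above, as a standalone sketch (descriptive only; when Sep is ':' the meanings of '*' and 'x' are swapped before the switch, as the code shows):

  #include <cctype>

  const char *describe(const char *spec) {
    if (std::isdigit((unsigned char)*spec)) return "take the nth element";
    switch (*spec) {
      case '+': return "sum of the values";
      case 'x': return "product of the values";
      case '>': return "maximum value";
      case '<': return "minimum value";
      case '!': return "average of the values";
      case '#': return "number of values";
      case '*': return "expand the array into several rows";
      case '"': return "concatenate the values with the quoted separator";
      default:  return "invalid specification";
    }
  }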
*/ +/***********************************************************************/ +bool BSONCOL::ParseJpath(PGLOBAL g) { + char* p, * p1 = NULL, * p2 = NULL, * pbuf = NULL; + int i; + bool a; + + if (Parsed) + return false; // Already done + else if (InitValue(g)) + return true; + else if (!Jpath) + Jpath = Name; + + if (To_Tdb->GetOrig()) { + // This is an updated column, get nodes from origin + for (PBSCOL colp = (PBSCOL)Tbp->GetColumns(); colp; + colp = (PBSCOL)colp->GetNext()) + if (!stricmp(Name, colp->GetName())) { + Nod = colp->Nod; + Nodes = colp->Nodes; + Xpd = colp->Xpd; + goto fin; + } // endif Name + + sprintf(g->Message, "Cannot parse updated column %s", Name); + return true; + } // endif To_Orig + + pbuf = PlugDup(g, Jpath); + if (*pbuf == '$') pbuf++; + if (*pbuf == Sep) pbuf++; + if (*pbuf == '[') p1 = pbuf++; + + // Estimate the required number of nodes + for (i = 0, p = pbuf; (p = NextChr(p, Sep)); i++, p++) + Nod++; // One path node found + + Nodes = (PJNODE)PlugSubAlloc(g, NULL, (++Nod) * sizeof(JNODE)); + memset(Nodes, 0, (Nod) * sizeof(JNODE)); + + // Analyze the Jpath for this column + for (i = 0, p = pbuf; p && i < Nod; i++, p = (p2 ? p2 : NULL)) { + a = (p1 != NULL); + p1 = strchr(p, '['); + p2 = strchr(p, Sep); + + if (!p2) + p2 = p1; + else if (p1) { + if (p1 < p2) + p2 = p1; + else if (p1 == p2 + 1) + *p2++ = 0; // Old syntax .[ or :[ + else + p1 = NULL; + + } // endif p1 + + if (p2) + *p2++ = 0; + + // Jpath must be explicit + if (a || *p == 0 || *p == '[' || IsNum(p)) { + // Analyse intermediate array processing + if (SetArrayOptions(g, p, i, Nodes[i - 1].Key)) + return true; + + } else if (*p == '*') { + // Return JSON + Nodes[i].Op = OP_XX; + } else { + Nodes[i].Key = p; + Nodes[i].Op = OP_EXIST; + } // endif's + + } // endfor i, p + + Nod = i; + +fin: + MulVal = AllocateValue(g, Value); + Parsed = true; + return false; +} // end of ParseJpath + +/***********************************************************************/ +/* Get Jpath converted to Mongo path. */ +/***********************************************************************/ +PSZ BSONCOL::GetJpath(PGLOBAL g, bool proj) { + if (Jpath) { + char* p1, * p2, * mgopath; + int i = 0; + + if (strcmp(Jpath, "*")) { + p1 = Jpath; + if (*p1 == '$') p1++; + if (*p1 == '.') p1++; + mgopath = PlugDup(g, p1); + } else + return NULL; + + for (p1 = p2 = mgopath; *p1; p1++) + if (i) { // Inside [] + if (isdigit(*p1)) { + if (!proj) + *p2++ = *p1; + + } else if (*p1 == ']' && i == 1) { + if (proj && p1[1] == '.') + p1++; + + i = 0; + } else if (*p1 == '.' && i == 2) { + if (!proj) + *p2++ = '.'; + + i = 0; + } else if (!proj) + return NULL; + + } else switch (*p1) { + case ':': + case '.': + if (isdigit(p1[1])) + i = 2; + + *p2++ = '.'; + break; + case '[': + if (*(p2 - 1) != '.') + *p2++ = '.'; + + i = 1; + break; + case '*': + if (*(p2 - 1) == '.' && !*(p1 + 1)) { + p2--; // Suppress last :* + break; + } // endif p2 + + default: + *p2++ = *p1; + break; + } // endswitch p1; + + *p2 = 0; + return mgopath; + } else + return NULL; + +} // end of GetJpath + +/***********************************************************************/ +/* MakeJson: Serialize the json item and set value to it. 
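A few invented examples of the conversion GetJpath performs when proj is false (with proj true, array indexes that are only needed for projection are dropped):

  //  Jpath                 Mongo path
  //  $.name                name
  //  address.city          address.city
  //  tags[1]               tags.1
  //  tags:[1]  (Sep ':')   tags.1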
 */
+/***********************************************************************/
+PVAL BSONCOL::MakeBson(PGLOBAL g, PBVAL jsp) {
+  if (Value->IsTypeNum()) {
+    strcpy(g->Message, "Cannot make Json for a numeric column");
+    Value->Reset();
+#if 0
+  } else if (Value->GetType() == TYPE_BIN) {
+    if ((unsigned)Value->GetClen() >= sizeof(BSON)) {
+      ulong len = Tjp->Lrecl ? Tjp->Lrecl : 500;
+      PBSON bsp = JbinAlloc(g, NULL, len, jsp);
+
+      strcat(bsp->Msg, " column");
+      ((BINVAL*)Value)->SetBinValue(bsp, sizeof(BSON));
+    } else {
+      strcpy(g->Message, "Column size too small");
+      Value->SetValue_char(NULL, 0);
+    } // endif Clen
+#endif // 0
+  } else
+    Value->SetValue_psz(Cp->SerialVal(g, jsp, 0));
+
+  return Value;
+} // end of MakeJson
+
+/***********************************************************************/
+/*  ReadColumn:                                                        */
+/***********************************************************************/
+void BSONCOL::ReadColumn(PGLOBAL g) {
+  if (!Tbp->SameRow || Xnod >= Tbp->SameRow)
+    Value->SetValue_pval(Cp->GetColumnValue(g, Tbp->Row, 0));
+
+  if (Xpd && Value->IsNull() && !((PBDEF)Tbp->To_Def)->Accept)
+    throw("Null expandable JSON value");
+
+  // Set null when applicable
+  if (!Nullable)
+    Value->SetNull(false);
+
+} // end of ReadColumn
+
+/***********************************************************************/
+/*  WriteColumn:                                                       */
+/***********************************************************************/
+void BSONCOL::WriteColumn(PGLOBAL g) {
+  if (Xpd && Tbp->Pretty < 2) {
+    strcpy(g->Message, "Cannot write expanded column when Pretty is not 2");
+    throw 666;
+  } // endif Xpd
+
+  /*********************************************************************/
+  /*  Check whether this node must be written.                         */
+  /*********************************************************************/
+  if (Value != To_Val)
+    Value->SetValue_pval(To_Val, FALSE);    // Convert the updated value
+
+  /*********************************************************************/
+  /*  On INSERT Null values are represented by no node.                */
+  /*********************************************************************/
+  if (Value->IsNull() && Tbp->Mode == MODE_INSERT)
+    return;
+
+  throw "Write BSON NIY";
+
+#if 0
+  char* s;
+  PBPR objp = NULL;
+  PBVAL arp = NULL;
+  PBVAL jvp = NULL;
+  PBVAL jsp, row = Cp->GetRow();
+
+  switch (row->Type) {
+  case TYPE_JOB: objp = (PJOB)row;  break;
+  case TYPE_JAR: arp = (PJAR)row;   break;
+  case TYPE_JVAL: jvp = (PJVAL)row; break;
+  default: row = NULL;     // ???????????????????????????
+ } // endswitch Type + + if (row) switch (Buf_Type) { + case TYPE_STRING: + if (Nodes[Nod - 1].Op == OP_XX) { + s = Value->GetCharValue(); + + if (!(jsp = ParseJson(G, s, strlen(s)))) { + strcpy(g->Message, s); + throw 666; + } // endif jsp + + if (arp) { + if (Nod > 1 && Nodes[Nod - 2].Op == OP_EQ) + arp->SetArrayValue(G, new(G) JVALUE(jsp), Nodes[Nod - 2].Rank); + else + arp->AddArrayValue(G, new(G) JVALUE(jsp)); + + arp->InitArray(G); + } else if (objp) { + if (Nod > 1 && Nodes[Nod - 2].Key) + objp->SetKeyValue(G, new(G) JVALUE(jsp), Nodes[Nod - 2].Key); + + } else if (jvp) + jvp->SetValue(jsp); + + break; + } // endif Op + + // fall through + case TYPE_DATE: + case TYPE_INT: + case TYPE_TINY: + case TYPE_SHORT: + case TYPE_BIGINT: + case TYPE_DOUBLE: + if (arp) { + if (Nodes[Nod - 1].Op == OP_EQ) + arp->SetArrayValue(G, new(G) JVALUE(G, Value), Nodes[Nod - 1].Rank); + else + arp->AddArrayValue(G, new(G) JVALUE(G, Value)); + + arp->InitArray(G); + } else if (objp) { + if (Nodes[Nod - 1].Key) + objp->SetKeyValue(G, new(G) JVALUE(G, Value), Nodes[Nod - 1].Key); + + } else if (jvp) + jvp->SetValue(g, Value); + + break; + default: // ?????????? + sprintf(g->Message, "Invalid column type %d", Buf_Type); + } // endswitch Type +#endif // 0 + +} // end of WriteColumn + +/* -------------------------- Class TDBBSON -------------------------- */ + +/***********************************************************************/ +/* Implementation of the TDBBSON class. */ +/***********************************************************************/ +TDBBSON::TDBBSON(PGLOBAL g, PBDEF tdp, PTXF txfp) : TDBBSN(g, tdp, txfp) +{ + Docp = NULL; + Multiple = tdp->Multiple; + Done = Changed = false; +} // end of TDBBSON standard constructor + +TDBBSON::TDBBSON(PBTDB tdbp) : TDBBSN(tdbp) +{ + Docp = tdbp->Docp; + Multiple = tdbp->Multiple; + Done = tdbp->Done; + Changed = tdbp->Changed; +} // end of TDBBSON copy constructor + +// Used for update +PTDB TDBBSON::Clone(PTABS t) +{ + PTDB tp; + PBSCOL cp1, cp2; + PGLOBAL g = t->G; + + tp = new(g) TDBBSON(this); + + for (cp1 = (PBSCOL)Columns; cp1; cp1 = (PBSCOL)cp1->GetNext()) { + cp2 = new(g) BSONCOL(cp1, tp); // Make a copy + NewPointer(t, cp1, cp2); + } // endfor cp1 + + return tp; +} // end of Clone + +/***********************************************************************/ +/* Make the document tree from the object path. */ +/***********************************************************************/ +int TDBBSON::MakeNewDoc(PGLOBAL g) +{ + // Create a void table that will be populated + Docp = Bp->NewVal(TYPE_JAR); + + if (Bp->MakeTopTree(g, Docp)) + return RC_FX; + + Done = true; + return RC_OK; +} // end of MakeNewDoc + +/***********************************************************************/ +/* Make the document tree from a file. */ +/***********************************************************************/ +int TDBBSON::MakeDocument(PGLOBAL g) +{ + char *p, *p1, *p2, *memory, *objpath, *key = NULL; + int i = 0; + size_t len; + my_bool a; + MODE mode = Mode; + PBVAL jsp; + PBPR objp = NULL; + PBVAL arp = NULL; + PBVAL val = NULL; + + if (Done) + return RC_OK; + + /*********************************************************************/ + /* Create the mapping file object in mode read. 
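The file is reached here through the MAPFAM mapped-file access method built in GetTable; purely as background, a minimal POSIX sketch of mapping a whole file read-only (standalone illustration with invented names, not CONNECT code, and without the portability layer CONNECT provides):

  #include <cstddef>
  #include <fcntl.h>
  #include <sys/mman.h>
  #include <sys/stat.h>
  #include <unistd.h>

  // Map a file read-only and return its base address and length.
  char *map_file(const char *path, size_t *len) {
    int fd = open(path, O_RDONLY);
    if (fd < 0) return nullptr;
    struct stat st;
    if (fstat(fd, &st) != 0) { close(fd); return nullptr; }
    *len = (size_t)st.st_size;
    void *mem = mmap(nullptr, *len, PROT_READ, MAP_PRIVATE, fd, 0);
    close(fd);                       // the mapping stays valid after close
    return (mem == MAP_FAILED) ? nullptr : (char *)mem;
  }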
*/ + /*********************************************************************/ + Mode = MODE_READ; + + if (!Txfp->OpenTableFile(g)) { + PFBLOCK fp = Txfp->GetTo_Fb(); + + if (fp) { + len = fp->Length; + memory = fp->Memory; + } else { + Mode = mode; // Restore saved Mode + return MakeNewDoc(g); + } // endif fp + + } else + return RC_FX; + + /*********************************************************************/ + /* Parse the json file and allocate its tree structure. */ + /*********************************************************************/ + g->Message[0] = 0; + jsp = Top = Bp->ParseJson(g, memory, len, &Pretty); + Txfp->CloseTableFile(g, false); + Mode = mode; // Restore saved Mode + + if (!jsp && g->Message[0]) + return RC_FX; + + if ((objpath = PlugDup(g, Objname))) { + if (*objpath == '$') objpath++; + if (*objpath == '.') objpath++; + p1 = (*objpath == '[') ? objpath++ : NULL; + + /*********************************************************************/ + /* Find the table in the tree structure. */ + /*********************************************************************/ + for (p = objpath; jsp && p; p = (p2 ? p2 : NULL)) { + a = (p1 != NULL); + p1 = strchr(p, '['); + p2 = strchr(p, '.'); + + if (!p2) + p2 = p1; + else if (p1) { + if (p1 < p2) + p2 = p1; + else if (p1 == p2 + 1) + *p2++ = 0; // Old syntax .[ + else + p1 = NULL; + + } // endif p1 + + if (p2) + *p2++ = 0; + + if (!a && *p && *p != '[' && !IsNum(p)) { + // obj is a key + if (jsp->Type != TYPE_JOB) { + strcpy(g->Message, "Table path does not match the json file"); + return RC_FX; + } // endif Type + + key = p; + objp = Bp->GetObject(jsp); + arp = NULL; + val = Bp->GetKeyValue(objp, key); + + if (!val || !(jsp = Bp->GetBson(val))) { + sprintf(g->Message, "Cannot find object key %s", key); + return RC_FX; + } // endif val + + } else { + if (*p == '[') { + // Old style + if (p[strlen(p) - 1] != ']') { + sprintf(g->Message, "Invalid Table path near %s", p); + return RC_FX; + } else + p++; + + } // endif p + + if (jsp->Type != TYPE_JAR) { + strcpy(g->Message, "Table path does not match the json file"); + return RC_FX; + } // endif Type + + arp = Bp->GetArray(jsp); + objp = NULL; + i = atoi(p) - B; + val = Bp->GetArrayValue(arp, i); + + if (!val) { + sprintf(g->Message, "Cannot find array value %d", i); + return RC_FX; + } // endif val + + } // endif + + jsp = val; + } // endfor p + + } // endif objpath + + if (jsp && jsp->Type == TYPE_JAR) + Docp = jsp; + else { + // The table is void or is just one object or one value + Docp = Bp->NewVal(TYPE_JAR); + + if (val) + Bp->AddArrayValue(Docp, val); + else if (jsp) + Bp->AddArrayValue(Docp, Bp->DupVal(jsp)); + + if (objp) + Bp->SetKeyValue(objp, Bp->DupVal(Docp), key); + else if (arp) + Bp->SetArrayValue(arp, Bp->DupVal(Docp), i); + else + Top = Docp; + + } // endif jsp + + Done = true; + return RC_OK; +} // end of MakeDocument + +/***********************************************************************/ +/* JSON Cardinality: returns table size in number of rows. */ +/***********************************************************************/ +int TDBBSON::Cardinality(PGLOBAL g) +{ + if (!g) + return (Xcol || Multiple) ? 0 : 1; + else if (Cardinal < 0) { + if (!Multiple) { + if (MakeDocument(g) == RC_OK) + Cardinal = Bp->GetSize(Docp); + + } else + return 10; + + } // endif Cardinal + + return Cardinal; +} // end of Cardinality + +/***********************************************************************/ +/* JSON GetMaxSize: returns table size estimate in number of rows. 
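An invented illustration of the object-path walk above:

  //  file content:           { "store": { "books": [ { "t": "A" }, { "t": "B" } ] } }
  //  Objname (object path):  $.store.books
  //  resulting table rows:   { "t": "A" }   and   { "t": "B" }

When the document turns out to be a single object or scalar rather than an array, the code after the loop wraps it into a one-element array so that the table still exposes one row.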
*/ +/***********************************************************************/ +int TDBBSON::GetMaxSize(PGLOBAL g) +{ + if (MaxSize < 0) + MaxSize = Cardinality(g) * ((Xcol) ? Limit : 1); + + return MaxSize; +} // end of GetMaxSize + +/***********************************************************************/ +/* ResetSize: call by TDBMUL when calculating size estimate. */ +/***********************************************************************/ +void TDBBSON::ResetSize(void) +{ + MaxSize = Cardinal = -1; + Fpos = -1; + N = 0; + Done = false; +} // end of ResetSize + +/***********************************************************************/ +/* TDBBSON is not indexable. */ +/***********************************************************************/ +int TDBBSON::MakeIndex(PGLOBAL g, PIXDEF pxdf, bool) +{ + if (pxdf) { + strcpy(g->Message, "JSON not indexable when pretty = 2"); + return RC_FX; + } else + return RC_OK; + +} // end of MakeIndex + +/***********************************************************************/ +/* Return the position in the table. */ +/***********************************************************************/ +int TDBBSON::GetRecpos(void) +{ +#if 0 + union { + uint Rpos; + BYTE Spos[4]; + }; + + Rpos = htonl(Fpos); + Spos[0] = (BYTE)NextSame; + return Rpos; +#endif // 0 + return Fpos; +} // end of GetRecpos + +/***********************************************************************/ +/* Set the position in the table. */ +/***********************************************************************/ +bool TDBBSON::SetRecpos(PGLOBAL, int recpos) +{ +#if 0 + union { + uint Rpos; + BYTE Spos[4]; + }; + + Rpos = recpos; + NextSame = Spos[0]; + Spos[0] = 0; + Fpos = (signed)ntohl(Rpos); + + //if (Fpos != (signed)ntohl(Rpos)) { + // Fpos = ntohl(Rpos); + // same = false; + //} else + // same = true; +#endif // 0 + + Fpos = recpos - 1; + return false; +} // end of SetRecpos + +/***********************************************************************/ +/* JSON Access Method opening routine. */ +/***********************************************************************/ +bool TDBBSON::OpenDB(PGLOBAL g) +{ + if (Use == USE_OPEN) { + /*******************************************************************/ + /* Table already open replace it at its beginning. */ + /*******************************************************************/ + Fpos = -1; + NextSame = false; + SameRow = 0; + return false; + } // endif use + +/*********************************************************************/ +/* OpenDB: initialize the JSON file processing. */ +/*********************************************************************/ + if (MakeDocument(g) != RC_OK) + return true; + + if (Mode == MODE_INSERT) + switch (Jmode) { + case MODE_OBJECT: Row = Bp->NewVal(TYPE_JOB); break; + case MODE_ARRAY: Row = Bp->NewVal(TYPE_JAR); break; + case MODE_VALUE: Row = Bp->NewVal(); break; + default: + sprintf(g->Message, "Invalid Jmode %d", Jmode); + return true; + } // endswitch Jmode + + if (Xcol) + To_Filter = NULL; // Imcompatible + + Use = USE_OPEN; + return false; +} // end of OpenDB + +/***********************************************************************/ +/* ReadDB: Data Base read routine for JSON access method. 
*/ +/***********************************************************************/ +int TDBBSON::ReadDB(PGLOBAL) +{ + int rc; + + N++; + + if (NextSame) { + SameRow = NextSame; + NextSame = false; + M++; + rc = RC_OK; + } else if (++Fpos < (signed)Bp->GetSize(Docp)) { + Row = Bp->GetArrayValue(Bp->GetBson(Docp), Fpos); + + if (Row->Type == TYPE_JVAL) + Row = Bp->GetBson(Row); + + SameRow = 0; + M = 1; + rc = RC_OK; + } else + rc = RC_EF; + + return rc; +} // end of ReadDB + +/***********************************************************************/ +/* WriteDB: Data Base write routine for JSON access method. */ +/***********************************************************************/ +int TDBBSON::WriteDB(PGLOBAL g) +{ + if (Jmode == MODE_OBJECT) { + PBVAL vp = Bp->DupVal(Row); + + if (Mode == MODE_INSERT) { + Bp->AddArrayValue(Docp, vp); + Row = Bp->NewVal(TYPE_JOB); + } else if (Bp->SetArrayValue(Docp, vp, Fpos)) + return RC_FX; + + } else if (Jmode == MODE_ARRAY) { + PBVAL vp = Bp->DupVal(Row); + + if (Mode == MODE_INSERT) { + Bp->AddArrayValue(Docp, vp); + Row = Bp->NewVal(TYPE_JAR); + } else if (Bp->SetArrayValue(Docp, vp, Fpos)) + return RC_FX; + + } else { // if (Jmode == MODE_VALUE) + if (Mode == MODE_INSERT) { + Bp->AddArrayValue(Docp, Row); + Row = Bp->NewVal(); + } else if (Bp->SetArrayValue(Docp, Row, Fpos)) + return RC_FX; + + } // endif Jmode + + Changed = true; + return RC_OK; +} // end of WriteDB + +/***********************************************************************/ +/* Data Base delete line routine for JSON access method. */ +/***********************************************************************/ +int TDBBSON::DeleteDB(PGLOBAL g, int irc) +{ + strcpy(g->Message, "BSON Delete NIY"); + return RC_FX; +#if 0 + if (irc == RC_OK) { + // Deleted current row + if (Doc->DeleteValue(Fpos)) { + sprintf(g->Message, "Value %d does not exist", Fpos + 1); + return RC_FX; + } // endif Delete + + Changed = true; + } else if (irc == RC_FX) + // Delete all + for (int i = 0; i < Doc->size(); i++) { + Doc->DeleteValue(i); + Changed = true; + } // endfor i + + return RC_OK; +#endif // 0 +} // end of DeleteDB + +/***********************************************************************/ +/* Data Base close routine for JSON access methods. */ +/***********************************************************************/ +void TDBBSON::CloseDB(PGLOBAL g) +{ + if (!Changed) + return; + + // Save the modified document + char filename[_MAX_PATH]; + +//Docp->InitArray(g); + + // We used the file name relative to recorded datapath + PlugSetPath(filename, ((PBDEF)To_Def)->Fn, GetPath()); + + // Serialize the modified table + if (!Bp->Serialize(g, Top, filename, Pretty)) + puts(g->Message); + +} // end of CloseDB + +/* ---------------------------TDBBCL class --------------------------- */ + +/***********************************************************************/ +/* TDBBCL class constructor. */ +/***********************************************************************/ +TDBBCL::TDBBCL(PBDEF tdp) : TDBCAT(tdp) { + Topt = tdp->GetTopt(); + Db = tdp->Schema; + Dsn = tdp->Uri; +} // end of TDBBCL constructor + +/***********************************************************************/ +/* GetResult: Get the list the JSON file columns. 
*/ +/***********************************************************************/ +PQRYRES TDBBCL::GetResult(PGLOBAL g) { + return BSONColumns(g, Db, Dsn, Topt, false); +} // end of GetResult + +/* --------------------------- End of json --------------------------- */ diff --git a/storage/connect/tabbson.h b/storage/connect/tabbson.h new file mode 100644 index 00000000000..127370ce342 --- /dev/null +++ b/storage/connect/tabbson.h @@ -0,0 +1,342 @@ +/*************** tabbson H Declares Source Code File (.H) **************/ +/* Name: tabbson.h Version 1.0 */ +/* */ +/* (C) Copyright to the author Olivier BERTRAND 2020 */ +/* */ +/* This file contains the BSON classes declares. */ +/***********************************************************************/ +#pragma once +#include "block.h" +#include "colblk.h" +#include "bson.h" +#include "tabjson.h" + +typedef class BTUTIL* PBTUT; +typedef class BCUTIL* PBCUT; +typedef class BSONDEF* PBDEF; +typedef class TDBBSON* PBTDB; +typedef class BSONCOL* PBSCOL; +class TDBBSN; +DllExport PQRYRES BSONColumns(PGLOBAL, PCSZ, PCSZ, PTOS, bool); + +/***********************************************************************/ +/* Class used to get the columns of a mongo collection. */ +/***********************************************************************/ +class BSONDISC : public BLOCK { +public: + // Constructor + BSONDISC(PGLOBAL g, uint* lg); + + // Functions + int GetColumns(PGLOBAL g, PCSZ db, PCSZ dsn, PTOS topt); + bool Find(PGLOBAL g, PBVAL jvp, PCSZ key, int j); + void AddColumn(PGLOBAL g); + + // Members + JCOL jcol; + PJCL jcp, fjcp, pjcp; + //PVL vlp; + PBDEF tdp; + TDBBSN *tjnp; + PBTDB tjsp; + PBPR jpp; + PBVAL jsp; + PBPR row; + PBTUT bp; + PCSZ sep; + char colname[65], fmt[129], buf[16]; + uint *length; + int i, n, bf, ncol, lvl, sz, limit; + bool all, strfy; +}; // end of BSONDISC + +/***********************************************************************/ +/* JSON table. 
*/ +/***********************************************************************/ +class DllExport BSONDEF : public DOSDEF { /* Table description */ + friend class TDBBSON; + friend class TDBBSN; + friend class TDBBCL; + friend class BSONDISC; + friend class BSONCOL; +#if defined(CMGO_SUPPORT) + friend class CMGFAM; +#endif // CMGO_SUPPORT +#if defined(JAVA_SUPPORT) + friend class JMGFAM; +#endif // JAVA_SUPPORT +public: + // Constructor + BSONDEF(void); + + // Implementation + virtual const char* GetType(void) { return "BSON"; } + + // Methods + virtual bool DefineAM(PGLOBAL g, LPCSTR am, int poff); + virtual PTDB GetTable(PGLOBAL g, MODE m); + +protected: + // Members + PGLOBAL G; /* Bson utility memory */ + JMODE Jmode; /* MODE_OBJECT by default */ + PCSZ Objname; /* Name of first level object */ + PCSZ Xcol; /* Name of expandable column */ + int Limit; /* Limit of multiple values */ + int Pretty; /* Depends on file structure */ + int Base; /* The array index base */ + bool Strict; /* Strict syntax checking */ + char Sep; /* The Jpath separator */ + const char* Uri; /* MongoDB connection URI */ + PCSZ Collname; /* External collection name */ + PSZ Options; /* Colist ; Pipe */ + PSZ Filter; /* Filter */ + PSZ Driver; /* MongoDB Driver (C or JAVA) */ + bool Pipe; /* True if Colist is a pipeline */ + int Version; /* Driver version */ + PSZ Wrapname; /* MongoDB java wrapper name */ +}; // end of BSONDEF + + +/* -------------------------- BTUTIL class --------------------------- */ + +/***********************************************************************/ +/* Handles all BJSON actions for a BSON table. */ +/***********************************************************************/ +class BTUTIL : public BDOC { +public: + // Constructor + BTUTIL(PGLOBAL G, TDBBSN* tp) : BDOC(G) { Tp = tp; } + + // Utility functions + PBVAL FindRow(PGLOBAL g); + PBVAL ParseLine(PGLOBAL g, int *pretty, bool *comma); + PBVAL MakeTopTree(PGLOBAL g, PBVAL jsp); + PSZ SerialVal(PGLOBAL g, PBVAL top, int pretty); + +protected: + // Members + TDBBSN* Tp; +}; // end of class BTUTIL + +/* -------------------------- BCUTIL class --------------------------- */ + +/***********************************************************************/ +/* Handles all BJSON actions for a BSON columns. */ +/***********************************************************************/ +class BCUTIL : public BTUTIL { +public: + // Constructor + BCUTIL(PGLOBAL G, PBSCOL cp, TDBBSN* tp) : BTUTIL(G, tp) { Cp = cp; } + + // Utility functions + void SetJsonValue(PGLOBAL g, PVAL vp, PBVAL jvp); + PVAL MakeBson(PGLOBAL g, PBVAL jsp); + PVAL GetColumnValue(PGLOBAL g, PBVAL row, int i); + PVAL ExpandArray(PGLOBAL g, PBVAL arp, int n); + PVAL CalculateArray(PBVAL arp, int n); + PBVAL GetRow(PGLOBAL g); + +protected: + // Member + PBSCOL Cp; +}; // end of class BCUTIL + + /* -------------------------- TDBBSN class --------------------------- */ + +/***********************************************************************/ +/* This is the BSN Access Method class declaration. */ +/* The table is a DOS file, each record being a JSON object. 
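For reference, an invented example of the record layout this class reads when pretty is 0, that is one complete JSON object per line of the file (with pretty 1 the objects are array items separated by commas between bracket lines, which is why the Comma flag exists):

  //  { "id": 1, "name": "Joe",  "prices": [3.2, 4.1] }
  //  { "id": 2, "name": "Beth", "prices": [5.0] }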
*/ +/***********************************************************************/ +class DllExport TDBBSN : public TDBDOS { + friend class BSONCOL; + friend class BSONDEF; + friend class BTUTIL; + friend class BCUTIL; + friend class BSONDISC; +#if defined(CMGO_SUPPORT) + friend class CMGFAM; +#endif // CMGO_SUPPORT +#if defined(JAVA_SUPPORT) + friend class JMGFAM; +#endif // JAVA_SUPPORT +public: + // Constructor + TDBBSN(PGLOBAL g, PBDEF tdp, PTXF txfp); + TDBBSN(TDBBSN* tdbp); + + // Implementation + virtual AMT GetAmType(void) { return TYPE_AM_JSN; } + virtual bool SkipHeader(PGLOBAL g); + virtual PTDB Duplicate(PGLOBAL g) { return (PTDB)new(g) TDBBSN(this); } + PBVAL GetRow(void) { return Row; } + + // Methods + virtual PTDB Clone(PTABS t); + virtual PCOL MakeCol(PGLOBAL g, PCOLDEF cdp, PCOL cprec, int n); + virtual PCOL InsertSpecialColumn(PCOL colp); + virtual int RowNumber(PGLOBAL g, bool b = FALSE) {return (b) ? M : N;} + virtual bool CanBeFiltered(void) + {return Txfp->GetAmType() == TYPE_AM_MGO || !Xcol;} + + // Database routines + virtual int Cardinality(PGLOBAL g); + virtual int GetMaxSize(PGLOBAL g); + virtual bool OpenDB(PGLOBAL g); + virtual int ReadDB(PGLOBAL g); + virtual bool PrepareWriting(PGLOBAL g); + virtual int WriteDB(PGLOBAL g); + virtual void CloseDB(PGLOBAL g); + + // Specific routine + virtual int EstimatedLength(void); + +protected: + PBVAL FindRow(PGLOBAL g); +//int MakeTopTree(PGLOBAL g, PBVAL jsp); + + // Members + PBTUT Bp; // The BSUTIL handling class + PBVAL Top; // The top JSON tree + PBVAL Row; // The current row + PBVAL Val; // The value of the current row + PBSCOL Colp; // The multiple column + JMODE Jmode; // MODE_OBJECT by default + PCSZ Objname; // The table object name + PCSZ Xcol; // Name of expandable column + int Fpos; // The current row index + int N; // The current Rownum + int M; // Index of multiple value + int Limit; // Limit of multiple values + int Pretty; // Depends on file structure + int NextSame; // Same next row + int SameRow; // Same row nb + int Xval; // Index of expandable array + int B; // Array index base + char Sep; // The Jpath separator + bool Strict; // Strict syntax checking + bool Comma; // Row has final comma +}; // end of class TDBBSN + +/* -------------------------- BSONCOL class -------------------------- */ + +/***********************************************************************/ +/* Class BSONCOL: JSON access method column descriptor. 
*/ +/***********************************************************************/ +class DllExport BSONCOL : public DOSCOL { + friend class TDBBSN; + friend class TDBBSON; + friend class BCUTIL; +#if defined(CMGO_SUPPORT) + friend class CMGFAM; +#endif // CMGO_SUPPORT +#if defined(JAVA_SUPPORT) + friend class JMGFAM; +#endif // JAVA_SUPPORT +public: + // Constructors + BSONCOL(PGLOBAL g, PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i); + BSONCOL(BSONCOL* colp, PTDB tdbp); // Constructor used in copy process + + // Implementation + virtual int GetAmType(void) { return Tbp->GetAmType(); } + + // Methods + virtual bool SetBuffer(PGLOBAL g, PVAL value, bool ok, bool check); + bool ParseJpath(PGLOBAL g); + virtual PSZ GetJpath(PGLOBAL g, bool proj); + virtual void ReadColumn(PGLOBAL g); + virtual void WriteColumn(PGLOBAL g); + +protected: + bool CheckExpand(PGLOBAL g, int i, PSZ nm, bool b); + bool SetArrayOptions(PGLOBAL g, char* p, int i, PSZ nm); +//PVAL GetColumnValue(PGLOBAL g, PBVAL row, int i); +//PVAL ExpandArray(PGLOBAL g, PBVAL arp, int n); +//PVAL CalculateArray(PGLOBAL g, PBVAL arp, int n); + PVAL MakeBson(PGLOBAL g, PBVAL jsp); +//void SetJsonValue(PGLOBAL g, PVAL vp, PBVAL val); +//PBVAL GetRow(PGLOBAL g); + + // Default constructor not to be used + BSONCOL(void) {} + + // Members + TDBBSN *Tbp; // To the JSN table block + PBCUT Cp; // To the BCUTIL handling class + PVAL MulVal; // To value used by multiple column + char *Jpath; // The json path + JNODE *Nodes; // The intermediate objects + int Nod; // The number of intermediate objects + int Xnod; // Index of multiple values + char Sep; // The Jpath separator + bool Xpd; // True for expandable column + bool Parsed; // True when parsed +}; // end of class BSONCOL + +/* -------------------------- TDBBSON class -------------------------- */ + +/***********************************************************************/ +/* This is the JSON Access Method class declaration. */ +/***********************************************************************/ +class DllExport TDBBSON : public TDBBSN { + friend class BSONDEF; + friend class BSONCOL; +public: + // Constructor + TDBBSON(PGLOBAL g, PBDEF tdp, PTXF txfp); + TDBBSON(PBTDB tdbp); + + // Implementation + virtual AMT GetAmType(void) { return TYPE_AM_JSON; } + virtual PTDB Duplicate(PGLOBAL g) { return (PTDB)new(g) TDBBSON(this); } + PBVAL GetDoc(void) { return Docp; } + + // Methods + virtual PTDB Clone(PTABS t); + + // Database routines + virtual int Cardinality(PGLOBAL g); + virtual int GetMaxSize(PGLOBAL g); + virtual void ResetSize(void); + virtual int GetProgCur(void) { return N; } + virtual int GetRecpos(void); + virtual bool SetRecpos(PGLOBAL g, int recpos); + virtual bool OpenDB(PGLOBAL g); + virtual int ReadDB(PGLOBAL g); + virtual bool PrepareWriting(PGLOBAL g) { return false; } + virtual int WriteDB(PGLOBAL g); + virtual int DeleteDB(PGLOBAL g, int irc); + virtual void CloseDB(PGLOBAL g); + int MakeDocument(PGLOBAL g); + + // Optimization routines + virtual int MakeIndex(PGLOBAL g, PIXDEF pxdf, bool add); + +protected: + int MakeNewDoc(PGLOBAL g); + + // Members + PBVAL Docp; // The document array + int Multiple; // 0: No 1: DIR 2: Section 3: filelist + bool Done; // True when document parsing is done + bool Changed; // After Update, Insert or Delete +}; // end of class TDBBSON + +/***********************************************************************/ +/* This is the class declaration for the JSON catalog table. 
*/ +/***********************************************************************/ +class DllExport TDBBCL : public TDBCAT { +public: + // Constructor + TDBBCL(PBDEF tdp); + +protected: + // Specific routines + virtual PQRYRES GetResult(PGLOBAL g); + + // Members + PTOS Topt; + PCSZ Db; + PCSZ Dsn; +}; // end of class TDBBCL diff --git a/storage/connect/tabjson.cpp b/storage/connect/tabjson.cpp index 336b0f371ca..af45cdab9f7 100644 --- a/storage/connect/tabjson.cpp +++ b/storage/connect/tabjson.cpp @@ -165,8 +165,9 @@ JSONDISC::JSONDISC(PGLOBAL g, uint *lg) int JSONDISC::GetColumns(PGLOBAL g, PCSZ db, PCSZ dsn, PTOS topt) { - char filename[_MAX_PATH]; - bool mgo = (GetTypeID(topt->type) == TAB_MONGO); + char filename[_MAX_PATH]; + bool mgo = (GetTypeID(topt->type) == TAB_MONGO); + PGLOBAL G = NULL; lvl = GetIntegerTableOption(g, topt, "Level", GetDefaultDepth()); lvl = GetIntegerTableOption(g, topt, "Depth", lvl); @@ -296,12 +297,15 @@ int JSONDISC::GetColumns(PGLOBAL g, PCSZ db, PCSZ dsn, PTOS topt) tjnp->SetMode(MODE_READ); // Allocate the parse work memory +#if 0 PGLOBAL G = (PGLOBAL)PlugSubAlloc(g, NULL, sizeof(GLOBAL)); memset(G, 0, sizeof(GLOBAL)); G->Sarea_Size = (size_t)tdp->Lrecl * (tdp->Pretty >= 0 ? 10 : 2); G->Sarea = PlugSubAlloc(g, NULL, G->Sarea_Size); PlugSubSet(G->Sarea, G->Sarea_Size); G->jump_level = 0; +#endif // 0 + G = PlugInit(NULL, (size_t)tdp->Lrecl * (tdp->Pretty >= 0 ? 10 : 2)); tjnp->SetG(G); if (tjnp->OpenDB(g)) @@ -738,6 +742,7 @@ PTDB JSONDEF::GetTable(PGLOBAL g, MODE m) if (Lrecl) { // Allocate the parse work memory +#if 0 PGLOBAL G = (PGLOBAL)PlugSubAlloc(g, NULL, sizeof(GLOBAL)); memset(G, 0, sizeof(GLOBAL)); G->Sarea_Size = (size_t)Lrecl * 10; @@ -745,6 +750,8 @@ PTDB JSONDEF::GetTable(PGLOBAL g, MODE m) PlugSubSet(G->Sarea, G->Sarea_Size); G->jump_level = 0; ((TDBJSN*)tdbp)->G = G; +#endif // 0 + ((TDBJSN*)tdbp)->G = PlugInit(NULL, (size_t)Lrecl * (Pretty >= 0 ? 10 : 2)); } else { strcpy(g->Message, "LRECL is not defined"); return NULL; @@ -1226,7 +1233,16 @@ int TDBJSN::WriteDB(PGLOBAL g) return rc; } // end of WriteDB -/* ---------------------------- JSONCOL ------------------------------ */ +/***********************************************************************/ +/* Data Base close routine for JSON access method. */ +/***********************************************************************/ +void TDBJSN::CloseDB(PGLOBAL g) +{ + TDBDOS::CloseDB(g); + G = PlugExit(G); +} // end of CloseDB + + /* ---------------------------- JSONCOL ------------------------------ */ /***********************************************************************/ /* JSONCOL public constructor. */ @@ -1608,7 +1624,7 @@ PVAL JSONCOL::MakeJson(PGLOBAL g, PJSON jsp) strcpy(g->Message, "Column size too small"); Value->SetValue_char(NULL, 0); } // endif Clen -#endif 0 +#endif // 0 } else Value->SetValue_psz(Serialize(g, jsp, NULL, 0)); diff --git a/storage/connect/tabjson.h b/storage/connect/tabjson.h index c254c3429de..9b4f508880e 100644 --- a/storage/connect/tabjson.h +++ b/storage/connect/tabjson.h @@ -5,6 +5,7 @@ /* */ /* This file contains the JSON classes declares. 
*/ /***********************************************************************/ +#pragma once //#include "osutil.h" // Unuseful and bad for OEM #include "block.h" #include "colblk.h" @@ -161,6 +162,7 @@ public: virtual int ReadDB(PGLOBAL g); virtual bool PrepareWriting(PGLOBAL g); virtual int WriteDB(PGLOBAL g); + virtual void CloseDB(PGLOBAL g); // Specific routine virtual int EstimatedLength(void); diff --git a/storage/connect/user_connect.cc b/storage/connect/user_connect.cc index 09d6db1ad27..5268651d080 100644 --- a/storage/connect/user_connect.cc +++ b/storage/connect/user_connect.cc @@ -112,8 +112,7 @@ bool user_connect::user_init() if (g) printf("%s\n", g->Message); - int rc= PlugExit(g); - g= NULL; + g= PlugExit(g); if (dup) free(dup); From 7d439334fffcd9b44e524e3b8e3ef59009ebcac0 Mon Sep 17 00:00:00 2001 From: Olivier Bertrand Date: Tue, 1 Dec 2020 20:57:05 +0100 Subject: [PATCH 033/150] Fix failed compile modified storage/connect/ha_connect.cc and mycat.cc --- storage/connect/ha_connect.cc | 16 ++++++++-------- storage/connect/mycat.cc | 8 ++++---- 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/storage/connect/ha_connect.cc b/storage/connect/ha_connect.cc index 4e7dd3ff394..fc67edc5330 100644 --- a/storage/connect/ha_connect.cc +++ b/storage/connect/ha_connect.cc @@ -170,7 +170,7 @@ #define JSONMAX 10 // JSON Default max grp size extern "C" { - char version[]= "Version 1.07.0002 November 30, 2020"; + char version[]= "Version 1.07.0002 December 01, 2020"; #if defined(__WIN__) char compver[]= "Version 1.07.0002 " __DATE__ " " __TIME__; char slash= '\\'; @@ -4516,7 +4516,7 @@ bool ha_connect::check_privileges(THD *thd, PTOS options, char *dbn, bool quick) case TAB_VEC: case TAB_REST: case TAB_JSON: -#if defined DEVELOPMENT +#if defined(DEVELOPMENT) case TAB_BSON: #endif // DEVELOPMENT if (options->filename && *options->filename) { @@ -5685,9 +5685,9 @@ static int connect_assisted_discovery(handlerton *, THD* thd, } else if (topt->http) { switch (ttp) { case TAB_JSON: -#ifdef DEVELOPMENT +#if defined(DEVELOPMENT) case TAB_BSON: -#endif // DEVELOPMENT +#endif // DEVELOPMENT case TAB_XML: case TAB_CSV: ttp = TAB_REST; @@ -5872,9 +5872,9 @@ static int connect_assisted_discovery(handlerton *, THD* thd, case TAB_XML: #endif // LIBXML2_SUPPORT || DOMDOC_SUPPORT case TAB_JSON: -#ifdef DEVELOPMENT +#if defined(DEVELOPMENT) case TAB_BSON: -#endif // DEVELOPMENT +#endif // DEVELOPMENT dsn= strz(g, create_info->connect_string); if (!fn && !zfn && !mul && !dsn) @@ -6041,11 +6041,11 @@ static int connect_assisted_discovery(handlerton *, THD* thd, case TAB_JSON: qrp= JSONColumns(g, db, dsn, topt, fnc == FNC_COL); break; -#ifdef DEVELOPMENT +#if defined(DEVELOPMENT) case TAB_BSON: qrp= BSONColumns(g, db, dsn, topt, fnc == FNC_COL); break; -#endif // DEVELOPMENT +#endif // DEVELOPMENT #if defined(JAVA_SUPPORT) case TAB_MONGO: url= strz(g, create_info->connect_string); diff --git a/storage/connect/mycat.cc b/storage/connect/mycat.cc index 395e1192b45..476baf63039 100644 --- a/storage/connect/mycat.cc +++ b/storage/connect/mycat.cc @@ -190,7 +190,7 @@ bool IsFileType(TABTYPE type) case TAB_JSON: #if defined(DEVELOPMENT) case TAB_BSON: -#endif +#endif // DEVELOPMENT case TAB_REST: // case TAB_ZIP: isfile= true; @@ -288,7 +288,7 @@ bool IsTypeIndexable(TABTYPE type) case TAB_JSON: #if defined(DEVELOPMENT) case TAB_BSON: -#endif +#endif // DEVELOPMENT idx= true; break; default: @@ -317,7 +317,7 @@ int GetIndexType(TABTYPE type) case TAB_JSON: #if defined(DEVELOPMENT) case TAB_BSON: -#endif 
+#endif // DEVELOPMENT xtyp= 1; break; case TAB_MYSQL: @@ -484,7 +484,7 @@ PTABDEF MYCAT::MakeTableDesc(PGLOBAL g, PTABLE tablep, LPCSTR am) case TAB_JSON: tdp= new(g) JSONDEF; break; #if defined(DEVELOPMENT) case TAB_BSON: tdp= new(g) BSONDEF; break; -#endif +#endif // DEVELOPMENT #if defined(ZIP_SUPPORT) case TAB_ZIP: tdp= new(g) ZIPDEF; break; #endif // ZIP_SUPPORT From 4b6d661c7f4bcf9f0f5a9f5d5f4a6743983fc9a5 Mon Sep 17 00:00:00 2001 From: Olivier Bertrand Date: Wed, 2 Dec 2020 00:35:58 +0100 Subject: [PATCH 034/150] Fix failed compile modified storage/connect/ha_connect.cc --- storage/connect/ha_connect.cc | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/storage/connect/ha_connect.cc b/storage/connect/ha_connect.cc index fc67edc5330..95f885c65b4 100644 --- a/storage/connect/ha_connect.cc +++ b/storage/connect/ha_connect.cc @@ -170,7 +170,7 @@ #define JSONMAX 10 // JSON Default max grp size extern "C" { - char version[]= "Version 1.07.0002 December 01, 2020"; + char version[]= "Version 1.07.0002 December 02, 2020"; #if defined(__WIN__) char compver[]= "Version 1.07.0002 " __DATE__ " " __TIME__; char slash= '\\'; @@ -4569,6 +4569,7 @@ bool ha_connect::check_privileges(THD *thd, PTOS options, char *dbn, bool quick) case TAB_OCCUR: case TAB_PIVOT: case TAB_VIR: + default: // This is temporary until a solution is found return false; } // endswitch type From c05b1288fd86c98d9b13724d6b115b35c77ca15a Mon Sep 17 00:00:00 2001 From: Olivier Bertrand Date: Fri, 4 Dec 2020 23:21:59 +0100 Subject: [PATCH 035/150] Remove a push warning causing failing assert. Modified storage/connect/filamap.cpp --- storage/connect/bson.cpp | 386 +++++++----------- storage/connect/bson.h | 62 +-- storage/connect/bsonudf.cpp | 106 +++-- storage/connect/bsonudf.h | 4 +- storage/connect/filamap.cpp | 3 +- storage/connect/global.h | 27 +- .../connect/mysql-test/connect/disabled.def | 1 + storage/connect/plugutil.cpp | 25 +- storage/connect/tabbson.cpp | 346 ++++++---------- storage/connect/tabbson.h | 10 +- 10 files changed, 400 insertions(+), 570 deletions(-) diff --git a/storage/connect/bson.cpp b/storage/connect/bson.cpp index e395bd8988d..b4ce7ec1505 100644 --- a/storage/connect/bson.cpp +++ b/storage/connect/bson.cpp @@ -21,6 +21,15 @@ #include "plgdbsem.h" #include "bson.h" +/***********************************************************************/ +/* Check macro. */ +/***********************************************************************/ +#if defined(_DEBUG) +#define CheckType(X,Y) if (!X || X ->Type != Y) throw MSG(VALTYPE_NOMATCH); +#else +#define CheckType(V) +#endif + #if defined(__WIN__) #define EL "\r\n" #else @@ -859,13 +868,14 @@ PBPR BJSON::SubAllocPair(OFFSET key, OFFSET val) /***********************************************************************/ /* Return the number of pairs in this object. */ /***********************************************************************/ -int BJSON::GetObjectSize(PBPR bop, bool b) +int BJSON::GetObjectSize(PBVAL bop, bool b) { + CheckType(bop, TYPE_JOB); int n = 0; - for (PBPR brp = bop; brp; brp = MPP(brp->Next)) + for (PBPR brp = GetObject(bop); brp; brp = GetNext(brp)) // If b return only non null pairs - if (!b || (brp->Vlp && (MVP(brp->Vlp))->Type != TYPE_NULL)) + if (!b || (brp->Vlp && GetVal(brp)->Type != TYPE_NULL)) n++; return n; @@ -874,64 +884,60 @@ int BJSON::GetObjectSize(PBPR bop, bool b) /***********************************************************************/ /* Add a new pair to an Object and return it. 
*/ /***********************************************************************/ -PBPR BJSON::AddPair(PBPR bop, PSZ key, OFFSET val) +void BJSON::AddPair(PBVAL bop, PSZ key, OFFSET val) { - PBPR brp, nrp = SubAllocPair(key, val); + CheckType(bop, TYPE_JOB); + PBPR brp; + OFFSET nrp = MOF(SubAllocPair(key, val)); - if (bop) { - for (brp = bop; brp->Next; brp = MPP(brp->Next)); + if (bop->To_Val) { + for (brp = GetObject(bop); brp->Next; brp = GetNext(brp)); - brp->Next = MOF(nrp); + brp->Next = nrp; } else - bop = nrp; + bop->To_Val = nrp; - return bop; + bop->Nd++; } // end of AddPair /***********************************************************************/ /* Return all object keys as an array. */ /***********************************************************************/ -PBVAL BJSON::GetKeyList(PBPR bop) +PBVAL BJSON::GetKeyList(PBVAL bop) { - PBVAL bvp, lvp, fvp = NULL; + CheckType(bop, TYPE_JOB); + PBVAL arp = NewVal(TYPE_JAR); - for (PBPR brp = bop; brp; brp = MPP(brp->Next)) - if (fvp) { - bvp = SubAllocVal(brp->Key, TYPE_STRG); - lvp->Next = MOF(bvp); - lvp = bvp; - } else - lvp = fvp = SubAllocVal(brp->Key, TYPE_STRG); + for (PBPR brp = GetObject(bop); brp; brp = GetNext(brp)) + AddArrayValue(arp, MOF(SubAllocVal(brp->Key, TYPE_STRG))); - return fvp; + return arp; } // end of GetKeyList /***********************************************************************/ /* Return all object values as an array. */ /***********************************************************************/ -PBVAL BJSON::GetObjectValList(PBPR bop) +PBVAL BJSON::GetObjectValList(PBVAL bop) { - PBVAL bvp, lvp, fvp = NULL; + CheckType(bop, TYPE_JOB); + PBVAL arp = NewVal(TYPE_JAR); - for (PBPR brp = bop; brp; brp = MPP(brp->Next)) - if (fvp) { - bvp = DupVal(MVP(brp->Vlp)); - lvp->Next = MOF(bvp); - lvp = bvp; - } else - lvp = fvp = DupVal(MVP(brp->Vlp)); + for (PBPR brp = GetObject(bop); brp; brp = GetNext(brp)) + AddArrayValue(arp, brp->Vlp); - return fvp; + return arp; } // end of GetObjectValList /***********************************************************************/ /* Get the value corresponding to the given key. */ /***********************************************************************/ -PBVAL BJSON::GetKeyValue(PBPR bop, PSZ key) +PBVAL BJSON::GetKeyValue(PBVAL bop, PSZ key) { - for (PBPR brp = bop; brp; brp = MPP(brp->Next)) - if (!strcmp(MZP(brp->Key), key)) - return MVP(brp->Vlp); + CheckType(bop, TYPE_JOB); + + for (PBPR brp = GetObject(bop); brp; brp = GetNext(brp)) + if (!strcmp(GetKey(brp), key)) + return GetVal(brp); return NULL; } // end of GetKeyValue; @@ -939,8 +945,11 @@ PBVAL BJSON::GetKeyValue(PBPR bop, PSZ key) /***********************************************************************/ /* Return the text corresponding to all keys (XML like). 
*/ /***********************************************************************/ -PSZ BJSON::GetObjectText(PGLOBAL g, PBPR bop, PSTRG text) { - if (bop) { +PSZ BJSON::GetObjectText(PGLOBAL g, PBVAL bop, PSTRG text) +{ + CheckType(bop, TYPE_JOB); + + if (bop->To_Val) { bool b; if (!text) { @@ -977,8 +986,8 @@ PSZ BJSON::GetObjectText(PGLOBAL g, PBPR bop, PSTRG text) { } else #endif // 0 - for (PBPR brp = bop; brp; brp = MPP(brp->Next)) { - GetValueText(g, MVP(brp->Vlp), text); + for (PBPR brp = GetObject(bop); brp; brp = GetNext(brp)) { + GetValueText(g, GetVal(brp), text); if (brp->Next) text->Append(' '); @@ -998,39 +1007,44 @@ PSZ BJSON::GetObjectText(PGLOBAL g, PBPR bop, PSTRG text) { /***********************************************************************/ /* Set or add a value corresponding to the given key. */ /***********************************************************************/ -PBPR BJSON::SetKeyValue(PBPR bop, OFFSET bvp, PSZ key) +void BJSON::SetKeyValue(PBVAL bop, OFFSET bvp, PSZ key) { - PBPR brp = bop, prp = NULL; + CheckType(bop, TYPE_JOB); + PBPR brp, prp = NULL; - if (brp) { - for (brp = bop; brp; brp = MPP(brp->Next)) - if (!strcmp(MZP(brp->Key), key)) { + if (bop->To_Val) { + for (brp = GetObject(bop); brp; brp = GetNext(brp)) + if (!strcmp(GetKey(brp), key)) { brp->Vlp = bvp; - break; + return; } else prp = brp; if (!brp) - prp->Vlp = MOF(SubAllocPair(key, bvp)); + prp->Next = MOF(SubAllocPair(key, bvp)); } else - bop = SubAllocPair(key, bvp); + bop->To_Val = MOF(SubAllocPair(key, bvp)); - // Return the first pair of this object - return bop; + bop->Nd++; } // end of SetKeyValue /***********************************************************************/ /* Merge two objects. */ /***********************************************************************/ -PBPR BJSON::MergeObject(PBPR bop1, PBPR bop2) +PBVAL BJSON::MergeObject(PBVAL bop1, PBVAL bop2) { - if (bop1) - for (PBPR brp = bop2; brp; brp = MPP(brp->Next)) - SetKeyValue(bop1, brp->Vlp, MZP(brp->Key)); + CheckType(bop1, TYPE_JOB); + CheckType(bop2, TYPE_JOB); - else - bop1 = bop2; + if (bop1->To_Val) + for (PBPR brp = GetObject(bop2); brp; brp = GetNext(brp)) + SetKeyValue(bop1, brp->Vlp, GetKey(brp)); + + else { + bop1->To_Val = bop2->To_Val; + bop1->Nd = bop2->Nd; + } // endelse To_Val return bop1; } // end of MergeObject; @@ -1038,30 +1052,33 @@ PBPR BJSON::MergeObject(PBPR bop1, PBPR bop2) /***********************************************************************/ /* Delete a value corresponding to the given key. */ /***********************************************************************/ -PBPR BJSON::DeleteKey(PBPR bop, PCSZ key) +void BJSON::DeleteKey(PBVAL bop, PCSZ key) { + CheckType(bop, TYPE_JOB); PBPR brp, pbrp = NULL; - for (brp = bop; brp; brp = MPP(brp->Next)) + for (brp = GetObject(bop); brp; brp = GetNext(brp)) if (!strcmp(MZP(brp->Key), key)) { if (pbrp) { pbrp->Next = brp->Next; - return bop; } else - return MPP(brp->Next); + bop->To_Val = brp->Next; + bop->Nd--; + break; } else pbrp = brp; - return bop; } // end of DeleteKey /***********************************************************************/ /* True if void or if all members are nulls. 
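An invented example of what MergeObject above produces; pairs coming from the second object overwrite same-name keys of the first one and new keys are appended:

  //  first object:    { "a": 1, "b": 2 }
  //  second object:   { "b": 3, "c": 4 }
  //  merged result:   { "a": 1, "b": 3, "c": 4 }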
*/ /***********************************************************************/ -bool BJSON::IsObjectNull(PBPR bop) +bool BJSON::IsObjectNull(PBVAL bop) { - for (PBPR brp = bop; brp; brp = MPP(brp->Next)) + CheckType(bop, TYPE_JOB); + + for (PBPR brp = GetObject(bop); brp; brp = GetNext(brp)) if (brp->Vlp && (MVP(brp->Vlp))->Type != TYPE_NULL) return false; @@ -1075,9 +1092,10 @@ bool BJSON::IsObjectNull(PBPR bop) /***********************************************************************/ int BJSON::GetArraySize(PBVAL bap, bool b) { + CheckType(bap, TYPE_JAR); int n = 0; - for (PBVAL bvp = bap; bvp; bvp = MVP(bvp->Next)) + for (PBVAL bvp = GetArray(bap); bvp; bvp = GetNext(bvp)) // If b, return only non null values if (!b || bvp->Type != TYPE_NULL) n++; @@ -1090,13 +1108,12 @@ int BJSON::GetArraySize(PBVAL bap, bool b) /***********************************************************************/ PBVAL BJSON::GetArrayValue(PBVAL bap, int n) { + CheckType(bap, TYPE_JAR); int i = 0; - for (PBVAL bvp = bap; bvp; bvp = MVP(bvp->Next)) + for (PBVAL bvp = GetArray(bap); bvp; bvp = GetNext(bvp), i++) if (i == n) return bvp; - else - i++; return NULL; } // end of GetArrayValue @@ -1104,80 +1121,78 @@ PBVAL BJSON::GetArrayValue(PBVAL bap, int n) /***********************************************************************/ /* Add a Value to the Array Value list. */ /***********************************************************************/ -PBVAL BJSON::AddArrayValue(PBVAL bap, PBVAL nvp, int* x) +void BJSON::AddArrayValue(PBVAL bap, OFFSET nvp, int* x) { + CheckType(bap, TYPE_JAR); if (!nvp) - nvp = NewVal(); + nvp = MOF(NewVal()); - if (bap) { + if (bap->To_Val) { int i = 0, n = (x) ? *x : INT_MAX32; - PBVAL bvp; - for (bvp = bap; bvp; bvp = MVP(bvp->Next), i++) + for (PBVAL bvp = GetArray(bap); bvp; bvp = GetNext(bvp), i++) if (!bvp->Next || (x && i == n)) { - nvp->Next = bvp->Next; - bvp->Next = MOF(nvp); + MVP(nvp)->Next = bvp->Next; + bvp->Next = nvp; break; } // endif Next } else - bap = nvp; + bap->To_Val = nvp; - return bap; + bap->Nd++; } // end of AddArrayValue /***********************************************************************/ /* Merge two arrays. */ /***********************************************************************/ -PBVAL BJSON::MergeArray(PBVAL bap1, PBVAL bap2) +void BJSON::MergeArray(PBVAL bap1, PBVAL bap2) { - if (bap1) { - for (PBVAL bvp = bap2; bvp; bvp = MVP(bvp->Next)) - AddArrayValue(bap1, bvp); + CheckType(bap1, TYPE_JAR); + CheckType(bap2, TYPE_JAR); - return bap1; - } else - return bap2; + if (bap1->To_Val) { + for (PBVAL bvp = GetArray(bap2); bvp; bvp = GetNext(bvp)) + AddArrayValue(bap1, MOF(DupVal(bvp))); + + } else { + bap1->To_Val = bap2->To_Val; + bap1->Nd = bap2->Nd; + } // endif To_Val } // end of MergeArray /***********************************************************************/ -/* Set the nth Value of the Array Value list or add it. */ +/* Set the nth Value of the Array Value list or add it. 
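An invented trace of AddArrayValue above, starting from the array [10, 20, 30]:

  //  AddArrayValue(arp, v)                       appends the value:                       [10, 20, 30, v]
  //  AddArrayValue(arp, v, &x) with x = 1        links it after element 1 (zero-based):   [10, 20, v, 30]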
*/ /***********************************************************************/ -PBVAL BJSON::SetArrayValue(PBVAL bap, PBVAL nvp, int n) +void BJSON::SetArrayValue(PBVAL bap, PBVAL nvp, int n) { - PBVAL bvp = bap, pvp = NULL; + CheckType(bap, TYPE_JAR); + PBVAL bvp = NULL, pvp = NULL; - if (bvp) { - for (int i = 0; bvp; i++, bvp = MVP(bvp->Next)) + if (bap->To_Val) { + for (int i = 0; bvp = GetArray(bap); i++, bvp = GetNext(bvp)) if (i == n) { - bvp->To_Val = nvp->To_Val; - bvp->Nd = nvp->Nd; - bvp->Type = nvp->Type; - return bap; + SetValueVal(bvp, nvp); + return; } else pvp = bvp; } // endif bap - if (!bvp) { - bvp = DupVal(nvp); + if (!bvp) + AddArrayValue(bap, MOF(nvp)); - if (pvp) - pvp->Next = MOF(bvp); - else - bap = bvp; - - } // endif bvp - - return bap; } // end of SetValue /***********************************************************************/ /* Return the text corresponding to all values. */ /***********************************************************************/ -PSZ BJSON::GetArrayText(PGLOBAL g, PBVAL bap, PSTRG text) { - if (bap) { +PSZ BJSON::GetArrayText(PGLOBAL g, PBVAL bap, PSTRG text) +{ + CheckType(bap, TYPE_JAR); + + if (bap->To_Val) { bool b; if (!text) { @@ -1192,7 +1207,7 @@ PSZ BJSON::GetArrayText(PGLOBAL g, PBVAL bap, PSTRG text) { b = false; } // endif text - for (PBVAL bvp = bap; bvp; bvp = MVP(bvp->Next)) { + for (PBVAL bvp = GetArray(bap); bvp; bvp = GetNext(bvp)) { GetValueText(g, bvp, text); if (bvp->Next) @@ -1200,14 +1215,14 @@ PSZ BJSON::GetArrayText(PGLOBAL g, PBVAL bap, PSTRG text) { else if (!b) text->Append(')'); - } // endfor jp + } // endfor bvp if (b) { text->Trim(); return text->GetStr(); } // endif b - } // endif First + } // endif To_Val return NULL; } // end of GetText; @@ -1215,22 +1230,23 @@ PSZ BJSON::GetArrayText(PGLOBAL g, PBVAL bap, PSTRG text) { /***********************************************************************/ /* Delete a Value from the Arrays Value list. */ /***********************************************************************/ -PBVAL BJSON::DeleteValue(PBVAL bap, int n) +void BJSON::DeleteValue(PBVAL bap, int n) { - PBVAL bvp = bap, pvp = NULL; + CheckType(bap, TYPE_JAR); + int i = 0; + PBVAL bvp, pvp = NULL; - if (bvp) - for (int i = 0; bvp; i++, bvp = MVP(bvp->Next)) + for (bvp = GetArray(bap); bvp; i++, bvp = GetNext(bvp)) if (i == n) { if (pvp) pvp->Next = bvp->Next; else - bap = bvp; + bap->To_Val = bvp->Next; + bap->Nd--; break; } // endif i - return bap; } // end of DeleteValue /***********************************************************************/ @@ -1238,7 +1254,9 @@ PBVAL BJSON::DeleteValue(PBVAL bap, int n) /***********************************************************************/ bool BJSON::IsArrayNull(PBVAL bap) { - for (PBVAL bvp = bap; bvp; bvp = MVP(bvp->Next)) + CheckType(bap, TYPE_JAR); + + for (PBVAL bvp = GetArray(bap); bvp; bvp = GetNext(bvp)) if (bvp->Type != TYPE_NULL) return false; @@ -1288,9 +1306,10 @@ PBVAL BJSON::SubAllocStr(OFFSET toval, short nd) /***********************************************************************/ /* Allocate a BVALUE with a given string or numeric value. 
*/ /***********************************************************************/ -PBVAL BJSON::SubAllocVal(PVAL valp) +PBVAL BJSON::NewVal(PVAL valp) { PBVAL vlp = NewVal(); + SetValue(vlp, valp); return vlp; } // end of SubAllocVal @@ -1306,90 +1325,6 @@ PBVAL BJSON::DupVal(PBVAL bvlp) { return bvp; } // end of DupVal -#if 0 -/***********************************************************************/ -/* Constructor for a JVALUE. */ -/***********************************************************************/ -JVALUE::JVALUE(PJSON jsp) : JSON() { - if (jsp->GetType() == TYPE_JVAL) { - PJVAL jvp = (PJVAL)jsp; - - // Val = ((PJVAL)jsp)->GetVal(); - if (jvp->DataType == TYPE_JSON) { - Jsp = jvp->GetJsp(); - DataType = TYPE_JSON; - Nd = 0; - } else { - LLn = jvp->LLn; // Must be LLn on 32 bit machines - Nd = jvp->Nd; - DataType = jvp->DataType; - } // endelse Jsp - - } else { - Jsp = jsp; - // Val = NULL; - DataType = TYPE_JSON; - Nd = 0; - } // endif Type - - Next = NULL; - Del = false; - Type = TYPE_JVAL; -} // end of JVALUE constructor - -/***********************************************************************/ -/* Constructor for a JVALUE with a given string or numeric value. */ -/***********************************************************************/ -JVALUE::JVALUE(PGLOBAL g, PVL vlp) : JSON() { - Jsp = NULL; - Val = vlp; - Next = NULL; - Del = false; - Type = TYPE_JVAL; -} // end of JVALUE constructor -#endif // 0 - -#if 0 -/***********************************************************************/ -/* Constructor for a given string. */ -/***********************************************************************/ -JVALUE::JVALUE(PGLOBAL g, PCSZ strp) : JSON() { - Jsp = NULL; - //Val = AllocVal(g, TYPE_STRG); - Strp = (char*)strp; - DataType = TYPE_STRG; - Nd = 0; - Next = NULL; - Del = false; - Type = TYPE_JVAL; -} // end of JVALUE constructor - -/***********************************************************************/ -/* Set or reset all Jvalue members. */ -/***********************************************************************/ -void JVALUE::Clear(void) { - Jsp = NULL; - Next = NULL; - Type = TYPE_JVAL; - Del = false; - Nd = 0; - DataType = TYPE_NULL; -} // end of Clear - -/***********************************************************************/ -/* Returns the type of the Value's value. */ -/***********************************************************************/ -JTYP JVALUE::GetValType(void) { - if (DataType == TYPE_JSON) - return Jsp->GetType(); - //else if (Val) - // return Val->Type; - else - return DataType; - -} // end of GetValType -#endif // 0 - /***********************************************************************/ /* Return the size of value's value. */ /***********************************************************************/ @@ -1397,37 +1332,15 @@ int BJSON::GetSize(PBVAL vlp, bool b) { switch (vlp->Type) { case TYPE_JAR: - return GetArraySize(MVP(vlp->To_Val)); + return GetArraySize(vlp); case TYPE_JOB: - return GetObjectSize(MPP(vlp->To_Val)); + return GetObjectSize(vlp); default: return 1; } // enswitch Type } // end of GetSize -/***********************************************************************/ -/* Return the Value's Object value. */ -/***********************************************************************/ -PBPR BJSON::GetObject(PBVAL vlp) -{ - if (vlp->Type == TYPE_JOB) - return MPP(vlp->To_Val); - - return NULL; -} // end of GetObject - -/***********************************************************************/ -/* Return the Value's Array value. 
*/ -/***********************************************************************/ -PBVAL BJSON::GetArray(PBVAL vlp) -{ - if (vlp->Type == TYPE_JAR) - return MVP(vlp->To_Val); - - return NULL; -} // end of GetArray - /***********************************************************************/ /* Return the Value's as a Value struct. */ /***********************************************************************/ @@ -1604,11 +1517,12 @@ PSZ BJSON::GetString(PBVAL vp, char* buff) /***********************************************************************/ /* Return the Value's String value. */ /***********************************************************************/ -PSZ BJSON::GetValueText(PGLOBAL g, PBVAL vlp, PSTRG text) { +PSZ BJSON::GetValueText(PGLOBAL g, PBVAL vlp, PSTRG text) +{ if (vlp->Type == TYPE_JOB) - return GetObjectText(g, MPP(vlp->To_Val), text); + return GetObjectText(g, vlp, text); else if (vlp->Type == TYPE_JAR) - return GetArrayText(g, MVP(vlp->To_Val), text); + return GetArrayText(g, vlp, text); char buff[32]; PSZ s = (vlp->Type == TYPE_NULL) ? NULL : GetString(vlp, buff); @@ -1621,15 +1535,19 @@ PSZ BJSON::GetValueText(PGLOBAL g, PBVAL vlp, PSTRG text) { return NULL; } // end of GetText -void BJSON::SetValueObj(PBVAL vlp, PBPR bop) +void BJSON::SetValueObj(PBVAL vlp, PBVAL bop) { - vlp->To_Val = MOF(bop); + CheckType(bop, TYPE_JOB); + vlp->To_Val = bop->To_Val; + vlp->Nd = bop->Nd; vlp->Type = TYPE_JOB; } // end of SetValueObj; void BJSON::SetValueArr(PBVAL vlp, PBVAL bap) { - vlp->To_Val = MOF(bap); + CheckType(bap, TYPE_JAR); + vlp->To_Val = bap->To_Val; + vlp->Nd = bap->Nd; vlp->Type = TYPE_JAR; } // end of SetValue; @@ -1640,14 +1558,17 @@ void BJSON::SetValueVal(PBVAL vlp, PBVAL vp) vlp->Type = vp->Type; } // end of SetValue; -void BJSON::SetValue(PBVAL vlp, PVAL valp) +PBVAL BJSON::SetValue(PBVAL vlp, PVAL valp) { + if (!vlp) + vlp = NewVal(); + if (!valp || valp->IsNull()) { vlp->Type = TYPE_NULL; } else switch (valp->GetType()) { case TYPE_DATE: if (((DTVAL*)valp)->IsFormatted()) - vlp->To_Val = MOF(valp->GetCharValue()); + vlp->To_Val = MOF(PlugDup(G, valp->GetCharValue())); else { char buf[32]; @@ -1657,7 +1578,7 @@ void BJSON::SetValue(PBVAL vlp, PVAL valp) vlp->Type = TYPE_DTM; break; case TYPE_STRING: - vlp->To_Val = MOF(valp->GetCharValue()); + vlp->To_Val = MOF(PlugDup(G, valp->GetCharValue())); vlp->Type = TYPE_STRG; break; case TYPE_DOUBLE: @@ -1702,6 +1623,7 @@ void BJSON::SetValue(PBVAL vlp, PVAL valp) throw(777); } // endswitch Type + return vlp; } // end of SetValue /***********************************************************************/ @@ -1769,10 +1691,10 @@ bool BJSON::IsValueNull(PBVAL vlp) { b = true; break; case TYPE_JOB: - b = IsObjectNull(MPP(vlp->To_Val)); + b = IsObjectNull(vlp); break; case TYPE_JAR: - b = IsArrayNull(MVP(vlp->To_Val)); + b = IsArrayNull(vlp); break; default: b = false; diff --git a/storage/connect/bson.h b/storage/connect/bson.h index 077e71b1413..aceaed5eb7c 100644 --- a/storage/connect/bson.h +++ b/storage/connect/bson.h @@ -17,13 +17,6 @@ #endif #define ARGS MY_MIN(24,(int)len-i),s+MY_MAX(i-3,0) -#define MOF(X) MakeOff(Base, X) -#define MP(X) MakePtr(Base, X) -#define MPP(X) (PBPR)MakePtr(Base, X) -#define MVP(X) (PBVAL)MakePtr(Base, X) -#define MZP(X) (PSZ)MakePtr(Base, X) -#define LLN(X) *(longlong*)MakePtr(Base, X) -#define DBL(X) *(double*)MakePtr(Base, X) class BDOC; class BOUT; @@ -71,6 +64,15 @@ public: // Constructor BJSON(PGLOBAL g, PBVAL vp = NULL) { G = g, Base = G->Sarea; Bvp = vp; } + // Utility functions + inline 
OFFSET MOF(void *p) {return MakeOff(Base, p);} + inline void *MP(OFFSET o) {return MakePtr(Base, o);} + inline PBPR MPP(OFFSET o) {return (PBPR)MakePtr(Base, o);} + inline PBVAL MVP(OFFSET o) {return (PBVAL)MakePtr(Base, o);} + inline PSZ MZP(OFFSET o) {return (PSZ)MakePtr(Base, o);} + inline longlong LLN(OFFSET o) {return *(longlong*)MakePtr(Base, o);} + inline double DBL(OFFSET o) {return *(double*)MakePtr(Base, o);} + void* GetBase(void) { return Base; } void SubSet(bool b = false); void MemSave(void) {G->Saved_Size = ((PPOOLHEADER)G->Sarea)->To_Free;} @@ -82,47 +84,49 @@ public: PBPR SubAllocPair(PSZ key, OFFSET val = 0) {return SubAllocPair(MOF(key), val);} PBVAL NewVal(int type = TYPE_NULL); + PBVAL NewVal(PVAL valp); PBVAL SubAllocVal(OFFSET toval, int type = TYPE_NULL, short nd = 0); PBVAL SubAllocVal(PBVAL toval, int type = TYPE_NULL, short nd = 0) {return SubAllocVal(MOF(toval), type, nd);} PBVAL SubAllocStr(OFFSET str, short nd = 0); PBVAL SubAllocStr(PSZ str, short nd = 0) {return SubAllocStr(MOF(str), nd);} - PBVAL SubAllocVal(PVAL valp); PBVAL DupVal(PBVAL bvp); // Array functions + inline PBVAL GetArray(PBVAL vlp) {return MVP(vlp->To_Val);} int GetArraySize(PBVAL bap, bool b = false); PBVAL GetArrayValue(PBVAL bap, int i); PSZ GetArrayText(PGLOBAL g, PBVAL bap, PSTRG text); - PBVAL MergeArray(PBVAL bap1,PBVAL bap2); - PBVAL DeleteValue(PBVAL bap, int n); - PBVAL AddArrayValue(PBVAL bap, PBVAL nvp = NULL, int* x = NULL); - PBVAL SetArrayValue(PBVAL bap, PBVAL nvp, int n); + void MergeArray(PBVAL bap1,PBVAL bap2); + void DeleteValue(PBVAL bap, int n); + void AddArrayValue(PBVAL bap, OFFSET nvp = NULL, int* x = NULL); + inline void AddArrayValue(PBVAL bap, PBVAL nvp = NULL, int* x = NULL) + {AddArrayValue(bap, MOF(nvp), x);} + void SetArrayValue(PBVAL bap, PBVAL nvp, int n); bool IsArrayNull(PBVAL bap); // Object functions - int GetObjectSize(PBPR bop, bool b = false); - PBPR GetNext(PBPR prp) {return MPP(prp->Next);} - PSZ GetObjectText(PGLOBAL g, PBPR bop, PSTRG text); - PBPR MergeObject(PBPR bop1, PBPR bop2); - PBPR AddPair(PBPR bop, PSZ key, OFFSET val = 0); + inline PBPR GetObject(PBVAL bop) {return MPP(bop->To_Val);} + inline PBPR GetNext(PBPR brp) { return MPP(brp->Next); } + int GetObjectSize(PBVAL bop, bool b = false); + PSZ GetObjectText(PGLOBAL g, PBVAL bop, PSTRG text); + PBVAL MergeObject(PBVAL bop1, PBVAL bop2); + void AddPair(PBVAL bop, PSZ key, OFFSET val = 0); PSZ GetKey(PBPR prp) {return MZP(prp->Key);} PBVAL GetVal(PBPR prp) {return MVP(prp->Vlp);} - PBVAL GetKeyValue(PBPR bop, PSZ key); - PBVAL GetKeyList(PBPR bop); - PBVAL GetObjectValList(PBPR bop); - PBPR SetKeyValue(PBPR bop, OFFSET bvp, PSZ key); - inline PBPR SetKeyValue(PBPR bop, PBVAL vlp, PSZ key) - {return SetKeyValue(bop, MOF(vlp), key);} - PBPR DeleteKey(PBPR bop, PCSZ k); - bool IsObjectNull(PBPR bop); + PBVAL GetKeyValue(PBVAL bop, PSZ key); + PBVAL GetKeyList(PBVAL bop); + PBVAL GetObjectValList(PBVAL bop); + void SetKeyValue(PBVAL bop, OFFSET bvp, PSZ key); + inline void SetKeyValue(PBVAL bop, PBVAL vlp, PSZ key) + {SetKeyValue(bop, MOF(vlp), key);} + void DeleteKey(PBVAL bop, PCSZ k); + bool IsObjectNull(PBVAL bop); // Value functions int GetSize(PBVAL vlp, bool b = false); PBVAL GetNext(PBVAL vlp) {return MVP(vlp->Next);} - PBPR GetObject(PBVAL vlp); - PBVAL GetArray(PBVAL vlp); //PJSON GetJsp(void) { return (DataType == TYPE_JSON ? Jsp : NULL); } PSZ GetValueText(PGLOBAL g, PBVAL vlp, PSTRG text); inline PBVAL GetBson(PBVAL bvp) { return IsJson(bvp) ? 
MVP(bvp->To_Val) : bvp; } @@ -131,10 +135,10 @@ public: long long GetBigint(PBVAL vp); double GetDouble(PBVAL vp); PVAL GetValue(PGLOBAL g, PBVAL vp); - void SetValueObj(PBVAL vlp, PBPR bop); + void SetValueObj(PBVAL vlp, PBVAL bop); void SetValueArr(PBVAL vlp, PBVAL bap); void SetValueVal(PBVAL vlp, PBVAL vp); - void SetValue(PBVAL vlp, PVAL valp); + PBVAL SetValue(PBVAL vlp, PVAL valp); void SetString(PBVAL vlp, PSZ s, int ci = 0); void SetInteger(PBVAL vlp, int n); void SetBigint(PBVAL vlp, longlong ll); diff --git a/storage/connect/bsonudf.cpp b/storage/connect/bsonudf.cpp index 36bec919ffd..356a5c5169f 100644 --- a/storage/connect/bsonudf.cpp +++ b/storage/connect/bsonudf.cpp @@ -348,7 +348,7 @@ PVAL BJNX::MakeJson(PGLOBAL g, PBVAL bvp) } // end of MakeJson /*********************************************************************************/ -/* SetValue: Set a value from a JVALUE contains. */ +/* SetValue: Set a value from a BVALUE contains. */ /*********************************************************************************/ void BJNX::SetJsonValue(PGLOBAL g, PVAL vp, PBVAL vlp) { @@ -381,10 +381,10 @@ void BJNX::SetJsonValue(PGLOBAL g, PVAL vp, PBVAL vlp) break; case TYPE_JAR: - vp->SetValue_psz(GetArrayText(g, MVP(vlp->To_Val), NULL)); + vp->SetValue_psz(GetArrayText(g, vlp, NULL)); break; case TYPE_JOB: - vp->SetValue_psz(GetObjectText(g, MPP(vlp->To_Val), NULL)); + vp->SetValue_psz(GetObjectText(g, vlp, NULL)); break; case TYPE_NULL: vp->SetNull(true); @@ -437,8 +437,8 @@ PBVAL BJNX::GetRowValue(PGLOBAL g, PBVAL row, int i, my_bool b) for (; i < Nod && row; i++) { if (Nodes[i].Op == OP_NUM) { - Value->SetValue(row->Type == TYPE_JAR ? GetArraySize(MVP(row->To_Val)) : 1); - vlp = SubAllocVal(Value); + Value->SetValue(row->Type == TYPE_JAR ? 
GetArraySize(row) : 1); + vlp = NewVal(Value); return vlp; } else if (Nodes[i].Op == OP_XX) { Jb = b; @@ -460,11 +460,11 @@ PBVAL BJNX::GetRowValue(PGLOBAL g, PBVAL row, int i, my_bool b) } //endif Op } else - vlp = GetKeyValue(MPP(row->To_Val), Nodes[i].Key); + vlp = GetKeyValue(row, Nodes[i].Key); break; case TYPE_JAR: - bap = MVP(row->To_Val); + bap = row; if (!Nodes[i].Key) { if (Nodes[i].Op == OP_EQ || Nodes[i].Op == OP_LE) @@ -472,7 +472,7 @@ PBVAL BJNX::GetRowValue(PGLOBAL g, PBVAL row, int i, my_bool b) else if (Nodes[i].Op == OP_EXP) return (PBVAL)ExpandArray(g, bap, i); else - return SubAllocVal(CalculateArray(g, bap, i)); + return NewVal(CalculateArray(g, bap, i)); } else { // Unexpected array, unwrap it as [0] @@ -616,13 +616,13 @@ my_bool BJNX::CheckPath(PGLOBAL g) } else switch (row->Type) { case TYPE_JOB: if (Nodes[i].Key) - val = GetKeyValue(MPP(row->To_Val), Nodes[i].Key); + val = GetKeyValue(row, Nodes[i].Key); break; case TYPE_JAR: if (!Nodes[i].Key) if (Nodes[i].Op == OP_EQ || Nodes[i].Op == OP_LE) - val = GetArrayValue(MVP(row->To_Val), Nodes[i].Rank); + val = GetArrayValue(row, Nodes[i].Rank); break; case TYPE_JVAL: @@ -660,10 +660,10 @@ PBVAL BJNX::GetRow(PGLOBAL g) // Expected Array was not there, wrap the value continue; - val = GetKeyValue(MPP(row->To_Val), Nodes[i].Key); + val = GetKeyValue(row, Nodes[i].Key); break; case TYPE_JAR: - arp = MVP(row->To_Val); + arp = row; if (!Nodes[i].Key) { if (Nodes[i].Op == OP_EQ) @@ -703,9 +703,9 @@ PBVAL BJNX::GetRow(PGLOBAL g) nwr = NewVal(); if (row->Type == TYPE_JOB) { - SetKeyValue(MPP(row->To_Val), MOF(nwr), Nodes[i - 1].Key); + SetKeyValue(row, MOF(nwr), Nodes[i - 1].Key); } else if (row->Type == TYPE_JAR) { - AddArrayValue(MVP(row->To_Val), nwr); + AddArrayValue(row, MOF(nwr)); } else { strcpy(g->Message, "Wrong type when writing new row"); nwr = NULL; @@ -727,7 +727,7 @@ PBVAL BJNX::GetRow(PGLOBAL g) /***********************************************************************/ my_bool BJNX::WriteValue(PGLOBAL g, PBVAL jvalp) { - PBPR objp = NULL; + PBVAL objp = NULL; PBVAL arp = NULL; PBVAL jvp = NULL; PBVAL row = GetRow(g); @@ -736,8 +736,8 @@ my_bool BJNX::WriteValue(PGLOBAL g, PBVAL jvalp) return true; switch (row->Type) { - case TYPE_JOB: objp = MPP(row->To_Val); break; - case TYPE_JAR: arp = MVP(row->To_Val); break; + case TYPE_JOB: objp = row; break; + case TYPE_JAR: arp = row; break; case TYPE_JVAL: jvp = MVP(row->To_Val); break; default: strcpy(g->Message, "Invalid target type"); @@ -749,7 +749,7 @@ my_bool BJNX::WriteValue(PGLOBAL g, PBVAL jvalp) if (Nodes[Nod - 1].Op == OP_EQ) SetArrayValue(arp, jvalp, Nodes[Nod - 1].Rank); else - AddArrayValue(arp, jvalp); + AddArrayValue(arp, MOF(jvalp)); } // endif Key @@ -787,10 +787,10 @@ PSZ BJNX::Locate(PGLOBAL g, PBVAL jsp, PBVAL jvp, int k) switch (jsp->Type) { case TYPE_JAR: - err = LocateArray(g, MVP(jsp->To_Val)); + err = LocateArray(g, jsp); break; case TYPE_JOB: - err = LocateObject(g, MPP(jsp->To_Val)); + err = LocateObject(g, jsp); break; case TYPE_JVAL: err = LocateValue(g, MVP(jsp->To_Val)); @@ -810,9 +810,7 @@ PSZ BJNX::Locate(PGLOBAL g, PBVAL jsp, PBVAL jvp, int k) } // endif's } catch (int n) { - if (trace(1)) - htrc("Exception %d: %s\n", n, g->Message); - + xtrc(1, "Exception %d: %s\n", n, g->Message); PUSH_WARNING(g->Message); } catch (const char* msg) { strcpy(g->Message, msg); @@ -848,7 +846,7 @@ my_bool BJNX::LocateArray(PGLOBAL g, PBVAL jarp) /*********************************************************************************/ /* Locate in a JSON Object. 
*/ /*********************************************************************************/ -my_bool BJNX::LocateObject(PGLOBAL g, PBPR jobp) +my_bool BJNX::LocateObject(PGLOBAL g, PBVAL jobp) { size_t m; @@ -857,7 +855,7 @@ my_bool BJNX::LocateObject(PGLOBAL g, PBPR jobp) m = Jp->N; - for (PBPR pair = jobp; pair && !Found; pair = MPP(pair->Next)) { + for (PBPR pair = GetObject(jobp); pair && !Found; pair = GetNext(pair)) { Jp->N = m; if (Jp->WriteStr(MZP(pair->Key))) @@ -879,9 +877,9 @@ my_bool BJNX::LocateValue(PGLOBAL g, PBVAL jvp) if (CompareTree(g, Bvalp, jvp)) Found = (--K == 0); else if (jvp->Type == TYPE_JAR) - return LocateArray(g, GetArray(jvp)); + return LocateArray(g, jvp); else if (jvp->Type == TYPE_JOB) - return LocateObject(g, GetObject(jvp)); + return LocateObject(g, jvp); return false; } // end of LocateValue @@ -914,10 +912,10 @@ PSZ BJNX::LocateAll(PGLOBAL g, PBVAL jsp, PBVAL bvp, int mx) switch (jsp->Type) { case TYPE_JAR: - err = LocateArrayAll(g, MVP(jsp->To_Val)); + err = LocateArrayAll(g, jsp); break; case TYPE_JOB: - err = LocateObjectAll(g, MPP(jsp->To_Val)); + err = LocateObjectAll(g, jsp); break; case TYPE_JVAL: err = LocateValueAll(g, MVP(jsp->To_Val)); @@ -957,7 +955,7 @@ my_bool BJNX::LocateArrayAll(PGLOBAL g, PBVAL jarp) if (I < Imax) { Jpnp[++I].Type = TYPE_JAR; - for (PBVAL vp = jarp; vp; vp = MVP(vp->Next)) { + for (PBVAL vp = GetArray(jarp); vp; vp = GetNext(vp)) { Jpnp[I].N = i; if (LocateValueAll(g, GetArrayValue(jarp, i))) @@ -975,12 +973,12 @@ my_bool BJNX::LocateArrayAll(PGLOBAL g, PBVAL jarp) /*********************************************************************************/ /* Locate in a JSON Object. */ /*********************************************************************************/ -my_bool BJNX::LocateObjectAll(PGLOBAL g, PBPR jobp) +my_bool BJNX::LocateObjectAll(PGLOBAL g, PBVAL jobp) { if (I < Imax) { Jpnp[++I].Type = TYPE_JOB; - for (PBPR pair = jobp; pair; pair = MPP(pair->Next)) { + for (PBPR pair = GetObject(jobp); pair; pair = GetNext(pair)) { Jpnp[I].Key = MZP(pair->Key); if (LocateValueAll(g, MVP(pair->Vlp))) @@ -1002,9 +1000,9 @@ my_bool BJNX::LocateValueAll(PGLOBAL g, PBVAL jvp) if (CompareTree(g, Bvalp, jvp)) return AddPath(); else if (jvp->Type == TYPE_JAR) - return LocateArrayAll(g, GetArray(jvp)); + return LocateArrayAll(g, jvp); else if (jvp->Type == TYPE_JOB) - return LocateObjectAll(g, GetObject(jvp)); + return LocateObjectAll(g, jvp); return false; } // end of LocateValueAll @@ -1024,11 +1022,11 @@ my_bool BJNX::CompareTree(PGLOBAL g, PBVAL jp1, PBVAL jp2) found = (CompareValues(g, GetArrayValue(jp1, i), GetArrayValue(jp2, i))); } else if (jp1->Type == TYPE_JOB) { - PBPR p1 = MPP(jp1->To_Val), p2 = MPP(jp2->To_Val); + PBPR p1 = GetObject(jp1), p2 = GetObject(jp2); // Keys can be differently ordered for (; found && p1 && p2; p1 = MPP(p1->Next)) - found = CompareValues(g, MVP(p1->Vlp), GetKeyValue(p2, MZP(p1->Key))); + found = CompareValues(g, MVP(p1->Vlp), GetKeyValue(jp2, MZP(p1->Key))); } else if (jp1->Type == TYPE_JVAL) { found = CompareTree(g, MVP(jp1->To_Val), (MVP(jp2->To_Val))); @@ -1048,8 +1046,9 @@ my_bool BJNX::CompareValues(PGLOBAL g, PBVAL v1, PBVAL v2) if (v1 && v2) switch (v1->Type) { case TYPE_JAR: - if (v2->Type == TYPE_JAR) - b = CompareTree(g, MVP(v1->To_Val), MVP(v2->To_Val)); + case TYPE_JOB: + if (v2->Type == v1->Type) + b = CompareTree(g, v1, v2); break; case TYPE_STRG: @@ -1297,12 +1296,10 @@ char* bson_make_array(UDF_INIT* initid, UDF_ARGS* args, char* result, if (!g->Xchk) { if (!CheckMemory(g, initid, 
args, args->arg_count, false)) { BDOC doc(g); - PBVAL bvp = NULL, arp = NULL; + PBVAL bvp = NULL, arp = doc.NewVal(TYPE_JAR); for (uint i = 0; i < args->arg_count; i++) - bvp = doc.AddArrayValue(bvp, MakeBinValue(g, args, i)); - - arp = doc.SubAllocVal(bvp, TYPE_JAR); + doc.AddArrayValue(arp, MakeBinValue(g, args, i)); if (!(str = doc.Serialize(g, arp, NULL, 0))) str = strcpy(result, g->Message); @@ -1361,26 +1358,19 @@ char* bson_array_add_values(UDF_INIT* initid, UDF_ARGS* args, char* result, if (!g->Xchk) { if (!CheckMemory(g, initid, args, args->arg_count, true)) { - uint n = 1; - bool b = false; + uint i = 0; BDOC doc(g); - PBVAL bvp = NULL, arp = MakeBinValue(g, args, 0); + PBVAL arp, bvp = MakeBinValue(g, args, 0); - if (arp->Type == TYPE_JAR) { - bvp = doc.GetArray(arp); - b = !bvp; - } else - n = 0; + if (bvp->Type == TYPE_JAR) { + arp = bvp; + i = 1; + } else // First argument is not an array + arp = doc.NewVal(TYPE_JAR); - for (uint i = n; i < args->arg_count; i++) - bvp = doc.AddArrayValue(bvp, MakeBinValue(g, args, i)); + for (; i < args->arg_count; i++) + doc.AddArrayValue(arp, MakeBinValue(g, args, i)); - if (!n) - arp = doc.SubAllocVal(bvp, TYPE_JAR); - else if (b) - doc.SetValueArr(arp, bvp); - -// str = MakeResult(g, args, top, args->arg_count); str = doc.Serialize(g, arp, NULL, 0); } // endif CheckMemory diff --git a/storage/connect/bsonudf.h b/storage/connect/bsonudf.h index b310aa1827b..b591b6b89f8 100644 --- a/storage/connect/bsonudf.h +++ b/storage/connect/bsonudf.h @@ -46,10 +46,10 @@ protected: PBVAL GetRow(PGLOBAL g); my_bool CompareValues(PGLOBAL g, PBVAL v1, PBVAL v2); my_bool LocateArray(PGLOBAL g, PBVAL jarp); - my_bool LocateObject(PGLOBAL g, PBPR jobp); + my_bool LocateObject(PGLOBAL g, PBVAL jobp); my_bool LocateValue(PGLOBAL g, PBVAL jvp); my_bool LocateArrayAll(PGLOBAL g, PBVAL jarp); - my_bool LocateObjectAll(PGLOBAL g, PBPR jobp); + my_bool LocateObjectAll(PGLOBAL g, PBVAL jobp); my_bool LocateValueAll(PGLOBAL g, PBVAL jvp); my_bool CompareTree(PGLOBAL g, PBVAL jp1, PBVAL jp2); my_bool AddPath(void); diff --git a/storage/connect/filamap.cpp b/storage/connect/filamap.cpp index 007968e0d44..18ba0b1018f 100644 --- a/storage/connect/filamap.cpp +++ b/storage/connect/filamap.cpp @@ -170,7 +170,8 @@ bool MAPFAM::OpenTableFile(PGLOBAL g) htrc("CreateFileMap: %s\n", g->Message); return (mode == MODE_READ && rc == ENOENT) - ? PushWarning(g, Tdbp) : true; + ? false : true; +// ? PushWarning(g, Tdbp) : true; --> assert fails into MariaDB } // endif hFile /*******************************************************************/ diff --git a/storage/connect/global.h b/storage/connect/global.h index f09d5250124..d4a46e1c862 100644 --- a/storage/connect/global.h +++ b/storage/connect/global.h @@ -220,34 +220,11 @@ DllExport char *PlugDup(PGLOBAL g, const char *str); DllExport void htrc(char const *fmt, ...); DllExport void xtrc(uint, char const* fmt, ...); DllExport uint GetTraceValue(void); +DllExport void* MakePtr(void* memp, size_t offset); +DllExport size_t MakeOff(void* memp, void* ptr); #if defined(__cplusplus) } // extern "C" #endif -/***********************************************************************/ -/* Inline routine definitions. */ -/***********************************************************************/ -/***********************************************************************/ -/* This routine makes a pointer from an offset to a memory pointer. 
*/ -/***********************************************************************/ -inline void* MakePtr(void* memp, size_t offset) { - // return ((offset == 0) ? NULL : &((char*)memp)[offset]); - return (!offset) ? NULL : (char *)memp + offset; -} /* end of MakePtr */ - -/***********************************************************************/ -/* This routine makes an offset from a pointer new format. */ -/***********************************************************************/ -inline size_t MakeOff(void* memp, void* ptr) { - if (ptr) { -#if defined(_DEBUG) - assert(ptr > memp); -#endif // _DEBUG - return (size_t)((char*)ptr - (size_t)memp); - } else - return 0; - -} /* end of MakeOff */ - /*-------------------------- End of Global.H --------------------------*/ diff --git a/storage/connect/mysql-test/connect/disabled.def b/storage/connect/mysql-test/connect/disabled.def index 1de4deb0a60..e1f6219f89f 100644 --- a/storage/connect/mysql-test/connect/disabled.def +++ b/storage/connect/mysql-test/connect/disabled.def @@ -20,4 +20,5 @@ mongo_c : Need MongoDB running and its C Driver installed mongo_java_2 : Need MongoDB running and its Java Driver installed mongo_java_3 : Need MongoDB running and its Java Driver installed tbl_thread : Bug MDEV-9844,10179,14214 03/01/2018 OB Option THREAD removed +bson : Development #vcol : Different error code on different versions diff --git a/storage/connect/plugutil.cpp b/storage/connect/plugutil.cpp index 479310703eb..ac07f45ccf2 100644 --- a/storage/connect/plugutil.cpp +++ b/storage/connect/plugutil.cpp @@ -607,4 +607,27 @@ char *PlugDup(PGLOBAL g, const char *str) } // end of PlugDup -/*--------------------- End of PLUGUTIL program -----------------------*/ +/*************************************************************************/ +/* This routine makes a pointer from an offset to a memory pointer. */ +/*************************************************************************/ +void* MakePtr(void* memp, size_t offset) { + // return ((offset == 0) ? NULL : &((char*)memp)[offset]); + return (!offset) ? NULL : (char *)memp + offset; +} /* end of MakePtr */ + +/*************************************************************************/ +/* This routine makes an offset from a pointer new format. */ +/*************************************************************************/ +size_t MakeOff(void* memp, void* ptr) { + if (ptr) { +#if defined(_DEBUG) || defined(DEVELOPMENT) + if (ptr <= memp) + fprintf(stderr, "ptr %p <= memp %p", ptr, memp); +#endif // _DEBUG || DEVELOPMENT + return (size_t)((char*)ptr - (size_t)memp); + } else + return 0; + +} /* end of MakeOff */ + + /*--------------------- End of PLUGUTIL program -----------------------*/ diff --git a/storage/connect/tabbson.cpp b/storage/connect/tabbson.cpp index aa2f5957911..f3378d5f9bc 100644 --- a/storage/connect/tabbson.cpp +++ b/storage/connect/tabbson.cpp @@ -607,7 +607,7 @@ PBVAL BTUTIL::FindRow(PGLOBAL g) if (*objpath != '[' && !IsNum(objpath)) { // objpass is a key val = (jsp->Type == TYPE_JOB) ? 
- GetKeyValue(GetObject(jsp), objpath) : NULL; + GetKeyValue(jsp, objpath) : NULL; } else { if (*objpath == '[') { if (objpath[strlen(objpath) - 1] == ']') @@ -648,7 +648,7 @@ PBVAL BTUTIL::MakeTopTree(PGLOBAL g, PBVAL jsp) char* p; char* objpath = PlugDup(g, Tp->Objname); int i; - PBPR objp = NULL; + PBVAL objp = NULL; PBVAL arp = NULL; PBVAL val = NULL; @@ -799,7 +799,7 @@ PVAL BCUTIL::MakeBson(PGLOBAL g, PBVAL jsp) return Cp->Value; } // end of MakeJson - /***********************************************************************/ +/***********************************************************************/ /* GetColumnValue: */ /***********************************************************************/ PVAL BCUTIL::GetColumnValue(PGLOBAL g, PBVAL row, int i) @@ -826,11 +826,11 @@ PVAL BCUTIL::GetColumnValue(PGLOBAL g, PBVAL row, int i) bvp = row; } else - bvp = GetKeyValue(MPP(row->To_Val), nodes[i].Key); + bvp = GetKeyValue(row, nodes[i].Key); break; case TYPE_JAR: - arp = MVP(row->To_Val); + arp = row; if (!nodes[i].Key) { if (nodes[i].Op == OP_EQ) @@ -838,7 +838,7 @@ PVAL BCUTIL::GetColumnValue(PGLOBAL g, PBVAL row, int i) else if (nodes[i].Op == OP_EXP) return ExpandArray(g, arp, i); else - return CalculateArray(arp, i); + return CalculateArray(g, arp, i); } else { // Unexpected array, unwrap it as [0] @@ -887,8 +887,8 @@ PVAL BCUTIL::ExpandArray(PGLOBAL g, PBVAL arp, int n) throw 666; } // endif jvp - if (n < nod - 1 && GetBson(bvp)) { - SetValue(&bval, GetColumnValue(g, GetBson(bvp), n + 1)); + if (n < nod - 1 && IsJson(bvp)) { + SetValue(&bval, GetColumnValue(g, bvp, n + 1)); bvp = &bval; } // endif n @@ -906,75 +906,67 @@ PVAL BCUTIL::ExpandArray(PGLOBAL g, PBVAL arp, int n) return value; } // end of ExpandArray - /***********************************************************************/ - /* CalculateArray: */ - /***********************************************************************/ -PVAL BCUTIL::CalculateArray(PBVAL arp, int n) +/***********************************************************************/ +/* CalculateArray: */ +/***********************************************************************/ +PVAL BCUTIL::CalculateArray(PGLOBAL g, PBVAL arp, int n) { - throw("CalculateArray NIY"); -#if 0 - int i, ars, nv = 0, nextsame = Tjp->NextSame; + int i, ars, nv = 0, nextsame = Tp->NextSame; bool err; - OPVAL op = Nodes[n].Op; - PVAL val[2], vp = Nodes[n].Valp; - PJVAL jvrp, jvp; - JVALUE jval; + int nod = Cp->Nod; + JNODE *nodes = Cp->Nodes; + OPVAL op = nodes[n].Op; + PVAL val[2], vp = nodes[n].Valp, mulval = Cp->MulVal; + PBVAL jvrp, jvp; + BVAL jval; vp->Reset(); - ars = MY_MIN(Tjp->Limit, arp->size()); - - if (trace(1)) - htrc("CalculateArray: size=%d op=%d nextsame=%d\n", - ars, op, nextsame); + ars = MY_MIN(Tp->Limit, GetArraySize(arp)); + xtrc(1,"CalculateArray: size=%d op=%d nextsame=%d\n", ars, op, nextsame); for (i = 0; i < ars; i++) { - jvrp = arp->GetArrayValue(i); + jvrp = GetArrayValue(arp, i); + xtrc(1, "i=%d nv=%d\n", i, nv); - if (trace(1)) - htrc("i=%d nv=%d\n", i, nv); - - if (!jvrp->IsNull() || (op == OP_CNC && GetJsonNull())) do { - if (jvrp->IsNull()) { - jvrp->Strp = PlugDup(g, GetJsonNull()); - jvrp->DataType = TYPE_STRG; + if (!IsValueNull(jvrp) || (op == OP_CNC && GetJsonNull())) do { + if (IsValueNull(jvrp)) { + SetString(jvrp, PlugDup(G, GetJsonNull())); jvp = jvrp; - } else if (n < Nod - 1 && jvrp->GetJson()) { - Tjp->NextSame = nextsame; - jval.SetValue(g, GetColumnValue(g, jvrp->GetJson(), n + 1)); + } else if (n < nod - 1 && IsJson(jvrp)) { + Tp->NextSame 
= nextsame; + SetValue(&jval, GetColumnValue(g, jvrp, n + 1)); jvp = &jval; } else jvp = jvrp; - if (trace(1)) - htrc("jvp=%s null=%d\n", - jvp->GetString(g), jvp->IsNull() ? 1 : 0); + xtrc(1, "jvp=%s null=%d\n", GetString(jvp), IsValueNull(jvp) ? 1 : 0); if (!nv++) { SetJsonValue(g, vp, jvp); continue; } else - SetJsonValue(g, MulVal, jvp); + SetJsonValue(g, mulval, jvp); - if (!MulVal->IsNull()) { + if (!mulval->IsNull()) { switch (op) { case OP_CNC: - if (Nodes[n].CncVal) { - val[0] = Nodes[n].CncVal; + if (nodes[n].CncVal) { + val[0] = nodes[n].CncVal; err = vp->Compute(g, val, 1, op); } // endif CncVal - val[0] = MulVal; + val[0] = mulval; err = vp->Compute(g, val, 1, op); break; - // case OP_NUM: + // case OP_NUM: case OP_SEP: - val[0] = Nodes[n].Valp; - val[1] = MulVal; + val[0] = nodes[n].Valp; + val[1] = mulval; err = vp->Compute(g, val, 2, OP_ADD); break; default: - val[0] = Nodes[n].Valp; - val[1] = MulVal; + val[0] = nodes[n].Valp; + val[1] = mulval; err = vp->Compute(g, val, 2, op); } // endswitch Op @@ -991,24 +983,23 @@ PVAL BCUTIL::CalculateArray(PBVAL arp, int n) } // endif Null - } while (Tjp->NextSame > nextsame); + } while (Tp->NextSame > nextsame); } // endfor i if (op == OP_SEP) { // Calculate average - MulVal->SetValue(nv); + mulval->SetValue(nv); val[0] = vp; - val[1] = MulVal; + val[1] = mulval; if (vp->Compute(g, val, 2, OP_DIV)) vp->Reset(); } // endif Op - Tjp->NextSame = nextsame; + Tp->NextSame = nextsame; return vp; -#endif // 0 } // end of CalculateArray /***********************************************************************/ @@ -1031,7 +1022,7 @@ PBVAL BCUTIL::GetRow(PGLOBAL g) // Expected Array was not there, wrap the value continue; - val = GetKeyValue(MPP(row->To_Val), nodes[i].Key); + val = GetKeyValue(row, nodes[i].Key); break; case TYPE_JAR: arp = row; @@ -1058,7 +1049,7 @@ PBVAL BCUTIL::GetRow(PGLOBAL g) } // endswitch Type if (val) { - row = GetBson(val); + row = val; } else { // Construct missing objects for (i++; row && i < nod; i++) { @@ -1071,9 +1062,9 @@ PBVAL BCUTIL::GetRow(PGLOBAL g) nwr = NewVal(TYPE_JOB); if (row->Type == TYPE_JOB) { - SetKeyValue(MPP(row->To_Val), MOF(nwr), nodes[i - 1].Key); + SetKeyValue(row, MOF(nwr), nodes[i - 1].Key); } else if (row->Type == TYPE_JAR) { - AddArrayValue(MVP(row->To_Val), nwr); + AddArrayValue(row, nwr); } else { strcpy(g->Message, "Wrong type when writing new row"); nwr = NULL; @@ -1157,7 +1148,8 @@ bool BSONDEF::DefineAM(PGLOBAL g, LPCSTR am, int poff) /***********************************************************************/ /* GetTable: makes a new Table Description Block. 
*/ /***********************************************************************/ -PTDB BSONDEF::GetTable(PGLOBAL g, MODE m) { +PTDB BSONDEF::GetTable(PGLOBAL g, MODE m) +{ if (trace(1)) htrc("BSON GetTable Pretty=%d Uri=%s\n", Pretty, SVP(Uri)); @@ -1169,7 +1161,6 @@ PTDB BSONDEF::GetTable(PGLOBAL g, MODE m) { // JSN not used for pretty=1 for insert or delete if (Pretty <= 0 || (Pretty == 1 && (m == MODE_READ || m == MODE_UPDATE))) { - PGLOBAL G; USETEMP tmp = UseTemp(); bool map = Mapped && Pretty >= 0 && m != MODE_INSERT && !(tmp != TMP_NO && m == MODE_UPDATE) && @@ -1178,14 +1169,7 @@ PTDB BSONDEF::GetTable(PGLOBAL g, MODE m) { if (Lrecl) { // Allocate the parse work memory - G = (PGLOBAL)PlugSubAlloc(g, NULL, sizeof(GLOBAL)); - memset(G, 0, sizeof(GLOBAL)); - G->Sarea_Size = (size_t)Lrecl * 6; - G->Sarea = PlugSubAlloc(g, NULL, G->Sarea_Size); - PlugSubSet(G->Sarea, G->Sarea_Size); - G->jump_level = 0; -// ((TDBBSN*)tdbp)->G = G; -// ((TDBBSN*)tdbp)->Docp = new(g) BDOC(G->Sarea); + G = PlugInit(NULL, (size_t)Lrecl * 6); } else { strcpy(g->Message, "LRECL is not defined"); return NULL; @@ -1252,7 +1236,7 @@ PTDB BSONDEF::GetTable(PGLOBAL g, MODE m) { txfp = new(g) DOSFAM(this); // Txfp must be set for TDBBSN - tdbp = new(g) TDBBSN(G, this, txfp); + tdbp = new(g) TDBBSN(g, this, txfp); } else { if (Zipped) { #if defined(ZIP_SUPPORT) @@ -1366,7 +1350,8 @@ PTDB TDBBSN::Clone(PTABS t) /***********************************************************************/ /* Allocate JSN column description block. */ /***********************************************************************/ -PCOL TDBBSN::MakeCol(PGLOBAL g, PCOLDEF cdp, PCOL cprec, int n) { +PCOL TDBBSN::MakeCol(PGLOBAL g, PCOLDEF cdp, PCOL cprec, int n) +{ PBSCOL colp = new(g) BSONCOL(g, cdp, this, cprec, n); return (colp->ParseJpath(g)) ? NULL : colp; @@ -1375,7 +1360,8 @@ PCOL TDBBSN::MakeCol(PGLOBAL g, PCOLDEF cdp, PCOL cprec, int n) { /***********************************************************************/ /* InsertSpecialColumn: Put a special column ahead of the column list.*/ /***********************************************************************/ -PCOL TDBBSN::InsertSpecialColumn(PCOL colp) { +PCOL TDBBSN::InsertSpecialColumn(PCOL colp) +{ if (!colp->IsSpecial()) return NULL; @@ -1390,7 +1376,8 @@ PCOL TDBBSN::InsertSpecialColumn(PCOL colp) { /***********************************************************************/ /* JSON Cardinality: returns table size in number of rows. */ /***********************************************************************/ -int TDBBSN::Cardinality(PGLOBAL g) { +int TDBBSN::Cardinality(PGLOBAL g) +{ if (!g) return 0; else if (Cardinal < 0) { @@ -1404,7 +1391,8 @@ int TDBBSN::Cardinality(PGLOBAL g) { /***********************************************************************/ /* JSON GetMaxSize: returns file size estimate in number of lines. */ /***********************************************************************/ -int TDBBSN::GetMaxSize(PGLOBAL g) { +int TDBBSN::GetMaxSize(PGLOBAL g) +{ if (MaxSize < 0) MaxSize = TDBDOS::GetMaxSize(g) * ((Xcol) ? Limit : 1); @@ -1414,7 +1402,8 @@ int TDBBSN::GetMaxSize(PGLOBAL g) { /***********************************************************************/ /* JSON EstimatedLength. Returns an estimated minimum line length. */ /***********************************************************************/ -int TDBBSN::EstimatedLength(void) { +int TDBBSN::EstimatedLength(void) +{ if (AvgLen <= 0) return (Lrecl ? 
Lrecl : 1024) / 8; // TODO: make it better else @@ -1425,7 +1414,8 @@ int TDBBSN::EstimatedLength(void) { /***********************************************************************/ /* OpenDB: Data Base open routine for JSN access method. */ /***********************************************************************/ -bool TDBBSN::OpenDB(PGLOBAL g) { +bool TDBBSN::OpenDB(PGLOBAL g) +{ if (Use == USE_OPEN) { /*******************************************************************/ /* Table already open replace it at its beginning. */ @@ -1437,13 +1427,11 @@ bool TDBBSN::OpenDB(PGLOBAL g) { /*******************************************************************/ /* First opening. */ /*******************************************************************/ -// Docp = new(g) BDOC(g->Sarea); - if (Mode == MODE_INSERT) switch (Jmode) { -// case MODE_OBJECT: Row = new(g) JOBJECT; break; -// case MODE_ARRAY: Row = new(g) JARRAY; break; -// case MODE_VALUE: Row = new(g) JVALUE; break; + case MODE_OBJECT: Row = Bp->NewVal(TYPE_JOB); break; + case MODE_ARRAY: Row = Bp->NewVal(TYPE_JAR); break; + case MODE_VALUE: Row = Bp->NewVal(TYPE_JVAL); break; default: sprintf(g->Message, "Invalid Jmode %d", Jmode); return true; @@ -1507,7 +1495,8 @@ bool TDBBSN::OpenDB(PGLOBAL g) { /* This is called from TDBDOS::OpenDB and must be executed before */ /* Kindex construction if the file is accessed using an index. */ /***********************************************************************/ -bool TDBBSN::SkipHeader(PGLOBAL g) { +bool TDBBSN::SkipHeader(PGLOBAL g) +{ int len = GetFileLength(g); bool rc = false; @@ -1531,7 +1520,8 @@ bool TDBBSN::SkipHeader(PGLOBAL g) { /***********************************************************************/ /* ReadDB: Data Base read routine for JSN access method. */ /***********************************************************************/ -int TDBBSN::ReadDB(PGLOBAL g) { +int TDBBSN::ReadDB(PGLOBAL g) +{ int rc; N++; @@ -1589,7 +1579,8 @@ int TDBBSN::ReadDB(PGLOBAL g) { /***********************************************************************/ /* PrepareWriting: Prepare the line for WriteDB. */ /***********************************************************************/ -bool TDBBSN::PrepareWriting(PGLOBAL g) { +bool TDBBSN::PrepareWriting(PGLOBAL g) +{ PSZ s; if (!(Top = Bp->MakeTopTree(g, Row))) @@ -1629,10 +1620,10 @@ int TDBBSN::WriteDB(PGLOBAL g) { void TDBBSN::CloseDB(PGLOBAL g) { TDBDOS::CloseDB(g); - ((PBDEF)To_Def)->G = PlugExit(((PBDEF)To_Def)->G); + Bp->G = PlugExit(Bp->G); } // end of CloseDB - /* ---------------------------- BSONCOL ------------------------------ */ +/* ---------------------------- BSONCOL ------------------------------ */ /***********************************************************************/ /* BSONCOL public constructor. */ @@ -1689,7 +1680,8 @@ bool BSONCOL::SetBuffer(PGLOBAL g, PVAL value, bool ok, bool check) /***********************************************************************/ /* Check whether this object is expanded. */ /***********************************************************************/ -bool BSONCOL::CheckExpand(PGLOBAL g, int i, PSZ nm, bool b) { +bool BSONCOL::CheckExpand(PGLOBAL g, int i, PSZ nm, bool b) +{ if ((Tbp->Xcol && nm && !strcmp(nm, Tbp->Xcol) && (Tbp->Xval < 0 || Tbp->Xval == i)) || Xpd) { Xpd = true; // Expandable object @@ -1705,7 +1697,8 @@ bool BSONCOL::CheckExpand(PGLOBAL g, int i, PSZ nm, bool b) { /***********************************************************************/ /* Analyse array processing options. 
*/ /***********************************************************************/ -bool BSONCOL::SetArrayOptions(PGLOBAL g, char* p, int i, PSZ nm) { +bool BSONCOL::SetArrayOptions(PGLOBAL g, char* p, int i, PSZ nm) +{ int n; bool dg = true, b = false; PJNODE jnp = &Nodes[i]; @@ -1838,7 +1831,8 @@ bool BSONCOL::SetArrayOptions(PGLOBAL g, char* p, int i, PSZ nm) { /* when creating the table. It permits to indicate the position of */ /* the node corresponding to that column. */ /***********************************************************************/ -bool BSONCOL::ParseJpath(PGLOBAL g) { +bool BSONCOL::ParseJpath(PGLOBAL g) +{ char* p, * p1 = NULL, * p2 = NULL, * pbuf = NULL; int i; bool a; @@ -1925,7 +1919,8 @@ fin: /***********************************************************************/ /* Get Jpath converted to Mongo path. */ /***********************************************************************/ -PSZ BSONCOL::GetJpath(PGLOBAL g, bool proj) { +PSZ BSONCOL::GetJpath(PGLOBAL g, bool proj) +{ if (Jpath) { char* p1, * p2, * mgopath; int i = 0; @@ -1989,36 +1984,11 @@ PSZ BSONCOL::GetJpath(PGLOBAL g, bool proj) { } // end of GetJpath -/***********************************************************************/ -/* MakeJson: Serialize the json item and set value to it. */ -/***********************************************************************/ -PVAL BSONCOL::MakeBson(PGLOBAL g, PBVAL jsp) { - if (Value->IsTypeNum()) { - strcpy(g->Message, "Cannot make Json for a numeric column"); - Value->Reset(); -#if 0 - } else if (Value->GetType() == TYPE_BIN) { - if ((unsigned)Value->GetClen() >= sizeof(BSON)) { - ulong len = Tjp->Lrecl ? Tjp->Lrecl : 500; - PBSON bsp = JbinAlloc(g, NULL, len, jsp); - - strcat(bsp->Msg, " column"); - ((BINVAL*)Value)->SetBinValue(bsp, sizeof(BSON)); - } else { - strcpy(g->Message, "Column size too small"); - Value->SetValue_char(NULL, 0); - } // endif Clen -#endif 0 - } else - Value->SetValue_psz(Cp->SerialVal(g, jsp, 0)); - - return Value; -} // end of MakeJson - /***********************************************************************/ /* ReadColumn: */ /***********************************************************************/ -void BSONCOL::ReadColumn(PGLOBAL g) { +void BSONCOL::ReadColumn(PGLOBAL g) +{ if (!Tbp->SameRow || Xnod >= Tbp->SameRow) Value->SetValue_pval(Cp->GetColumnValue(g, Tbp->Row, 0)); @@ -2034,7 +2004,8 @@ void BSONCOL::ReadColumn(PGLOBAL g) { /***********************************************************************/ /* WriteColumn: */ /***********************************************************************/ -void BSONCOL::WriteColumn(PGLOBAL g) { +void BSONCOL::WriteColumn(PGLOBAL g) +{ if (Xpd && Tbp->Pretty < 2) { strcpy(g->Message, "Cannot write expanded column when Pretty is not 2"); throw 666; @@ -2052,75 +2023,48 @@ void BSONCOL::WriteColumn(PGLOBAL g) { if (Value->IsNull() && Tbp->Mode == MODE_INSERT) return; - throw "Write BSON NIY"; - -#if 0 - char* s; - PBPR objp = NULL; - PBVAL arp = NULL; - PBVAL jvp = NULL; - PBVAL jsp, row = Cp->GetRow(); - - switch (row->Type) { - case TYPE_JOB: objp = (PJOB)row; break; - case TYPE_JAR: arp = (PJAR)row; break; - case TYPE_JVAL: jvp = (PJVAL)row; break; - default: row = NULL; // ??????????????????????????? 
- } // endswitch Type + PBVAL jsp, row = Cp->GetRow(g); if (row) switch (Buf_Type) { case TYPE_STRING: - if (Nodes[Nod - 1].Op == OP_XX) { - s = Value->GetCharValue(); - - if (!(jsp = ParseJson(G, s, strlen(s)))) { - strcpy(g->Message, s); - throw 666; - } // endif jsp - - if (arp) { - if (Nod > 1 && Nodes[Nod - 2].Op == OP_EQ) - arp->SetArrayValue(G, new(G) JVALUE(jsp), Nodes[Nod - 2].Rank); - else - arp->AddArrayValue(G, new(G) JVALUE(jsp)); - - arp->InitArray(G); - } else if (objp) { - if (Nod > 1 && Nodes[Nod - 2].Key) - objp->SetKeyValue(G, new(G) JVALUE(jsp), Nodes[Nod - 2].Key); - - } else if (jvp) - jvp->SetValue(jsp); - - break; - } // endif Op - - // fall through case TYPE_DATE: case TYPE_INT: case TYPE_TINY: case TYPE_SHORT: case TYPE_BIGINT: case TYPE_DOUBLE: - if (arp) { + if (Buf_Type == TYPE_STRING && Nodes[Nod - 1].Op == OP_XX) { + char *s = Value->GetCharValue(); + + if (!(jsp = Cp->ParseJson(g, s, strlen(s)))) { + strcpy(g->Message, s); + throw 666; + } // endif jsp + + } else + jsp = Cp->NewVal(Value); + + switch (row->Type) { + case TYPE_JAR: if (Nodes[Nod - 1].Op == OP_EQ) - arp->SetArrayValue(G, new(G) JVALUE(G, Value), Nodes[Nod - 1].Rank); + Cp->SetArrayValue(row, jsp, Nodes[Nod - 1].Rank); else - arp->AddArrayValue(G, new(G) JVALUE(G, Value)); + Cp->AddArrayValue(row, jsp); - arp->InitArray(G); - } else if (objp) { + case TYPE_JOB: if (Nodes[Nod - 1].Key) - objp->SetKeyValue(G, new(G) JVALUE(G, Value), Nodes[Nod - 1].Key); + Cp->SetKeyValue(row, jsp, Nodes[Nod - 1].Key); - } else if (jvp) - jvp->SetValue(g, Value); + break; + case TYPE_JVAL: + default: + Cp->SetValueVal(row, jsp); + } // endswitch Type break; default: // ?????????? sprintf(g->Message, "Invalid column type %d", Buf_Type); } // endswitch Type -#endif // 0 } // end of WriteColumn @@ -2169,7 +2113,7 @@ int TDBBSON::MakeNewDoc(PGLOBAL g) // Create a void table that will be populated Docp = Bp->NewVal(TYPE_JAR); - if (Bp->MakeTopTree(g, Docp)) + if (!(Top = Bp->MakeTopTree(g, Docp))) return RC_FX; Done = true; @@ -2187,7 +2131,7 @@ int TDBBSON::MakeDocument(PGLOBAL g) my_bool a; MODE mode = Mode; PBVAL jsp; - PBPR objp = NULL; + PBVAL objp = NULL; PBVAL arp = NULL; PBVAL val = NULL; @@ -2260,7 +2204,7 @@ int TDBBSON::MakeDocument(PGLOBAL g) } // endif Type key = p; - objp = Bp->GetObject(jsp); + objp = jsp; arp = NULL; val = Bp->GetKeyValue(objp, key); @@ -2285,7 +2229,7 @@ int TDBBSON::MakeDocument(PGLOBAL g) return RC_FX; } // endif Type - arp = Bp->GetArray(jsp); + arp = jsp; objp = NULL; i = atoi(p) - B; val = Bp->GetArrayValue(arp, i); @@ -2449,9 +2393,9 @@ bool TDBBSON::OpenDB(PGLOBAL g) if (Mode == MODE_INSERT) switch (Jmode) { - case MODE_OBJECT: Row = Bp->NewVal(TYPE_JOB); break; - case MODE_ARRAY: Row = Bp->NewVal(TYPE_JAR); break; - case MODE_VALUE: Row = Bp->NewVal(); break; + case MODE_OBJECT: Row = Bp->NewVal(TYPE_JOB); break; + case MODE_ARRAY: Row = Bp->NewVal(TYPE_JAR); break; + case MODE_VALUE: Row = Bp->NewVal(TYPE_JVAL); break; default: sprintf(g->Message, "Invalid Jmode %d", Jmode); return true; @@ -2479,7 +2423,7 @@ int TDBBSON::ReadDB(PGLOBAL) M++; rc = RC_OK; } else if (++Fpos < (signed)Bp->GetSize(Docp)) { - Row = Bp->GetArrayValue(Bp->GetBson(Docp), Fpos); + Row = Bp->GetArrayValue(Docp, Fpos); if (Row->Type == TYPE_JVAL) Row = Bp->GetBson(Row); @@ -2498,32 +2442,17 @@ int TDBBSON::ReadDB(PGLOBAL) /***********************************************************************/ int TDBBSON::WriteDB(PGLOBAL g) { - if (Jmode == MODE_OBJECT) { - PBVAL vp = Bp->DupVal(Row); + if (Mode == 
MODE_INSERT) { + Bp->AddArrayValue(Docp, Row); - if (Mode == MODE_INSERT) { - Bp->AddArrayValue(Docp, vp); - Row = Bp->NewVal(TYPE_JOB); - } else if (Bp->SetArrayValue(Docp, vp, Fpos)) - return RC_FX; + switch(Jmode) { + case MODE_OBJECT: Row = Bp->NewVal(TYPE_JOB); break; + case MODE_ARRAY: Row = Bp->NewVal(TYPE_JAR); break; + default: Row = Bp->NewVal(); break; + } // endswitch Jmode - } else if (Jmode == MODE_ARRAY) { - PBVAL vp = Bp->DupVal(Row); - - if (Mode == MODE_INSERT) { - Bp->AddArrayValue(Docp, vp); - Row = Bp->NewVal(TYPE_JAR); - } else if (Bp->SetArrayValue(Docp, vp, Fpos)) - return RC_FX; - - } else { // if (Jmode == MODE_VALUE) - if (Mode == MODE_INSERT) { - Bp->AddArrayValue(Docp, Row); - Row = Bp->NewVal(); - } else if (Bp->SetArrayValue(Docp, Row, Fpos)) - return RC_FX; - - } // endif Jmode + } else + Bp->SetArrayValue(Docp, Row, Fpos); Changed = true; return RC_OK; @@ -2534,26 +2463,15 @@ int TDBBSON::WriteDB(PGLOBAL g) /***********************************************************************/ int TDBBSON::DeleteDB(PGLOBAL g, int irc) { - strcpy(g->Message, "BSON Delete NIY"); - return RC_FX; -#if 0 - if (irc == RC_OK) { + if (irc == RC_OK) // Deleted current row - if (Doc->DeleteValue(Fpos)) { - sprintf(g->Message, "Value %d does not exist", Fpos + 1); - return RC_FX; - } // endif Delete - - Changed = true; - } else if (irc == RC_FX) + Bp->DeleteValue(Docp, Fpos); + else if (irc == RC_FX) // Delete all - for (int i = 0; i < Doc->size(); i++) { - Doc->DeleteValue(i); - Changed = true; - } // endfor i + Docp->To_Val = 0; + Changed = true; return RC_OK; -#endif // 0 } // end of DeleteDB /***********************************************************************/ diff --git a/storage/connect/tabbson.h b/storage/connect/tabbson.h index 127370ce342..677bcbfd6e9 100644 --- a/storage/connect/tabbson.h +++ b/storage/connect/tabbson.h @@ -134,7 +134,7 @@ public: PVAL MakeBson(PGLOBAL g, PBVAL jsp); PVAL GetColumnValue(PGLOBAL g, PBVAL row, int i); PVAL ExpandArray(PGLOBAL g, PBVAL arp, int n); - PVAL CalculateArray(PBVAL arp, int n); + PVAL CalculateArray(PGLOBAL g, PBVAL arp, int n); PBVAL GetRow(PGLOBAL g); protected: @@ -243,7 +243,7 @@ public: // Methods virtual bool SetBuffer(PGLOBAL g, PVAL value, bool ok, bool check); - bool ParseJpath(PGLOBAL g); + bool ParseJpath(PGLOBAL g); virtual PSZ GetJpath(PGLOBAL g, bool proj); virtual void ReadColumn(PGLOBAL g); virtual void WriteColumn(PGLOBAL g); @@ -251,12 +251,6 @@ public: protected: bool CheckExpand(PGLOBAL g, int i, PSZ nm, bool b); bool SetArrayOptions(PGLOBAL g, char* p, int i, PSZ nm); -//PVAL GetColumnValue(PGLOBAL g, PBVAL row, int i); -//PVAL ExpandArray(PGLOBAL g, PBVAL arp, int n); -//PVAL CalculateArray(PGLOBAL g, PBVAL arp, int n); - PVAL MakeBson(PGLOBAL g, PBVAL jsp); -//void SetJsonValue(PGLOBAL g, PVAL vp, PBVAL val); -//PBVAL GetRow(PGLOBAL g); // Default constructor not to be used BSONCOL(void) {} From 871532c3b9155fa00f7de61ef02f0c2d0f862d57 Mon Sep 17 00:00:00 2001 From: Olivier Bertrand Date: Tue, 8 Dec 2020 01:15:40 +0100 Subject: [PATCH 036/150] - Continue BSON implementation modified: storage/connect/bson.cpp modified: storage/connect/bson.h modified: storage/connect/bsonudf.cpp modified: storage/connect/cmgfam.cpp modified: storage/connect/cmgfam.h modified: storage/connect/ha_connect.cc modified: storage/connect/jmgfam.cpp modified: storage/connect/jmgfam.h modified: storage/connect/jmgoconn.cpp modified: storage/connect/mycat.cc modified: storage/connect/tabbson.cpp modified: 
storage/connect/tabjson.cpp --- storage/connect/bson.cpp | 71 ++- storage/connect/bson.h | 7 +- storage/connect/bsonudf.cpp | 7 +- storage/connect/cmgfam.cpp | 49 +- storage/connect/cmgfam.h | 7 +- storage/connect/ha_connect.cc | 46 +- storage/connect/jmgfam.cpp | 41 +- storage/connect/jmgfam.h | 7 +- storage/connect/jmgoconn.cpp | 4 +- storage/connect/mycat.cc | 49 +- .../connect/mysql-test/connect/r/bson.result | 517 ++++++++++++++++++ .../connect/mysql-test/connect/t/bson.test | 294 ++++++++++ storage/connect/tabbson.cpp | 32 +- storage/connect/tabjson.cpp | 1 + 14 files changed, 1033 insertions(+), 99 deletions(-) create mode 100644 storage/connect/mysql-test/connect/r/bson.result create mode 100644 storage/connect/mysql-test/connect/t/bson.test diff --git a/storage/connect/bson.cpp b/storage/connect/bson.cpp index b4ce7ec1505..bf0755d4eae 100644 --- a/storage/connect/bson.cpp +++ b/storage/connect/bson.cpp @@ -108,7 +108,8 @@ BDOC::BDOC(PGLOBAL G) : BJSON(G, NULL) /* Parse a json string. */ /* Note: when pretty is not known, the caller set pretty to 3. */ /***********************************************************************/ -PBVAL BDOC::ParseJson(PGLOBAL g, char* js, size_t lng, int* ptyp, bool* comma) { +PBVAL BDOC::ParseJson(PGLOBAL g, char* js, size_t lng, int* ptyp, bool* comma) +{ int i, pretty = (ptyp) ? *ptyp : 3; bool b = false; @@ -233,7 +234,8 @@ OFFSET BDOC::ParseAsArray(int& i, int pretty, int* ptyp) { /***********************************************************************/ /* Parse a JSON Array. */ /***********************************************************************/ -OFFSET BDOC::ParseArray(int& i) { +OFFSET BDOC::ParseArray(int& i) +{ int level = 0; bool b = (!i); PBVAL vlp, firstvlp, lastvlp; @@ -290,7 +292,8 @@ OFFSET BDOC::ParseArray(int& i) { /***********************************************************************/ /* Parse a JSON Object. */ /***********************************************************************/ -OFFSET BDOC::ParseObject(int& i) { +OFFSET BDOC::ParseObject(int& i) +{ OFFSET key; int level = 0; PBPR bpp, firstbpp, lastbpp; @@ -439,7 +442,8 @@ err: /***********************************************************************/ /* Unescape and parse a JSON string. */ /***********************************************************************/ -OFFSET BDOC::ParseString(int& i) { +OFFSET BDOC::ParseString(int& i) +{ uchar* p; int n = 0; @@ -448,13 +452,13 @@ OFFSET BDOC::ParseString(int& i) { throw("ParseString: Out of memory"); // The size to allocate is not known yet - p = (uchar*)PlugSubAlloc(G, NULL, 0); + p = (uchar*)BsonSubAlloc(0); for (; i < len; i++) switch (s[i]) { case '"': p[n++] = 0; - PlugSubAlloc(G, NULL, n); + BsonSubAlloc(n); return MOF(p); case '\\': if (++i < len) { @@ -525,7 +529,8 @@ throw("Unexpected EOF in String"); /***********************************************************************/ /* Parse a JSON numeric value. 
*/ /***********************************************************************/ -void BDOC::ParseNumeric(int& i, PBVAL vlp) { +void BDOC::ParseNumeric(int& i, PBVAL vlp) +{ char buf[50]; int n = 0; short nd = 0; @@ -580,7 +585,7 @@ fin: if (has_dot || has_e) { double dv = strtod(buf, NULL); - if (nd > 6) { + if (nd > 5 || dv > FLT_MAX || dv < FLT_MIN) { double* dvp = (double*)PlugSubAlloc(G, NULL, sizeof(double)); *dvp = dv; @@ -620,7 +625,8 @@ err: /***********************************************************************/ /* Serialize a BJSON document tree: */ /***********************************************************************/ -PSZ BDOC::Serialize(PGLOBAL g, PBVAL bvp, char* fn, int pretty) { +PSZ BDOC::Serialize(PGLOBAL g, PBVAL bvp, char* fn, int pretty) +{ PSZ str = NULL; bool b = false, err = true; FILE* fs = NULL; @@ -697,7 +703,8 @@ PSZ BDOC::Serialize(PGLOBAL g, PBVAL bvp, char* fn, int pretty) { /***********************************************************************/ /* Serialize a JSON Array. */ /***********************************************************************/ -bool BDOC::SerializeArray(OFFSET arp, bool b) { +bool BDOC::SerializeArray(OFFSET arp, bool b) +{ bool first = true; PBVAL vp = MVP(arp); @@ -740,7 +747,8 @@ bool BDOC::SerializeArray(OFFSET arp, bool b) { /***********************************************************************/ /* Serialize a JSON Object. */ /***********************************************************************/ -bool BDOC::SerializeObject(OFFSET obp) { +bool BDOC::SerializeObject(OFFSET obp) +{ bool first = true; PBPR prp = MPP(obp); @@ -768,7 +776,8 @@ bool BDOC::SerializeObject(OFFSET obp) { /***********************************************************************/ /* Serialize a JSON Value. */ /***********************************************************************/ -bool BDOC::SerializeValue(PBVAL jvp) { +bool BDOC::SerializeValue(PBVAL jvp) +{ char buf[64]; if (jvp) switch (jvp->Type) { @@ -833,7 +842,22 @@ void* BJSON::BsonSubAlloc(size_t size) xtrc(16, "Done memp=%p used=%zd free=%zd\n", memp, pph->To_Free, pph->FreeBlk); return memp; -} /* end of BsonSubAlloc */ +} // end of BsonSubAlloc + +/*********************************************************************************/ +/* Program for SubSet re-initialization of the memory pool. */ +/*********************************************************************************/ +OFFSET BJSON::DupStr(PSZ str) +{ + if (str) { + PSZ sm = (PSZ)BsonSubAlloc(strlen(str) + 1); + + strcpy(sm, str); + return MOF(sm); + } else + return NULL; + +} // end of DupStr /*********************************************************************************/ /* Program for SubSet re-initialization of the memory pool. 
*/ @@ -888,7 +912,7 @@ void BJSON::AddPair(PBVAL bop, PSZ key, OFFSET val) { CheckType(bop, TYPE_JOB); PBPR brp; - OFFSET nrp = MOF(SubAllocPair(key, val)); + OFFSET nrp = NewPair(key, val); if (bop->To_Val) { for (brp = GetObject(bop); brp->Next; brp = GetNext(brp)); @@ -948,8 +972,9 @@ PBVAL BJSON::GetKeyValue(PBVAL bop, PSZ key) PSZ BJSON::GetObjectText(PGLOBAL g, PBVAL bop, PSTRG text) { CheckType(bop, TYPE_JOB); + PBPR brp = GetObject(bop); - if (bop->To_Val) { + if (brp) { bool b; if (!text) { @@ -962,12 +987,11 @@ PSZ BJSON::GetObjectText(PGLOBAL g, PBVAL bop, PSTRG text) b = false; } // endif text -#if 0 - if (b && !bop->Next && !strcmp(MZP(bop->Key), "$date")) { + if (b && !brp->Next && !strcmp(MZP(brp->Key), "$date")) { int i; PSZ s; - First->Val->GetText(g, text); + GetValueText(g, MVP(brp->Vlp), text); s = text->GetStr(); i = (s[1] == '-' ? 2 : 1); @@ -983,10 +1007,7 @@ PSZ BJSON::GetObjectText(PGLOBAL g, PBVAL bop, PSTRG text) } // endif text - } else -#endif // 0 - - for (PBPR brp = GetObject(bop); brp; brp = GetNext(brp)) { + } else for (PBPR brp = GetObject(bop); brp; brp = GetNext(brp)) { GetValueText(g, GetVal(brp), text); if (brp->Next) @@ -1021,10 +1042,10 @@ void BJSON::SetKeyValue(PBVAL bop, OFFSET bvp, PSZ key) prp = brp; if (!brp) - prp->Next = MOF(SubAllocPair(key, bvp)); + prp->Next = NewPair(key, bvp); } else - bop->To_Val = MOF(SubAllocPair(key, bvp)); + bop->To_Val = NewPair(key, bvp); bop->Nd++; } // end of SetKeyValue @@ -1301,7 +1322,7 @@ PBVAL BJSON::SubAllocStr(OFFSET toval, short nd) bvp->To_Val = toval; bvp->Nd = nd; return bvp; -} // end of SubAllocVal +} // end of SubAllocStr /***********************************************************************/ /* Allocate a BVALUE with a given string or numeric value. */ diff --git a/storage/connect/bson.h b/storage/connect/bson.h index aceaed5eb7c..03d5b9bd854 100644 --- a/storage/connect/bson.h +++ b/storage/connect/bson.h @@ -81,8 +81,8 @@ public: // SubAlloc functions void* BsonSubAlloc(size_t size); PBPR SubAllocPair(OFFSET key, OFFSET val = 0); - PBPR SubAllocPair(PSZ key, OFFSET val = 0) - {return SubAllocPair(MOF(key), val);} + OFFSET NewPair(PSZ key, OFFSET val = 0) + {return MOF(SubAllocPair(DupStr(key), val));} PBVAL NewVal(int type = TYPE_NULL); PBVAL NewVal(PVAL valp); PBVAL SubAllocVal(OFFSET toval, int type = TYPE_NULL, short nd = 0); @@ -90,8 +90,9 @@ public: {return SubAllocVal(MOF(toval), type, nd);} PBVAL SubAllocStr(OFFSET str, short nd = 0); PBVAL SubAllocStr(PSZ str, short nd = 0) - {return SubAllocStr(MOF(str), nd);} + {return SubAllocStr(DupStr(str), nd);} PBVAL DupVal(PBVAL bvp); + OFFSET DupStr(PSZ str); // Array functions inline PBVAL GetArray(PBVAL vlp) {return MVP(vlp->To_Val);} diff --git a/storage/connect/bsonudf.cpp b/storage/connect/bsonudf.cpp index 356a5c5169f..5163f042ad0 100644 --- a/storage/connect/bsonudf.cpp +++ b/storage/connect/bsonudf.cpp @@ -693,13 +693,8 @@ PBVAL BJNX::GetRow(PGLOBAL g) for (i++; row && i < Nod; i++) { if (Nodes[i].Op == OP_XX) break; - // else if (!Nodes[i].Key) - // Construct intermediate array - // nwr = SubAllocVal(g); - // else - // nwr = SubAllocPair(g); - // Construct new row + // Construct new row nwr = NewVal(); if (row->Type == TYPE_JOB) { diff --git a/storage/connect/cmgfam.cpp b/storage/connect/cmgfam.cpp index 579b5b919a7..690c087c2bb 100644 --- a/storage/connect/cmgfam.cpp +++ b/storage/connect/cmgfam.cpp @@ -1,11 +1,11 @@ /************** CMGFAM C++ Program Source Code File (.CPP) *************/ /* PROGRAM NAME: cmgfam.cpp */ /* 
------------- */ -/* Version 1.4 */ +/* Version 1.5 */ /* */ /* COPYRIGHT: */ /* ---------- */ -/* (C) Copyright to the author Olivier BERTRAND 20017 */ +/* (C) Copyright to the author Olivier BERTRAND 20017 - 2020 */ /* */ /* WHAT THIS PROGRAM DOES: */ /* ----------------------- */ @@ -29,7 +29,11 @@ #include "reldef.h" #include "filamtxt.h" #include "tabdos.h" +#if defined(BSON_SUPPORT) +#include "tabbson.h" +#else #include "tabjson.h" +#endif // BSON_SUPPORT #include "cmgfam.h" #if defined(UNIX) || defined(UNIV_LINUX) @@ -53,6 +57,7 @@ CMGFAM::CMGFAM(PJDEF tdp) : DOSFAM((PDOSDEF)NULL) Pcg.Options = tdp->Options; Pcg.Filter = tdp->Filter; Pcg.Pipe = tdp->Pipe && tdp->Options != NULL; + Lrecl = tdp->Lrecl + tdp->Ending; } else { Pcg.Uristr = NULL; Pcg.Db_name = NULL; @@ -60,21 +65,55 @@ CMGFAM::CMGFAM(PJDEF tdp) : DOSFAM((PDOSDEF)NULL) Pcg.Options = NULL; Pcg.Filter = NULL; Pcg.Pipe = false; + Lrecl = 0; } // endif tdp To_Fbt = NULL; Mode = MODE_ANY; Done = false; - Lrecl = tdp->Lrecl + tdp->Ending; } // end of CMGFAM standard constructor - CMGFAM::CMGFAM(PCMGFAM tdfp) : DOSFAM(tdfp) +#if defined(BSON_SUPPORT) + /***********************************************************************/ +/* Constructors. */ +/***********************************************************************/ +CMGFAM::CMGFAM(PBDEF tdp) : DOSFAM((PDOSDEF)NULL) { + Cmgp = NULL; + Pcg.Tdbp = NULL; + + if (tdp) { + Pcg.Uristr = tdp->Uri; + Pcg.Db_name = tdp->Schema; + Pcg.Coll_name = tdp->Collname; + Pcg.Options = tdp->Options; + Pcg.Filter = tdp->Filter; + Pcg.Pipe = tdp->Pipe && tdp->Options != NULL; + Lrecl = tdp->Lrecl + tdp->Ending; + } else { + Pcg.Uristr = NULL; + Pcg.Db_name = NULL; + Pcg.Coll_name = NULL; + Pcg.Options = NULL; + Pcg.Filter = NULL; + Pcg.Pipe = false; + Lrecl = 0; + } // endif tdp + + To_Fbt = NULL; + Mode = MODE_ANY; + Done = false; +} // end of CMGFAM standard constructor +#endif // BSON_SUPPORT + +CMGFAM::CMGFAM(PCMGFAM tdfp) : DOSFAM(tdfp) +{ + Cmgp = tdfp->Cmgp; Pcg = tdfp->Pcg; To_Fbt = tdfp->To_Fbt; Mode = tdfp->Mode; Done = tdfp->Done; - } // end of CMGFAM copy constructor +} // end of CMGFAM copy constructor /***********************************************************************/ /* Reset: reset position values at the beginning of file. */ diff --git a/storage/connect/cmgfam.h b/storage/connect/cmgfam.h index 7571f5c5309..9c5f91f0d23 100644 --- a/storage/connect/cmgfam.h +++ b/storage/connect/cmgfam.h @@ -1,7 +1,7 @@ /*************** CMGFam H Declares Source Code File (.H) ***************/ -/* Name: cmgfam.h Version 1.5 */ +/* Name: cmgfam.h Version 1.6 */ /* */ -/* (C) Copyright to the author Olivier BERTRAND 2017 */ +/* (C) Copyright to the author Olivier BERTRAND 2017 - 2020 */ /* */ /* This file contains the MongoDB access method classes declares. 
*/ /***********************************************************************/ @@ -20,6 +20,9 @@ class DllExport CMGFAM : public DOSFAM { public: // Constructor CMGFAM(PJDEF tdp); +#if defined(BSON_SUPPORT) + CMGFAM(PBDEF tdp); +#endif // BSON_SUPPORT CMGFAM(PCMGFAM txfp); // Implementation diff --git a/storage/connect/ha_connect.cc b/storage/connect/ha_connect.cc index 95f885c65b4..6728550447c 100644 --- a/storage/connect/ha_connect.cc +++ b/storage/connect/ha_connect.cc @@ -170,7 +170,7 @@ #define JSONMAX 10 // JSON Default max grp size extern "C" { - char version[]= "Version 1.07.0002 December 02, 2020"; + char version[]= "Version 1.07.0002 December 07, 2020"; #if defined(__WIN__) char compver[]= "Version 1.07.0002 " __DATE__ " " __TIME__; char slash= '\\'; @@ -230,9 +230,9 @@ char *GetUserVariable(PGLOBAL g, const uchar *varname) PQRYRES OEMColumns(PGLOBAL g, PTOS topt, char *tab, char *db, bool info); PQRYRES VirColumns(PGLOBAL g, bool info); PQRYRES JSONColumns(PGLOBAL g, PCSZ db, PCSZ dsn, PTOS topt, bool info); -#ifdef DEVELOPMENT +#ifdef BSON_SUPPORT PQRYRES BSONColumns(PGLOBAL g, PCSZ db, PCSZ dsn, PTOS topt, bool info); -#endif // DEVEOPMENT +#endif // BSON_SUPPORT PQRYRES XMLColumns(PGLOBAL g, char *db, char *tab, PTOS topt, bool info); #if defined(REST_SUPPORT) PQRYRES RESTColumns(PGLOBAL g, PTOS topt, char *tab, char *db, bool info); @@ -259,6 +259,9 @@ bool JsonAllPath(void); char *GetJsonNull(void); uint GetJsonGrpSize(void); char *GetJavaWrapper(void); +#if defined(BSON_SUPPORT) +bool Force_Bson(void); +#endif // BSON_SUPPORT size_t GetWorkSize(void); void SetWorkSize(size_t); extern "C" const char *msglang(void); @@ -444,6 +447,13 @@ static MYSQL_THDVAR_BOOL(enable_mongo, PLUGIN_VAR_RQCMDARG, #endif // !version 2,3 #endif // JAVA_SUPPORT || CMGO_SUPPORT +#if defined(BSON_SUPPORT) +// Force using BSON for JSON tables +static MYSQL_THDVAR_BOOL(force_bson, PLUGIN_VAR_RQCMDARG, + "Force using BSON for JSON tables", + NULL, NULL, 0); // NO by default +#endif // BSON_SUPPORT + #if defined(XMSG) || defined(NEWMSG) const char *language_names[]= { @@ -506,6 +516,8 @@ char *GetJavaWrapper(void) bool MongoEnabled(void) {return THDVAR(current_thd, enable_mongo);} #endif // JAVA_SUPPORT || CMGO_SUPPORT +bool Force_Bson(void) {return THDVAR(current_thd, force_bson);} + #if defined(XMSG) || defined(NEWMSG) extern "C" const char *msglang(void) {return language_names[THDVAR(current_thd, msg_lang)];} @@ -4516,9 +4528,9 @@ bool ha_connect::check_privileges(THD *thd, PTOS options, char *dbn, bool quick) case TAB_VEC: case TAB_REST: case TAB_JSON: -#if defined(DEVELOPMENT) +#if defined(BSON_SUPPORT) case TAB_BSON: -#endif // DEVELOPMENT +#endif // BSON_SUPPORT if (options->filename && *options->filename) { if (!quick) { char path[FN_REFLEN], dbpath[FN_REFLEN]; @@ -5444,7 +5456,10 @@ static bool add_field(String* sql, TABTYPE ttp, const char* field_name, int typ, if (fmt && *fmt) { switch (ttp) { case TAB_JSON: error |= sql->append(" JPATH='"); break; - case TAB_XML: error |= sql->append(" XPATH='"); break; +#if defined(BSON_SUPPORT) + case TAB_BSON: error |= sql->append(" JPATH='"); break; +#endif // BSON_SUPPORT + case TAB_XML: error |= sql->append(" XPATH='"); break; default: error |= sql->append(" FIELD_FORMAT='"); } // endswitch ttp @@ -5686,9 +5701,9 @@ static int connect_assisted_discovery(handlerton *, THD* thd, } else if (topt->http) { switch (ttp) { case TAB_JSON: -#if defined(DEVELOPMENT) +#if defined(BSON_SUPPORT) case TAB_BSON: -#endif // DEVELOPMENT +#endif // BSON_SUPPORT case 
TAB_XML: case TAB_CSV: ttp = TAB_REST; @@ -5873,9 +5888,9 @@ static int connect_assisted_discovery(handlerton *, THD* thd, case TAB_XML: #endif // LIBXML2_SUPPORT || DOMDOC_SUPPORT case TAB_JSON: -#if defined(DEVELOPMENT) +#if defined(BSON_SUPPORT) case TAB_BSON: -#endif // DEVELOPMENT +#endif // BSON_SUPPORT dsn= strz(g, create_info->connect_string); if (!fn && !zfn && !mul && !dsn) @@ -6040,13 +6055,15 @@ static int connect_assisted_discovery(handlerton *, THD* thd, qrp= VirColumns(g, fnc == FNC_COL); break; case TAB_JSON: +#if !defined(FORCE_BSON) qrp= JSONColumns(g, db, dsn, topt, fnc == FNC_COL); break; -#if defined(DEVELOPMENT) +#endif // !FORCE_BSON +#if defined(BSON_SUPPORT) case TAB_BSON: qrp= BSONColumns(g, db, dsn, topt, fnc == FNC_COL); break; -#endif // DEVELOPMENT +#endif // BSON_SUPPORT #if defined(JAVA_SUPPORT) case TAB_MONGO: url= strz(g, create_info->connect_string); @@ -7426,7 +7443,10 @@ static struct st_mysql_sys_var* connect_system_variables[]= { MYSQL_SYSVAR(enable_mongo), #endif // JAVA_SUPPORT || CMGO_SUPPORT MYSQL_SYSVAR(cond_push), - NULL +#if defined(BSON_SUPPORT) + MYSQL_SYSVAR(force_bson), +#endif // BSON_SUPPORT + NULL }; maria_declare_plugin(connect) diff --git a/storage/connect/jmgfam.cpp b/storage/connect/jmgfam.cpp index 30f6279146d..2d45753ec63 100644 --- a/storage/connect/jmgfam.cpp +++ b/storage/connect/jmgfam.cpp @@ -1,15 +1,15 @@ /************ JMONGO FAM C++ Program Source Code File (.CPP) ***********/ /* PROGRAM NAME: jmgfam.cpp */ /* ------------- */ -/* Version 1.0 */ +/* Version 1.1 */ /* */ /* COPYRIGHT: */ /* ---------- */ -/* (C) Copyright to the author Olivier BERTRAND 20017 */ +/* (C) Copyright to the author Olivier BERTRAND 20017 - 2020 */ /* */ /* WHAT THIS PROGRAM DOES: */ /* ----------------------- */ -/* This program are the Java MongoDB access method classes. */ +/* This program are the Java MongoDB access method classes. 
*/ /* */ /***********************************************************************/ @@ -49,7 +49,11 @@ #include "reldef.h" #include "filamtxt.h" #include "tabdos.h" +#if defined(BSON_SUPPORT) +#include "tabbson.h" +#else #include "tabjson.h" +#endif // BSON_SUPPORT #include "jmgfam.h" #if defined(UNIX) || defined(UNIV_LINUX) @@ -92,10 +96,38 @@ JMGFAM::JMGFAM(PJDEF tdp) : DOSFAM((PDOSDEF)NULL) Version = tdp->Version; Lrecl = tdp->Lrecl + tdp->Ending; Curpos = 0; -} // end of JMGFAM standard constructor +} // end of JMGFAM Json standard constructor + +#if defined(BSON_SUPPORT) +JMGFAM::JMGFAM(PBDEF tdp) : DOSFAM((PDOSDEF)NULL) +{ + Jcp = NULL; + Ops.Driver = tdp->Schema; + Ops.Url = tdp->Uri; + Ops.User = NULL; + Ops.Pwd = NULL; + Ops.Scrollable = false; + Ops.Fsize = 0; + Ops.Version = tdp->Version; + To_Fbt = NULL; + Mode = MODE_ANY; + Uristr = tdp->Uri; + Db_name = tdp->Schema; + Coll_name = tdp->Collname; + Options = tdp->Options; + Filter = tdp->Filter; + Wrapname = tdp->Wrapname; + Done = false; + Pipe = tdp->Pipe; + Version = tdp->Version; + Lrecl = tdp->Lrecl + tdp->Ending; + Curpos = 0; +} // end of JMGFAM Bson standard constructor +#endif // BSON_SUPPORT JMGFAM::JMGFAM(PJMGFAM tdfp) : DOSFAM(tdfp) { + Jcp = tdfp->Jcp; //Client = tdfp->Client; //Database = NULL; //Collection = tdfp->Collection; @@ -114,6 +146,7 @@ JMGFAM::JMGFAM(PJMGFAM tdfp) : DOSFAM(tdfp) Done = tdfp->Done; Pipe = tdfp->Pipe; Version = tdfp->Version; + Curpos = tdfp->Curpos; } // end of JMGFAM copy constructor /***********************************************************************/ diff --git a/storage/connect/jmgfam.h b/storage/connect/jmgfam.h index 5c80d993833..c5d9d1f57e6 100644 --- a/storage/connect/jmgfam.h +++ b/storage/connect/jmgfam.h @@ -1,7 +1,7 @@ /************** MongoFam H Declares Source Code File (.H) **************/ -/* Name: jmgfam.h Version 1.0 */ +/* Name: jmgfam.h Version 1.1 */ /* */ -/* (C) Copyright to the author Olivier BERTRAND 2017 */ +/* (C) Copyright to the author Olivier BERTRAND 2017 - 2020 */ /* */ /* This file contains the JAVA MongoDB access method classes declares */ /***********************************************************************/ @@ -25,6 +25,9 @@ class DllExport JMGFAM : public DOSFAM { public: // Constructor JMGFAM(PJDEF tdp); +#if defined(BSON_SUPPORT) + JMGFAM(PBDEF tdp); +#endif // BSON_SUPPORT JMGFAM(PJMGFAM txfp); // Implementation diff --git a/storage/connect/jmgoconn.cpp b/storage/connect/jmgoconn.cpp index c80800bd897..8a12fffbd05 100644 --- a/storage/connect/jmgoconn.cpp +++ b/storage/connect/jmgoconn.cpp @@ -121,7 +121,7 @@ JMgoConn::JMgoConn(PGLOBAL g, PCSZ collname, PCSZ wrapper) /***********************************************************************/ void JMgoConn::AddJars(PSTRG jpop, char sep) { -#if defined(DEVELOPMENT) +#if defined(BSON_SUPPORT) if (m_Version == 2) { jpop->Append(sep); // jpop->Append("C:/Eclipse/workspace/MongoWrap2/bin"); @@ -134,7 +134,7 @@ void JMgoConn::AddJars(PSTRG jpop, char sep) jpop->Append(sep); jpop->Append("C:/mongo-java-driver/mongo-java-driver-3.4.2.jar"); } // endif m_Version -#endif // DEVELOPMENT +#endif // BSON_SUPPORT } // end of AddJars /***********************************************************************/ diff --git a/storage/connect/mycat.cc b/storage/connect/mycat.cc index 476baf63039..e3fa00e119f 100644 --- a/storage/connect/mycat.cc +++ b/storage/connect/mycat.cc @@ -82,11 +82,11 @@ #endif // JAVA_SUPPORT #include "tabpivot.h" #include "tabvir.h" -#if defined(DEVELOPMENT) +#if defined(BSON_SUPPORT) 
#include "tabbson.h" #else #include "tabjson.h" -#endif // DEVELOPMENT +#endif // BSON_SUPPORT #include "ha_connect.h" #if defined(XML_SUPPORT) #include "tabxml.h" @@ -111,6 +111,9 @@ extern "C" HINSTANCE s_hModule; // Saved module handle #if defined(JAVA_SUPPORT) || defined(CMGO_SUPPORT) bool MongoEnabled(void); #endif // JAVA_SUPPORT || CMGO_SUPPORT +#if defined(BSON_SUPPORT) +bool Force_Bson(void); +#endif // BSON_SUPPORT /***********************************************************************/ /* Get the plugin directory. */ @@ -134,25 +137,25 @@ TABTYPE GetTypeID(const char *type) : (!stricmp(type, "DBF")) ? TAB_DBF #if defined(XML_SUPPORT) : (!stricmp(type, "XML")) ? TAB_XML -#endif +#endif // XML_SUPPORT : (!stricmp(type, "INI")) ? TAB_INI : (!stricmp(type, "VEC")) ? TAB_VEC #if defined(ODBC_SUPPORT) : (!stricmp(type, "ODBC")) ? TAB_ODBC -#endif +#endif // ODBC_SUPPORT #if defined(JAVA_SUPPORT) : (!stricmp(type, "JDBC")) ? TAB_JDBC -#endif +#endif // JAVA_SUPPORT #if defined(JAVA_SUPPORT) || defined(CMGO_SUPPORT) : (!stricmp(type, "MONGO") && MongoEnabled()) ? TAB_MONGO -#endif +#endif // JAVA_SUPPORT || CMGO_SUPPORT : (!stricmp(type, "MYSQL")) ? TAB_MYSQL : (!stricmp(type, "MYPRX")) ? TAB_MYSQL : (!stricmp(type, "DIR")) ? TAB_DIR #if defined(__WIN__) : (!stricmp(type, "MAC")) ? TAB_MAC : (!stricmp(type, "WMI")) ? TAB_WMI -#endif +#endif // __WIN__ : (!stricmp(type, "TBL")) ? TAB_TBL : (!stricmp(type, "XCOL")) ? TAB_XCL : (!stricmp(type, "OCCUR")) ? TAB_OCCUR @@ -161,12 +164,12 @@ TABTYPE GetTypeID(const char *type) : (!stricmp(type, "PIVOT")) ? TAB_PIVOT : (!stricmp(type, "VIR")) ? TAB_VIR : (!stricmp(type, "JSON")) ? TAB_JSON -#if defined(DEVELOPMENT) +#if defined(BSON_SUPPORT) : (!stricmp(type, "BSON")) ? TAB_BSON -#endif +#endif // BSON_SUPPORT #if defined(ZIP_SUPPORT) : (!stricmp(type, "ZIP")) ? TAB_ZIP -#endif +#endif // ZIP_SUPPORT : (!stricmp(type, "OEM")) ? 
TAB_OEM : TAB_NIY; } // end of GetTypeID @@ -188,9 +191,9 @@ bool IsFileType(TABTYPE type) case TAB_INI: case TAB_VEC: case TAB_JSON: -#if defined(DEVELOPMENT) +#if defined(BSON_SUPPORT) case TAB_BSON: -#endif // DEVELOPMENT +#endif // BSON_SUPPORT case TAB_REST: // case TAB_ZIP: isfile= true; @@ -286,9 +289,9 @@ bool IsTypeIndexable(TABTYPE type) case TAB_VEC: case TAB_DBF: case TAB_JSON: -#if defined(DEVELOPMENT) +#if defined(BSON_SUPPORT) case TAB_BSON: -#endif // DEVELOPMENT +#endif // BSON_SUPPORT idx= true; break; default: @@ -315,9 +318,9 @@ int GetIndexType(TABTYPE type) case TAB_VEC: case TAB_DBF: case TAB_JSON: -#if defined(DEVELOPMENT) +#if defined(BSON_SUPPORT) case TAB_BSON: -#endif // DEVELOPMENT +#endif // BSON_SUPPORT xtyp= 1; break; case TAB_MYSQL: @@ -481,10 +484,18 @@ PTABDEF MYCAT::MakeTableDesc(PGLOBAL g, PTABLE tablep, LPCSTR am) case TAB_MYSQL: tdp= new(g) MYSQLDEF; break; case TAB_PIVOT: tdp= new(g) PIVOTDEF; break; case TAB_VIR: tdp= new(g) VIRDEF; break; - case TAB_JSON: tdp= new(g) JSONDEF; break; -#if defined(DEVELOPMENT) + case TAB_JSON: +#if defined(BSON_SUPPORT) + if (Force_Bson()) + tdp= new(g) BSONDEF; + else +#endif // BSON_SUPPORT + tdp= new(g) JSONDEF; + + break; +#if defined(BSON_SUPPORT) case TAB_BSON: tdp= new(g) BSONDEF; break; -#endif // DEVELOPMENT +#endif // BSON_SUPPORT #if defined(ZIP_SUPPORT) case TAB_ZIP: tdp= new(g) ZIPDEF; break; #endif // ZIP_SUPPORT diff --git a/storage/connect/mysql-test/connect/r/bson.result b/storage/connect/mysql-test/connect/r/bson.result new file mode 100644 index 00000000000..fd15e020aac --- /dev/null +++ b/storage/connect/mysql-test/connect/r/bson.result @@ -0,0 +1,517 @@ +# +# Testing doc samples +# +CREATE TABLE t1 +( +ISBN CHAR(15), +LANG CHAR(2), +SUBJECT CHAR(32), +AUTHOR CHAR(64), +TITLE CHAR(32), +TRANSLATION CHAR(32), +TRANSLATOR CHAR(80), +PUBLISHER CHAR(32), +DATEPUB int(4) +) ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='biblio.json'; +SELECT * FROM t1; +ISBN LANG SUBJECT AUTHOR TITLE TRANSLATION TRANSLATOR PUBLISHER DATEPUB +9782212090819 fr applications Jean-Christophe Bernadac, Franois Knab Construire une application XML NULL NULL Eyrolles Paris 1999 +9782840825685 fr applications William J. Pardi XML en Action adapt de l'anglais par James Guerin Microsoft Press Paris 1999 +DROP TABLE t1; +# +# Testing Jpath. 
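The ha_connect.cc and mycat.cc hunks above introduce a force_bson session variable: when it is set, MYCAT::MakeTableDesc builds a BSONDEF instead of a JSONDEF for TAB_JSON, so an existing JSON table definition is served by the new BSON implementation. A minimal usage sketch, assuming the variable is exposed to SQL as connect_force_bson (the engine's usual plugin-variable prefix; the exact exposed name is not shown in the patch) and reusing the biblio.json sample from the bson test suite:

SET connect_force_bson = ON;
# Declared as JSON, but with the variable set MakeTableDesc picks BSONDEF,
# so the table is handled by the BSON classes rather than the JSON ones.
CREATE TABLE jbib (
  ISBN CHAR(15),
  TITLE CHAR(32),
  DATEPUB INT(4)
) ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='biblio.json';
SELECT ISBN, TITLE, DATEPUB FROM jbib;
DROP TABLE jbib;
SET connect_force_bson = OFF;
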
Get the number of authors +# +CREATE TABLE t1 +( +ISBN CHAR(15), +Language CHAR(2) JPATH='$.LANG', +Subject CHAR(32) JPATH='$.SUBJECT', +Authors INT(2) JPATH='$.AUTHOR[#]', +Title CHAR(32) JPATH='$.TITLE', +Translation CHAR(32) JPATH='$.TRANSLATION', +Translator CHAR(80) JPATH='$.TRANSLATOR', +Publisher CHAR(20) JPATH='$.PUBLISHER.NAME', +Location CHAR(16) JPATH='$.PUBLISHER.PLACE', +Year int(4) JPATH='$.DATEPUB' +) +ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='biblio.json'; +SELECT * FROM t1; +ISBN Language Subject Authors Title Translation Translator Publisher Location Year +9782212090819 fr applications 2 Construire une application XML NULL NULL Eyrolles Paris 1999 +9782840825685 fr applications 1 XML en Action adapt de l'anglais par James Guerin Microsoft Press Paris 1999 +DROP TABLE t1; +# +# Concatenates the authors +# +CREATE TABLE t1 +( +ISBN CHAR(15), +Language CHAR(2) JPATH='$.LANG', +Subject CHAR(32) JPATH='$.SUBJECT', +AuthorFN CHAR(128) JPATH='$.AUTHOR[" and "].FIRSTNAME', +AuthorLN CHAR(128) JPATH='$.AUTHOR[" and "].LASTNAME', +Title CHAR(32) JPATH='$.TITLE', +Translation CHAR(32) JPATH='$.TRANSLATION', +Translator CHAR(80) JPATH='$.TRANSLATOR', +Publisher CHAR(20) JPATH='$.PUBLISHER.NAME', +Location CHAR(16) JPATH='$.PUBLISHER.PLACE', +Year int(4) JPATH='$.DATEPUB' +) +ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='biblio.json'; +SELECT * FROM t1; +ISBN Language Subject AuthorFN AuthorLN Title Translation Translator Publisher Location Year +9782212090819 fr applications Jean-Christophe and Franois Bernadac and Knab Construire une application XML NULL NULL Eyrolles Paris 1999 +9782840825685 fr applications William J. Pardi XML en Action adapt de l'anglais par James Guerin Microsoft Press Paris 1999 +DROP TABLE t1; +# +# Testing expanding authors +# +CREATE TABLE t1 +( +ISBN CHAR(15), +Language CHAR(2) JPATH='$.LANG', +Subject CHAR(32) JPATH='$.SUBJECT', +AuthorFN CHAR(128) JPATH='$.AUTHOR[*].FIRSTNAME', +AuthorLN CHAR(128) JPATH='$.AUTHOR[*].LASTNAME', +Title CHAR(32) JPATH='$.TITLE', +Translation CHAR(32) JPATH='$.TRANSLATION', +Translator CHAR(80) JPATH='$.TRANSLATOR', +Publisher CHAR(20) JPATH='$.PUBLISHER.NAME', +Location CHAR(16) JPATH='$.PUBLISHER.PLACE', +Year int(4) JPATH='$.DATEPUB' +) +ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='biblio.json'; +SELECT * FROM t1; +ISBN Language Subject AuthorFN AuthorLN Title Translation Translator Publisher Location Year +9782212090819 fr applications Jean-Christophe Bernadac Construire une application XML NULL NULL Eyrolles Paris 1999 +9782212090819 fr applications Franois Knab Construire une application XML NULL NULL Eyrolles Paris 1999 +9782840825685 fr applications William J. Pardi XML en Action adapt de l'anglais par James Guerin Microsoft Press Paris 1999 +UPDATE t1 SET AuthorFN = 'Philippe' WHERE AuthorLN = 'Knab'; +SELECT * FROM t1 WHERE ISBN = '9782212090819'; +ISBN Language Subject AuthorFN AuthorLN Title Translation Translator Publisher Location Year +9782212090819 fr applications Jean-Christophe Bernadac Construire une application XML NULL NULL Eyrolles Paris 1999 +9782212090819 fr applications Philippe Knab Construire une application XML NULL NULL Eyrolles Paris 1999 +# +# To add an author a new table must be created +# +CREATE TABLE t2 ( +FIRSTNAME CHAR(32), +LASTNAME CHAR(32)) +ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='biblio.json' OPTION_LIST='Object=$[1].AUTHOR'; +SELECT * FROM t2; +FIRSTNAME LASTNAME +William J. 
Pardi +INSERT INTO t2 VALUES('Charles','Dickens'); +SELECT * FROM t1; +ISBN Language Subject AuthorFN AuthorLN Title Translation Translator Publisher Location Year +9782212090819 fr applications Jean-Christophe Bernadac Construire une application XML NULL NULL Eyrolles Paris 1999 +9782212090819 fr applications Philippe Knab Construire une application XML NULL NULL Eyrolles Paris 1999 +9782840825685 fr applications William J. Pardi XML en Action adapt de l'anglais par James Guerin Microsoft Press Paris 1999 +9782840825685 fr applications Charles Dickens XML en Action adapt de l'anglais par James Guerin Microsoft Press Paris 1999 +DROP TABLE t1; +DROP TABLE t2; +# +# Check the biblio file has the good format +# +CREATE TABLE t1 +( +line char(255) +) +ENGINE=CONNECT TABLE_TYPE=DOS FILE_NAME='biblio.json'; +SELECT * FROM t1; +line +[ + { + "ISBN": "9782212090819", + "LANG": "fr", + "SUBJECT": "applications", + "AUTHOR": [ + { + "FIRSTNAME": "Jean-Christophe", + "LASTNAME": "Bernadac" + }, + { + "FIRSTNAME": "Philippe", + "LASTNAME": "Knab" + } + ], + "TITLE": "Construire une application XML", + "PUBLISHER": { + "NAME": "Eyrolles", + "PLACE": "Paris" + }, + "DATEPUB": 1999 + }, + { + "ISBN": "9782840825685", + "LANG": "fr", + "SUBJECT": "applications", + "AUTHOR": [ + { + "FIRSTNAME": "William J.", + "LASTNAME": "Pardi" + }, + { + "FIRSTNAME": "Charles", + "LASTNAME": "Dickens" + } + ], + "TITLE": "XML en Action", + "TRANSLATION": "adapt de l'anglais par", + "TRANSLATOR": { + "FIRSTNAME": "James", + "LASTNAME": "Guerin" + }, + "PUBLISHER": { + "NAME": "Microsoft Press", + "PLACE": "Paris" + }, + "DATEPUB": 1999 + } +] +DROP TABLE t1; +# +# Testing a pretty=0 file +# +CREATE TABLE t1 +( +ISBN CHAR(15) NOT NULL, +Language CHAR(2) JPATH='$.LANG', +Subject CHAR(32) JPATH='$.SUBJECT', +AuthorFN CHAR(128) JPATH='$.AUTHOR[*].FIRSTNAME', +AuthorLN CHAR(128) JPATH='$.AUTHOR[*].LASTNAME', +Title CHAR(32) JPATH='$.TITLE', +Translation CHAR(32) JPATH='$.TRANSLATED.PREFIX', +TranslatorFN CHAR(80) JPATH='$.TRANSLATED.TRANSLATOR.FIRSTNAME', +TranslatorLN CHAR(80) JPATH='$.TRANSLATED.TRANSLATOR.LASTNAME', +Publisher CHAR(20) JPATH='$.PUBLISHER.NAME', +Location CHAR(16) JPATH='$.PUBLISHER.PLACE', +Year int(4) JPATH='$.DATEPUB', +INDEX IX(ISBN) +) +ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='bib0.json' LRECL=320 OPTION_LIST='Pretty=0'; +SHOW INDEX FROM t1; +Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment +t1 1 IX 1 ISBN A NULL NULL NULL XINDEX +SELECT * FROM t1; +ISBN Language Subject AuthorFN AuthorLN Title Translation TranslatorFN TranslatorLN Publisher Location Year +9782212090819 fr applications Jean-Michel Bernadac Construire une application XML NULL NULL NULL Eyrolles Paris 1999 +9782212090819 fr applications Franois Knab Construire une application XML NULL NULL NULL Eyrolles Paris 1999 +9782840825685 fr applications William J. 
Pardi XML en Action adapt de l'anglais par James Guerin Microsoft Press Paris 2001 +DESCRIBE SELECT * FROM t1 WHERE ISBN = '9782212090819'; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ref IX IX 15 const 1 Using where +UPDATE t1 SET AuthorFN = 'Philippe' WHERE ISBN = '9782212090819'; +ERROR HY000: Got error 122 'Cannot write expanded column when Pretty is not 2' from CONNECT +DROP TABLE t1; +# +# A file with 2 arrays +# +CREATE TABLE t1 ( +WHO CHAR(12), +WEEK INT(2) JPATH='$.WEEK[*].NUMBER', +WHAT CHAR(32) JPATH='$.WEEK[].EXPENSE["+"].WHAT', +AMOUNT DOUBLE(8,2) JPATH='$.WEEK[].EXPENSE[+].AMOUNT') +ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='expense.json'; +SELECT * FROM t1; +WHO WEEK WHAT AMOUNT +Joe 3 Beer+Food+Food+Car 69.00 +Joe 4 Beer+Beer+Food+Food+Beer 83.00 +Joe 5 Beer+Food 26.00 +Beth 3 Beer 16.00 +Beth 4 Food+Beer 32.00 +Beth 5 Food+Beer 32.00 +Janet 3 Car+Food+Beer 55.00 +Janet 4 Car 17.00 +Janet 5 Beer+Car+Beer+Food 57.00 +DROP TABLE t1; +# +# Now it can be fully expanded +# +CREATE TABLE t1 ( +WHO CHAR(12), +WEEK INT(2) JPATH='$.WEEK[*].NUMBER', +WHAT CHAR(32) JPATH='$.WEEK[*].EXPENSE[*].WHAT', +AMOUNT DOUBLE(8,2) JPATH='$.WEEK[*].EXPENSE[*].AMOUNT') +ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='expense.json'; +SELECT * FROM t1; +WHO WEEK WHAT AMOUNT +Joe 3 Beer 18.00 +Joe 3 Food 12.00 +Joe 3 Food 19.00 +Joe 3 Car 20.00 +Joe 4 Beer 19.00 +Joe 4 Beer 16.00 +Joe 4 Food 17.00 +Joe 4 Food 17.00 +Joe 4 Beer 14.00 +Joe 5 Beer 14.00 +Joe 5 Food 12.00 +Beth 3 Beer 16.00 +Beth 4 Food 17.00 +Beth 4 Beer 15.00 +Beth 5 Food 12.00 +Beth 5 Beer 20.00 +Janet 3 Car 19.00 +Janet 3 Food 18.00 +Janet 3 Beer 18.00 +Janet 4 Car 17.00 +Janet 5 Beer 14.00 +Janet 5 Car 12.00 +Janet 5 Beer 19.00 +Janet 5 Food 12.00 +DROP TABLE t1; +# +# A table showing many calculated results +# +CREATE TABLE t1 ( +WHO CHAR(12) NOT NULL, +WEEKS CHAR(12) NOT NULL JPATH='$.WEEK[", "].NUMBER', +SUMS CHAR(64) NOT NULL JPATH='$.WEEK["+"].EXPENSE[+].AMOUNT', +SUM DOUBLE(8,2) NOT NULL JPATH='$.WEEK[+].EXPENSE[+].AMOUNT', +AVGS CHAR(64) NOT NULL JPATH='$.WEEK["+"].EXPENSE[!].AMOUNT', +SUMAVG DOUBLE(8,2) NOT NULL JPATH='$.WEEK[+].EXPENSE[!].AMOUNT', +AVGSUM DOUBLE(8,2) NOT NULL JPATH='$.WEEK[!].EXPENSE[+].AMOUNT', +AVGAVG DOUBLE(8,2) NOT NULL JPATH='$.WEEK[!].EXPENSE[!].AMOUNT', +AVERAGE DOUBLE(8,2) NOT NULL JPATH='$.WEEK[!].EXPENSE[*].AMOUNT') +ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='expense.json'; +SELECT * FROM t1; +WHO WEEKS SUMS SUM AVGS SUMAVG AVGSUM AVGAVG AVERAGE +Joe 3, 4, 5 69.00+83.00+26.00 178.00 17.25+16.60+13.00 46.85 59.33 15.62 16.18 +Beth 3, 4, 5 16.00+32.00+32.00 80.00 16.00+16.00+16.00 48.00 26.67 16.00 16.00 +Janet 3, 4, 5 55.00+17.00+57.00 129.00 18.33+17.00+14.25 49.58 43.00 16.53 16.12 +DROP TABLE t1; +# +# Expand expense in 3 one week tables +# +CREATE TABLE t2 ( +WHO CHAR(12), +WEEK INT(2) JPATH='$.WEEK[0].NUMBER', +WHAT CHAR(32) JPATH='$.WEEK[0].EXPENSE[*].WHAT', +AMOUNT DOUBLE(8,2) JPATH='$.WEEK[0].EXPENSE[*].AMOUNT') +ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='expense.json'; +SELECT * FROM t2; +WHO WEEK WHAT AMOUNT +Joe 3 Beer 18.00 +Joe 3 Food 12.00 +Joe 3 Food 19.00 +Joe 3 Car 20.00 +Beth 3 Beer 16.00 +Janet 3 Car 19.00 +Janet 3 Food 18.00 +Janet 3 Beer 18.00 +CREATE TABLE t3 ( +WHO CHAR(12), +WEEK INT(2) JPATH='$.WEEK[1].NUMBER', +WHAT CHAR(32) JPATH='$.WEEK[1].EXPENSE[*].WHAT', +AMOUNT DOUBLE(8,2) JPATH='$.WEEK[1].EXPENSE[*].AMOUNT') +ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='expense.json'; +SELECT * FROM t3; +WHO WEEK WHAT AMOUNT +Joe 4 Beer 19.00 +Joe 4 Beer 
16.00 +Joe 4 Food 17.00 +Joe 4 Food 17.00 +Joe 4 Beer 14.00 +Beth 4 Food 17.00 +Beth 4 Beer 15.00 +Janet 4 Car 17.00 +CREATE TABLE t4 ( +WHO CHAR(12), +WEEK INT(2) JPATH='$.WEEK[2].NUMBER', +WHAT CHAR(32) JPATH='$.WEEK[2].EXPENSE[*].WHAT', +AMOUNT DOUBLE(8,2) JPATH='$.WEEK[2].EXPENSE[*].AMOUNT') +ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='expense.json'; +SELECT * FROM t4; +WHO WEEK WHAT AMOUNT +Joe 5 Beer 14.00 +Joe 5 Food 12.00 +Beth 5 Food 12.00 +Beth 5 Beer 20.00 +Janet 5 Beer 14.00 +Janet 5 Car 12.00 +Janet 5 Beer 19.00 +Janet 5 Food 12.00 +# +# The expanded table is made as a TBL table +# +CREATE TABLE t1 ( +WHO CHAR(12), +WEEK INT(2), +WHAT CHAR(32), +AMOUNT DOUBLE(8,2)) +ENGINE=CONNECT TABLE_TYPE=TBL TABLE_LIST='t2,t3,t4'; +SELECT * FROM t1; +WHO WEEK WHAT AMOUNT +Joe 3 Beer 18.00 +Joe 3 Food 12.00 +Joe 3 Food 19.00 +Joe 3 Car 20.00 +Beth 3 Beer 16.00 +Janet 3 Car 19.00 +Janet 3 Food 18.00 +Janet 3 Beer 18.00 +Joe 4 Beer 19.00 +Joe 4 Beer 16.00 +Joe 4 Food 17.00 +Joe 4 Food 17.00 +Joe 4 Beer 14.00 +Beth 4 Food 17.00 +Beth 4 Beer 15.00 +Janet 4 Car 17.00 +Joe 5 Beer 14.00 +Joe 5 Food 12.00 +Beth 5 Food 12.00 +Beth 5 Beer 20.00 +Janet 5 Beer 14.00 +Janet 5 Car 12.00 +Janet 5 Beer 19.00 +Janet 5 Food 12.00 +DROP TABLE t1, t2, t3, t4; +# +# Three partial JSON tables +# +CREATE TABLE t2 ( +WHO CHAR(12), +WEEK INT(2), +WHAT CHAR(32) JPATH='$.EXPENSE[*].WHAT', +AMOUNT DOUBLE(8,2) JPATH='$.EXPENSE.[*].AMOUNT') +ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='mulexp3.json'; +SELECT * FROM t2; +WHO WEEK WHAT AMOUNT +Joe 3 Beer 18.00 +Joe 3 Food 12.00 +Joe 3 Food 19.00 +Joe 3 Car 20.00 +Beth 3 Beer 16.00 +Janet 3 Car 19.00 +Janet 3 Food 18.00 +Janet 3 Beer 18.00 +CREATE TABLE t3 ( +WHO CHAR(12), +WEEK INT(2), +WHAT CHAR(32) JPATH='$.EXPENSE[*].WHAT', +AMOUNT DOUBLE(8,2) JPATH='$.EXPENSE.[*].AMOUNT') +ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='mulexp4.json'; +SELECT * FROM t3; +WHO WEEK WHAT AMOUNT +Joe 4 Beer 19.00 +Joe 4 Beer 16.00 +Joe 4 Food 17.00 +Joe 4 Food 17.00 +Joe 4 Beer 14.00 +Beth 4 Food 17.00 +Beth 4 Beer 15.00 +Janet 4 Car 17.00 +CREATE TABLE t4 ( +WHO CHAR(12), +WEEK INT(2), +WHAT CHAR(32) JPATH='$.EXPENSE[*].WHAT', +AMOUNT DOUBLE(8,2) JPATH='$.EXPENSE.[*].AMOUNT') +ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='mulexp5.json'; +SELECT * FROM t4; +WHO WEEK WHAT AMOUNT +Joe 5 Beer 14.00 +Joe 5 Food 12.00 +Beth 5 Food 12.00 +Beth 5 Beer 20.00 +Janet 5 Beer 14.00 +Janet 5 Car 12.00 +Janet 5 Beer 19.00 +Janet 5 Food 12.00 +# +# The complete table can be a multiple JSON table +# +CREATE TABLE t1 ( +WHO CHAR(12), +WEEK INT(2), +WHAT CHAR(32) JPATH='$.EXPENSE[*].WHAT', +AMOUNT DOUBLE(8,2) JPATH='$.EXPENSE.[*].AMOUNT') +ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='mulexp*.json' MULTIPLE=1; +SELECT * FROM t1 ORDER BY WHO, WEEK, WHAT, AMOUNT; +WHO WEEK WHAT AMOUNT +Beth 3 Beer 16.00 +Beth 4 Beer 15.00 +Beth 4 Food 17.00 +Beth 5 Beer 20.00 +Beth 5 Food 12.00 +Janet 3 Beer 18.00 +Janet 3 Car 19.00 +Janet 3 Food 18.00 +Janet 4 Car 17.00 +Janet 5 Beer 14.00 +Janet 5 Beer 19.00 +Janet 5 Car 12.00 +Janet 5 Food 12.00 +Joe 3 Beer 18.00 +Joe 3 Car 20.00 +Joe 3 Food 12.00 +Joe 3 Food 19.00 +Joe 4 Beer 14.00 +Joe 4 Beer 16.00 +Joe 4 Beer 19.00 +Joe 4 Food 17.00 +Joe 4 Food 17.00 +Joe 5 Beer 14.00 +Joe 5 Food 12.00 +DROP TABLE t1; +# +# Or also a partition JSON table +# +CREATE TABLE t1 ( +WHO CHAR(12), +WEEK INT(2), +WHAT CHAR(32) JPATH='$.EXPENSE[*].WHAT', +AMOUNT DOUBLE(8,2) JPATH='$.EXPENSE.[*].AMOUNT') +ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='mulexp%s.json'; +ALTER TABLE t1 +PARTITION BY LIST COLUMNS(WEEK) ( 
+PARTITION `3` VALUES IN(3), +PARTITION `4` VALUES IN(4), +PARTITION `5` VALUES IN(5)); +Warnings: +Warning 1105 Data repartition in 3 is unchecked +Warning 1105 Data repartition in 4 is unchecked +Warning 1105 Data repartition in 5 is unchecked +SHOW WARNINGS; +Level Code Message +Warning 1105 Data repartition in 3 is unchecked +Warning 1105 Data repartition in 4 is unchecked +Warning 1105 Data repartition in 5 is unchecked +SELECT * FROM t1; +WHO WEEK WHAT AMOUNT +Joe 3 Beer 18.00 +Joe 3 Food 12.00 +Joe 3 Food 19.00 +Joe 3 Car 20.00 +Beth 3 Beer 16.00 +Janet 3 Car 19.00 +Janet 3 Food 18.00 +Janet 3 Beer 18.00 +Joe 4 Beer 19.00 +Joe 4 Beer 16.00 +Joe 4 Food 17.00 +Joe 4 Food 17.00 +Joe 4 Beer 14.00 +Beth 4 Food 17.00 +Beth 4 Beer 15.00 +Janet 4 Car 17.00 +Joe 5 Beer 14.00 +Joe 5 Food 12.00 +Beth 5 Food 12.00 +Beth 5 Beer 20.00 +Janet 5 Beer 14.00 +Janet 5 Car 12.00 +Janet 5 Beer 19.00 +Janet 5 Food 12.00 +SELECT * FROM t1 WHERE WEEK = 4; +WHO WEEK WHAT AMOUNT +Joe 4 Beer 19.00 +Joe 4 Beer 16.00 +Joe 4 Food 17.00 +Joe 4 Food 17.00 +Joe 4 Beer 14.00 +Beth 4 Food 17.00 +Beth 4 Beer 15.00 +Janet 4 Car 17.00 +DROP TABLE t1, t2, t3, t4; diff --git a/storage/connect/mysql-test/connect/t/bson.test b/storage/connect/mysql-test/connect/t/bson.test new file mode 100644 index 00000000000..ab38cab73fc --- /dev/null +++ b/storage/connect/mysql-test/connect/t/bson.test @@ -0,0 +1,294 @@ +--source include/not_embedded.inc +--source include/have_partition.inc + +let $MYSQLD_DATADIR= `select @@datadir`; + +--copy_file $MTR_SUITE_DIR/std_data/biblio.json $MYSQLD_DATADIR/test/biblio.json +--copy_file $MTR_SUITE_DIR/std_data/bib0.json $MYSQLD_DATADIR/test/bib0.json +--copy_file $MTR_SUITE_DIR/std_data/expense.json $MYSQLD_DATADIR/test/expense.json +--copy_file $MTR_SUITE_DIR/std_data/mulexp3.json $MYSQLD_DATADIR/test/mulexp3.json +--copy_file $MTR_SUITE_DIR/std_data/mulexp4.json $MYSQLD_DATADIR/test/mulexp4.json +--copy_file $MTR_SUITE_DIR/std_data/mulexp5.json $MYSQLD_DATADIR/test/mulexp5.json + +--echo # +--echo # Testing doc samples +--echo # +CREATE TABLE t1 +( + ISBN CHAR(15), + LANG CHAR(2), + SUBJECT CHAR(32), + AUTHOR CHAR(64), + TITLE CHAR(32), + TRANSLATION CHAR(32), + TRANSLATOR CHAR(80), + PUBLISHER CHAR(32), + DATEPUB int(4) +) ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='biblio.json'; +SELECT * FROM t1; +DROP TABLE t1; + + +--echo # +--echo # Testing Jpath. 
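The expense.json test cases that follow rely on JPATH array specifiers whose meaning is only implied by the expected results above. As an annotated reference sketch (the column definitions are a subset of the "calculated results" test, so only the comments are new; [#] returns the item count and [n] the n-th item, as the biblio and one-week tables show):

CREATE TABLE jpath_ops (
  WHO     CHAR(12) NOT NULL,
  WEEKS   CHAR(12) NOT NULL JPATH='$.WEEK[", "].NUMBER',            # ["s"] concatenate items, separated by s
  SUMS    CHAR(64) NOT NULL JPATH='$.WEEK["+"].EXPENSE[+].AMOUNT',  # [+] sum the items
  SUM     DOUBLE(8,2) NOT NULL JPATH='$.WEEK[+].EXPENSE[+].AMOUNT',
  AVGAVG  DOUBLE(8,2) NOT NULL JPATH='$.WEEK[!].EXPENSE[!].AMOUNT', # [!] average the items
  AVERAGE DOUBLE(8,2) NOT NULL JPATH='$.WEEK[!].EXPENSE[*].AMOUNT') # [*] expand: one value per item
ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='expense.json';
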
Get the number of authors +--echo # +CREATE TABLE t1 +( + ISBN CHAR(15), + Language CHAR(2) JPATH='$.LANG', + Subject CHAR(32) JPATH='$.SUBJECT', + Authors INT(2) JPATH='$.AUTHOR[#]', + Title CHAR(32) JPATH='$.TITLE', + Translation CHAR(32) JPATH='$.TRANSLATION', + Translator CHAR(80) JPATH='$.TRANSLATOR', + Publisher CHAR(20) JPATH='$.PUBLISHER.NAME', + Location CHAR(16) JPATH='$.PUBLISHER.PLACE', + Year int(4) JPATH='$.DATEPUB' +) +ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='biblio.json'; +SELECT * FROM t1; +DROP TABLE t1; + +--echo # +--echo # Concatenates the authors +--echo # +CREATE TABLE t1 +( + ISBN CHAR(15), + Language CHAR(2) JPATH='$.LANG', + Subject CHAR(32) JPATH='$.SUBJECT', + AuthorFN CHAR(128) JPATH='$.AUTHOR[" and "].FIRSTNAME', + AuthorLN CHAR(128) JPATH='$.AUTHOR[" and "].LASTNAME', + Title CHAR(32) JPATH='$.TITLE', + Translation CHAR(32) JPATH='$.TRANSLATION', + Translator CHAR(80) JPATH='$.TRANSLATOR', + Publisher CHAR(20) JPATH='$.PUBLISHER.NAME', + Location CHAR(16) JPATH='$.PUBLISHER.PLACE', + Year int(4) JPATH='$.DATEPUB' +) +ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='biblio.json'; +SELECT * FROM t1; +DROP TABLE t1; + +--echo # +--echo # Testing expanding authors +--echo # +CREATE TABLE t1 +( + ISBN CHAR(15), + Language CHAR(2) JPATH='$.LANG', + Subject CHAR(32) JPATH='$.SUBJECT', + AuthorFN CHAR(128) JPATH='$.AUTHOR[*].FIRSTNAME', + AuthorLN CHAR(128) JPATH='$.AUTHOR[*].LASTNAME', + Title CHAR(32) JPATH='$.TITLE', + Translation CHAR(32) JPATH='$.TRANSLATION', + Translator CHAR(80) JPATH='$.TRANSLATOR', + Publisher CHAR(20) JPATH='$.PUBLISHER.NAME', + Location CHAR(16) JPATH='$.PUBLISHER.PLACE', + Year int(4) JPATH='$.DATEPUB' +) +ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='biblio.json'; +SELECT * FROM t1; +UPDATE t1 SET AuthorFN = 'Philippe' WHERE AuthorLN = 'Knab'; +SELECT * FROM t1 WHERE ISBN = '9782212090819'; + +--echo # +--echo # To add an author a new table must be created +--echo # +CREATE TABLE t2 ( +FIRSTNAME CHAR(32), +LASTNAME CHAR(32)) +ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='biblio.json' OPTION_LIST='Object=$[1].AUTHOR'; +SELECT * FROM t2; +INSERT INTO t2 VALUES('Charles','Dickens'); +SELECT * FROM t1; +DROP TABLE t1; +DROP TABLE t2; + +--echo # +--echo # Check the biblio file has the good format +--echo # +CREATE TABLE t1 +( + line char(255) +) +ENGINE=CONNECT TABLE_TYPE=DOS FILE_NAME='biblio.json'; +SELECT * FROM t1; +DROP TABLE t1; + +--echo # +--echo # Testing a pretty=0 file +--echo # +CREATE TABLE t1 +( + ISBN CHAR(15) NOT NULL, + Language CHAR(2) JPATH='$.LANG', + Subject CHAR(32) JPATH='$.SUBJECT', + AuthorFN CHAR(128) JPATH='$.AUTHOR[*].FIRSTNAME', + AuthorLN CHAR(128) JPATH='$.AUTHOR[*].LASTNAME', + Title CHAR(32) JPATH='$.TITLE', + Translation CHAR(32) JPATH='$.TRANSLATED.PREFIX', + TranslatorFN CHAR(80) JPATH='$.TRANSLATED.TRANSLATOR.FIRSTNAME', + TranslatorLN CHAR(80) JPATH='$.TRANSLATED.TRANSLATOR.LASTNAME', + Publisher CHAR(20) JPATH='$.PUBLISHER.NAME', + Location CHAR(16) JPATH='$.PUBLISHER.PLACE', + Year int(4) JPATH='$.DATEPUB', + INDEX IX(ISBN) +) +ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='bib0.json' LRECL=320 OPTION_LIST='Pretty=0'; +SHOW INDEX FROM t1; +SELECT * FROM t1; +DESCRIBE SELECT * FROM t1 WHERE ISBN = '9782212090819'; +--error ER_GET_ERRMSG +UPDATE t1 SET AuthorFN = 'Philippe' WHERE ISBN = '9782212090819'; +DROP TABLE t1; + +--echo # +--echo # A file with 2 arrays +--echo # +CREATE TABLE t1 ( +WHO CHAR(12), +WEEK INT(2) JPATH='$.WEEK[*].NUMBER', +WHAT CHAR(32) JPATH='$.WEEK[].EXPENSE["+"].WHAT', +AMOUNT DOUBLE(8,2) 
JPATH='$.WEEK[].EXPENSE[+].AMOUNT') +ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='expense.json'; +SELECT * FROM t1; +DROP TABLE t1; + +--echo # +--echo # Now it can be fully expanded +--echo # +CREATE TABLE t1 ( +WHO CHAR(12), +WEEK INT(2) JPATH='$.WEEK[*].NUMBER', +WHAT CHAR(32) JPATH='$.WEEK[*].EXPENSE[*].WHAT', +AMOUNT DOUBLE(8,2) JPATH='$.WEEK[*].EXPENSE[*].AMOUNT') +ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='expense.json'; +#--error ER_GET_ERRMSG +SELECT * FROM t1; +DROP TABLE t1; + +--echo # +--echo # A table showing many calculated results +--echo # +CREATE TABLE t1 ( +WHO CHAR(12) NOT NULL, +WEEKS CHAR(12) NOT NULL JPATH='$.WEEK[", "].NUMBER', +SUMS CHAR(64) NOT NULL JPATH='$.WEEK["+"].EXPENSE[+].AMOUNT', +SUM DOUBLE(8,2) NOT NULL JPATH='$.WEEK[+].EXPENSE[+].AMOUNT', +AVGS CHAR(64) NOT NULL JPATH='$.WEEK["+"].EXPENSE[!].AMOUNT', +SUMAVG DOUBLE(8,2) NOT NULL JPATH='$.WEEK[+].EXPENSE[!].AMOUNT', +AVGSUM DOUBLE(8,2) NOT NULL JPATH='$.WEEK[!].EXPENSE[+].AMOUNT', +AVGAVG DOUBLE(8,2) NOT NULL JPATH='$.WEEK[!].EXPENSE[!].AMOUNT', +AVERAGE DOUBLE(8,2) NOT NULL JPATH='$.WEEK[!].EXPENSE[*].AMOUNT') +ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='expense.json'; +SELECT * FROM t1; +DROP TABLE t1; + +--echo # +--echo # Expand expense in 3 one week tables +--echo # +CREATE TABLE t2 ( +WHO CHAR(12), +WEEK INT(2) JPATH='$.WEEK[0].NUMBER', +WHAT CHAR(32) JPATH='$.WEEK[0].EXPENSE[*].WHAT', +AMOUNT DOUBLE(8,2) JPATH='$.WEEK[0].EXPENSE[*].AMOUNT') +ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='expense.json'; +SELECT * FROM t2; + +CREATE TABLE t3 ( +WHO CHAR(12), +WEEK INT(2) JPATH='$.WEEK[1].NUMBER', +WHAT CHAR(32) JPATH='$.WEEK[1].EXPENSE[*].WHAT', +AMOUNT DOUBLE(8,2) JPATH='$.WEEK[1].EXPENSE[*].AMOUNT') +ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='expense.json'; +SELECT * FROM t3; + +CREATE TABLE t4 ( +WHO CHAR(12), +WEEK INT(2) JPATH='$.WEEK[2].NUMBER', +WHAT CHAR(32) JPATH='$.WEEK[2].EXPENSE[*].WHAT', +AMOUNT DOUBLE(8,2) JPATH='$.WEEK[2].EXPENSE[*].AMOUNT') +ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='expense.json'; +SELECT * FROM t4; + +--echo # +--echo # The expanded table is made as a TBL table +--echo # +CREATE TABLE t1 ( +WHO CHAR(12), +WEEK INT(2), +WHAT CHAR(32), +AMOUNT DOUBLE(8,2)) +ENGINE=CONNECT TABLE_TYPE=TBL TABLE_LIST='t2,t3,t4'; +SELECT * FROM t1; +DROP TABLE t1, t2, t3, t4; + +--echo # +--echo # Three partial JSON tables +--echo # +CREATE TABLE t2 ( +WHO CHAR(12), +WEEK INT(2), +WHAT CHAR(32) JPATH='$.EXPENSE[*].WHAT', +AMOUNT DOUBLE(8,2) JPATH='$.EXPENSE.[*].AMOUNT') +ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='mulexp3.json'; +SELECT * FROM t2; + +CREATE TABLE t3 ( +WHO CHAR(12), +WEEK INT(2), +WHAT CHAR(32) JPATH='$.EXPENSE[*].WHAT', +AMOUNT DOUBLE(8,2) JPATH='$.EXPENSE.[*].AMOUNT') +ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='mulexp4.json'; +SELECT * FROM t3; + +CREATE TABLE t4 ( +WHO CHAR(12), +WEEK INT(2), +WHAT CHAR(32) JPATH='$.EXPENSE[*].WHAT', +AMOUNT DOUBLE(8,2) JPATH='$.EXPENSE.[*].AMOUNT') +ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='mulexp5.json'; +SELECT * FROM t4; + +--echo # +--echo # The complete table can be a multiple JSON table +--echo # +CREATE TABLE t1 ( +WHO CHAR(12), +WEEK INT(2), +WHAT CHAR(32) JPATH='$.EXPENSE[*].WHAT', +AMOUNT DOUBLE(8,2) JPATH='$.EXPENSE.[*].AMOUNT') +ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='mulexp*.json' MULTIPLE=1; +SELECT * FROM t1 ORDER BY WHO, WEEK, WHAT, AMOUNT; +DROP TABLE t1; + +--echo # +--echo # Or also a partition JSON table +--echo # +CREATE TABLE t1 ( +WHO CHAR(12), +WEEK INT(2), +WHAT CHAR(32) JPATH='$.EXPENSE[*].WHAT', +AMOUNT DOUBLE(8,2) 
JPATH='$.EXPENSE.[*].AMOUNT') +ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='mulexp%s.json'; +ALTER TABLE t1 +PARTITION BY LIST COLUMNS(WEEK) ( +PARTITION `3` VALUES IN(3), +PARTITION `4` VALUES IN(4), +PARTITION `5` VALUES IN(5)); +SHOW WARNINGS; +SELECT * FROM t1; +SELECT * FROM t1 WHERE WEEK = 4; +DROP TABLE t1, t2, t3, t4; + +# +# Clean up +# +--remove_file $MYSQLD_DATADIR/test/biblio.json +--remove_file $MYSQLD_DATADIR/test/bib0.dnx +--remove_file $MYSQLD_DATADIR/test/bib0.json +--remove_file $MYSQLD_DATADIR/test/expense.json +--remove_file $MYSQLD_DATADIR/test/mulexp3.json +--remove_file $MYSQLD_DATADIR/test/mulexp4.json +--remove_file $MYSQLD_DATADIR/test/mulexp5.json diff --git a/storage/connect/tabbson.cpp b/storage/connect/tabbson.cpp index f3378d5f9bc..bac437b2d18 100644 --- a/storage/connect/tabbson.cpp +++ b/storage/connect/tabbson.cpp @@ -1,5 +1,5 @@ /************* tabbson C++ Program Source Code File (.CPP) *************/ -/* PROGRAM NAME: tabjson Version 1.0 */ +/* PROGRAM NAME: tabbson Version 1.0 */ /* (C) Copyright to the author Olivier BERTRAND 2020 */ /* This program are the BSON class DB execution routines. */ /***********************************************************************/ @@ -29,14 +29,12 @@ #if defined(ZIP_SUPPORT) #include "filamzip.h" #endif // ZIP_SUPPORT -#if 0 #if defined(JAVA_SUPPORT) #include "jmgfam.h" #endif // JAVA_SUPPORT #if defined(CMGO_SUPPORT) #include "cmgfam.h" #endif // CMGO_SUPPORT -#endif // 0 #include "tabmul.h" #include "checklvl.h" #include "resource.h" @@ -266,38 +264,36 @@ int BSONDISC::GetColumns(PGLOBAL g, PCSZ db, PCSZ dsn, PTOS topt) if (tdp->Zipped) { #if defined(ZIP_SUPPORT) - tjnp = new(g)TDBBSN(tdp->G, tdp, new(g) UNZFAM(tdp)); + tjnp = new(g)TDBBSN(g, tdp, new(g) UNZFAM(tdp)); #else // !ZIP_SUPPORT sprintf(g->Message, MSG(NO_FEAT_SUPPORT), "ZIP"); return NULL; #endif // !ZIP_SUPPORT -#if 0 } else if (tdp->Uri) { if (tdp->Driver && toupper(*tdp->Driver) == 'C') { #if defined(CMGO_SUPPORT) - tjnp = new(g) TDBBSN(G, tdp, new(g) CMGFAM(tdp)); + tjnp = new(g) TDBBSN(g, tdp, new(g) CMGFAM(tdp)); #else sprintf(g->Message, "Mongo %s Driver not available", "C"); return 0; #endif } else if (tdp->Driver && toupper(*tdp->Driver) == 'J') { #if defined(JAVA_SUPPORT) - tjnp = new(g) TDBBSN(G, tdp, new(g) JMGFAM(tdp)); + tjnp = new(g) TDBBSN(g, tdp, new(g) JMGFAM(tdp)); #else sprintf(g->Message, "Mongo %s Driver not available", "Java"); return 0; #endif } else { // Driver not specified #if defined(CMGO_SUPPORT) - tjnp = new(g) TDBBSN(G, tdp, new(g) CMGFAM(tdp)); + tjnp = new(g) TDBBSN(g, tdp, new(g) CMGFAM(tdp)); #elif defined(JAVA_SUPPORT) - tjnp = new(g) TDBBSN(G, tdp, new(g) JMGFAM(tdp)); + tjnp = new(g) TDBBSN(g, tdp, new(g) JMGFAM(tdp)); #else sprintf(g->Message, MSG(NO_FEAT_SUPPORT), "MONGO"); return 0; #endif } // endif Driver -#endif // 0 } else if (tdp->Pretty >= 0) tjnp = new(g) TDBBSN(g, tdp, new(g) DOSFAM(tdp)); @@ -394,7 +390,7 @@ bool BSONDISC::Find(PGLOBAL g, PBVAL jvp, PCSZ key, int j) char *p, *pc = colname + strlen(colname), buf[32]; int ars; size_t n; - PBPR job; + PBVAL job; PBVAL jar; if (jvp && !bp->IsJson(jvp)) { @@ -436,9 +432,9 @@ bool BSONDISC::Find(PGLOBAL g, PBVAL jvp, PCSZ key, int j) switch (jsp->Type) { case TYPE_JOB: - job = bp->GetObject(jsp); + job = jsp; - for (PBPR jrp = job; jrp; jrp = bp->GetNext(jrp)) { + for (PBPR jrp = bp->GetObject(job); jrp; jrp = bp->GetNext(jrp)) { PCSZ k = bp->GetKey(jrp); if (*k != '$') { @@ -456,7 +452,7 @@ bool BSONDISC::Find(PGLOBAL g, PBVAL jvp, PCSZ key, int j) return false; 
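Besides the explicit column definitions used in the tests above, connect_assisted_discovery (patched earlier in ha_connect.cc) routes TAB_BSON through BSONColumns(), which drives the BSONDISC code being adjusted here. A discovery sketch, not taken from the test suite (the exact column list produced depends on how deeply BSONDISC scans the document):

CREATE TABLE bdisc ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='biblio.json';
DESCRIBE bdisc;
DROP TABLE bdisc;
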
case TYPE_JAR: - jar = bp->GetArray(jsp); + jar = jsp; if (all || (tdp->Xcol && !stricmp(tdp->Xcol, key))) ars = MY_MIN(bp->GetArraySize(jar), limit); @@ -722,9 +718,11 @@ void BCUTIL::SetJsonValue(PGLOBAL g, PVAL vp, PBVAL jvp) case TYPE_BINT: case TYPE_DBL: case TYPE_DTM: + case TYPE_FLOAT: switch (vp->GetType()) { case TYPE_STRING: case TYPE_DATE: + case TYPE_DECIM: vp->SetValue_psz(GetString(jvp)); break; case TYPE_INT: @@ -738,7 +736,7 @@ void BCUTIL::SetJsonValue(PGLOBAL g, PVAL vp, PBVAL jvp) case TYPE_DOUBLE: vp->SetValue(GetDouble(jvp)); - if (jvp->Type == TYPE_DBL) + if (jvp->Type == TYPE_DBL || jvp->Type == TYPE_FLOAT) vp->SetPrec(jvp->Nd); break; @@ -1175,7 +1173,6 @@ PTDB BSONDEF::GetTable(PGLOBAL g, MODE m) return NULL; } // endif Lrecl -#if 0 if (Uri) { if (Driver && toupper(*Driver) == 'C') { #if defined(CMGO_SUPPORT) @@ -1203,8 +1200,7 @@ PTDB BSONDEF::GetTable(PGLOBAL g, MODE m) } // endif Driver } else if (Zipped) { -#endif // 0 - if (Zipped) { +// if (Zipped) { #if defined(ZIP_SUPPORT) if (m == MODE_READ || m == MODE_ANY || m == MODE_ALTER) { txfp = new(g) UNZFAM(this); diff --git a/storage/connect/tabjson.cpp b/storage/connect/tabjson.cpp index af45cdab9f7..f606f4b1a00 100644 --- a/storage/connect/tabjson.cpp +++ b/storage/connect/tabjson.cpp @@ -3,6 +3,7 @@ /* (C) Copyright to the author Olivier BERTRAND 2014 - 2020 */ /* This program are the JSON class DB execution routines. */ /***********************************************************************/ +#undef BSON_SUPPORT /***********************************************************************/ /* Include relevant sections of the MariaDB header file. */ From 4eeadedc77018da781eb7ea008fd3474f1a354d5 Mon Sep 17 00:00:00 2001 From: Olivier Bertrand Date: Wed, 9 Dec 2020 00:55:06 +0100 Subject: [PATCH 037/150] - Fix json_bjson (s was erase by Json_Subset) modified: storage/connect/jsonudf.cpp modified: storage/connect/jsonudf.h - Fix compile error (Force_Bson was not conditional by BSON_SUPPORT) modified: storage/connect/ha_connect.cc - Continue Bjson implementation modified: storage/connect/block.h modified: storage/connect/bson.cpp modified: storage/connect/bson.h modified: storage/connect/bsonudf.cpp modified: storage/connect/bsonudf.h modified: storage/connect/plugutil.cpp modified: storage/connect/tabbson.cpp modified: storage/connect/tabjson.cpp - Typo deleted: storage/connect/Header.h --- storage/connect/Header.h | 395 ---------------------------------- storage/connect/block.h | 4 +- storage/connect/bson.cpp | 58 +++-- storage/connect/bson.h | 129 +---------- storage/connect/bsonudf.cpp | 124 +++++++++++ storage/connect/bsonudf.h | 5 +- storage/connect/ha_connect.cc | 4 +- storage/connect/jsonudf.cpp | 9 +- storage/connect/jsonudf.h | 4 +- storage/connect/plugutil.cpp | 41 ++-- storage/connect/tabbson.cpp | 37 ++-- storage/connect/tabjson.cpp | 8 - 12 files changed, 204 insertions(+), 614 deletions(-) delete mode 100644 storage/connect/Header.h diff --git a/storage/connect/Header.h b/storage/connect/Header.h deleted file mode 100644 index f9664befaa3..00000000000 --- a/storage/connect/Header.h +++ /dev/null @@ -1,395 +0,0 @@ -#pragma once -/**************** json H Declares Source Code File (.H) ****************/ -/* Name: json.h Version 1.2 */ -/* */ -/* (C) Copyright to the author Olivier BERTRAND 2014 - 2020 */ -/* */ -/* This file contains the JSON classes declares. 
*/ -/***********************************************************************/ -#include -#include "value.h" -#include "xobject.h" - -#if defined(_DEBUG) -#define X assert(false); -#else -#define X -#endif - -enum JTYP { - TYPE_NULL = TYPE_VOID, - TYPE_STRG = TYPE_STRING, - TYPE_DBL = TYPE_DOUBLE, - TYPE_BOOL = TYPE_TINY, - TYPE_BINT = TYPE_BIGINT, - TYPE_DTM = TYPE_DATE, - TYPE_INTG = TYPE_INT, - TYPE_VAL = 12, - TYPE_JSON, - TYPE_JAR, - TYPE_JOB, - TYPE_JVAL -}; - -class JDOC; -class JOUT; -class JSON; -class JVALUE; -class JOBJECT; -class JARRAY; - -typedef class JDOC* PJDOC; -typedef class JSON* PJSON; -typedef class JVALUE* PJVAL; -typedef class JOBJECT* PJOB; -typedef class JARRAY* PJAR; - -// BSON size should be equal on Linux and Windows -#define BMX 255 -typedef struct BSON* PBSON; -typedef struct JPAIR* PJPR; -//typedef struct VAL *PVL; - -/***********************************************************************/ -/* Structure JPAIR. The pairs of a json Object. */ -/***********************************************************************/ -struct JPAIR { - PCSZ Key; // This pair key name - PJVAL Val; // To the value of the pair - PJPR Next; // To the next pair -}; // end of struct JPAIR - -#if 0 -/***********************************************************************/ -/* Structure VAL (string, int, float, bool or null) */ -/***********************************************************************/ -struct VAL { - union { - char* Strp; // Ptr to a string - int N; // An integer value - long long LLn; // A big integer value - double F; // A float value - bool B; // True or false - }; - int Nd; // Decimal number - JTYP Type; // The value type -}; // end of struct VAL -#endif // 0 - -/***********************************************************************/ -/* Structure used to return binary json to Json UDF functions. */ -/***********************************************************************/ -struct BSON { - char Msg[BMX + 1]; - char* Filename; - PGLOBAL G; - int Pretty; - ulong Reslen; - my_bool Changed; - PJSON Top; - PJSON Jsp; - PBSON Bsp; -}; // end of struct BSON - -PBSON JbinAlloc(PGLOBAL g, UDF_ARGS* args, ulong len, PJSON jsp); -//PVL AllocVal(PGLOBAL g, JTYP type); -char* NextChr(PSZ s, char sep); -char* GetJsonNull(void); -const char* GetFmt(int type, bool un); - -PJSON ParseJson(PGLOBAL g, char* s, size_t n, int* prty = NULL, bool* b = NULL); -PSZ Serialize(PGLOBAL g, PJSON jsp, char* fn, int pretty); -DllExport bool IsNum(PSZ s); - -/***********************************************************************/ -/* Class JDOC. The class for parsing and serializing json documents. 
*/ -/***********************************************************************/ -class JDOC : public BLOCK { - friend PJSON ParseJson(PGLOBAL, char*, size_t, int*, bool*); - friend PSZ Serialize(PGLOBAL, PJSON, char*, int); -public: - JDOC(void) : js(NULL), s(NULL), len(0), pty(NULL) {} - - void SetJp(JOUT* jp) { js = jp; } - -protected: - PJAR ParseArray(PGLOBAL g, int& i); - PJOB ParseObject(PGLOBAL g, int& i); - PJVAL ParseValue(PGLOBAL g, int& i); - char* ParseString(PGLOBAL g, int& i); - void ParseNumeric(PGLOBAL g, int& i, PJVAL jvp); - PJAR ParseAsArray(PGLOBAL g, int& i, int pretty, int* ptyp); - bool SerializeArray(PJAR jarp, bool b); - bool SerializeObject(PJOB jobp); - bool SerializeValue(PJVAL jvp); - - // Members used when parsing and serializing -private: - JOUT* js; - char* s; - int len; - bool* pty; -}; // end of class JDOC - -/***********************************************************************/ -/* Class JSON. The base class for all other json classes. */ -/***********************************************************************/ -class JSON : public BLOCK { -public: - // Constructor - JSON(void) { Type = TYPE_JSON; } - JSON(int) {} - - // Implementation - inline JTYP GetType(void) { return Type; } - - // Methods - virtual int size(void) { return 1; } - virtual void Clear(void) { X } - virtual PJOB GetObject(void) { return NULL; } - virtual PJAR GetArray(void) { return NULL; } - virtual PJVAL GetArrayValue(int i) { X return NULL; } - virtual int GetSize(bool b) { X return 0; } - virtual PJSON GetJsp(void) { X return NULL; } - virtual PJPR GetFirst(void) { X return NULL; } - virtual PSZ GetText(PGLOBAL g, PSTRG text) { X return NULL; } - virtual bool Merge(PGLOBAL g, PJSON jsp) { X return true; } - virtual void SetValue(PJSON jsp) { X } - virtual bool DeleteValue(int i) { X return true; } - virtual bool IsNull(void) { X return true; } - - // Members - JTYP Type; -}; // end of class JSON - -/***********************************************************************/ -/* Class JOBJECT: contains a list of value pairs. */ -/***********************************************************************/ -class JOBJECT : public JSON { - friend class JDOC; - friend class JSNX; - friend class SWAP; -public: - JOBJECT(void) : JSON() { Type = TYPE_JOB; First = Last = NULL; } - JOBJECT(int i) : JSON(i) {} - - // Methods - virtual void Clear(void) { First = Last = NULL; } - //virtual JTYP GetValType(void) {return TYPE_JOB;} - virtual PJPR GetFirst(void) { return First; } - virtual int GetSize(bool b); - virtual PJOB GetObject(void) { return this; } - virtual PSZ GetText(PGLOBAL g, PSTRG text); - virtual bool Merge(PGLOBAL g, PJSON jsp); - virtual bool IsNull(void); - - // Specific - PJPR AddPair(PGLOBAL g, PCSZ key); - PJVAL GetKeyValue(const char* key); - PJAR GetKeyList(PGLOBAL g); - PJAR GetValList(PGLOBAL g); - void SetKeyValue(PGLOBAL g, PJVAL jvp, PCSZ key); - void DeleteKey(PCSZ k); - -protected: - PJPR First; - PJPR Last; -}; // end of class JOBJECT - -/***********************************************************************/ -/* Class JARRAY. 
*/ -/***********************************************************************/ -class JARRAY : public JSON { - friend class SWAP; -public: - JARRAY(void); - JARRAY(int i) : JSON(i) {} - - // Methods - virtual void Clear(void) { First = Last = NULL; Size = 0; } - virtual int size(void) { return Size; } - virtual PJAR GetArray(void) { return this; } - virtual int GetSize(bool b); - virtual PJVAL GetArrayValue(int i); - virtual PSZ GetText(PGLOBAL g, PSTRG text); - virtual bool Merge(PGLOBAL g, PJSON jsp); - virtual bool DeleteValue(int n); - virtual bool IsNull(void); - - // Specific - PJVAL AddArrayValue(PGLOBAL g, PJVAL jvp = NULL, int* x = NULL); - bool SetArrayValue(PGLOBAL g, PJVAL jvp, int i); - void InitArray(PGLOBAL g); - -protected: - // Members - int Size; // The number of items in the array - int Alloc; // The Mvals allocated size - PJVAL First; // Used when constructing - PJVAL Last; // Last constructed value - PJVAL* Mvals; // Allocated when finished -}; // end of class JARRAY - -/***********************************************************************/ -/* Class JVALUE. */ -/***********************************************************************/ -class JVALUE : public JSON { - friend class JARRAY; - friend class JSNX; - friend class JSONDISC; - friend class JSONCOL; - friend class JSON; - friend class JDOC; - friend class SWAP; -public: - JVALUE(void) : JSON() { Type = TYPE_JVAL; Clear(); } - JVALUE(PJSON jsp); - //JVALUE(PGLOBAL g, PVL vlp); - JVALUE(PGLOBAL g, PVAL valp); - JVALUE(PGLOBAL g, PCSZ strp); - JVALUE(int i) : JSON(i) {} - - //using JSON::GetVal; - //using JSON::SetVal; - - // Methods - virtual void Clear(void); - //virtual JTYP GetType(void) {return TYPE_JVAL;} - virtual JTYP GetValType(void); - virtual PJOB GetObject(void); - virtual PJAR GetArray(void); - virtual PJSON GetJsp(void) { return (DataType == TYPE_JSON ? Jsp : NULL); } - virtual PSZ GetText(PGLOBAL g, PSTRG text); - virtual bool IsNull(void); - - // Specific - //inline PVL GetVal(void) { return Val; } - //inline void SetVal(PVL vlp) { Val = vlp; } - inline PJSON GetJson(void) { return (DataType == TYPE_JSON ? Jsp : this); } - PSZ GetString(PGLOBAL g, char* buff = NULL); - int GetInteger(void); - long long GetBigint(void); - double GetFloat(void); - PVAL GetValue(PGLOBAL g); - void SetValue(PJSON jsp); - void SetValue(PGLOBAL g, PVAL valp); - void SetString(PGLOBAL g, PSZ s, int ci = 0); - void SetInteger(PGLOBAL g, int n); - void SetBigint(PGLOBAL g, longlong ll); - void SetFloat(PGLOBAL g, double f); - void SetBool(PGLOBAL g, bool b); - -protected: - union { - PJSON Jsp; // To the json value - char* Strp; // Ptr to a string - int N; // An integer value - long long LLn; // A big integer value - double F; // A (double) float value - bool B; // True or false - }; - //PVL Val; // To the string or numeric value - PJVAL Next; // Next value in array - JTYP DataType; // The data value type - int Nd; // Decimal number - bool Del; // True when deleted -}; // end of class JVALUE - - -/***********************************************************************/ -/* Class JOUT. Used by Serialize. 
*/ -/***********************************************************************/ -class JOUT : public BLOCK { -public: - JOUT(PGLOBAL gp) : BLOCK() { g = gp; Pretty = 3; } - - virtual bool WriteStr(const char* s) = 0; - virtual bool WriteChr(const char c) = 0; - virtual bool Escape(const char* s) = 0; - int Prty(void) { return Pretty; } - - // Member - PGLOBAL g; - int Pretty; -}; // end of class JOUT - -/***********************************************************************/ -/* Class JOUTSTR. Used to Serialize to a string. */ -/***********************************************************************/ -class JOUTSTR : public JOUT { -public: - JOUTSTR(PGLOBAL g); - - virtual bool WriteStr(const char* s); - virtual bool WriteChr(const char c); - virtual bool Escape(const char* s); - - // Member - char* Strp; // The serialized string - size_t N; // Position of next char - size_t Max; // String max size -}; // end of class JOUTSTR - -/***********************************************************************/ -/* Class JOUTFILE. Used to Serialize to a file. */ -/***********************************************************************/ -class JOUTFILE : public JOUT { -public: - JOUTFILE(PGLOBAL g, FILE* str, int pty) : JOUT(g) { Stream = str; Pretty = pty; } - - virtual bool WriteStr(const char* s); - virtual bool WriteChr(const char c); - virtual bool Escape(const char* s); - - // Member - FILE* Stream; -}; // end of class JOUTFILE - -/***********************************************************************/ -/* Class JOUTPRT. Used to Serialize to a pretty file. */ -/***********************************************************************/ -class JOUTPRT : public JOUTFILE { -public: - JOUTPRT(PGLOBAL g, FILE* str) : JOUTFILE(g, str, 2) { M = 0; B = false; } - - virtual bool WriteStr(const char* s); - virtual bool WriteChr(const char c); - - // Member - int M; - bool B; -}; // end of class JOUTPRT - - -/***********************************************************************/ -/* Class SWAP. Used to make or unmake a JSON tree movable. */ -/* This is done by making all pointers to offsets. */ -/***********************************************************************/ -class SWAP : public BLOCK { -public: - // Constructor - SWAP(PGLOBAL g, PJSON jsp) { - G = g, Base = (char*)jsp - 8; - } - - // Methods - void SwapJson(PJSON jsp, bool move); - -protected: - size_t MoffJson(PJSON jnp); - size_t MoffArray(PJAR jarp); - size_t MoffObject(PJOB jobp); - size_t MoffJValue(PJVAL jvp); - size_t MoffPair(PJPR jpp); - //size_t MoffVal(PVL vlp); - PJSON MptrJson(PJSON jnp); - PJAR MptrArray(PJAR jarp); - PJOB MptrObject(PJOB jobp); - PJVAL MptrJValue(PJVAL jvp); - PJPR MptrPair(PJPR jpp); - //PVL MptrVal(PVL vlp); - - // Member - PGLOBAL G; - void* Base; -}; // end of class SWAP diff --git a/storage/connect/block.h b/storage/connect/block.h index 6ac3b73bf13..c10fc4761ac 100644 --- a/storage/connect/block.h +++ b/storage/connect/block.h @@ -18,8 +18,8 @@ /* global pointer of the Plug application, and an optional pointer to */ /* the memory pool to use, defaulting to NULL meaning using the Plug */ /* standard default memory pool, example: */ -/* tabp = new(g) XTAB("EMPLOYEE"); */ -/* allocates a XTAB class object in the standard Plug memory pool. */ +/* tabp = new(g) XTAB("EMPLOYEE"); */ +/* allocates a XTAB class object in the standard Plug memory pool. 
*/ /***********************************************************************/ #if !defined(BLOCK_DEFINED) #define BLOCK_DEFINED diff --git a/storage/connect/bson.cpp b/storage/connect/bson.cpp index bf0755d4eae..a291dd69df6 100644 --- a/storage/connect/bson.cpp +++ b/storage/connect/bson.cpp @@ -1,9 +1,9 @@ -/*************** json CPP Declares Source Code File (.H) ***************/ -/* Name: json.cpp Version 1.5 */ +/*************** bson CPP Declares Source Code File (.H) ***************/ +/* Name: bson.cpp Version 1.0 */ /* */ -/* (C) Copyright to the author Olivier BERTRAND 2014 - 2020 */ +/* (C) Copyright to the author Olivier BERTRAND 2020 */ /* */ -/* This file contains the JSON classes functions. */ +/* This file contains the BJSON classes functions. */ /***********************************************************************/ /***********************************************************************/ @@ -15,7 +15,7 @@ /* Include application header files: */ /* global.h is header containing all global declarations. */ /* plgdbsem.h is header containing the DB application declarations. */ -/* xjson.h is header containing the JSON classes declarations. */ +/* bson.h is header containing the BSON classes declarations. */ /***********************************************************************/ #include "global.h" #include "plgdbsem.h" @@ -112,6 +112,7 @@ PBVAL BDOC::ParseJson(PGLOBAL g, char* js, size_t lng, int* ptyp, bool* comma) { int i, pretty = (ptyp) ? *ptyp : 3; bool b = false; + PBVAL bvp = NULL; s = js; len = lng; @@ -128,26 +129,26 @@ PBVAL BDOC::ParseJson(PGLOBAL g, char* js, size_t lng, int* ptyp, bool* comma) pty[0] = false; try { - Bvp = NewVal(); - Bvp->Type = TYPE_UNKNOWN; + bvp = NewVal(); + bvp->Type = TYPE_UNKNOWN; for (i = 0; i < len; i++) switch (s[i]) { case '[': - if (Bvp->Type != TYPE_UNKNOWN) - Bvp->To_Val = ParseAsArray(i, pretty, ptyp); + if (bvp->Type != TYPE_UNKNOWN) + bvp->To_Val = ParseAsArray(i, pretty, ptyp); else - Bvp->To_Val = ParseArray(++i); + bvp->To_Val = ParseArray(++i); - Bvp->Type = TYPE_JAR; + bvp->Type = TYPE_JAR; break; case '{': - if (Bvp->Type != TYPE_UNKNOWN) { - Bvp->To_Val = ParseAsArray(i, pretty, ptyp); - Bvp->Type = TYPE_JAR; + if (bvp->Type != TYPE_UNKNOWN) { + bvp->To_Val = ParseAsArray(i, pretty, ptyp); + bvp->Type = TYPE_JAR; } else { - Bvp->To_Val = ParseObject(++i); - Bvp->Type = TYPE_JOB; + bvp->To_Val = ParseObject(++i); + bvp->Type = TYPE_JOB; } // endif Type break; @@ -157,7 +158,7 @@ PBVAL BDOC::ParseJson(PGLOBAL g, char* js, size_t lng, int* ptyp, bool* comma) case '\r': break; case ',': - if (Bvp->Type != TYPE_UNKNOWN && (pretty == 1 || pretty == 3)) { + if (bvp->Type != TYPE_UNKNOWN && (pretty == 1 || pretty == 3)) { if (comma) *comma = true; @@ -177,18 +178,18 @@ PBVAL BDOC::ParseJson(PGLOBAL g, char* js, size_t lng, int* ptyp, bool* comma) } // endif b default: - if (Bvp->Type != TYPE_UNKNOWN) { - Bvp->To_Val = ParseAsArray(i, pretty, ptyp); - Bvp->Type = TYPE_JAR; - } else if ((Bvp->To_Val = MOF(ParseValue(i)))) - Bvp->Type = TYPE_JVAL; + if (bvp->Type != TYPE_UNKNOWN) { + bvp->To_Val = ParseAsArray(i, pretty, ptyp); + bvp->Type = TYPE_JAR; + } else if ((bvp->To_Val = MOF(ParseValue(i)))) + bvp->Type = TYPE_JVAL; else throw 4; break; }; // endswitch s[i] - if (Bvp->Type == TYPE_UNKNOWN) + if (bvp->Type == TYPE_UNKNOWN) sprintf(g->Message, "Invalid Json string '%.*s'", MY_MIN((int)len, 50), s); else if (ptyp && pretty == 3) { *ptyp = 3; // Not recognized pretty @@ -205,13 +206,13 @@ PBVAL BDOC::ParseJson(PGLOBAL g, char* 
js, size_t lng, int* ptyp, bool* comma) if (trace(1)) htrc("Exception %d: %s\n", n, G->Message); GetMsg(g); - Bvp = NULL; + bvp = NULL; } catch (const char* msg) { strcpy(g->Message, msg); - Bvp = NULL; + bvp = NULL; } // end catch - return Bvp; + return bvp; } // end of ParseJson /***********************************************************************/ @@ -391,13 +392,11 @@ suite: bvp->Type = TYPE_JOB; break; case '"': - // jvp->Val = AllocVal(g, TYPE_STRG); bvp->To_Val = ParseString(++i); bvp->Type = TYPE_STRG; break; case 't': if (!strncmp(s + i, "true", 4)) { - // jvp->Val = AllocVal(g, TYPE_BOOL); bvp->B = true; bvp->Type = TYPE_BOOL; i += 3; @@ -407,7 +406,6 @@ suite: break; case 'f': if (!strncmp(s + i, "false", 5)) { - // jvp->Val = AllocVal(g, TYPE_BOOL); bvp->B = false; bvp->Type = TYPE_BOOL; i += 4; @@ -872,7 +870,7 @@ void BJSON::SubSet(bool b) if (b) G->Saved_Size = 0; -} /* end of JsonSubSet */ +} // end of SubSet /* ------------------------ Bobject functions ------------------------ */ diff --git a/storage/connect/bson.h b/storage/connect/bson.h index 03d5b9bd854..a6e160a3f3b 100644 --- a/storage/connect/bson.h +++ b/storage/connect/bson.h @@ -1,4 +1,3 @@ -#pragma once /**************** bson H Declares Source Code File (.H) ****************/ /* Name: bson.h Version 1.0 */ /* */ @@ -6,6 +5,7 @@ /* */ /* This file contains the BSON classe declares. */ /***********************************************************************/ +#pragma once #include #include "json.h" #include "xobject.h" @@ -189,130 +189,3 @@ protected: // Default constructor not to be used BDOC(void) {} }; // end of class BDOC - -#if 0 -/***********************************************************************/ -/* Class JOBJECT: contains a list of value pairs. */ -/***********************************************************************/ -class JOBJECT : public JSON { - friend class JDOC; - friend class JSNX; - friend class SWAP; -public: - JOBJECT(void) : JSON() { Type = TYPE_JOB; First = Last = NULL; } - JOBJECT(int i) : JSON(i) {} - - // Methods - virtual void Clear(void) { First = Last = NULL; } - virtual PJPR GetFirst(void) { return First; } - virtual int GetSize(PBPR prp, bool b); - virtual PJOB GetObject(void) { return this; } - virtual PSZ GetText(PGLOBAL g, PSTRG text); - virtual bool Merge(PGLOBAL g, PJSON jsp); - virtual bool IsNull(void); - - // Specific - PJPR AddPair(PGLOBAL g, PCSZ key); - PJVAL GetKeyValue(const char* key); - PJAR GetKeyList(PGLOBAL g); - PJAR GetValList(PGLOBAL g); - void SetKeyValue(PGLOBAL g, PJVAL jvp, PCSZ key); - void DeleteKey(PCSZ k); - -protected: - PJPR First; - PJPR Last; -}; // end of class JOBJECT - -/***********************************************************************/ -/* Class JARRAY. 
*/ -/***********************************************************************/ -class JARRAY : public JSON { - friend class SWAP; -public: - JARRAY(void); - JARRAY(int i) : JSON(i) {} - - // Methods - virtual void Clear(void) { First = Last = NULL; Size = 0; } - virtual int size(void) { return Size; } - virtual PJAR GetArray(void) { return this; } - virtual int GetSize(bool b); - virtual PJVAL GetArrayValue(int i); - virtual PSZ GetText(PGLOBAL g, PSTRG text); - virtual bool Merge(PGLOBAL g, PJSON jsp); - virtual bool DeleteValue(int n); - virtual bool IsNull(void); - - // Specific - PJVAL AddArrayValue(PGLOBAL g, PJVAL jvp = NULL, int* x = NULL); - bool SetArrayValue(PGLOBAL g, PJVAL jvp, int i); - void InitArray(PGLOBAL g); - -protected: - // Members - int Size; // The number of items in the array - int Alloc; // The Mvals allocated size - PJVAL First; // Used when constructing - PJVAL Last; // Last constructed value - PJVAL* Mvals; // Allocated when finished -}; // end of class JARRAY - -/***********************************************************************/ -/* Class JVALUE. */ -/***********************************************************************/ -class JVALUE : public JSON { - friend class JARRAY; - friend class JSNX; - friend class JSONDISC; - friend class JSONCOL; - friend class JSON; - friend class JDOC; - friend class SWAP; -public: - JVALUE(void) : JSON() { Type = TYPE_JVAL; Clear(); } - JVALUE(PJSON jsp); - JVALUE(PGLOBAL g, PVAL valp); - JVALUE(PGLOBAL g, PCSZ strp); - JVALUE(int i) : JSON(i) {} - - // Methods - virtual void Clear(void); - //virtual JTYP GetType(void) {return TYPE_JVAL;} - virtual JTYP GetValType(void); - virtual PJOB GetObject(void); - virtual PJAR GetArray(void); - virtual PJSON GetJsp(void) { return (DataType == TYPE_JSON ? Jsp : NULL); } - virtual PSZ GetText(PGLOBAL g, PSTRG text); - virtual bool IsNull(void); - - // Specific - inline PJSON GetJson(void) { return (DataType == TYPE_JSON ? Jsp : this); } - PSZ GetString(PGLOBAL g, char* buff = NULL); - int GetInteger(void); - long long GetBigint(void); - double GetFloat(void); - PVAL GetValue(PGLOBAL g); - void SetValue(PJSON jsp); - void SetValue(PGLOBAL g, PVAL valp); - void SetString(PGLOBAL g, PSZ s, int ci = 0); - void SetInteger(PGLOBAL g, int n); - void SetBigint(PGLOBAL g, longlong ll); - void SetFloat(PGLOBAL g, double f); - void SetBool(PGLOBAL g, bool b); - -protected: - union { - PJSON Jsp; // To the json value - char* Strp; // Ptr to a string - int N; // An integer value - long long LLn; // A big integer value - double F; // A (double) float value - bool B; // True or false - }; - PJVAL Next; // Next value in array - JTYP DataType; // The data value type - int Nd; // Decimal number - bool Del; // True when deleted -}; // end of class JVALUE -#endif // 0 diff --git a/storage/connect/bsonudf.cpp b/storage/connect/bsonudf.cpp index 5163f042ad0..b7a778b8387 100644 --- a/storage/connect/bsonudf.cpp +++ b/storage/connect/bsonudf.cpp @@ -1706,3 +1706,127 @@ void bson_locate_all_deinit(UDF_INIT* initid) { JsonFreeMem((PGLOBAL)initid->ptr); } // end of bson_locate_all_deinit +/*********************************************************************************/ +/* Convert a pretty=0 Json file to binary BJSON. 
*/ +/*********************************************************************************/ +my_bool bfile_bjson_init(UDF_INIT* initid, UDF_ARGS* args, char* message) { + unsigned long reslen, memlen; + + if (args->arg_count != 2 && args->arg_count != 3) { + strcpy(message, "This function must have 2 or 3 arguments"); + return true; + } else if (args->arg_count == 3 && args->arg_type[2] != INT_RESULT) { + strcpy(message, "Third Argument must be an integer (LRECL)"); + return true; + } else for (int i = 0; i < 2; i++) + if (args->arg_type[i] != STRING_RESULT) { + sprintf(message, "Arguments %d must be a string (file name)", i + 1); + return true; + } // endif args + + CalcLen(args, false, reslen, memlen); + memlen = memlen * M; + memlen += (args->arg_count == 3) ? (ulong)*(longlong*)args->args[2] : 1024; + return JsonInit(initid, args, message, false, reslen, memlen); +} // end of bfile_bjson_init + +char *bfile_bjson(UDF_INIT *initid, UDF_ARGS *args, char *result, + unsigned long *res_length, char*, char *error) { + char *buf, *str = NULL, fn[_MAX_PATH], ofn[_MAX_PATH]; + bool loop; + ssize_t len, newloc; + size_t lrecl, binszp; + PBVAL jsp; + PBJNX bnxp; + PGLOBAL g = (PGLOBAL)initid->ptr; + BDOC doc(g); + + strcpy(fn, MakePSZ(g, args, 0)); + strcpy(ofn, MakePSZ(g, args, 1)); + + if (args->arg_count == 3) + lrecl = (size_t)*(longlong*)args->args[2]; + else + lrecl = 1024; + + if (!g->Xchk) { + int msgid = MSGID_OPEN_MODE_STRERROR; + FILE *fout; + FILE *fin; + + if (!(fin = global_fopen(g, msgid, fn, "rt"))) + str = strcpy(result, g->Message); + else if (!(fout = global_fopen(g, msgid, ofn, "wb"))) + str = strcpy(result, g->Message); + else if ((buf = (char*)malloc(lrecl))) { + try { + do { + loop = false; + PlugSubSet(g->Sarea, g->Sarea_Size); + + if (!fgets(buf, lrecl, fin)) { + if (!feof(fin)) { + sprintf(g->Message, "Error %d reading %zd bytes from %s", + errno, lrecl, fn); + str = strcpy(result, g->Message); + } else + str = strcpy(result, ofn); + + } else if ((len = strlen(buf))) { + if ((jsp = doc.ParseJson(g, buf, len))) { + newloc = (size_t)PlugSubAlloc(g, NULL, 0); + binszp = newloc - (size_t)jsp; + + if (fwrite(&binszp, sizeof(binszp), 1, fout) != 1) { + sprintf(g->Message, "Error %d writing %zd bytes to %s", + errno, sizeof(binszp), ofn); + str = strcpy(result, g->Message); + } else if (fwrite(jsp, binszp, 1, fout) != 1) { + sprintf(g->Message, "Error %d writing %zd bytes to %s", + errno, binszp, ofn); + str = strcpy(result, g->Message); + } else + loop = true; + + } else { + str = strcpy(result, g->Message); + } // endif jsp + + } else + loop = true; + + } while (loop); + + } catch (int) { + str = strcpy(result, g->Message); + } catch (const char* msg) { + str = strcpy(result, msg); + } // end catch + + free(buf); + } else + str = strcpy(result, "Buffer malloc failed"); + + if (fin) fclose(fin); + if (fout) fclose(fout); + g->Xchk = str; + } else + str = (char*)g->Xchk; + + if (!str) { + if (g->Message) + str = strcpy(result, g->Message); + else + str = strcpy(result, "Unexpected error"); + + } // endif str + + *res_length = strlen(str); + return str; +} // end of bfile_bjson + +void bfile_bjson_deinit(UDF_INIT* initid) { + JsonFreeMem((PGLOBAL)initid->ptr); +} // end of bfile_bjson_deinit + + diff --git a/storage/connect/bsonudf.h b/storage/connect/bsonudf.h index b591b6b89f8..1675d36cee5 100644 --- a/storage/connect/bsonudf.h +++ b/storage/connect/bsonudf.h @@ -106,5 +106,8 @@ extern "C" { DllExport my_bool bson_locate_all_init(UDF_INIT*, UDF_ARGS*, char*); DllExport char* 
bson_locate_all(UDF_EXEC_ARGS); DllExport void bson_locate_all_deinit(UDF_INIT*); -} // extern "C" + DllExport my_bool bfile_bjson_init(UDF_INIT*, UDF_ARGS*, char*); + DllExport char* bfile_bjson(UDF_EXEC_ARGS); + DllExport void bfile_bjson_deinit(UDF_INIT*); +} // extern "C" diff --git a/storage/connect/ha_connect.cc b/storage/connect/ha_connect.cc index 6728550447c..65c3ea5c5d6 100644 --- a/storage/connect/ha_connect.cc +++ b/storage/connect/ha_connect.cc @@ -170,7 +170,7 @@ #define JSONMAX 10 // JSON Default max grp size extern "C" { - char version[]= "Version 1.07.0002 December 07, 2020"; + char version[]= "Version 1.07.0002 December 12, 2020"; #if defined(__WIN__) char compver[]= "Version 1.07.0002 " __DATE__ " " __TIME__; char slash= '\\'; @@ -516,7 +516,9 @@ char *GetJavaWrapper(void) bool MongoEnabled(void) {return THDVAR(current_thd, enable_mongo);} #endif // JAVA_SUPPORT || CMGO_SUPPORT +#if defined(BSON_SUPPORT) bool Force_Bson(void) {return THDVAR(current_thd, force_bson);} +#endif // BSON_SUPPORT) #if defined(XMSG) || defined(NEWMSG) extern "C" const char *msglang(void) diff --git a/storage/connect/jsonudf.cpp b/storage/connect/jsonudf.cpp index 773828a96dd..0909cb96477 100644 --- a/storage/connect/jsonudf.cpp +++ b/storage/connect/jsonudf.cpp @@ -1174,13 +1174,16 @@ static uint GetJsonGroupSize(void) /*********************************************************************************/ /* Program for SubSet re-initialization of the memory pool. */ /*********************************************************************************/ -my_bool JsonSubSet(PGLOBAL g) +my_bool JsonSubSet(PGLOBAL g, my_bool b) { PPOOLHEADER pph = (PPOOLHEADER)g->Sarea; pph->To_Free = (g->Saved_Size) ? g->Saved_Size : sizeof(POOLHEADER); pph->FreeBlk = g->Sarea_Size - pph->To_Free; - g->Saved_Size = 0; + + if (b) + g->Saved_Size = 0; + return FALSE; } /* end of JsonSubSet */ @@ -1458,7 +1461,7 @@ int IsJson(UDF_ARGS *args, uint i, bool b) char *sap; PGLOBAL g = PlugInit(NULL, (size_t)args->lengths[i] * M + 1024); - JsonSubSet(g); +// JsonSubSet(g); sap = MakePSZ(g, args, i); if (ParseJson(g, sap, strlen(sap))) diff --git a/storage/connect/jsonudf.h b/storage/connect/jsonudf.h index 2a2b2cac20e..d99122aa775 100644 --- a/storage/connect/jsonudf.h +++ b/storage/connect/jsonudf.h @@ -53,13 +53,13 @@ typedef struct _jnode { typedef class JSNX *PJSNX; /*********************************************************************************/ -/* The JSON tree node. Can be an Object or an Array. */ +/* The JSON utility functions. */ /*********************************************************************************/ bool IsNum(PSZ s); char *NextChr(PSZ s, char sep); char *GetJsonNull(void); uint GetJsonGrpSize(void); -my_bool JsonSubSet(PGLOBAL g); +my_bool JsonSubSet(PGLOBAL g, my_bool b = false); my_bool CalcLen(UDF_ARGS* args, my_bool obj, unsigned long& reslen, unsigned long& memlen, my_bool mod = false); my_bool JsonInit(UDF_INIT* initid, UDF_ARGS* args, char* message, my_bool mbn, diff --git a/storage/connect/plugutil.cpp b/storage/connect/plugutil.cpp index ac07f45ccf2..c33639bf744 100644 --- a/storage/connect/plugutil.cpp +++ b/storage/connect/plugutil.cpp @@ -96,7 +96,7 @@ char *msglang(void); typedef struct { ushort Segsize; ushort Size; - } AREASIZE; +} AREASIZE; ACTIVITY defActivity = { /* Describes activity and language */ NULL, /* Points to user work area(s) */ @@ -204,7 +204,7 @@ PGLOBAL PlugExit(PGLOBAL g) /* Note: this routine is not really implemented for Unix. 
*/ /***********************************************************************/ LPSTR PlugRemoveType(LPSTR pBuff, LPCSTR FileName) - { +{ #if defined(__WIN__) char drive[_MAX_DRIVE]; #else @@ -228,8 +228,7 @@ LPSTR PlugRemoveType(LPSTR pBuff, LPCSTR FileName) htrc("buff='%s'\n", pBuff); return pBuff; - } // end of PlugRemoveType - +} // end of PlugRemoveType BOOL PlugIsAbsolutePath(LPCSTR path) { @@ -246,7 +245,7 @@ BOOL PlugIsAbsolutePath(LPCSTR path) /* Note: this routine is not really implemented for Unix. */ /***********************************************************************/ LPCSTR PlugSetPath(LPSTR pBuff, LPCSTR prefix, LPCSTR FileName, LPCSTR defpath) - { +{ char newname[_MAX_PATH]; char direc[_MAX_DIR], defdir[_MAX_DIR], tmpdir[_MAX_DIR]; char fname[_MAX_FNAME]; @@ -347,14 +346,14 @@ LPCSTR PlugSetPath(LPSTR pBuff, LPCSTR prefix, LPCSTR FileName, LPCSTR defpath) } else return FileName; // Error, return unchanged name - } // end of PlugSetPath +} // end of PlugSetPath #if defined(XMSG) /***********************************************************************/ /* PlugGetMessage: get a message from the message file. */ /***********************************************************************/ char *PlugReadMessage(PGLOBAL g, int mid, char *m) - { +{ char msgfile[_MAX_PATH], msgid[32], buff[256]; char *msg; FILE *mfile = NULL; @@ -405,14 +404,14 @@ char *PlugReadMessage(PGLOBAL g, int mid, char *m) msg = stmsg; return msg; - } // end of PlugReadMessage +} // end of PlugReadMessage #elif defined(NEWMSG) /***********************************************************************/ /* PlugGetMessage: get a message from the resource string table. */ /***********************************************************************/ char *PlugGetMessage(PGLOBAL g, int mid) - { +{ char *msg; #if 0 // was !defined(UNIX) && !defined(UNIV_LINUX) @@ -440,7 +439,7 @@ char *PlugGetMessage(PGLOBAL g, int mid) msg = stmsg; return msg; - } // end of PlugGetMessage +} // end of PlugGetMessage #endif // NEWMSG #if defined(__WIN__) @@ -448,13 +447,13 @@ char *PlugGetMessage(PGLOBAL g, int mid) /* Return the line length of the console screen buffer. */ /***********************************************************************/ short GetLineLength(PGLOBAL g) - { +{ CONSOLE_SCREEN_BUFFER_INFO coninfo; HANDLE hcons = GetStdHandle(STD_OUTPUT_HANDLE); BOOL b = GetConsoleScreenBufferInfo(hcons, &coninfo); return (b) ? coninfo.dwSize.X : 0; - } // end of GetLineLength +} // end of GetLineLength #endif // __WIN__ /***********************************************************************/ @@ -527,13 +526,13 @@ void FreeSarea(PGLOBAL g) /* the address and size not larger than memory size. */ /***********************************************************************/ BOOL PlugSubSet(void *memp, size_t size) - { +{ PPOOLHEADER pph = (PPOOLHEADER)memp; pph->To_Free = (size_t)sizeof(POOLHEADER); pph->FreeBlk = size - pph->To_Free; return FALSE; - } /* end of PlugSubSet */ +} /* end of PlugSubSet */ /***********************************************************************/ /* Use it to export a function that do throwing. */ @@ -596,7 +595,7 @@ void *PlugSubAlloc(PGLOBAL g, void *memp, size_t size) /* Program for sub-allocating and copying a string in a storage area. 
*/ /***********************************************************************/ char *PlugDup(PGLOBAL g, const char *str) - { +{ if (str) { char *sm = (char*)PlugSubAlloc(g, NULL, strlen(str) + 1); @@ -605,12 +604,13 @@ char *PlugDup(PGLOBAL g, const char *str) } else return NULL; - } // end of PlugDup +} // end of PlugDup /*************************************************************************/ /* This routine makes a pointer from an offset to a memory pointer. */ /*************************************************************************/ -void* MakePtr(void* memp, size_t offset) { +void* MakePtr(void* memp, size_t offset) +{ // return ((offset == 0) ? NULL : &((char*)memp)[offset]); return (!offset) ? NULL : (char *)memp + offset; } /* end of MakePtr */ @@ -618,11 +618,14 @@ void* MakePtr(void* memp, size_t offset) { /*************************************************************************/ /* This routine makes an offset from a pointer new format. */ /*************************************************************************/ -size_t MakeOff(void* memp, void* ptr) { +size_t MakeOff(void* memp, void* ptr) +{ if (ptr) { #if defined(_DEBUG) || defined(DEVELOPMENT) - if (ptr <= memp) + if (ptr <= memp) { fprintf(stderr, "ptr %p <= memp %p", ptr, memp); + throw 999; + } // endif ptr #endif // _DEBUG || DEVELOPMENT return (size_t)((char*)ptr - (size_t)memp); } else diff --git a/storage/connect/tabbson.cpp b/storage/connect/tabbson.cpp index bac437b2d18..90a49aac1d5 100644 --- a/storage/connect/tabbson.cpp +++ b/storage/connect/tabbson.cpp @@ -259,7 +259,7 @@ int BSONDISC::GetColumns(PGLOBAL g, PCSZ db, PCSZ dsn, PTOS topt) } // endif Lrecl // Allocate the parse work memory - tdp->G = PlugInit(NULL, (size_t)tdp->Lrecl * (tdp->Pretty >= 0 ? 6 : 2)); + tdp->G = PlugInit(NULL, (size_t)tdp->Lrecl * (tdp->Pretty >= 0 ? 4 : 2)); tdp->Ending = GetIntegerTableOption(g, topt, "Ending", CRLF); if (tdp->Zipped) { @@ -1167,7 +1167,7 @@ PTDB BSONDEF::GetTable(PGLOBAL g, MODE m) if (Lrecl) { // Allocate the parse work memory - G = PlugInit(NULL, (size_t)Lrecl * 6); + G = PlugInit(NULL, (size_t)Lrecl * 4); } else { strcpy(g->Message, "LRECL is not defined"); return NULL; @@ -1200,7 +1200,6 @@ PTDB BSONDEF::GetTable(PGLOBAL g, MODE m) } // endif Driver } else if (Zipped) { -// if (Zipped) { #if defined(ZIP_SUPPORT) if (m == MODE_READ || m == MODE_ANY || m == MODE_ALTER) { txfp = new(g) UNZFAM(this); @@ -1226,9 +1225,9 @@ PTDB BSONDEF::GetTable(PGLOBAL g, MODE m) #endif // !GZ_SUPPORT } else if (map) txfp = new(g) MAPFAM(this); - else if (Pretty < 0) // BJsonfile + else if (Pretty < 0) { // BJsonfile txfp = new(g) BINFAM(this); - else + } else txfp = new(g) DOSFAM(this); // Txfp must be set for TDBBSN @@ -1436,7 +1435,6 @@ bool TDBBSN::OpenDB(PGLOBAL g) } // endif Use if (Pretty < 0) { -#if 0 /*******************************************************************/ /* Binary BJSON table. 
*/ /*******************************************************************/ @@ -1450,7 +1448,7 @@ bool TDBBSN::OpenDB(PGLOBAL g) if (!To_Kindex) { Txfp->Rewind(); // see comment in Work.log } else // Table is to be accessed through a sorted index table - To_Kindex->Reset(); + To_Kindex->Reset(); // TODO: NIY return false; } // endif use @@ -1469,14 +1467,12 @@ bool TDBBSN::OpenDB(PGLOBAL g) /*********************************************************************/ size_t linelen = Lrecl; - //To_Line = (char*)PlugSubAlloc(g, NULL, linelen); - //memset(To_Line, 0, linelen); + // Buffer should be the first allocated thing in G->Sarea + Txfp->AllocateBuffer(Bp->G); To_Line = Txfp->GetBuf(); + memset(To_Line, 0, linelen); + Bp->MemSave(); xtrc(1, "OpenJSN: R%hd mode=%d To_Line=%p\n", Tdb_No, Mode, To_Line); - return false; -#endif // 0 - strcpy(g->Message, "TDBBSN: Binary NIY"); - return true; } else if (TDBDOS::OpenDB(g)) return true; @@ -1548,23 +1544,14 @@ int TDBBSN::ReadDB(PGLOBAL g) rc = RC_EF; } else { -#if 0 // Here we get a movable Json binary tree - PJSON jsp; - SWAP* swp; - - jsp = (PJSON)To_Line; - swp = new(g) SWAP(G, jsp); - swp->SwapJson(jsp, false); // Restore pointers from offsets - Row = jsp; - Row = FindRow(g); + Bp->SubSet(); // Perhaps Useful when updating + Row = (PBVAL)To_Line; + Row = Bp->FindRow(g); SameRow = 0; Fpos++; M = 1; rc = RC_OK; -#endif // 0 - strcpy(g->Message, "TDBBSN: Binary NIY"); - rc = RC_FX; } // endif Pretty } // endif ReadDB diff --git a/storage/connect/tabjson.cpp b/storage/connect/tabjson.cpp index f606f4b1a00..429efea56fa 100644 --- a/storage/connect/tabjson.cpp +++ b/storage/connect/tabjson.cpp @@ -298,14 +298,6 @@ int JSONDISC::GetColumns(PGLOBAL g, PCSZ db, PCSZ dsn, PTOS topt) tjnp->SetMode(MODE_READ); // Allocate the parse work memory -#if 0 - PGLOBAL G = (PGLOBAL)PlugSubAlloc(g, NULL, sizeof(GLOBAL)); - memset(G, 0, sizeof(GLOBAL)); - G->Sarea_Size = (size_t)tdp->Lrecl * (tdp->Pretty >= 0 ? 10 : 2); - G->Sarea = PlugSubAlloc(g, NULL, G->Sarea_Size); - PlugSubSet(G->Sarea, G->Sarea_Size); - G->jump_level = 0; -#endif // 0 G = PlugInit(NULL, (size_t)tdp->Lrecl * (tdp->Pretty >= 0 ? 
10 : 2)); tjnp->SetG(G); From aa10789f472b975f3ffae2a5adc6514a879226ba Mon Sep 17 00:00:00 2001 From: Olivier Bertrand Date: Fri, 11 Dec 2020 16:34:50 +0100 Subject: [PATCH 038/150] BSON development --- storage/connect/bson.h | 2 +- storage/connect/bsonudf.cpp | 1 - storage/connect/filamtxt.cpp | 200 +++++++++++++++++++++++++++++------ storage/connect/filamtxt.h | 25 +++-- storage/connect/tabbson.cpp | 121 +++++++++++---------- storage/connect/tabdos.cpp | 3 + 6 files changed, 249 insertions(+), 103 deletions(-) diff --git a/storage/connect/bson.h b/storage/connect/bson.h index a6e160a3f3b..402981befaa 100644 --- a/storage/connect/bson.h +++ b/storage/connect/bson.h @@ -145,7 +145,7 @@ public: void SetBigint(PBVAL vlp, longlong ll); void SetFloat(PBVAL vlp, double f); void SetBool(PBVAL vlp, bool b); - void Clear(PBVAL vlp) { vlp->N = 0; vlp->Nd = 0; vlp->Next = 0; vlp->Type = TYPE_NULL; } + void Clear(PBVAL vlp) { vlp->N = 0; vlp->Nd = 0; vlp->Next = 0; } bool IsValueNull(PBVAL vlp); bool IsJson(PBVAL vlp) {return (vlp->Type == TYPE_JAR || vlp->Type == TYPE_JOB);} diff --git a/storage/connect/bsonudf.cpp b/storage/connect/bsonudf.cpp index b7a778b8387..76ecce5133b 100644 --- a/storage/connect/bsonudf.cpp +++ b/storage/connect/bsonudf.cpp @@ -1737,7 +1737,6 @@ char *bfile_bjson(UDF_INIT *initid, UDF_ARGS *args, char *result, ssize_t len, newloc; size_t lrecl, binszp; PBVAL jsp; - PBJNX bnxp; PGLOBAL g = (PGLOBAL)initid->ptr; BDOC doc(g); diff --git a/storage/connect/filamtxt.cpp b/storage/connect/filamtxt.cpp index 758a4b1d8cf..ef6d3ecafca 100644 --- a/storage/connect/filamtxt.cpp +++ b/storage/connect/filamtxt.cpp @@ -1668,13 +1668,14 @@ void BLKFAM::Rewind(void) /***********************************************************************/ /* BIN GetFileLength: returns file size in number of bytes. */ /***********************************************************************/ -int BINFAM::GetFileLength(PGLOBAL g) { +int BINFAM::GetFileLength(PGLOBAL g) +{ int len; - if (!BStream) + if (!Stream) len = TXTFAM::GetFileLength(g); else - if ((len = _filelength(_fileno(BStream))) < 0) + if ((len = _filelength(_fileno(Stream))) < 0) sprintf(g->Message, MSG(FILELEN_ERROR), "_filelength", To_File); xtrc(1, "File length=%d\n", len); @@ -1686,10 +1687,12 @@ int BINFAM::GetFileLength(PGLOBAL g) { /* This function can be called with a null argument to test the */ /* availability of Cardinality implementation (1 yes, 0 no). */ /***********************************************************************/ -int BINFAM::Cardinality(PGLOBAL g) { +int BINFAM::Cardinality(PGLOBAL g) +{ return (g) ? -1 : 0; } // end of Cardinality +#if 0 /***********************************************************************/ /* OpenTableFile: Open a DOS/UNIX table file using C standard I/Os. */ /***********************************************************************/ @@ -1713,16 +1716,16 @@ bool BINFAM::OpenTableFile(PGLOBAL g) { // Now open the file stream PlugSetPath(filename, To_File, Tdbp->GetPath()); - if (!(BStream = PlugOpenFile(g, filename, opmode))) { + if (!(Stream = PlugOpenFile(g, filename, opmode))) { if (trace(1)) htrc("%s\n", g->Message); return (mode == MODE_READ && errno == ENOENT) ? 
PushWarning(g, Tdbp) : true; - } // endif BStream + } // endif Stream if (trace(1)) - htrc("File %s open BStream=%p mode=%s\n", filename, BStream, opmode); + htrc("File %s open Stream=%p mode=%s\n", filename, Stream, opmode); To_Fb = dbuserp->Openlist; // Keep track of File block @@ -1731,12 +1734,14 @@ bool BINFAM::OpenTableFile(PGLOBAL g) { /*********************************************************************/ return AllocateBuffer(g); } // end of OpenTableFile +#endif 0 /***********************************************************************/ /* Allocate the line buffer. For mode Delete a bigger buffer has to */ /* be allocated because is it also used to move lines into the file. */ /***********************************************************************/ -bool BINFAM::AllocateBuffer(PGLOBAL g) { +bool BINFAM::AllocateBuffer(PGLOBAL g) +{ MODE mode = Tdbp->GetMode(); // Lrecl is Ok @@ -1749,6 +1754,7 @@ bool BINFAM::AllocateBuffer(PGLOBAL g) { return false; } // end of AllocateBuffer +#if 0 /***********************************************************************/ /* GetRowID: return the RowID of last read record. */ /***********************************************************************/ @@ -1767,7 +1773,7 @@ int BINFAM::GetPos(void) { /* GetNextPos: return the position of next record. */ /***********************************************************************/ int BINFAM::GetNextPos(void) { - return ftell(BStream); + return ftell(Stream); } // end of GetNextPos /***********************************************************************/ @@ -1776,7 +1782,7 @@ int BINFAM::GetNextPos(void) { bool BINFAM::SetPos(PGLOBAL g, int pos) { Fpos = pos; - if (fseek(BStream, Fpos, SEEK_SET)) { + if (fseek(Stream, Fpos, SEEK_SET)) { sprintf(g->Message, MSG(FSETPOS_ERROR), Fpos); return true; } // endif @@ -1789,7 +1795,7 @@ bool BINFAM::SetPos(PGLOBAL g, int pos) { /* Record file position in case of UPDATE or DELETE. */ /***********************************************************************/ bool BINFAM::RecordPos(PGLOBAL g) { - if ((Fpos = ftell(BStream)) < 0) { + if ((Fpos = ftell(Stream)) < 0) { sprintf(g->Message, MSG(FTELL_ERROR), 0, strerror(errno)); // strcat(g->Message, " (possible wrong ENDING option value)"); return true; @@ -1797,14 +1803,16 @@ bool BINFAM::RecordPos(PGLOBAL g) { return false; } // end of RecordPos +#endif // 0 /***********************************************************************/ /* ReadBuffer: Read one line for a text file. 
*/ /***********************************************************************/ -int BINFAM::ReadBuffer(PGLOBAL g) { +int BINFAM::ReadBuffer(PGLOBAL g) +{ int rc; - if (!BStream) + if (!Stream) return RC_EF; xtrc(2, "ReadBuffer: Tdbp=%p To_Line=%p Placed=%d\n", @@ -1823,11 +1831,11 @@ int BINFAM::ReadBuffer(PGLOBAL g) { Placed = false; xtrc(2, " About to read: bstream=%p To_Buf=%p Buflen=%d\n", - BStream, To_Buf, Buflen); + Stream, To_Buf, Buflen); // Read the prefix giving the row length - if (!fread(&Recsize, sizeof(size_t), 1, BStream)) { - if (!feof(BStream)) { + if (!fread(&Recsize, sizeof(size_t), 1, Stream)) { + if (!feof(Stream)) { strcpy(g->Message, "Error reading line prefix\n"); return RC_FX; } else @@ -1838,12 +1846,12 @@ int BINFAM::ReadBuffer(PGLOBAL g) { return RC_FX; } // endif Recsize - if (fread(To_Buf, Recsize, 1, BStream)) { + if (fread(To_Buf, Recsize, 1, Stream)) { xtrc(2, " Read: To_Buf=%p Recsize=%zd\n", To_Buf, Recsize); // memcpy(Tdbp->GetLine(), To_Buf, Recsize); num_read++; rc = RC_OK; - } else if (feof(BStream)) { + } else if (feof(Stream)) { rc = RC_EF; } else { #if defined(__WIN__) @@ -1863,23 +1871,24 @@ int BINFAM::ReadBuffer(PGLOBAL g) { /***********************************************************************/ /* WriteBuffer: File write routine for BIN access method. */ /***********************************************************************/ -int BINFAM::WriteBuffer(PGLOBAL g) { +int BINFAM::WriteBuffer(PGLOBAL g) +{ int curpos = 0; bool moved = true; /*********************************************************************/ /* Prepare writing the line. */ /*********************************************************************/ - memcpy(To_Buf, Tdbp->GetLine(), Recsize); +//memcpy(To_Buf, Tdbp->GetLine(), Recsize); /*********************************************************************/ /* Now start the writing process. */ /*********************************************************************/ - if (fwrite(&Recsize, sizeof(size_t), 1, BStream) != 1) { + if (fwrite(&Recsize, sizeof(size_t), 1, Stream) != 1) { sprintf(g->Message, "Error %d writing prefix to %s", errno, To_File); return RC_FX; - } else if (fwrite(To_Buf, Recsize, 1, BStream) != 1) { + } else if (fwrite(To_Buf, Recsize, 1, Stream) != 1) { sprintf(g->Message, "Error %d writing %zd bytes to %s", errno, Recsize, To_File); return RC_FX; @@ -1889,24 +1898,153 @@ int BINFAM::WriteBuffer(PGLOBAL g) { return RC_OK; } // end of WriteBuffer +#if 0 +/***********************************************************************/ +/* Data Base delete line routine for DOS and BLK access methods. */ +/***********************************************************************/ +int DOSFAM::DeleteRecords(PGLOBAL g, int irc) +{ + bool moved; + int curpos = ftell(Stream); + + /*********************************************************************/ + /* There is an alternative here: */ + /* 1 - use a temporary file in which are copied all not deleted */ + /* lines, at the end the original file will be deleted and */ + /* the temporary file renamed to the original file name. */ + /* 2 - directly move the not deleted lines inside the original */ + /* file, and at the end erase all trailing records. */ + /* This will be experimented. 
*/ + /*********************************************************************/ + if (trace(1)) + htrc( + "DOS DeleteDB: rc=%d UseTemp=%d curpos=%d Fpos=%d Tpos=%d Spos=%d\n", + irc, UseTemp, curpos, Fpos, Tpos, Spos); + + if (irc != RC_OK) { + /*******************************************************************/ + /* EOF: position Fpos at the end-of-file position. */ + /*******************************************************************/ + fseek(Stream, 0, SEEK_END); + Fpos = ftell(Stream); + + if (trace(1)) + htrc("Fpos placed at file end=%d\n", Fpos); + + } // endif irc + + if (Tpos == Spos) { + /*******************************************************************/ + /* First line to delete, Open temporary file. */ + /*******************************************************************/ + if (UseTemp) { + if (OpenTempFile(g)) + return RC_FX; + + } else { + /*****************************************************************/ + /* Move of eventual preceding lines is not required here. */ + /* Set the target file as being the source file itself. */ + /* Set the future Tpos, and give Spos a value to block copying. */ + /*****************************************************************/ + T_Stream = Stream; + Spos = Tpos = Fpos; + } // endif UseTemp + + } // endif Tpos == Spos + + /*********************************************************************/ + /* Move any intermediate lines. */ + /*********************************************************************/ + if (MoveIntermediateLines(g, &moved)) + return RC_FX; + + if (irc == RC_OK) { + /*******************************************************************/ + /* Reposition the file pointer and set Spos. */ + /*******************************************************************/ + if (!UseTemp || moved) + if (fseek(Stream, curpos, SEEK_SET)) { + sprintf(g->Message, MSG(FSETPOS_ERROR), 0); + return RC_FX; + } // endif + + Spos = GetNextPos(); // New start position + + if (trace(1)) + htrc("after: Tpos=%d Spos=%d\n", Tpos, Spos); + + } else { + /*******************************************************************/ + /* Last call after EOF has been reached. */ + /* The UseTemp case is treated in CloseTableFile. */ + /*******************************************************************/ + if (!UseTemp & !Abort) { + /*****************************************************************/ + /* Because the chsize functionality is only accessible with a */ + /* system call we must close the file and reopen it with the */ + /* open function (_fopen for MS ??) this is still to be checked */ + /* for compatibility with Text files and other OS's. */ + /*****************************************************************/ + char filename[_MAX_PATH]; + int h; // File handle, return code + + PlugSetPath(filename, To_File, Tdbp->GetPath()); + /*rc=*/ PlugCloseFile(g, To_Fb); + + if ((h= global_open(g, MSGID_OPEN_STRERROR, filename, O_WRONLY)) <= 0) + return RC_FX; + + /*****************************************************************/ + /* Remove extra records. 
*/ + /*****************************************************************/ +#if defined(__WIN__) + if (chsize(h, Tpos)) { + sprintf(g->Message, MSG(CHSIZE_ERROR), strerror(errno)); + close(h); + return RC_FX; + } // endif +#else + if (ftruncate(h, (off_t)Tpos)) { + sprintf(g->Message, MSG(TRUNCATE_ERROR), strerror(errno)); + close(h); + return RC_FX; + } // endif +#endif + + close(h); + + if (trace(1)) + htrc("done, h=%d irc=%d\n", h, irc); + + } // endif !UseTemp + + } // endif irc + + return RC_OK; // All is correct +} // end of DeleteRecords +#endif // 0 + /***********************************************************************/ /* Table file close routine for DOS access method. */ /***********************************************************************/ -void BINFAM::CloseTableFile(PGLOBAL g, bool abort) { - int rc; +void BINFAM::CloseTableFile(PGLOBAL g, bool abort) +{ + int rc; - Abort = abort; - rc = PlugCloseFile(g, To_Fb); - xtrc(1, "BIN Close: closing %s rc=%d\n", To_File, rc); - BStream = NULL; // So we can know whether table is open + Abort = abort; + rc = PlugCloseFile(g, To_Fb); + xtrc(1, "BIN Close: closing %s rc=%d\n", To_File, rc); + Stream = NULL; // So we can know whether table is open } // end of CloseTableFile /***********************************************************************/ /* Rewind routine for BIN access method. */ /***********************************************************************/ -void BINFAM::Rewind(void) { - if (BStream) // Can be NULL when making index on void table - rewind(BStream); +void BINFAM::Rewind(void) +{ + if (Stream) // Can be NULL when making index on void table + rewind(Stream); Rows = 0; OldBlk = CurBlk = -1; diff --git a/storage/connect/filamtxt.h b/storage/connect/filamtxt.h index 8c1fe5e7dbc..e5067b5a3e0 100644 --- a/storage/connect/filamtxt.h +++ b/storage/connect/filamtxt.h @@ -215,16 +215,16 @@ class DllExport BLKFAM : public DOSFAM { /* This is the DOS/UNIX Access Method class declaration for binary */ /* files with variable record format (BJSON) */ /***********************************************************************/ -class DllExport BINFAM : public TXTFAM { +class DllExport BINFAM : public DOSFAM { public: // Constructor - BINFAM(PDOSDEF tdp) : TXTFAM(tdp) {BStream = NULL; Recsize = 0;} - BINFAM(PBINFAM txfp) : TXTFAM(txfp) {BStream = txfp->BStream;} + BINFAM(PDOSDEF tdp) : DOSFAM(tdp) {Recsize = 0;} + BINFAM(PBINFAM txfp) : DOSFAM(txfp) {Recsize = txfp->Recsize;} // Implementation virtual AMT GetAmType(void) {return TYPE_AM_BIN;} - virtual int GetPos(void); - virtual int GetNextPos(void); +//virtual int GetPos(void); +//virtual int GetNextPos(void); virtual PTXF Duplicate(PGLOBAL g) { return (PTXF)new(g) BINFAM(this); } // Methods @@ -233,23 +233,22 @@ public: virtual int Cardinality(PGLOBAL g); virtual int MaxBlkSize(PGLOBAL g, int s) {return s;} virtual bool AllocateBuffer(PGLOBAL g); - virtual int GetRowID(void); - virtual bool RecordPos(PGLOBAL g); - virtual bool SetPos(PGLOBAL g, int recpos); +//virtual int GetRowID(void); +//virtual bool RecordPos(PGLOBAL g); +//virtual bool SetPos(PGLOBAL g, int recpos); virtual int SkipRecord(PGLOBAL g, bool header) {return 0;} - virtual bool OpenTableFile(PGLOBAL g); +//virtual bool OpenTableFile(PGLOBAL g); virtual int ReadBuffer(PGLOBAL g); virtual int WriteBuffer(PGLOBAL g); - virtual int DeleteRecords(PGLOBAL g, int irc) {return RC_FX;} +//virtual int DeleteRecords(PGLOBAL g, int irc); virtual void CloseTableFile(PGLOBAL g, bool abort); virtual void Rewind(void); -protected: 
+//protected: //virtual int InitDelete(PGLOBAL g, int fpos, int spos); // Members - FILE *BStream; // Points to Bin file structure - size_t Recsize; // Length of last read record + size_t Recsize; // Length of last read or next written record }; // end of class BINFAM #endif // __FILAMTXT_H diff --git a/storage/connect/tabbson.cpp b/storage/connect/tabbson.cpp index 90a49aac1d5..c1647604b63 100644 --- a/storage/connect/tabbson.cpp +++ b/storage/connect/tabbson.cpp @@ -1012,7 +1012,7 @@ PBVAL BCUTIL::GetRow(PGLOBAL g) PBVAL nwr, row = Tp->Row; for (int i = 0; i < nod && row; i++) { - if (nodes[i + 1].Op == OP_XX) + if (i < nod-1 && nodes[i+1].Op == OP_XX) break; else switch (row->Type) { case TYPE_JOB: @@ -1411,29 +1411,31 @@ int TDBBSN::EstimatedLength(void) /***********************************************************************/ bool TDBBSN::OpenDB(PGLOBAL g) { + TUSE use = Use; + + if (Pretty < 0 && Mode == MODE_UPDATE) { + sprintf(g->Message, "Mode %d NIY for Bjson", Mode); + return true; + } // endif Mode + if (Use == USE_OPEN) { /*******************************************************************/ - /* Table already open replace it at its beginning. */ + /* Table already open replace it at its beginning. ??? */ /*******************************************************************/ Fpos = -1; NextSame = 0; SameRow = 0; - } else { - /*******************************************************************/ - /* First opening. */ - /*******************************************************************/ - if (Mode == MODE_INSERT) - switch (Jmode) { - case MODE_OBJECT: Row = Bp->NewVal(TYPE_JOB); break; - case MODE_ARRAY: Row = Bp->NewVal(TYPE_JAR); break; - case MODE_VALUE: Row = Bp->NewVal(TYPE_JVAL); break; - default: - sprintf(g->Message, "Invalid Jmode %d", Jmode); - return true; - } // endswitch Jmode - } // endif Use + /*********************************************************************/ + /* Open according to logical input/output mode required. */ + /*********************************************************************/ + if (TDBDOS::OpenDB(g)) + return true; + + if (use == USE_OPEN) + return false; + if (Pretty < 0) { /*******************************************************************/ /* Binary BJSON table. */ @@ -1441,45 +1443,45 @@ bool TDBBSN::OpenDB(PGLOBAL g) xtrc(1, "JSN OpenDB: tdbp=%p tdb=R%d use=%d mode=%d\n", this, Tdb_No, Use, Mode); - if (Use == USE_OPEN) { - /*******************************************************************/ - /* Table already open, just replace it at its beginning. */ - /*******************************************************************/ - if (!To_Kindex) { - Txfp->Rewind(); // see comment in Work.log - } else // Table is to be accessed through a sorted index table - To_Kindex->Reset(); // TODO: NIY - - return false; - } // endif use - /*********************************************************************/ - /* Open according to logical input/output mode required. */ - /* Use conventionnal input/output functions. */ - /*********************************************************************/ - if (Txfp->OpenTableFile(g)) - return true; - - Use = USE_OPEN; // Do it now in case we are recursively called - - /*********************************************************************/ - /* Lrecl is Ok. */ + /* Lrecl is Ok. 
*/ /*********************************************************************/ size_t linelen = Lrecl; - // Buffer should be the first allocated thing in G->Sarea + // Buffer must be set to G->Sarea Txfp->AllocateBuffer(Bp->G); + + if (Mode == MODE_INSERT) + Bp->SubSet(true); + else + Bp->MemSave(); + To_Line = Txfp->GetBuf(); memset(To_Line, 0, linelen); - Bp->MemSave(); xtrc(1, "OpenJSN: R%hd mode=%d To_Line=%p\n", Tdb_No, Mode, To_Line); - } else if (TDBDOS::OpenDB(g)) - return true; + } // endif Pretty + + /***********************************************************************/ + /* First opening. */ + /***********************************************************************/ + if (Mode == MODE_INSERT) { + switch (Jmode) { + case MODE_OBJECT: Row = Bp->NewVal(TYPE_JOB); break; + case MODE_ARRAY: Row = Bp->NewVal(TYPE_JAR); break; + case MODE_VALUE: Row = Bp->NewVal(TYPE_JVAL); break; + default: + sprintf(g->Message, "Invalid Jmode %d", Jmode); + return true; + } // endswitch Jmode + + Bp->MemSave(); + } // endif Mode if (Xcol) To_Filter = NULL; // Imcompatible return false; + } // end of OpenDB /***********************************************************************/ @@ -1564,26 +1566,30 @@ int TDBBSN::ReadDB(PGLOBAL g) /***********************************************************************/ bool TDBBSN::PrepareWriting(PGLOBAL g) { - PSZ s; + if (Pretty >= 0) { + PSZ s; - if (!(Top = Bp->MakeTopTree(g, Row))) - return true; + if (!(Top = Bp->MakeTopTree(g, Row))) + return true; - if ((s = Bp->SerialVal(g, Top, Pretty))) { - if (Comma) - strcat(s, ","); + if ((s = Bp->SerialVal(g, Top, Pretty))) { + if (Comma) + strcat(s, ","); - if ((signed)strlen(s) > Lrecl) { - strncpy(To_Line, s, Lrecl); - sprintf(g->Message, "Line truncated (lrecl=%d)", Lrecl); - return PushWarning(g, this); + if ((signed)strlen(s) > Lrecl) { + strncpy(To_Line, s, Lrecl); + sprintf(g->Message, "Line truncated (lrecl=%d)", Lrecl); + return PushWarning(g, this); + } else + strcpy(To_Line, s); + + return false; } else - strcpy(To_Line, s); - - return false; + return true; } else - return true; - + ((BINFAM*)Txfp)->Recsize = ((size_t)PlugSubAlloc(Bp->G, NULL, 0) + - (size_t)To_Line); + return false; } // end of PrepareWriting /***********************************************************************/ @@ -2034,6 +2040,7 @@ void BSONCOL::WriteColumn(PGLOBAL g) else Cp->AddArrayValue(row, jsp); + break; case TYPE_JOB: if (Nodes[Nod - 1].Key) Cp->SetKeyValue(row, jsp, Nodes[Nod - 1].Key); diff --git a/storage/connect/tabdos.cpp b/storage/connect/tabdos.cpp index a2b5204cd0a..8c57157f5a9 100644 --- a/storage/connect/tabdos.cpp +++ b/storage/connect/tabdos.cpp @@ -2148,6 +2148,9 @@ bool TDBDOS::OpenDB(PGLOBAL g) } // endif use if (Mode == MODE_DELETE && !Next && Txfp->GetAmType() != TYPE_AM_DOS +#if defined(BSON_SUPPORT) + && Txfp->GetAmType() != TYPE_AM_BIN +#endif // BSON_SUPPORT && Txfp->GetAmType() != TYPE_AM_MGO) { // Delete all lines. Not handled in MAP or block mode Txfp = new(g) DOSFAM((PDOSDEF)To_Def); From ceacffbb3b9504c88d0649d472396fa42397a62c Mon Sep 17 00:00:00 2001 From: Olivier Bertrand Date: Tue, 15 Dec 2020 12:28:03 +0100 Subject: [PATCH 039/150] - Fix pretty=2 Tabjson bug on INSERT. Occuring when inserting more than one line in one statement. 
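    The json.cpp hunk of this patch carries the fix: string values are now
    duplicated into the CONNECT memory pool instead of keeping a pointer into
    the source VALUE object's buffer, which is presumably reused for the next
    inserted row. A minimal before/after sketch of the change in
    JVALUE::SetValue:

      // Before: Strp borrowed valp's internal buffer; with a multi-row INSERT
      // the next row could overwrite it before the document was serialized.
      Strp = valp->GetCharValue();

      // After: each value keeps its own stable copy in the plug memory pool.
      Strp = PlugDup(g, valp->GetCharValue());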
modified: storage/connect/json.cpp - Fix a wrong if statement modified: storage/connect/tabjson.cpp - Continue BSON implementation modified: storage/connect/bson.cpp modified: storage/connect/bson.h modified: storage/connect/filamtxt.cpp modified: storage/connect/filamtxt.h modified: storage/connect/tabbson.cpp modified: storage/connect/tabbson.h - No need anymore deleted: storage/connect/mysql-test/connect/r/bson.result deleted: storage/connect/mysql-test/connect/t/bson.test --- storage/connect/bson.cpp | 33 +- storage/connect/bson.h | 1 + storage/connect/filamtxt.cpp | 104 +++- storage/connect/filamtxt.h | 10 +- storage/connect/json.cpp | 4 +- .../connect/mysql-test/connect/r/bson.result | 517 ------------------ .../connect/mysql-test/connect/t/bson.test | 294 ---------- storage/connect/tabbson.cpp | 127 +++-- storage/connect/tabbson.h | 5 +- storage/connect/tabjson.cpp | 2 +- 10 files changed, 189 insertions(+), 908 deletions(-) delete mode 100644 storage/connect/mysql-test/connect/r/bson.result delete mode 100644 storage/connect/mysql-test/connect/t/bson.test diff --git a/storage/connect/bson.cpp b/storage/connect/bson.cpp index a291dd69df6..08f4dca46f2 100644 --- a/storage/connect/bson.cpp +++ b/storage/connect/bson.cpp @@ -872,7 +872,18 @@ void BJSON::SubSet(bool b) } // end of SubSet -/* ------------------------ Bobject functions ------------------------ */ +/*********************************************************************************/ +/* Set the beginning of suballocations. */ +/*********************************************************************************/ +void BJSON::MemSet(size_t size) +{ + PPOOLHEADER pph = (PPOOLHEADER)G->Sarea; + + pph->To_Free = size + sizeof(POOLHEADER); + pph->FreeBlk = G->Sarea_Size - pph->To_Free; +} // end of MemSet + + /* ------------------------ Bobject functions ------------------------ */ /***********************************************************************/ /* Sub-allocate and initialize a BPAIR. 
*/ @@ -1187,18 +1198,17 @@ void BJSON::MergeArray(PBVAL bap1, PBVAL bap2) void BJSON::SetArrayValue(PBVAL bap, PBVAL nvp, int n) { CheckType(bap, TYPE_JAR); + int i = 0; PBVAL bvp = NULL, pvp = NULL; - if (bap->To_Val) { - for (int i = 0; bvp = GetArray(bap); i++, bvp = GetNext(bvp)) + if (bap->To_Val) + for (bvp = GetArray(bap); bvp; i++, bvp = GetNext(bvp)) if (i == n) { SetValueVal(bvp, nvp); return; } else pvp = bvp; - } // endif bap - if (!bvp) AddArrayValue(bap, MOF(nvp)); @@ -1264,7 +1274,8 @@ void BJSON::DeleteValue(PBVAL bap, int n) bap->Nd--; break; - } // endif i + } else + pvp = bvp; } // end of DeleteValue @@ -1587,17 +1598,17 @@ PBVAL BJSON::SetValue(PBVAL vlp, PVAL valp) } else switch (valp->GetType()) { case TYPE_DATE: if (((DTVAL*)valp)->IsFormatted()) - vlp->To_Val = MOF(PlugDup(G, valp->GetCharValue())); + vlp->To_Val = DupStr(valp->GetCharValue()); else { char buf[32]; - vlp->To_Val = MOF(PlugDup(G, valp->GetCharString(buf))); + vlp->To_Val = DupStr(valp->GetCharString(buf)); } // endif Formatted vlp->Type = TYPE_DTM; break; case TYPE_STRING: - vlp->To_Val = MOF(PlugDup(G, valp->GetCharValue())); + vlp->To_Val = DupStr(valp->GetCharValue()); vlp->Type = TYPE_STRG; break; case TYPE_DOUBLE: @@ -1608,7 +1619,7 @@ PBVAL BJSON::SetValue(PBVAL vlp, PVAL valp) vlp->F = (float)valp->GetFloatValue(); vlp->Type = TYPE_FLOAT; } else { - double *dp = (double*)PlugSubAlloc(G, NULL, sizeof(double)); + double *dp = (double*)BsonSubAlloc(sizeof(double)); *dp = valp->GetFloatValue(); vlp->To_Val = MOF(dp); @@ -1629,7 +1640,7 @@ PBVAL BJSON::SetValue(PBVAL vlp, PVAL valp) vlp->N = valp->GetIntValue(); vlp->Type = TYPE_INTG; } else { - longlong* llp = (longlong*)PlugSubAlloc(G, NULL, sizeof(longlong)); + longlong* llp = (longlong*)BsonSubAlloc(sizeof(longlong)); *llp = valp->GetBigintValue(); vlp->To_Val = MOF(llp); diff --git a/storage/connect/bson.h b/storage/connect/bson.h index 402981befaa..435e355d249 100644 --- a/storage/connect/bson.h +++ b/storage/connect/bson.h @@ -76,6 +76,7 @@ public: void* GetBase(void) { return Base; } void SubSet(bool b = false); void MemSave(void) {G->Saved_Size = ((PPOOLHEADER)G->Sarea)->To_Free;} + void MemSet(size_t size); void GetMsg(PGLOBAL g) { if (g != G) strcpy(g->Message, G->Message); } // SubAlloc functions diff --git a/storage/connect/filamtxt.cpp b/storage/connect/filamtxt.cpp index ef6d3ecafca..35f1102cf5d 100644 --- a/storage/connect/filamtxt.cpp +++ b/storage/connect/filamtxt.cpp @@ -805,8 +805,8 @@ int DOSFAM::ReadBuffer(PGLOBAL g) Placed = false; if (trace(2)) - htrc(" About to read: stream=%p To_Buf=%p Buflen=%d\n", - Stream, To_Buf, Buflen); + htrc(" About to read: stream=%p To_Buf=%p Buflen=%d Fpos=%d\n", + Stream, To_Buf, Buflen, Fpos); if (fgets(To_Buf, Buflen, Stream)) { p = To_Buf + strlen(To_Buf) - 1; @@ -1665,6 +1665,7 @@ void BLKFAM::Rewind(void) /* --------------------------- Class BINFAM -------------------------- */ +#if 0 /***********************************************************************/ /* BIN GetFileLength: returns file size in number of bytes. */ /***********************************************************************/ @@ -1692,7 +1693,6 @@ int BINFAM::Cardinality(PGLOBAL g) return (g) ? -1 : 0; } // end of Cardinality -#if 0 /***********************************************************************/ /* OpenTableFile: Open a DOS/UNIX table file using C standard I/Os. 
*/ /***********************************************************************/ @@ -1742,16 +1742,34 @@ bool BINFAM::OpenTableFile(PGLOBAL g) { /***********************************************************************/ bool BINFAM::AllocateBuffer(PGLOBAL g) { - MODE mode = Tdbp->GetMode(); + MODE mode = Tdbp->GetMode(); - // Lrecl is Ok - Buflen = Lrecl; + // Lrecl is Ok + Buflen = Lrecl; - if (trace(1)) - htrc("SubAllocating a buffer of %d bytes\n", Buflen); + // Buffer will be allocated separately + if (mode == MODE_ANY) { + xtrc(1, "SubAllocating a buffer of %d bytes\n", Buflen); + To_Buf = (char*)PlugSubAlloc(g, NULL, Buflen); + } else if (UseTemp || mode == MODE_DELETE) { + // Have a big buffer to move lines + Dbflen = Buflen * DOS_BUFF_LEN; + DelBuf = PlugSubAlloc(g, NULL, Dbflen); + } // endif mode - To_Buf = (char*)PlugSubAlloc(g, NULL, Buflen); - return false; + return false; +#if 0 + MODE mode = Tdbp->GetMode(); + + // Lrecl is Ok + Dbflen = Buflen = Lrecl; + + if (trace(1)) + htrc("SubAllocating a buffer of %d bytes\n", Buflen); + + DelBuf = To_Buf = (char*)PlugSubAlloc(g, NULL, Buflen); + return false; +#endif // 0 } // end of AllocateBuffer #if 0 @@ -1830,8 +1848,8 @@ int BINFAM::ReadBuffer(PGLOBAL g) } else Placed = false; - xtrc(2, " About to read: bstream=%p To_Buf=%p Buflen=%d\n", - Stream, To_Buf, Buflen); + xtrc(2, " About to read: bstream=%p To_Buf=%p Buflen=%d Fpos=%d\n", + Stream, To_Buf, Buflen, Fpos); // Read the prefix giving the row length if (!fread(&Recsize, sizeof(size_t), 1, Stream)) { @@ -1848,7 +1866,6 @@ int BINFAM::ReadBuffer(PGLOBAL g) if (fread(To_Buf, Recsize, 1, Stream)) { xtrc(2, " Read: To_Buf=%p Recsize=%zd\n", To_Buf, Recsize); - // memcpy(Tdbp->GetLine(), To_Buf, Recsize); num_read++; rc = RC_OK; } else if (feof(Stream)) { @@ -1876,7 +1893,51 @@ int BINFAM::WriteBuffer(PGLOBAL g) int curpos = 0; bool moved = true; - /*********************************************************************/ + // T_Stream is the temporary stream or the table file stream itself + if (!T_Stream) { + if (UseTemp && Tdbp->GetMode() == MODE_UPDATE) { + if (OpenTempFile(g)) + return RC_FX; + + } else + T_Stream = Stream; + + } // endif T_Stream + + if (Tdbp->GetMode() == MODE_UPDATE) { + /*******************************************************************/ + /* Here we simply rewrite a record on itself. There are two cases */ + /* were another method should be used, a/ when Update apply to */ + /* the whole file, b/ when updating the last field of a variable */ + /* length file. The method could be to rewrite a new file, then */ + /* to erase the old one and rename the new updated file. */ + /*******************************************************************/ + curpos = ftell(Stream); + + if (trace(1)) + htrc("Last : %d cur: %d\n", Fpos, curpos); + + if (UseTemp) { + /*****************************************************************/ + /* We are using a temporary file. */ + /* Before writing the updated record, we must eventually copy */ + /* all the intermediate records that have not been updated. */ + /*****************************************************************/ + if (MoveIntermediateLines(g, &moved)) + return RC_FX; + + Spos = curpos; // New start position + } else + // Update is directly written back into the file, + // with this (fast) method, record size cannot change. 
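  // On-disk record layout (see ReadBuffer above and the two fwrite calls
  // below): a size_t length prefix followed by Recsize bytes of the binary
  // (BJSON) tree. An in-place rewrite at Fpos therefore only works while the
  // new Recsize still matches the prefix already stored there.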
+ if (fseek(Stream, Fpos, SEEK_SET)) { + sprintf(g->Message, MSG(FSETPOS_ERROR), 0); + return RC_FX; + } // endif + + } // endif mode + + /*********************************************************************/ /* Prepare writing the line. */ /*********************************************************************/ //memcpy(To_Buf, Tdbp->GetLine(), Recsize); @@ -1884,17 +1945,23 @@ int BINFAM::WriteBuffer(PGLOBAL g) /*********************************************************************/ /* Now start the writing process. */ /*********************************************************************/ - if (fwrite(&Recsize, sizeof(size_t), 1, Stream) != 1) { + if (fwrite(&Recsize, sizeof(size_t), 1, T_Stream) != 1) { sprintf(g->Message, "Error %d writing prefix to %s", errno, To_File); return RC_FX; - } else if (fwrite(To_Buf, Recsize, 1, Stream) != 1) { + } else if (fwrite(To_Buf, Recsize, 1, T_Stream) != 1) { sprintf(g->Message, "Error %d writing %zd bytes to %s", errno, Recsize, To_File); return RC_FX; } // endif fwrite - xtrc(1, "write done\n"); + if (Tdbp->GetMode() == MODE_UPDATE && moved) + if (fseek(Stream, curpos, SEEK_SET)) { + sprintf(g->Message, MSG(FSEEK_ERROR), strerror(errno)); + return RC_FX; + } // endif + + xtrc(1, "Binary write done\n"); return RC_OK; } // end of WriteBuffer @@ -2023,7 +2090,6 @@ int DOSFAM::DeleteRecords(PGLOBAL g, int irc) return RC_OK; // All is correct } // end of DeleteRecords -#endif // 0 /***********************************************************************/ /* Table file close routine for DOS access method. */ @@ -2049,4 +2115,4 @@ void BINFAM::Rewind(void) Rows = 0; OldBlk = CurBlk = -1; } // end of Rewind - +#endif // 0 diff --git a/storage/connect/filamtxt.h b/storage/connect/filamtxt.h index e5067b5a3e0..353e06ad3bd 100644 --- a/storage/connect/filamtxt.h +++ b/storage/connect/filamtxt.h @@ -229,20 +229,20 @@ public: // Methods //virtual void Reset(void) {TXTFAM::Reset();} - virtual int GetFileLength(PGLOBAL g); - virtual int Cardinality(PGLOBAL g); +//virtual int GetFileLength(PGLOBAL g); +//virtual int Cardinality(PGLOBAL g); virtual int MaxBlkSize(PGLOBAL g, int s) {return s;} virtual bool AllocateBuffer(PGLOBAL g); //virtual int GetRowID(void); //virtual bool RecordPos(PGLOBAL g); //virtual bool SetPos(PGLOBAL g, int recpos); - virtual int SkipRecord(PGLOBAL g, bool header) {return 0;} + virtual int SkipRecord(PGLOBAL g, bool header) {return RC_OK;} //virtual bool OpenTableFile(PGLOBAL g); virtual int ReadBuffer(PGLOBAL g); virtual int WriteBuffer(PGLOBAL g); //virtual int DeleteRecords(PGLOBAL g, int irc); - virtual void CloseTableFile(PGLOBAL g, bool abort); - virtual void Rewind(void); +//virtual void CloseTableFile(PGLOBAL g, bool abort); +//virtual void Rewind(void); //protected: //virtual int InitDelete(PGLOBAL g, int fpos, int spos); diff --git a/storage/connect/json.cpp b/storage/connect/json.cpp index bf7ff7170ff..7c1748e0fde 100644 --- a/storage/connect/json.cpp +++ b/storage/connect/json.cpp @@ -1665,7 +1665,7 @@ void JVALUE::SetValue(PGLOBAL g, PVAL valp) } else switch (valp->GetType()) { case TYPE_DATE: if (((DTVAL*)valp)->IsFormatted()) - Strp = valp->GetCharValue(); + Strp = PlugDup(g, valp->GetCharValue()); else { char buf[32]; @@ -1675,7 +1675,7 @@ void JVALUE::SetValue(PGLOBAL g, PVAL valp) DataType = TYPE_DTM; break; case TYPE_STRING: - Strp = valp->GetCharValue(); + Strp = PlugDup(g, valp->GetCharValue()); DataType = TYPE_STRG; break; case TYPE_DOUBLE: diff --git a/storage/connect/mysql-test/connect/r/bson.result 
b/storage/connect/mysql-test/connect/r/bson.result deleted file mode 100644 index fd15e020aac..00000000000 --- a/storage/connect/mysql-test/connect/r/bson.result +++ /dev/null @@ -1,517 +0,0 @@ -# -# Testing doc samples -# -CREATE TABLE t1 -( -ISBN CHAR(15), -LANG CHAR(2), -SUBJECT CHAR(32), -AUTHOR CHAR(64), -TITLE CHAR(32), -TRANSLATION CHAR(32), -TRANSLATOR CHAR(80), -PUBLISHER CHAR(32), -DATEPUB int(4) -) ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='biblio.json'; -SELECT * FROM t1; -ISBN LANG SUBJECT AUTHOR TITLE TRANSLATION TRANSLATOR PUBLISHER DATEPUB -9782212090819 fr applications Jean-Christophe Bernadac, Franois Knab Construire une application XML NULL NULL Eyrolles Paris 1999 -9782840825685 fr applications William J. Pardi XML en Action adapt de l'anglais par James Guerin Microsoft Press Paris 1999 -DROP TABLE t1; -# -# Testing Jpath. Get the number of authors -# -CREATE TABLE t1 -( -ISBN CHAR(15), -Language CHAR(2) JPATH='$.LANG', -Subject CHAR(32) JPATH='$.SUBJECT', -Authors INT(2) JPATH='$.AUTHOR[#]', -Title CHAR(32) JPATH='$.TITLE', -Translation CHAR(32) JPATH='$.TRANSLATION', -Translator CHAR(80) JPATH='$.TRANSLATOR', -Publisher CHAR(20) JPATH='$.PUBLISHER.NAME', -Location CHAR(16) JPATH='$.PUBLISHER.PLACE', -Year int(4) JPATH='$.DATEPUB' -) -ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='biblio.json'; -SELECT * FROM t1; -ISBN Language Subject Authors Title Translation Translator Publisher Location Year -9782212090819 fr applications 2 Construire une application XML NULL NULL Eyrolles Paris 1999 -9782840825685 fr applications 1 XML en Action adapt de l'anglais par James Guerin Microsoft Press Paris 1999 -DROP TABLE t1; -# -# Concatenates the authors -# -CREATE TABLE t1 -( -ISBN CHAR(15), -Language CHAR(2) JPATH='$.LANG', -Subject CHAR(32) JPATH='$.SUBJECT', -AuthorFN CHAR(128) JPATH='$.AUTHOR[" and "].FIRSTNAME', -AuthorLN CHAR(128) JPATH='$.AUTHOR[" and "].LASTNAME', -Title CHAR(32) JPATH='$.TITLE', -Translation CHAR(32) JPATH='$.TRANSLATION', -Translator CHAR(80) JPATH='$.TRANSLATOR', -Publisher CHAR(20) JPATH='$.PUBLISHER.NAME', -Location CHAR(16) JPATH='$.PUBLISHER.PLACE', -Year int(4) JPATH='$.DATEPUB' -) -ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='biblio.json'; -SELECT * FROM t1; -ISBN Language Subject AuthorFN AuthorLN Title Translation Translator Publisher Location Year -9782212090819 fr applications Jean-Christophe and Franois Bernadac and Knab Construire une application XML NULL NULL Eyrolles Paris 1999 -9782840825685 fr applications William J. Pardi XML en Action adapt de l'anglais par James Guerin Microsoft Press Paris 1999 -DROP TABLE t1; -# -# Testing expanding authors -# -CREATE TABLE t1 -( -ISBN CHAR(15), -Language CHAR(2) JPATH='$.LANG', -Subject CHAR(32) JPATH='$.SUBJECT', -AuthorFN CHAR(128) JPATH='$.AUTHOR[*].FIRSTNAME', -AuthorLN CHAR(128) JPATH='$.AUTHOR[*].LASTNAME', -Title CHAR(32) JPATH='$.TITLE', -Translation CHAR(32) JPATH='$.TRANSLATION', -Translator CHAR(80) JPATH='$.TRANSLATOR', -Publisher CHAR(20) JPATH='$.PUBLISHER.NAME', -Location CHAR(16) JPATH='$.PUBLISHER.PLACE', -Year int(4) JPATH='$.DATEPUB' -) -ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='biblio.json'; -SELECT * FROM t1; -ISBN Language Subject AuthorFN AuthorLN Title Translation Translator Publisher Location Year -9782212090819 fr applications Jean-Christophe Bernadac Construire une application XML NULL NULL Eyrolles Paris 1999 -9782212090819 fr applications Franois Knab Construire une application XML NULL NULL Eyrolles Paris 1999 -9782840825685 fr applications William J. 
Pardi XML en Action adapt de l'anglais par James Guerin Microsoft Press Paris 1999 -UPDATE t1 SET AuthorFN = 'Philippe' WHERE AuthorLN = 'Knab'; -SELECT * FROM t1 WHERE ISBN = '9782212090819'; -ISBN Language Subject AuthorFN AuthorLN Title Translation Translator Publisher Location Year -9782212090819 fr applications Jean-Christophe Bernadac Construire une application XML NULL NULL Eyrolles Paris 1999 -9782212090819 fr applications Philippe Knab Construire une application XML NULL NULL Eyrolles Paris 1999 -# -# To add an author a new table must be created -# -CREATE TABLE t2 ( -FIRSTNAME CHAR(32), -LASTNAME CHAR(32)) -ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='biblio.json' OPTION_LIST='Object=$[1].AUTHOR'; -SELECT * FROM t2; -FIRSTNAME LASTNAME -William J. Pardi -INSERT INTO t2 VALUES('Charles','Dickens'); -SELECT * FROM t1; -ISBN Language Subject AuthorFN AuthorLN Title Translation Translator Publisher Location Year -9782212090819 fr applications Jean-Christophe Bernadac Construire une application XML NULL NULL Eyrolles Paris 1999 -9782212090819 fr applications Philippe Knab Construire une application XML NULL NULL Eyrolles Paris 1999 -9782840825685 fr applications William J. Pardi XML en Action adapt de l'anglais par James Guerin Microsoft Press Paris 1999 -9782840825685 fr applications Charles Dickens XML en Action adapt de l'anglais par James Guerin Microsoft Press Paris 1999 -DROP TABLE t1; -DROP TABLE t2; -# -# Check the biblio file has the good format -# -CREATE TABLE t1 -( -line char(255) -) -ENGINE=CONNECT TABLE_TYPE=DOS FILE_NAME='biblio.json'; -SELECT * FROM t1; -line -[ - { - "ISBN": "9782212090819", - "LANG": "fr", - "SUBJECT": "applications", - "AUTHOR": [ - { - "FIRSTNAME": "Jean-Christophe", - "LASTNAME": "Bernadac" - }, - { - "FIRSTNAME": "Philippe", - "LASTNAME": "Knab" - } - ], - "TITLE": "Construire une application XML", - "PUBLISHER": { - "NAME": "Eyrolles", - "PLACE": "Paris" - }, - "DATEPUB": 1999 - }, - { - "ISBN": "9782840825685", - "LANG": "fr", - "SUBJECT": "applications", - "AUTHOR": [ - { - "FIRSTNAME": "William J.", - "LASTNAME": "Pardi" - }, - { - "FIRSTNAME": "Charles", - "LASTNAME": "Dickens" - } - ], - "TITLE": "XML en Action", - "TRANSLATION": "adapt de l'anglais par", - "TRANSLATOR": { - "FIRSTNAME": "James", - "LASTNAME": "Guerin" - }, - "PUBLISHER": { - "NAME": "Microsoft Press", - "PLACE": "Paris" - }, - "DATEPUB": 1999 - } -] -DROP TABLE t1; -# -# Testing a pretty=0 file -# -CREATE TABLE t1 -( -ISBN CHAR(15) NOT NULL, -Language CHAR(2) JPATH='$.LANG', -Subject CHAR(32) JPATH='$.SUBJECT', -AuthorFN CHAR(128) JPATH='$.AUTHOR[*].FIRSTNAME', -AuthorLN CHAR(128) JPATH='$.AUTHOR[*].LASTNAME', -Title CHAR(32) JPATH='$.TITLE', -Translation CHAR(32) JPATH='$.TRANSLATED.PREFIX', -TranslatorFN CHAR(80) JPATH='$.TRANSLATED.TRANSLATOR.FIRSTNAME', -TranslatorLN CHAR(80) JPATH='$.TRANSLATED.TRANSLATOR.LASTNAME', -Publisher CHAR(20) JPATH='$.PUBLISHER.NAME', -Location CHAR(16) JPATH='$.PUBLISHER.PLACE', -Year int(4) JPATH='$.DATEPUB', -INDEX IX(ISBN) -) -ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='bib0.json' LRECL=320 OPTION_LIST='Pretty=0'; -SHOW INDEX FROM t1; -Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment -t1 1 IX 1 ISBN A NULL NULL NULL XINDEX -SELECT * FROM t1; -ISBN Language Subject AuthorFN AuthorLN Title Translation TranslatorFN TranslatorLN Publisher Location Year -9782212090819 fr applications Jean-Michel Bernadac Construire une application XML NULL NULL NULL Eyrolles Paris 1999 
-9782212090819 fr applications Franois Knab Construire une application XML NULL NULL NULL Eyrolles Paris 1999 -9782840825685 fr applications William J. Pardi XML en Action adapt de l'anglais par James Guerin Microsoft Press Paris 2001 -DESCRIBE SELECT * FROM t1 WHERE ISBN = '9782212090819'; -id select_type table type possible_keys key key_len ref rows Extra -1 SIMPLE t1 ref IX IX 15 const 1 Using where -UPDATE t1 SET AuthorFN = 'Philippe' WHERE ISBN = '9782212090819'; -ERROR HY000: Got error 122 'Cannot write expanded column when Pretty is not 2' from CONNECT -DROP TABLE t1; -# -# A file with 2 arrays -# -CREATE TABLE t1 ( -WHO CHAR(12), -WEEK INT(2) JPATH='$.WEEK[*].NUMBER', -WHAT CHAR(32) JPATH='$.WEEK[].EXPENSE["+"].WHAT', -AMOUNT DOUBLE(8,2) JPATH='$.WEEK[].EXPENSE[+].AMOUNT') -ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='expense.json'; -SELECT * FROM t1; -WHO WEEK WHAT AMOUNT -Joe 3 Beer+Food+Food+Car 69.00 -Joe 4 Beer+Beer+Food+Food+Beer 83.00 -Joe 5 Beer+Food 26.00 -Beth 3 Beer 16.00 -Beth 4 Food+Beer 32.00 -Beth 5 Food+Beer 32.00 -Janet 3 Car+Food+Beer 55.00 -Janet 4 Car 17.00 -Janet 5 Beer+Car+Beer+Food 57.00 -DROP TABLE t1; -# -# Now it can be fully expanded -# -CREATE TABLE t1 ( -WHO CHAR(12), -WEEK INT(2) JPATH='$.WEEK[*].NUMBER', -WHAT CHAR(32) JPATH='$.WEEK[*].EXPENSE[*].WHAT', -AMOUNT DOUBLE(8,2) JPATH='$.WEEK[*].EXPENSE[*].AMOUNT') -ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='expense.json'; -SELECT * FROM t1; -WHO WEEK WHAT AMOUNT -Joe 3 Beer 18.00 -Joe 3 Food 12.00 -Joe 3 Food 19.00 -Joe 3 Car 20.00 -Joe 4 Beer 19.00 -Joe 4 Beer 16.00 -Joe 4 Food 17.00 -Joe 4 Food 17.00 -Joe 4 Beer 14.00 -Joe 5 Beer 14.00 -Joe 5 Food 12.00 -Beth 3 Beer 16.00 -Beth 4 Food 17.00 -Beth 4 Beer 15.00 -Beth 5 Food 12.00 -Beth 5 Beer 20.00 -Janet 3 Car 19.00 -Janet 3 Food 18.00 -Janet 3 Beer 18.00 -Janet 4 Car 17.00 -Janet 5 Beer 14.00 -Janet 5 Car 12.00 -Janet 5 Beer 19.00 -Janet 5 Food 12.00 -DROP TABLE t1; -# -# A table showing many calculated results -# -CREATE TABLE t1 ( -WHO CHAR(12) NOT NULL, -WEEKS CHAR(12) NOT NULL JPATH='$.WEEK[", "].NUMBER', -SUMS CHAR(64) NOT NULL JPATH='$.WEEK["+"].EXPENSE[+].AMOUNT', -SUM DOUBLE(8,2) NOT NULL JPATH='$.WEEK[+].EXPENSE[+].AMOUNT', -AVGS CHAR(64) NOT NULL JPATH='$.WEEK["+"].EXPENSE[!].AMOUNT', -SUMAVG DOUBLE(8,2) NOT NULL JPATH='$.WEEK[+].EXPENSE[!].AMOUNT', -AVGSUM DOUBLE(8,2) NOT NULL JPATH='$.WEEK[!].EXPENSE[+].AMOUNT', -AVGAVG DOUBLE(8,2) NOT NULL JPATH='$.WEEK[!].EXPENSE[!].AMOUNT', -AVERAGE DOUBLE(8,2) NOT NULL JPATH='$.WEEK[!].EXPENSE[*].AMOUNT') -ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='expense.json'; -SELECT * FROM t1; -WHO WEEKS SUMS SUM AVGS SUMAVG AVGSUM AVGAVG AVERAGE -Joe 3, 4, 5 69.00+83.00+26.00 178.00 17.25+16.60+13.00 46.85 59.33 15.62 16.18 -Beth 3, 4, 5 16.00+32.00+32.00 80.00 16.00+16.00+16.00 48.00 26.67 16.00 16.00 -Janet 3, 4, 5 55.00+17.00+57.00 129.00 18.33+17.00+14.25 49.58 43.00 16.53 16.12 -DROP TABLE t1; -# -# Expand expense in 3 one week tables -# -CREATE TABLE t2 ( -WHO CHAR(12), -WEEK INT(2) JPATH='$.WEEK[0].NUMBER', -WHAT CHAR(32) JPATH='$.WEEK[0].EXPENSE[*].WHAT', -AMOUNT DOUBLE(8,2) JPATH='$.WEEK[0].EXPENSE[*].AMOUNT') -ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='expense.json'; -SELECT * FROM t2; -WHO WEEK WHAT AMOUNT -Joe 3 Beer 18.00 -Joe 3 Food 12.00 -Joe 3 Food 19.00 -Joe 3 Car 20.00 -Beth 3 Beer 16.00 -Janet 3 Car 19.00 -Janet 3 Food 18.00 -Janet 3 Beer 18.00 -CREATE TABLE t3 ( -WHO CHAR(12), -WEEK INT(2) JPATH='$.WEEK[1].NUMBER', -WHAT CHAR(32) JPATH='$.WEEK[1].EXPENSE[*].WHAT', -AMOUNT DOUBLE(8,2) 
JPATH='$.WEEK[1].EXPENSE[*].AMOUNT') -ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='expense.json'; -SELECT * FROM t3; -WHO WEEK WHAT AMOUNT -Joe 4 Beer 19.00 -Joe 4 Beer 16.00 -Joe 4 Food 17.00 -Joe 4 Food 17.00 -Joe 4 Beer 14.00 -Beth 4 Food 17.00 -Beth 4 Beer 15.00 -Janet 4 Car 17.00 -CREATE TABLE t4 ( -WHO CHAR(12), -WEEK INT(2) JPATH='$.WEEK[2].NUMBER', -WHAT CHAR(32) JPATH='$.WEEK[2].EXPENSE[*].WHAT', -AMOUNT DOUBLE(8,2) JPATH='$.WEEK[2].EXPENSE[*].AMOUNT') -ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='expense.json'; -SELECT * FROM t4; -WHO WEEK WHAT AMOUNT -Joe 5 Beer 14.00 -Joe 5 Food 12.00 -Beth 5 Food 12.00 -Beth 5 Beer 20.00 -Janet 5 Beer 14.00 -Janet 5 Car 12.00 -Janet 5 Beer 19.00 -Janet 5 Food 12.00 -# -# The expanded table is made as a TBL table -# -CREATE TABLE t1 ( -WHO CHAR(12), -WEEK INT(2), -WHAT CHAR(32), -AMOUNT DOUBLE(8,2)) -ENGINE=CONNECT TABLE_TYPE=TBL TABLE_LIST='t2,t3,t4'; -SELECT * FROM t1; -WHO WEEK WHAT AMOUNT -Joe 3 Beer 18.00 -Joe 3 Food 12.00 -Joe 3 Food 19.00 -Joe 3 Car 20.00 -Beth 3 Beer 16.00 -Janet 3 Car 19.00 -Janet 3 Food 18.00 -Janet 3 Beer 18.00 -Joe 4 Beer 19.00 -Joe 4 Beer 16.00 -Joe 4 Food 17.00 -Joe 4 Food 17.00 -Joe 4 Beer 14.00 -Beth 4 Food 17.00 -Beth 4 Beer 15.00 -Janet 4 Car 17.00 -Joe 5 Beer 14.00 -Joe 5 Food 12.00 -Beth 5 Food 12.00 -Beth 5 Beer 20.00 -Janet 5 Beer 14.00 -Janet 5 Car 12.00 -Janet 5 Beer 19.00 -Janet 5 Food 12.00 -DROP TABLE t1, t2, t3, t4; -# -# Three partial JSON tables -# -CREATE TABLE t2 ( -WHO CHAR(12), -WEEK INT(2), -WHAT CHAR(32) JPATH='$.EXPENSE[*].WHAT', -AMOUNT DOUBLE(8,2) JPATH='$.EXPENSE.[*].AMOUNT') -ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='mulexp3.json'; -SELECT * FROM t2; -WHO WEEK WHAT AMOUNT -Joe 3 Beer 18.00 -Joe 3 Food 12.00 -Joe 3 Food 19.00 -Joe 3 Car 20.00 -Beth 3 Beer 16.00 -Janet 3 Car 19.00 -Janet 3 Food 18.00 -Janet 3 Beer 18.00 -CREATE TABLE t3 ( -WHO CHAR(12), -WEEK INT(2), -WHAT CHAR(32) JPATH='$.EXPENSE[*].WHAT', -AMOUNT DOUBLE(8,2) JPATH='$.EXPENSE.[*].AMOUNT') -ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='mulexp4.json'; -SELECT * FROM t3; -WHO WEEK WHAT AMOUNT -Joe 4 Beer 19.00 -Joe 4 Beer 16.00 -Joe 4 Food 17.00 -Joe 4 Food 17.00 -Joe 4 Beer 14.00 -Beth 4 Food 17.00 -Beth 4 Beer 15.00 -Janet 4 Car 17.00 -CREATE TABLE t4 ( -WHO CHAR(12), -WEEK INT(2), -WHAT CHAR(32) JPATH='$.EXPENSE[*].WHAT', -AMOUNT DOUBLE(8,2) JPATH='$.EXPENSE.[*].AMOUNT') -ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='mulexp5.json'; -SELECT * FROM t4; -WHO WEEK WHAT AMOUNT -Joe 5 Beer 14.00 -Joe 5 Food 12.00 -Beth 5 Food 12.00 -Beth 5 Beer 20.00 -Janet 5 Beer 14.00 -Janet 5 Car 12.00 -Janet 5 Beer 19.00 -Janet 5 Food 12.00 -# -# The complete table can be a multiple JSON table -# -CREATE TABLE t1 ( -WHO CHAR(12), -WEEK INT(2), -WHAT CHAR(32) JPATH='$.EXPENSE[*].WHAT', -AMOUNT DOUBLE(8,2) JPATH='$.EXPENSE.[*].AMOUNT') -ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='mulexp*.json' MULTIPLE=1; -SELECT * FROM t1 ORDER BY WHO, WEEK, WHAT, AMOUNT; -WHO WEEK WHAT AMOUNT -Beth 3 Beer 16.00 -Beth 4 Beer 15.00 -Beth 4 Food 17.00 -Beth 5 Beer 20.00 -Beth 5 Food 12.00 -Janet 3 Beer 18.00 -Janet 3 Car 19.00 -Janet 3 Food 18.00 -Janet 4 Car 17.00 -Janet 5 Beer 14.00 -Janet 5 Beer 19.00 -Janet 5 Car 12.00 -Janet 5 Food 12.00 -Joe 3 Beer 18.00 -Joe 3 Car 20.00 -Joe 3 Food 12.00 -Joe 3 Food 19.00 -Joe 4 Beer 14.00 -Joe 4 Beer 16.00 -Joe 4 Beer 19.00 -Joe 4 Food 17.00 -Joe 4 Food 17.00 -Joe 5 Beer 14.00 -Joe 5 Food 12.00 -DROP TABLE t1; -# -# Or also a partition JSON table -# -CREATE TABLE t1 ( -WHO CHAR(12), -WEEK INT(2), -WHAT CHAR(32) 
JPATH='$.EXPENSE[*].WHAT', -AMOUNT DOUBLE(8,2) JPATH='$.EXPENSE.[*].AMOUNT') -ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='mulexp%s.json'; -ALTER TABLE t1 -PARTITION BY LIST COLUMNS(WEEK) ( -PARTITION `3` VALUES IN(3), -PARTITION `4` VALUES IN(4), -PARTITION `5` VALUES IN(5)); -Warnings: -Warning 1105 Data repartition in 3 is unchecked -Warning 1105 Data repartition in 4 is unchecked -Warning 1105 Data repartition in 5 is unchecked -SHOW WARNINGS; -Level Code Message -Warning 1105 Data repartition in 3 is unchecked -Warning 1105 Data repartition in 4 is unchecked -Warning 1105 Data repartition in 5 is unchecked -SELECT * FROM t1; -WHO WEEK WHAT AMOUNT -Joe 3 Beer 18.00 -Joe 3 Food 12.00 -Joe 3 Food 19.00 -Joe 3 Car 20.00 -Beth 3 Beer 16.00 -Janet 3 Car 19.00 -Janet 3 Food 18.00 -Janet 3 Beer 18.00 -Joe 4 Beer 19.00 -Joe 4 Beer 16.00 -Joe 4 Food 17.00 -Joe 4 Food 17.00 -Joe 4 Beer 14.00 -Beth 4 Food 17.00 -Beth 4 Beer 15.00 -Janet 4 Car 17.00 -Joe 5 Beer 14.00 -Joe 5 Food 12.00 -Beth 5 Food 12.00 -Beth 5 Beer 20.00 -Janet 5 Beer 14.00 -Janet 5 Car 12.00 -Janet 5 Beer 19.00 -Janet 5 Food 12.00 -SELECT * FROM t1 WHERE WEEK = 4; -WHO WEEK WHAT AMOUNT -Joe 4 Beer 19.00 -Joe 4 Beer 16.00 -Joe 4 Food 17.00 -Joe 4 Food 17.00 -Joe 4 Beer 14.00 -Beth 4 Food 17.00 -Beth 4 Beer 15.00 -Janet 4 Car 17.00 -DROP TABLE t1, t2, t3, t4; diff --git a/storage/connect/mysql-test/connect/t/bson.test b/storage/connect/mysql-test/connect/t/bson.test deleted file mode 100644 index ab38cab73fc..00000000000 --- a/storage/connect/mysql-test/connect/t/bson.test +++ /dev/null @@ -1,294 +0,0 @@ ---source include/not_embedded.inc ---source include/have_partition.inc - -let $MYSQLD_DATADIR= `select @@datadir`; - ---copy_file $MTR_SUITE_DIR/std_data/biblio.json $MYSQLD_DATADIR/test/biblio.json ---copy_file $MTR_SUITE_DIR/std_data/bib0.json $MYSQLD_DATADIR/test/bib0.json ---copy_file $MTR_SUITE_DIR/std_data/expense.json $MYSQLD_DATADIR/test/expense.json ---copy_file $MTR_SUITE_DIR/std_data/mulexp3.json $MYSQLD_DATADIR/test/mulexp3.json ---copy_file $MTR_SUITE_DIR/std_data/mulexp4.json $MYSQLD_DATADIR/test/mulexp4.json ---copy_file $MTR_SUITE_DIR/std_data/mulexp5.json $MYSQLD_DATADIR/test/mulexp5.json - ---echo # ---echo # Testing doc samples ---echo # -CREATE TABLE t1 -( - ISBN CHAR(15), - LANG CHAR(2), - SUBJECT CHAR(32), - AUTHOR CHAR(64), - TITLE CHAR(32), - TRANSLATION CHAR(32), - TRANSLATOR CHAR(80), - PUBLISHER CHAR(32), - DATEPUB int(4) -) ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='biblio.json'; -SELECT * FROM t1; -DROP TABLE t1; - - ---echo # ---echo # Testing Jpath. 
Get the number of authors ---echo # -CREATE TABLE t1 -( - ISBN CHAR(15), - Language CHAR(2) JPATH='$.LANG', - Subject CHAR(32) JPATH='$.SUBJECT', - Authors INT(2) JPATH='$.AUTHOR[#]', - Title CHAR(32) JPATH='$.TITLE', - Translation CHAR(32) JPATH='$.TRANSLATION', - Translator CHAR(80) JPATH='$.TRANSLATOR', - Publisher CHAR(20) JPATH='$.PUBLISHER.NAME', - Location CHAR(16) JPATH='$.PUBLISHER.PLACE', - Year int(4) JPATH='$.DATEPUB' -) -ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='biblio.json'; -SELECT * FROM t1; -DROP TABLE t1; - ---echo # ---echo # Concatenates the authors ---echo # -CREATE TABLE t1 -( - ISBN CHAR(15), - Language CHAR(2) JPATH='$.LANG', - Subject CHAR(32) JPATH='$.SUBJECT', - AuthorFN CHAR(128) JPATH='$.AUTHOR[" and "].FIRSTNAME', - AuthorLN CHAR(128) JPATH='$.AUTHOR[" and "].LASTNAME', - Title CHAR(32) JPATH='$.TITLE', - Translation CHAR(32) JPATH='$.TRANSLATION', - Translator CHAR(80) JPATH='$.TRANSLATOR', - Publisher CHAR(20) JPATH='$.PUBLISHER.NAME', - Location CHAR(16) JPATH='$.PUBLISHER.PLACE', - Year int(4) JPATH='$.DATEPUB' -) -ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='biblio.json'; -SELECT * FROM t1; -DROP TABLE t1; - ---echo # ---echo # Testing expanding authors ---echo # -CREATE TABLE t1 -( - ISBN CHAR(15), - Language CHAR(2) JPATH='$.LANG', - Subject CHAR(32) JPATH='$.SUBJECT', - AuthorFN CHAR(128) JPATH='$.AUTHOR[*].FIRSTNAME', - AuthorLN CHAR(128) JPATH='$.AUTHOR[*].LASTNAME', - Title CHAR(32) JPATH='$.TITLE', - Translation CHAR(32) JPATH='$.TRANSLATION', - Translator CHAR(80) JPATH='$.TRANSLATOR', - Publisher CHAR(20) JPATH='$.PUBLISHER.NAME', - Location CHAR(16) JPATH='$.PUBLISHER.PLACE', - Year int(4) JPATH='$.DATEPUB' -) -ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='biblio.json'; -SELECT * FROM t1; -UPDATE t1 SET AuthorFN = 'Philippe' WHERE AuthorLN = 'Knab'; -SELECT * FROM t1 WHERE ISBN = '9782212090819'; - ---echo # ---echo # To add an author a new table must be created ---echo # -CREATE TABLE t2 ( -FIRSTNAME CHAR(32), -LASTNAME CHAR(32)) -ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='biblio.json' OPTION_LIST='Object=$[1].AUTHOR'; -SELECT * FROM t2; -INSERT INTO t2 VALUES('Charles','Dickens'); -SELECT * FROM t1; -DROP TABLE t1; -DROP TABLE t2; - ---echo # ---echo # Check the biblio file has the good format ---echo # -CREATE TABLE t1 -( - line char(255) -) -ENGINE=CONNECT TABLE_TYPE=DOS FILE_NAME='biblio.json'; -SELECT * FROM t1; -DROP TABLE t1; - ---echo # ---echo # Testing a pretty=0 file ---echo # -CREATE TABLE t1 -( - ISBN CHAR(15) NOT NULL, - Language CHAR(2) JPATH='$.LANG', - Subject CHAR(32) JPATH='$.SUBJECT', - AuthorFN CHAR(128) JPATH='$.AUTHOR[*].FIRSTNAME', - AuthorLN CHAR(128) JPATH='$.AUTHOR[*].LASTNAME', - Title CHAR(32) JPATH='$.TITLE', - Translation CHAR(32) JPATH='$.TRANSLATED.PREFIX', - TranslatorFN CHAR(80) JPATH='$.TRANSLATED.TRANSLATOR.FIRSTNAME', - TranslatorLN CHAR(80) JPATH='$.TRANSLATED.TRANSLATOR.LASTNAME', - Publisher CHAR(20) JPATH='$.PUBLISHER.NAME', - Location CHAR(16) JPATH='$.PUBLISHER.PLACE', - Year int(4) JPATH='$.DATEPUB', - INDEX IX(ISBN) -) -ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='bib0.json' LRECL=320 OPTION_LIST='Pretty=0'; -SHOW INDEX FROM t1; -SELECT * FROM t1; -DESCRIBE SELECT * FROM t1 WHERE ISBN = '9782212090819'; ---error ER_GET_ERRMSG -UPDATE t1 SET AuthorFN = 'Philippe' WHERE ISBN = '9782212090819'; -DROP TABLE t1; - ---echo # ---echo # A file with 2 arrays ---echo # -CREATE TABLE t1 ( -WHO CHAR(12), -WEEK INT(2) JPATH='$.WEEK[*].NUMBER', -WHAT CHAR(32) JPATH='$.WEEK[].EXPENSE["+"].WHAT', -AMOUNT DOUBLE(8,2) 
JPATH='$.WEEK[].EXPENSE[+].AMOUNT') -ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='expense.json'; -SELECT * FROM t1; -DROP TABLE t1; - ---echo # ---echo # Now it can be fully expanded ---echo # -CREATE TABLE t1 ( -WHO CHAR(12), -WEEK INT(2) JPATH='$.WEEK[*].NUMBER', -WHAT CHAR(32) JPATH='$.WEEK[*].EXPENSE[*].WHAT', -AMOUNT DOUBLE(8,2) JPATH='$.WEEK[*].EXPENSE[*].AMOUNT') -ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='expense.json'; -#--error ER_GET_ERRMSG -SELECT * FROM t1; -DROP TABLE t1; - ---echo # ---echo # A table showing many calculated results ---echo # -CREATE TABLE t1 ( -WHO CHAR(12) NOT NULL, -WEEKS CHAR(12) NOT NULL JPATH='$.WEEK[", "].NUMBER', -SUMS CHAR(64) NOT NULL JPATH='$.WEEK["+"].EXPENSE[+].AMOUNT', -SUM DOUBLE(8,2) NOT NULL JPATH='$.WEEK[+].EXPENSE[+].AMOUNT', -AVGS CHAR(64) NOT NULL JPATH='$.WEEK["+"].EXPENSE[!].AMOUNT', -SUMAVG DOUBLE(8,2) NOT NULL JPATH='$.WEEK[+].EXPENSE[!].AMOUNT', -AVGSUM DOUBLE(8,2) NOT NULL JPATH='$.WEEK[!].EXPENSE[+].AMOUNT', -AVGAVG DOUBLE(8,2) NOT NULL JPATH='$.WEEK[!].EXPENSE[!].AMOUNT', -AVERAGE DOUBLE(8,2) NOT NULL JPATH='$.WEEK[!].EXPENSE[*].AMOUNT') -ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='expense.json'; -SELECT * FROM t1; -DROP TABLE t1; - ---echo # ---echo # Expand expense in 3 one week tables ---echo # -CREATE TABLE t2 ( -WHO CHAR(12), -WEEK INT(2) JPATH='$.WEEK[0].NUMBER', -WHAT CHAR(32) JPATH='$.WEEK[0].EXPENSE[*].WHAT', -AMOUNT DOUBLE(8,2) JPATH='$.WEEK[0].EXPENSE[*].AMOUNT') -ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='expense.json'; -SELECT * FROM t2; - -CREATE TABLE t3 ( -WHO CHAR(12), -WEEK INT(2) JPATH='$.WEEK[1].NUMBER', -WHAT CHAR(32) JPATH='$.WEEK[1].EXPENSE[*].WHAT', -AMOUNT DOUBLE(8,2) JPATH='$.WEEK[1].EXPENSE[*].AMOUNT') -ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='expense.json'; -SELECT * FROM t3; - -CREATE TABLE t4 ( -WHO CHAR(12), -WEEK INT(2) JPATH='$.WEEK[2].NUMBER', -WHAT CHAR(32) JPATH='$.WEEK[2].EXPENSE[*].WHAT', -AMOUNT DOUBLE(8,2) JPATH='$.WEEK[2].EXPENSE[*].AMOUNT') -ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='expense.json'; -SELECT * FROM t4; - ---echo # ---echo # The expanded table is made as a TBL table ---echo # -CREATE TABLE t1 ( -WHO CHAR(12), -WEEK INT(2), -WHAT CHAR(32), -AMOUNT DOUBLE(8,2)) -ENGINE=CONNECT TABLE_TYPE=TBL TABLE_LIST='t2,t3,t4'; -SELECT * FROM t1; -DROP TABLE t1, t2, t3, t4; - ---echo # ---echo # Three partial JSON tables ---echo # -CREATE TABLE t2 ( -WHO CHAR(12), -WEEK INT(2), -WHAT CHAR(32) JPATH='$.EXPENSE[*].WHAT', -AMOUNT DOUBLE(8,2) JPATH='$.EXPENSE.[*].AMOUNT') -ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='mulexp3.json'; -SELECT * FROM t2; - -CREATE TABLE t3 ( -WHO CHAR(12), -WEEK INT(2), -WHAT CHAR(32) JPATH='$.EXPENSE[*].WHAT', -AMOUNT DOUBLE(8,2) JPATH='$.EXPENSE.[*].AMOUNT') -ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='mulexp4.json'; -SELECT * FROM t3; - -CREATE TABLE t4 ( -WHO CHAR(12), -WEEK INT(2), -WHAT CHAR(32) JPATH='$.EXPENSE[*].WHAT', -AMOUNT DOUBLE(8,2) JPATH='$.EXPENSE.[*].AMOUNT') -ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='mulexp5.json'; -SELECT * FROM t4; - ---echo # ---echo # The complete table can be a multiple JSON table ---echo # -CREATE TABLE t1 ( -WHO CHAR(12), -WEEK INT(2), -WHAT CHAR(32) JPATH='$.EXPENSE[*].WHAT', -AMOUNT DOUBLE(8,2) JPATH='$.EXPENSE.[*].AMOUNT') -ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='mulexp*.json' MULTIPLE=1; -SELECT * FROM t1 ORDER BY WHO, WEEK, WHAT, AMOUNT; -DROP TABLE t1; - ---echo # ---echo # Or also a partition JSON table ---echo # -CREATE TABLE t1 ( -WHO CHAR(12), -WEEK INT(2), -WHAT CHAR(32) JPATH='$.EXPENSE[*].WHAT', -AMOUNT DOUBLE(8,2) 
JPATH='$.EXPENSE.[*].AMOUNT') -ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='mulexp%s.json'; -ALTER TABLE t1 -PARTITION BY LIST COLUMNS(WEEK) ( -PARTITION `3` VALUES IN(3), -PARTITION `4` VALUES IN(4), -PARTITION `5` VALUES IN(5)); -SHOW WARNINGS; -SELECT * FROM t1; -SELECT * FROM t1 WHERE WEEK = 4; -DROP TABLE t1, t2, t3, t4; - -# -# Clean up -# ---remove_file $MYSQLD_DATADIR/test/biblio.json ---remove_file $MYSQLD_DATADIR/test/bib0.dnx ---remove_file $MYSQLD_DATADIR/test/bib0.json ---remove_file $MYSQLD_DATADIR/test/expense.json ---remove_file $MYSQLD_DATADIR/test/mulexp3.json ---remove_file $MYSQLD_DATADIR/test/mulexp4.json ---remove_file $MYSQLD_DATADIR/test/mulexp5.json diff --git a/storage/connect/tabbson.cpp b/storage/connect/tabbson.cpp index c1647604b63..454484fc610 100644 --- a/storage/connect/tabbson.cpp +++ b/storage/connect/tabbson.cpp @@ -634,29 +634,28 @@ PBVAL BTUTIL::ParseLine(PGLOBAL g, int *pretty, bool *comma) /***********************************************************************/ /* Make the top tree from the object path. */ /***********************************************************************/ -PBVAL BTUTIL::MakeTopTree(PGLOBAL g, PBVAL jsp) +PBVAL BTUTIL::MakeTopTree(PGLOBAL g, int type) { - PBVAL top = NULL; + PBVAL top = NULL, val = NULL; if (Tp->Objname) { - if (!Tp->Val) { - // Parse and allocate Objname item(s) + if (!Tp->Row) { + // Parse and allocate Objpath item(s) char* p; - char* objpath = PlugDup(g, Tp->Objname); + char *objpath = PlugDup(g, Tp->Objname); int i; PBVAL objp = NULL; PBVAL arp = NULL; - PBVAL val = NULL; for (; objpath; objpath = p) { if ((p = strchr(objpath, Tp->Sep))) *p++ = 0; if (*objpath != '[' && !IsNum(objpath)) { - // objp = new(g) JOBJECT; + objp = NewVal(TYPE_JOB); if (!top) - top = NewVal(TYPE_JOB); + top = objp; if (val) SetValueObj(val, objp); @@ -687,12 +686,12 @@ PBVAL BTUTIL::MakeTopTree(PGLOBAL g, PBVAL jsp) } // endfor p - Tp->Val = val; } // endif Val - SetValueVal(Tp->Val, jsp); + Tp->Row = val; + Tp->Row->Type = type; } else - top = jsp; + top = Tp->Row = NewVal(type); return top; } // end of MakeTopTree @@ -1270,7 +1269,6 @@ TDBBSN::TDBBSN(PGLOBAL g, PBDEF tdp, PTXF txfp) : TDBDOS(tdp, txfp) Bp = new(g) BTUTIL(tdp->G, this); Top = NULL; Row = NULL; - Val = NULL; Colp = NULL; if (tdp) { @@ -1306,7 +1304,6 @@ TDBBSN::TDBBSN(TDBBSN* tdbp) : TDBDOS(NULL, tdbp) Bp = tdbp->Bp; Top = tdbp->Top; Row = tdbp->Row; - Val = tdbp->Val; Colp = tdbp->Colp; Jmode = tdbp->Jmode; Objname = tdbp->Objname; @@ -1413,11 +1410,6 @@ bool TDBBSN::OpenDB(PGLOBAL g) { TUSE use = Use; - if (Pretty < 0 && Mode == MODE_UPDATE) { - sprintf(g->Message, "Mode %d NIY for Bjson", Mode); - return true; - } // endif Mode - if (Use == USE_OPEN) { /*******************************************************************/ /* Table already open replace it at its beginning. ??? */ @@ -1437,19 +1429,20 @@ bool TDBBSN::OpenDB(PGLOBAL g) return false; if (Pretty < 0) { - /*******************************************************************/ - /* Binary BJSON table. */ - /*******************************************************************/ + /*********************************************************************/ + /* Binary BJSON table. */ + /*********************************************************************/ xtrc(1, "JSN OpenDB: tdbp=%p tdb=R%d use=%d mode=%d\n", this, Tdb_No, Use, Mode); - /*********************************************************************/ - /* Lrecl is Ok. 
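The MakeTopTree hunk above walks the table's object path one step at a time, creating an object level for a plain key and an array level when the step is bracketed or numeric (the IsNum test). A standalone sketch of that classification is shown below; the path string and the helper names are invented for the example.

#include <cctype>
#include <cstdio>
#include <cstdlib>
#include <cstring>

// True when the token is only digits (optionally closed by ']').
static bool IsNumTok(const char *s) {
  if (!*s) return false;
  for (; *s && *s != ']'; s++)
    if (!std::isdigit((unsigned char)*s))
      return false;
  return true;
}

int main() {
  char path[] = "data.rows.3.value";        // hypothetical object path
  for (char *tok = std::strtok(path, "."); tok; tok = std::strtok(nullptr, ".")) {
    const char *num = (*tok == '[') ? tok + 1 : tok;
    if (*tok == '[' || IsNumTok(tok))
      std::printf("array level, index %d\n", std::atoi(num));
    else
      std::printf("object level, key %s\n", tok);
  }
  return 0;
}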
*/ - /*********************************************************************/ + // Lrecl is Ok size_t linelen = Lrecl; + MODE mode = Mode; - // Buffer must be set to G->Sarea + // Buffer must be allocated in G->Sarea + Mode = MODE_ANY; Txfp->AllocateBuffer(Bp->G); + Mode = mode; if (Mode == MODE_INSERT) Bp->SubSet(true); @@ -1461,27 +1454,29 @@ bool TDBBSN::OpenDB(PGLOBAL g) xtrc(1, "OpenJSN: R%hd mode=%d To_Line=%p\n", Tdb_No, Mode, To_Line); } // endif Pretty - /***********************************************************************/ - /* First opening. */ - /***********************************************************************/ + /***********************************************************************/ + /* First opening. */ + /***********************************************************************/ if (Mode == MODE_INSERT) { + int type; + switch (Jmode) { - case MODE_OBJECT: Row = Bp->NewVal(TYPE_JOB); break; - case MODE_ARRAY: Row = Bp->NewVal(TYPE_JAR); break; - case MODE_VALUE: Row = Bp->NewVal(TYPE_JVAL); break; - default: - sprintf(g->Message, "Invalid Jmode %d", Jmode); - return true; + case MODE_OBJECT: type = TYPE_JOB; break; + case MODE_ARRAY: type = TYPE_JAR; break; + case MODE_VALUE: type = TYPE_JVAL; break; + default: + sprintf(g->Message, "Invalid Jmode %d", Jmode); + return true; } // endswitch Jmode + Top = Bp->MakeTopTree(g, type); Bp->MemSave(); } // endif Mode if (Xcol) - To_Filter = NULL; // Imcompatible + To_Filter = NULL; // Not compatible return false; - } // end of OpenDB /***********************************************************************/ @@ -1534,6 +1529,7 @@ int TDBBSN::ReadDB(PGLOBAL g) Bp->SubSet(); if ((Row = Bp->ParseLine(g, &Pretty, &Comma))) { + Top = Row; Row = Bp->FindRow(g); SameRow = 0; Fpos++; @@ -1545,10 +1541,9 @@ int TDBBSN::ReadDB(PGLOBAL g) } else rc = RC_EF; - } else { - // Here we get a movable Json binary tree - Bp->SubSet(); // Perhaps Useful when updating - Row = (PBVAL)To_Line; + } else { // Here we get a movable Json binary tree + Bp->MemSet(((BINFAM*)Txfp)->Recsize); // Useful when updating + Row = Top = (PBVAL)To_Line; Row = Bp->FindRow(g); SameRow = 0; Fpos++; @@ -1569,8 +1564,8 @@ bool TDBBSN::PrepareWriting(PGLOBAL g) if (Pretty >= 0) { PSZ s; - if (!(Top = Bp->MakeTopTree(g, Row))) - return true; +// if (!(Top = Bp->MakeTopTree(g, Row->Type))) +// return true; if ((s = Bp->SerialVal(g, Top, Pretty))) { if (Comma) @@ -2030,25 +2025,44 @@ void BSONCOL::WriteColumn(PGLOBAL g) throw 666; } // endif jsp + switch (row->Type) { + case TYPE_JAR: + if (Nod > 1 && Nodes[Nod - 2].Op == OP_EQ) + Cp->SetArrayValue(row, jsp, Nodes[Nod - 2].Rank); + else + Cp->AddArrayValue(row, jsp); + + break; + case TYPE_JOB: + if (Nod > 1 && Nodes[Nod - 2].Key) + Cp->SetKeyValue(row, jsp, Nodes[Nod - 2].Key); + + break; + case TYPE_JVAL: + default: + Cp->SetValueVal(row, jsp); + } // endswitch Type + + break; } else jsp = Cp->NewVal(Value); switch (row->Type) { - case TYPE_JAR: - if (Nodes[Nod - 1].Op == OP_EQ) - Cp->SetArrayValue(row, jsp, Nodes[Nod - 1].Rank); - else - Cp->AddArrayValue(row, jsp); + case TYPE_JAR: + if (Nodes[Nod - 1].Op == OP_EQ) + Cp->SetArrayValue(row, jsp, Nodes[Nod - 1].Rank); + else + Cp->AddArrayValue(row, jsp); - break; - case TYPE_JOB: - if (Nodes[Nod - 1].Key) - Cp->SetKeyValue(row, jsp, Nodes[Nod - 1].Key); + break; + case TYPE_JOB: + if (Nodes[Nod - 1].Key) + Cp->SetKeyValue(row, jsp, Nodes[Nod - 1].Key); - break; - case TYPE_JVAL: - default: - Cp->SetValueVal(row, jsp); + break; + case TYPE_JVAL: + default: + 
Cp->SetValueVal(row, jsp); } // endswitch Type break; @@ -2103,9 +2117,10 @@ int TDBBSON::MakeNewDoc(PGLOBAL g) // Create a void table that will be populated Docp = Bp->NewVal(TYPE_JAR); - if (!(Top = Bp->MakeTopTree(g, Docp))) + if (!(Top = Bp->MakeTopTree(g, TYPE_JAR))) return RC_FX; + Docp = Row; Done = true; return RC_OK; } // end of MakeNewDoc diff --git a/storage/connect/tabbson.h b/storage/connect/tabbson.h index 677bcbfd6e9..5b764b2eabd 100644 --- a/storage/connect/tabbson.h +++ b/storage/connect/tabbson.h @@ -111,7 +111,7 @@ public: // Utility functions PBVAL FindRow(PGLOBAL g); PBVAL ParseLine(PGLOBAL g, int *pretty, bool *comma); - PBVAL MakeTopTree(PGLOBAL g, PBVAL jsp); + PBVAL MakeTopTree(PGLOBAL g, int type); PSZ SerialVal(PGLOBAL g, PBVAL top, int pretty); protected: @@ -196,10 +196,9 @@ protected: //int MakeTopTree(PGLOBAL g, PBVAL jsp); // Members - PBTUT Bp; // The BSUTIL handling class + PBTUT Bp; // The BSUTIL handling class PBVAL Top; // The top JSON tree PBVAL Row; // The current row - PBVAL Val; // The value of the current row PBSCOL Colp; // The multiple column JMODE Jmode; // MODE_OBJECT by default PCSZ Objname; // The table object name diff --git a/storage/connect/tabjson.cpp b/storage/connect/tabjson.cpp index 429efea56fa..4bddef1940e 100644 --- a/storage/connect/tabjson.cpp +++ b/storage/connect/tabjson.cpp @@ -1925,7 +1925,7 @@ PJSON JSONCOL::GetRow(PGLOBAL g) PJSON nwr, row = Tjp->Row; for (int i = 0; i < Nod && row; i++) { - if (Nodes[i+1].Op == OP_XX) + if (i < Nod-1 && Nodes[i+1].Op == OP_XX) break; else switch (row->GetType()) { case TYPE_JOB: From a7867410009ce91e3e710ace0b0d97261170d44a Mon Sep 17 00:00:00 2001 From: Olivier Bertrand Date: Thu, 17 Dec 2020 13:58:13 +0100 Subject: [PATCH 040/150] - Fix crash with JsonContains UDF + BSON development --- storage/connect/bson.cpp | 2 +- storage/connect/jsonudf.cpp | 21 +++++++-------------- storage/connect/jsonudf.h | 6 +++--- storage/connect/tabbson.cpp | 13 ++++++------- 4 files changed, 17 insertions(+), 25 deletions(-) diff --git a/storage/connect/bson.cpp b/storage/connect/bson.cpp index 08f4dca46f2..f7d4e5731c5 100644 --- a/storage/connect/bson.cpp +++ b/storage/connect/bson.cpp @@ -27,7 +27,7 @@ #if defined(_DEBUG) #define CheckType(X,Y) if (!X || X ->Type != Y) throw MSG(VALTYPE_NOMATCH); #else -#define CheckType(V) +#define CheckType(X,Y) #endif #if defined(__WIN__) diff --git a/storage/connect/jsonudf.cpp b/storage/connect/jsonudf.cpp index 0909cb96477..bbe7cba28cc 100644 --- a/storage/connect/jsonudf.cpp +++ b/storage/connect/jsonudf.cpp @@ -4066,17 +4066,14 @@ my_bool jsoncontains_init(UDF_INIT *initid, UDF_ARGS *args, char *message) return JsonInit(initid, args, message, false, reslen, memlen, more); } // end of jsoncontains_init -long long jsoncontains(UDF_INIT *initid, UDF_ARGS *args, char *result, - unsigned long *res_length, char *is_null, char *error) +long long jsoncontains(UDF_INIT *initid, UDF_ARGS *args, char *, char *error) { - char *p __attribute__((unused)), res[256]; - long long n; + char isn, res[256]; unsigned long reslen; - *is_null = 0; - p = jsonlocate(initid, args, res, &reslen, is_null, error); - n = (*is_null) ? 0LL : 1LL; - return n; + isn = 0; + jsonlocate(initid, args, res, &reslen, &isn, error); + return (isn) ? 
0LL : 1LL; } // end of jsoncontains void jsoncontains_deinit(UDF_INIT* initid) @@ -4118,8 +4115,7 @@ my_bool jsoncontains_path_init(UDF_INIT *initid, UDF_ARGS *args, char *message) return JsonInit(initid, args, message, true, reslen, memlen, more); } // end of jsoncontains_path_init -long long jsoncontains_path(UDF_INIT *initid, UDF_ARGS *args, char *result, - unsigned long *res_length, char *is_null, char *error) +long long jsoncontains_path(UDF_INIT *initid, UDF_ARGS *args, char *, char *error) { char *p, *path; long long n; @@ -4130,7 +4126,6 @@ long long jsoncontains_path(UDF_INIT *initid, UDF_ARGS *args, char *result, if (g->N) { if (!g->Activityp) { - *is_null = 1; return 0LL; } else return *(long long*)g->Activityp; @@ -4188,7 +4183,6 @@ long long jsoncontains_path(UDF_INIT *initid, UDF_ARGS *args, char *result, err: if (g->Mrr) *error = 1; - *is_null = 1; return 0LL; } // end of jsoncontains_path @@ -6528,8 +6522,7 @@ my_bool countin_init(UDF_INIT *initid, UDF_ARGS *args, char *message) return false; } // end of countin_init -long long countin(UDF_INIT *initid, UDF_ARGS *args, char *result, - unsigned long *res_length, char *is_null, char *) +long long countin(UDF_INIT *initid, UDF_ARGS *args, char *is_null, char *) { PSZ str1, str2; char *s; diff --git a/storage/connect/jsonudf.h b/storage/connect/jsonudf.h index d99122aa775..689a02ebbc5 100644 --- a/storage/connect/jsonudf.h +++ b/storage/connect/jsonudf.h @@ -174,7 +174,7 @@ extern "C" { DllExport void jsonget_real_deinit(UDF_INIT*); DllExport my_bool jsoncontains_init(UDF_INIT*, UDF_ARGS*, char*); - DllExport long long jsoncontains(UDF_EXEC_ARGS); + DllExport long long jsoncontains(UDF_INIT*, UDF_ARGS*, char*, char*); DllExport void jsoncontains_deinit(UDF_INIT*); DllExport my_bool jsonlocate_init(UDF_INIT*, UDF_ARGS*, char*); @@ -186,7 +186,7 @@ extern "C" { DllExport void json_locate_all_deinit(UDF_INIT*); DllExport my_bool jsoncontains_path_init(UDF_INIT*, UDF_ARGS*, char*); - DllExport long long jsoncontains_path(UDF_EXEC_ARGS); + DllExport long long jsoncontains_path(UDF_INIT*, UDF_ARGS*, char*, char*); DllExport void jsoncontains_path_deinit(UDF_INIT*); DllExport my_bool json_set_item_init(UDF_INIT*, UDF_ARGS*, char*); @@ -294,7 +294,7 @@ extern "C" { #endif // DEVELOPMENT DllExport my_bool countin_init(UDF_INIT*, UDF_ARGS*, char*); - DllExport long long countin(UDF_EXEC_ARGS); + DllExport long long countin(UDF_INIT*, UDF_ARGS*, char*, char*); } // extern "C" diff --git a/storage/connect/tabbson.cpp b/storage/connect/tabbson.cpp index 454484fc610..d4e5f09138a 100644 --- a/storage/connect/tabbson.cpp +++ b/storage/connect/tabbson.cpp @@ -1161,18 +1161,19 @@ PTDB BSONDEF::GetTable(PGLOBAL g, MODE m) USETEMP tmp = UseTemp(); bool map = Mapped && Pretty >= 0 && m != MODE_INSERT && !(tmp != TMP_NO && m == MODE_UPDATE) && - !(tmp == TMP_FORCE && - (m == MODE_UPDATE || m == MODE_DELETE)); + !(tmp == TMP_FORCE && (m == MODE_UPDATE || m == MODE_DELETE)); if (Lrecl) { // Allocate the parse work memory - G = PlugInit(NULL, (size_t)Lrecl * 4); + G = PlugInit(NULL, (size_t)Lrecl * (Pretty < 0 ? 
2 : 4)); } else { strcpy(g->Message, "LRECL is not defined"); return NULL; } // endif Lrecl - if (Uri) { + if (Pretty < 0) { // BJsonfile + txfp = new(g) BINFAM(this); + } else if (Uri) { if (Driver && toupper(*Driver) == 'C') { #if defined(CMGO_SUPPORT) txfp = new(g) CMGFAM(this); @@ -1222,10 +1223,8 @@ PTDB BSONDEF::GetTable(PGLOBAL g, MODE m) sprintf(g->Message, MSG(NO_FEAT_SUPPORT), "GZ"); return NULL; #endif // !GZ_SUPPORT - } else if (map) + } else if (map) { txfp = new(g) MAPFAM(this); - else if (Pretty < 0) { // BJsonfile - txfp = new(g) BINFAM(this); } else txfp = new(g) DOSFAM(this); From 24c18ce8926105d77ebff2d63611af440aaa8bee Mon Sep 17 00:00:00 2001 From: Olivier Bertrand Date: Fri, 18 Dec 2020 18:59:52 +0100 Subject: [PATCH 041/150] - Fix json parser (void objects not recognized) modified: json.cpp --- storage/connect/ha_connect.cc | 23 +++++++++++++---------- storage/connect/json.cpp | 4 ++-- 2 files changed, 15 insertions(+), 12 deletions(-) diff --git a/storage/connect/ha_connect.cc b/storage/connect/ha_connect.cc index 65c3ea5c5d6..cf3a8866ff0 100644 --- a/storage/connect/ha_connect.cc +++ b/storage/connect/ha_connect.cc @@ -170,7 +170,7 @@ #define JSONMAX 10 // JSON Default max grp size extern "C" { - char version[]= "Version 1.07.0002 December 12, 2020"; + char version[]= "Version 1.07.0002 December 18, 2020"; #if defined(__WIN__) char compver[]= "Version 1.07.0002 " __DATE__ " " __TIME__; char slash= '\\'; @@ -1070,12 +1070,12 @@ static PGLOBAL GetPlug(THD *thd, PCONNECT& lxp) /****************************************************************************/ TABTYPE ha_connect::GetRealType(PTOS pos) { - TABTYPE type; + TABTYPE type= TAB_UNDEF; if (pos || (pos= GetTableOptionStruct())) { type= GetTypeID(pos->type); - if (type == TAB_UNDEF) + if (type == TAB_UNDEF && !pos->http) type= pos->srcdef ? TAB_MYSQL : pos->tabname ? TAB_PRX : TAB_DOS; #if defined(REST_SUPPORT) else if (pos->http) @@ -1083,7 +1083,8 @@ TABTYPE ha_connect::GetRealType(PTOS pos) case TAB_JSON: case TAB_XML: case TAB_CSV: - type = TAB_REST; + case TAB_UNDEF: + type = TAB_REST; break; case TAB_REST: type = TAB_NIY; @@ -1093,8 +1094,7 @@ TABTYPE ha_connect::GetRealType(PTOS pos) } // endswitch type #endif // REST_SUPPORT - } else - type= TAB_UNDEF; + } // endif pos return type; } // end of GetRealType @@ -5690,7 +5690,7 @@ static int connect_assisted_discovery(handlerton *, THD* thd, try { // Check table type - if (ttp == TAB_UNDEF) { + if (ttp == TAB_UNDEF && !topt->http) { topt->type= (src) ? "MYSQL" : (tab) ? "PROXY" : "DOS"; ttp= GetTypeID(topt->type); sprintf(g->Message, "No table_type. Was set to %s", topt->type); @@ -5708,7 +5708,8 @@ static int connect_assisted_discovery(handlerton *, THD* thd, #endif // BSON_SUPPORT case TAB_XML: case TAB_CSV: - ttp = TAB_REST; + case TAB_UNDEF: + ttp = TAB_REST; break; default: break; @@ -6131,8 +6132,10 @@ static int connect_assisted_discovery(handlerton *, THD* thd, } // endif !nblin // Restore language type - if (ttp == TAB_REST) - ttp = GetTypeID(topt->type); + if (ttp == TAB_REST) { + ttp = GetTypeID(topt->type); + ttp = (ttp == TAB_UNDEF) ? 
TAB_JSON : ttp; + } // endif ttp for (i= 0; !rc && i < qrp->Nblin; i++) { typ= len= prec= dec= flg= 0; diff --git a/storage/connect/json.cpp b/storage/connect/json.cpp index 7c1748e0fde..bcbd71b5031 100644 --- a/storage/connect/json.cpp +++ b/storage/connect/json.cpp @@ -554,7 +554,7 @@ PJAR JDOC::ParseArray(PGLOBAL g, int& i) PJOB JDOC::ParseObject(PGLOBAL g, int& i) { PSZ key; - int level = 0; + int level = -1; PJOB jobp = new(g) JOBJECT; PJPR jpp = NULL; @@ -590,7 +590,7 @@ PJOB JDOC::ParseObject(PGLOBAL g, int& i) break; case '}': - if (level < 2) { + if (level == 0 || level == 1) { sprintf(g->Message, "Unexpected '}' near %.*s", ARGS); throw 2; } // endif level From 2113cab7ec808721d3492870f094d681842e7274 Mon Sep 17 00:00:00 2001 From: Olivier Bertrand Date: Tue, 22 Dec 2020 22:50:12 +0100 Subject: [PATCH 042/150] Make REST tables default file name. Commit before continuing BSON development --- storage/connect/bson.cpp | 33 +++++++++++++++------------------ storage/connect/bson.h | 11 +++++++++-- storage/connect/ha_connect.cc | 21 ++++++++++++++------- storage/connect/tabbson.cpp | 12 ++++++++---- storage/connect/tabbson.h | 2 +- storage/connect/tabrest.cpp | 10 ++++++++++ 6 files changed, 57 insertions(+), 32 deletions(-) diff --git a/storage/connect/bson.cpp b/storage/connect/bson.cpp index f7d4e5731c5..df95bd4c9c8 100644 --- a/storage/connect/bson.cpp +++ b/storage/connect/bson.cpp @@ -101,17 +101,19 @@ BDOC::BDOC(PGLOBAL G) : BJSON(G, NULL) jp = NULL; s = NULL; len = 0; + pretty = 3; pty[0] = pty[1] = pty[2] = true; + comma = false; } // end of BDOC constructor /***********************************************************************/ /* Parse a json string. */ /* Note: when pretty is not known, the caller set pretty to 3. */ /***********************************************************************/ -PBVAL BDOC::ParseJson(PGLOBAL g, char* js, size_t lng, int* ptyp, bool* comma) +PBVAL BDOC::ParseJson(PGLOBAL g, char* js, size_t lng) { - int i, pretty = (ptyp) ? 
*ptyp : 3; - bool b = false; + int i; + bool b = false, ptyp = (bool *)pty; PBVAL bvp = NULL; s = js; @@ -121,8 +123,7 @@ PBVAL BDOC::ParseJson(PGLOBAL g, char* js, size_t lng, int* ptyp, bool* comma) if (!s || !len) { strcpy(g->Message, "Void JSON object"); return NULL; - } else if (comma) - *comma = false; + } // endif s // Trying to guess the pretty format if (s[0] == '[' && (s[1] == '\n' || (s[1] == '\r' && s[2] == '\n'))) @@ -136,7 +137,7 @@ PBVAL BDOC::ParseJson(PGLOBAL g, char* js, size_t lng, int* ptyp, bool* comma) switch (s[i]) { case '[': if (bvp->Type != TYPE_UNKNOWN) - bvp->To_Val = ParseAsArray(i, pretty, ptyp); + bvp->To_Val = ParseAsArray(i); else bvp->To_Val = ParseArray(++i); @@ -144,7 +145,7 @@ PBVAL BDOC::ParseJson(PGLOBAL g, char* js, size_t lng, int* ptyp, bool* comma) break; case '{': if (bvp->Type != TYPE_UNKNOWN) { - bvp->To_Val = ParseAsArray(i, pretty, ptyp); + bvp->To_Val = ParseAsArray(i); bvp->Type = TYPE_JAR; } else { bvp->To_Val = ParseObject(++i); @@ -159,9 +160,7 @@ PBVAL BDOC::ParseJson(PGLOBAL g, char* js, size_t lng, int* ptyp, bool* comma) break; case ',': if (bvp->Type != TYPE_UNKNOWN && (pretty == 1 || pretty == 3)) { - if (comma) - *comma = true; - + comma = true; pty[0] = pty[2] = false; break; } // endif pretty @@ -179,7 +178,7 @@ PBVAL BDOC::ParseJson(PGLOBAL g, char* js, size_t lng, int* ptyp, bool* comma) default: if (bvp->Type != TYPE_UNKNOWN) { - bvp->To_Val = ParseAsArray(i, pretty, ptyp); + bvp->To_Val = ParseAsArray(i); bvp->Type = TYPE_JAR; } else if ((bvp->To_Val = MOF(ParseValue(i)))) bvp->Type = TYPE_JVAL; @@ -191,12 +190,10 @@ PBVAL BDOC::ParseJson(PGLOBAL g, char* js, size_t lng, int* ptyp, bool* comma) if (bvp->Type == TYPE_UNKNOWN) sprintf(g->Message, "Invalid Json string '%.*s'", MY_MIN((int)len, 50), s); - else if (ptyp && pretty == 3) { - *ptyp = 3; // Not recognized pretty - + else if (pretty == 3) { for (i = 0; i < 3; i++) if (pty[i]) { - *ptyp = i; + pretty = i; break; } // endif pty @@ -218,12 +215,12 @@ PBVAL BDOC::ParseJson(PGLOBAL g, char* js, size_t lng, int* ptyp, bool* comma) /***********************************************************************/ /* Parse several items as being in an array. */ /***********************************************************************/ -OFFSET BDOC::ParseAsArray(int& i, int pretty, int* ptyp) { +OFFSET BDOC::ParseAsArray(int& i) { if (pty[0] && (!pretty || pretty > 2)) { OFFSET jsp; - if ((jsp = ParseArray((i = 0))) && ptyp && pretty == 3) - *ptyp = (pty[0]) ? 0 : 3; + if ((jsp = ParseArray((i = 0))) && pretty == 3) + pretty = (pty[0]) ? 
0 : 3; return jsp; } else diff --git a/storage/connect/bson.h b/storage/connect/bson.h index 435e355d249..ca776dd1950 100644 --- a/storage/connect/bson.h +++ b/storage/connect/bson.h @@ -167,7 +167,12 @@ class BDOC : public BJSON { public: BDOC(PGLOBAL G); - PBVAL ParseJson(PGLOBAL g, char* s, size_t n, int* prty = NULL, bool* b = NULL); + bool GetComma(void) { return comma; } + int GetPretty(void) { return pretty; } + void SetPretty(int pty) { pretty = pty; } + + // Methods + PBVAL ParseJson(PGLOBAL g, char* s, size_t n); PSZ Serialize(PGLOBAL g, PBVAL bvp, char* fn, int pretty); protected: @@ -176,7 +181,7 @@ protected: PBVAL ParseValue(int& i); OFFSET ParseString(int& i); void ParseNumeric(int& i, PBVAL bvp); - OFFSET ParseAsArray(int& i, int pretty, int* ptyp); + OFFSET ParseAsArray(int& i); bool SerializeArray(OFFSET arp, bool b); bool SerializeObject(OFFSET obp); bool SerializeValue(PBVAL vp); @@ -185,7 +190,9 @@ protected: JOUT* jp; // Used with serialize char* s; // The Json string to parse int len; // The Json string length + int pretty; // The pretty style of the file to parse bool pty[3]; // Used to guess what pretty is + bool comma; // True if Pretty = 1 // Default constructor not to be used BDOC(void) {} diff --git a/storage/connect/ha_connect.cc b/storage/connect/ha_connect.cc index cf3a8866ff0..d6cbcbc077f 100644 --- a/storage/connect/ha_connect.cc +++ b/storage/connect/ha_connect.cc @@ -170,7 +170,7 @@ #define JSONMAX 10 // JSON Default max grp size extern "C" { - char version[]= "Version 1.07.0002 December 18, 2020"; + char version[]= "Version 1.07.0002 December 19, 2020"; #if defined(__WIN__) char compver[]= "Version 1.07.0002 " __DATE__ " " __TIME__; char slash= '\\'; @@ -5701,14 +5701,20 @@ static int connect_assisted_discovery(handlerton *, THD* thd, goto err; #if defined(REST_SUPPORT) } else if (topt->http) { - switch (ttp) { + if (ttp == TAB_UNDEF) { + topt->type = "JSON"; + ttp= GetTypeID(topt->type); + sprintf(g->Message, "No table_type. Was set to %s", topt->type); + push_warning(thd, Sql_condition::WARN_LEVEL_WARN, 0, g->Message); + } // endif ttp + + switch (ttp) { case TAB_JSON: #if defined(BSON_SUPPORT) case TAB_BSON: #endif // BSON_SUPPORT case TAB_XML: case TAB_CSV: - case TAB_UNDEF: ttp = TAB_REST; break; default: @@ -5894,7 +5900,7 @@ static int connect_assisted_discovery(handlerton *, THD* thd, #if defined(BSON_SUPPORT) case TAB_BSON: #endif // BSON_SUPPORT - dsn= strz(g, create_info->connect_string); + dsn= strz(g, create_info->connect_string); if (!fn && !zfn && !mul && !dsn) sprintf(g->Message, "Missing %s file name", topt->type); @@ -6132,10 +6138,8 @@ static int connect_assisted_discovery(handlerton *, THD* thd, } // endif !nblin // Restore language type - if (ttp == TAB_REST) { + if (ttp == TAB_REST) ttp = GetTypeID(topt->type); - ttp = (ttp == TAB_UNDEF) ? TAB_JSON : ttp; - } // endif ttp for (i= 0; !rc && i < qrp->Nblin; i++) { typ= len= prec= dec= flg= 0; @@ -6436,6 +6440,9 @@ int ha_connect::create(const char *name, TABLE *table_arg, // Check table type if (type == TAB_UNDEF) { options->type= (options->srcdef) ? "MYSQL" : +#if defined(BSON_SUPPORT) + (options->http) ? "JSON" : +#endif // BSON_SUPPORT (options->tabname) ? "PROXY" : "DOS"; type= GetTypeID(options->type); sprintf(g->Message, "No table_type. 
Will be set to %s", options->type); diff --git a/storage/connect/tabbson.cpp b/storage/connect/tabbson.cpp index d4e5f09138a..7635a6afe70 100644 --- a/storage/connect/tabbson.cpp +++ b/storage/connect/tabbson.cpp @@ -626,9 +626,11 @@ PBVAL BTUTIL::FindRow(PGLOBAL g) /***********************************************************************/ /* Parse the read line. */ /***********************************************************************/ -PBVAL BTUTIL::ParseLine(PGLOBAL g, int *pretty, bool *comma) +PBVAL BTUTIL::ParseLine(PGLOBAL g, int prty, bool cma) { - return ParseJson(g, Tp->To_Line, strlen(Tp->To_Line), pretty, comma); + pretty = prty; + comma = cma; + return ParseJson(g, Tp->To_Line, strlen(Tp->To_Line)); } // end of ParseLine /***********************************************************************/ @@ -1296,6 +1298,7 @@ TDBBSN::TDBBSN(PGLOBAL g, PBDEF tdp, PTXF txfp) : TDBDOS(tdp, txfp) SameRow = 0; Xval = -1; Comma = false; + Bp->SetPretty(Pretty); } // end of TDBBSN standard constructor TDBBSN::TDBBSN(TDBBSN* tdbp) : TDBDOS(NULL, tdbp) @@ -1527,7 +1530,7 @@ int TDBBSN::ReadDB(PGLOBAL g) // Recover the memory used for parsing Bp->SubSet(); - if ((Row = Bp->ParseLine(g, &Pretty, &Comma))) { + if ((Row = Bp->ParseLine(g, Pretty, Comma))) { Top = Row; Row = Bp->FindRow(g); SameRow = 0; @@ -2081,6 +2084,7 @@ TDBBSON::TDBBSON(PGLOBAL g, PBDEF tdp, PTXF txfp) : TDBBSN(g, tdp, txfp) Docp = NULL; Multiple = tdp->Multiple; Done = Changed = false; + Bp->SetPretty(2); } // end of TDBBSON standard constructor TDBBSON::TDBBSON(PBTDB tdbp) : TDBBSN(tdbp) @@ -2165,7 +2169,7 @@ int TDBBSON::MakeDocument(PGLOBAL g) /* Parse the json file and allocate its tree structure. */ /*********************************************************************/ g->Message[0] = 0; - jsp = Top = Bp->ParseJson(g, memory, len, &Pretty); + jsp = Top = Bp->ParseJson(g, memory, len); Txfp->CloseTableFile(g, false); Mode = mode; // Restore saved Mode diff --git a/storage/connect/tabbson.h b/storage/connect/tabbson.h index 5b764b2eabd..a53f33bd737 100644 --- a/storage/connect/tabbson.h +++ b/storage/connect/tabbson.h @@ -110,7 +110,7 @@ public: // Utility functions PBVAL FindRow(PGLOBAL g); - PBVAL ParseLine(PGLOBAL g, int *pretty, bool *comma); + PBVAL ParseLine(PGLOBAL g, int prty, bool cma); PBVAL MakeTopTree(PGLOBAL g, int type); PSZ SerialVal(PGLOBAL g, PBVAL top, int pretty); diff --git a/storage/connect/tabrest.cpp b/storage/connect/tabrest.cpp index ec8cd70cac3..1efda6e3bca 100644 --- a/storage/connect/tabrest.cpp +++ b/storage/connect/tabrest.cpp @@ -13,6 +13,8 @@ /***********************************************************************/ #if defined(MARIADB) #include // All MariaDB stuff +#include +#include #else // !MARIADB OEM module #include "mini-global.h" #define _MAX_PATH 260 @@ -45,6 +47,12 @@ #include "tabfmt.h" #include "tabrest.h" +#if defined(connect_EXPORTS) +#define PUSH_WARNING(M) push_warning(current_thd, Sql_condition::WARN_LEVEL_WARN, 0, M) +#else +#define PUSH_WARNING(M) htrc(M) +#endif + #if defined(__WIN__) || defined(_WINDOWS) #define popen _popen #define pclose _pclose @@ -223,6 +231,8 @@ PQRYRES __stdcall ColREST(PGLOBAL g, PTOS tp, char *tab, char *db, bool info) fn = filename; tp->filename = PlugDup(g, fn); + sprintf(g->Message, "No file name. 
Table will use %s", fn); + PUSH_WARNING(g->Message); } // endif fn // We used the file name relative to recorded datapath From a35424829a4534ad63a80f30a73adb0ce74f742e Mon Sep 17 00:00:00 2001 From: Olivier Bertrand Date: Sat, 26 Dec 2020 19:44:38 +0100 Subject: [PATCH 043/150] - Continue BSON implementation + fix create modified ha_connect.cc --- storage/connect/bson.cpp | 174 ++++++++++++++++++---------------- storage/connect/bson.h | 28 +++--- storage/connect/bsonudf.cpp | 8 +- storage/connect/ha_connect.cc | 8 +- storage/connect/tabbson.cpp | 39 ++++---- 5 files changed, 132 insertions(+), 125 deletions(-) diff --git a/storage/connect/bson.cpp b/storage/connect/bson.cpp index df95bd4c9c8..cdb619d07ca 100644 --- a/storage/connect/bson.cpp +++ b/storage/connect/bson.cpp @@ -59,38 +59,6 @@ void trans_func(unsigned int u, _EXCEPTION_POINTERS* pExp) { char* GetExceptionDesc(PGLOBAL g, unsigned int e); #endif // SE_CATCH -#if 0 -char* GetJsonNull(void); - -/***********************************************************************/ -/* IsNum: check whether this string is all digits. */ -/***********************************************************************/ -bool IsNum(PSZ s) { - for (char* p = s; *p; p++) - if (*p == ']') - break; - else if (!isdigit(*p) || *p == '-') - return false; - - return true; -} // end of IsNum - -/***********************************************************************/ -/* NextChr: return the first found '[' or Sep pointer. */ -/***********************************************************************/ -char* NextChr(PSZ s, char sep) { - char* p1 = strchr(s, '['); - char* p2 = strchr(s, sep); - - if (!p2) - return p1; - else if (p1) - return MY_MIN(p1, p2); - - return p2; -} // end of NextChr -#endif // 0 - /* --------------------------- Class BDOC ---------------------------- */ /***********************************************************************/ @@ -180,7 +148,7 @@ PBVAL BDOC::ParseJson(PGLOBAL g, char* js, size_t lng) if (bvp->Type != TYPE_UNKNOWN) { bvp->To_Val = ParseAsArray(i); bvp->Type = TYPE_JAR; - } else if ((bvp->To_Val = MOF(ParseValue(i)))) + } else if ((bvp->To_Val = MOF(ParseValue(i, NewVal())))) bvp->Type = TYPE_JVAL; else throw 4; @@ -269,11 +237,11 @@ OFFSET BDOC::ParseArray(int& i) sprintf(G->Message, "Unexpected value near %.*s", ARGS); throw 1; } else if (lastvlp) { - vlp = ParseValue(i); + vlp = ParseValue(i, NewVal()); lastvlp->Next = MOF(vlp); lastvlp = vlp; } else - firstvlp = lastvlp = ParseValue(i); + firstvlp = lastvlp = ParseValue(i, NewVal()); level = (b) ? 1 : 2; break; @@ -303,10 +271,10 @@ OFFSET BDOC::ParseObject(int& i) case '"': if (level < 2) { key = ParseString(++i); - bpp = SubAllocPair(key); + bpp = NewPair(key); if (lastbpp) { - lastbpp->Next = MOF(bpp); + lastbpp->Vlp.Next = MOF(bpp); lastbpp = bpp; } else firstbpp = lastbpp = bpp; @@ -320,7 +288,7 @@ OFFSET BDOC::ParseObject(int& i) break; case ':': if (level == 2) { - lastbpp->Vlp = MOF(ParseValue(++i)); + ParseValue(++i, GetVlp(lastbpp)); level = 3; } else { sprintf(G->Message, "Unexpected ':' near %.*s", ARGS); @@ -362,10 +330,8 @@ OFFSET BDOC::ParseObject(int& i) /***********************************************************************/ /* Parse a JSON Value. 
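In the ParseJson hunks above (this patch and the preceding one), the pretty level is no longer returned through out parameters: the parser starts with all three pty[] candidates true, clears candidates when contradicting layout is met (for instance a top-level comma rules out styles 0 and 2), and keeps the first survivor, leaving 3 when nothing was recognized. A standalone sketch of that elimination pattern follows, with a deliberately simplified comma test that is not the real parser rule.

#include <cstdio>

int main() {
  const char *js = "{\"a\":1},\n{\"a\":2}";  // hypothetical two-record input
  bool pty[3] = {true, true, true};          // styles still possible
  int pretty = 3;                            // 3 means "not recognized"

  for (const char *p = js; *p; p++)
    if (*p == ',')                           // simplified: any comma counts
      pty[0] = pty[2] = false;               // rules out styles 0 and 2

  for (int i = 0; i < 3; i++)
    if (pty[i]) { pretty = i; break; }       // first surviving candidate wins

  std::printf("detected pretty = %d\n", pretty);
  return 0;
}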
*/ /***********************************************************************/ -PBVAL BDOC::ParseValue(int& i) +PBVAL BDOC::ParseValue(int& i, PBVAL bvp) { - PBVAL bvp = NewVal(); - for (; i < len; i++) switch (s[i]) { case '\n': @@ -750,7 +716,7 @@ bool BDOC::SerializeObject(OFFSET obp) if (jp->WriteChr('{')) return true; - for (prp; prp; prp = MPP(prp->Next)) { + for (prp; prp; prp = GetNext(prp)) { if (first) first = false; else if (jp->WriteChr(',')) @@ -760,7 +726,7 @@ bool BDOC::SerializeObject(OFFSET obp) jp->WriteStr(MZP(prp->Key)) || jp->WriteChr('"') || jp->WriteChr(':') || - SerializeValue(MVP(prp->Vlp))) + SerializeValue(GetVlp(prp))) return true; } // endfor i @@ -883,15 +849,35 @@ void BJSON::MemSet(size_t size) /* ------------------------ Bobject functions ------------------------ */ /***********************************************************************/ +/* Set a pair vlp to some PVAL values. */ +/***********************************************************************/ +void BJSON::SetPairValue(PBPR brp, PBVAL bvp) +{ + if (bvp) { + brp->Vlp.To_Val = bvp->To_Val; + brp->Vlp.Nd = bvp->Nd; + brp->Vlp.Type = bvp->Type; + } else { + brp->Vlp.To_Val = 0; + brp->Vlp.Nd = 0; + brp->Vlp.Type = TYPE_NULL; + } // endif bvp + +} // end of SetPairValue + + /***********************************************************************/ /* Sub-allocate and initialize a BPAIR. */ /***********************************************************************/ -PBPR BJSON::SubAllocPair(OFFSET key, OFFSET val) +PBPR BJSON::NewPair(OFFSET key, int type) { PBPR bpp = (PBPR)BsonSubAlloc(sizeof(BPAIR)); bpp->Key = key; - bpp->Vlp = val; - bpp->Next = 0; + bpp->Vlp.Ktp = TYPE_STRG; + bpp->Vlp.Type = type; + bpp->Vlp.To_Val = 0; + bpp->Vlp.Nd = 0; + bpp->Vlp.Next = 0; return bpp; } // end of SubAllocPair @@ -905,7 +891,7 @@ int BJSON::GetObjectSize(PBVAL bop, bool b) for (PBPR brp = GetObject(bop); brp; brp = GetNext(brp)) // If b return only non null pairs - if (!b || (brp->Vlp && GetVal(brp)->Type != TYPE_NULL)) + if (!b || (brp->Vlp.To_Val && brp->Vlp.Type != TYPE_NULL)) n++; return n; @@ -914,20 +900,21 @@ int BJSON::GetObjectSize(PBVAL bop, bool b) /***********************************************************************/ /* Add a new pair to an Object and return it. 
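   With the reworked BPAIR the value is embedded in the pair (member Vlp)
   and the pair chain is linked through Vlp.Next, so only the key and the
   value type are needed here, and a pointer to the embedded value is
   returned for the caller to fill. A minimal caller-side sketch, assuming
   a BJSON/BJNX instance bnx and hypothetical key and value literals:

     PBVAL objp = bnx.NewVal(TYPE_JOB);                       // empty object
     PBVAL vlp  = bnx.AddPair(objp, (PSZ)"name", TYPE_STRG);  // new pair, value slot returned
     bnx.SetString(vlp, bnx.NewStr((PSZ)"foo"), 0);           // fill the slot in the work area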
*/ /***********************************************************************/ -void BJSON::AddPair(PBVAL bop, PSZ key, OFFSET val) +PBVAL BJSON::AddPair(PBVAL bop, PSZ key, int type) { CheckType(bop, TYPE_JOB); PBPR brp; - OFFSET nrp = NewPair(key, val); + OFFSET nrp = NewPair(key, type); if (bop->To_Val) { - for (brp = GetObject(bop); brp->Next; brp = GetNext(brp)); + for (brp = GetObject(bop); brp->Vlp.Next; brp = GetNext(brp)); - brp->Next = nrp; + brp->Vlp.Next = nrp; } else bop->To_Val = nrp; bop->Nd++; + return GetVlp(MPP(nrp)); } // end of AddPair /***********************************************************************/ @@ -953,7 +940,7 @@ PBVAL BJSON::GetObjectValList(PBVAL bop) PBVAL arp = NewVal(TYPE_JAR); for (PBPR brp = GetObject(bop); brp; brp = GetNext(brp)) - AddArrayValue(arp, brp->Vlp); + AddArrayValue(arp, GetVlp(brp)); return arp; } // end of GetObjectValList @@ -967,7 +954,7 @@ PBVAL BJSON::GetKeyValue(PBVAL bop, PSZ key) for (PBPR brp = GetObject(bop); brp; brp = GetNext(brp)) if (!strcmp(GetKey(brp), key)) - return GetVal(brp); + return GetVlp(brp); return NULL; } // end of GetKeyValue; @@ -993,11 +980,11 @@ PSZ BJSON::GetObjectText(PGLOBAL g, PBVAL bop, PSTRG text) b = false; } // endif text - if (b && !brp->Next && !strcmp(MZP(brp->Key), "$date")) { + if (b && !brp->Vlp.Next && !strcmp(MZP(brp->Key), "$date")) { int i; PSZ s; - GetValueText(g, MVP(brp->Vlp), text); + GetValueText(g, GetVlp(brp), text); s = text->GetStr(); i = (s[1] == '-' ? 2 : 1); @@ -1013,10 +1000,10 @@ PSZ BJSON::GetObjectText(PGLOBAL g, PBVAL bop, PSTRG text) } // endif text - } else for (PBPR brp = GetObject(bop); brp; brp = GetNext(brp)) { - GetValueText(g, GetVal(brp), text); + } else for (; brp; brp = GetNext(brp)) { + GetValueText(g, GetVlp(brp), text); - if (brp->Next) + if (brp->Vlp.Next) text->Append(' '); } // endfor brp @@ -1041,18 +1028,18 @@ void BJSON::SetKeyValue(PBVAL bop, OFFSET bvp, PSZ key) if (bop->To_Val) { for (brp = GetObject(bop); brp; brp = GetNext(brp)) - if (!strcmp(GetKey(brp), key)) { - brp->Vlp = bvp; - return; - } else + if (!strcmp(GetKey(brp), key)) + break; + else prp = brp; if (!brp) - prp->Next = NewPair(key, bvp); + brp = MPP(prp->Vlp.Next = NewPair(key)); } else - bop->To_Val = NewPair(key, bvp); + brp = MPP(bop->To_Val = NewPair(key)); + SetPairValue(brp, MVP(bvp)); bop->Nd++; } // end of SetKeyValue @@ -1066,7 +1053,7 @@ PBVAL BJSON::MergeObject(PBVAL bop1, PBVAL bop2) if (bop1->To_Val) for (PBPR brp = GetObject(bop2); brp; brp = GetNext(brp)) - SetKeyValue(bop1, brp->Vlp, GetKey(brp)); + SetKeyValue(bop1, GetVlp(brp), GetKey(brp)); else { bop1->To_Val = bop2->To_Val; @@ -1087,9 +1074,9 @@ void BJSON::DeleteKey(PBVAL bop, PCSZ key) for (brp = GetObject(bop); brp; brp = GetNext(brp)) if (!strcmp(MZP(brp->Key), key)) { if (pbrp) { - pbrp->Next = brp->Next; + pbrp->Vlp.Next = brp->Vlp.Next; } else - bop->To_Val = brp->Next; + bop->To_Val = brp->Vlp.Next; bop->Nd--; break; @@ -1106,7 +1093,7 @@ bool BJSON::IsObjectNull(PBVAL bop) CheckType(bop, TYPE_JOB); for (PBPR brp = GetObject(bop); brp; brp = GetNext(brp)) - if (brp->Vlp && (MVP(brp->Vlp))->Type != TYPE_NULL) + if (brp->Vlp.To_Val && brp->Vlp.Type != TYPE_NULL) return false; return true; @@ -1368,6 +1355,25 @@ int BJSON::GetSize(PBVAL vlp, bool b) } // end of GetSize +PBVAL BJSON::GetBson(PBVAL bvp) +{ + PBVAL bp = NULL; + + switch (bvp->Type) { + case TYPE_JAR: + bp = MVP(bvp->To_Val); + break; + case TYPE_JOB: + bp = GetVlp(MPP(bvp->To_Val)); + break; + default: + bp = bvp; + break; + } // endswitch Type 
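  // Here bp is the first value of an array, the value embedded in the
  // first pair of an object, or, for scalar types, the value itself.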
+ + return bp; +} // end of GetBson + /***********************************************************************/ /* Return the Value's as a Value struct. */ /***********************************************************************/ @@ -1378,22 +1384,22 @@ PVAL BJSON::GetValue(PGLOBAL g, PBVAL vp) PBVAL vlp = vp->Type == TYPE_JVAL ? MVP(vp->To_Val) : vp; switch (vlp->Type) { - case TYPE_STRG: - case TYPE_DBL: - case TYPE_BINT: - valp = AllocateValue(g, MP(vlp->To_Val), vlp->Type, vlp->Nd); - break; - case TYPE_INTG: - case TYPE_BOOL: - valp = AllocateValue(g, vlp, vlp->Type); - break; - case TYPE_FLOAT: - d = (double)vlp->F; - valp = AllocateValue(g, &d, TYPE_DOUBLE, vlp->Nd); - break; - default: - valp = NULL; - break; + case TYPE_STRG: + case TYPE_DBL: + case TYPE_BINT: + valp = AllocateValue(g, MP(vlp->To_Val), vlp->Type, vlp->Nd); + break; + case TYPE_INTG: + case TYPE_BOOL: + valp = AllocateValue(g, vlp, vlp->Type); + break; + case TYPE_FLOAT: + d = (double)vlp->F; + valp = AllocateValue(g, &d, TYPE_DOUBLE, vlp->Nd); + break; + default: + valp = NULL; + break; } // endswitch Type return valp; diff --git a/storage/connect/bson.h b/storage/connect/bson.h index ca776dd1950..a09f9c3ef89 100644 --- a/storage/connect/bson.h +++ b/storage/connect/bson.h @@ -37,7 +37,8 @@ typedef struct _jvalue { bool B; // A boolean value True or false (0) }; short Nd; // Number of decimals - short Type; // The value type + char Type; // The value type + char Ktp; // The key type OFFSET Next; // Offset to the next value in array } BVAL, *PBVAL; // end of struct BVALUE @@ -46,13 +47,12 @@ typedef struct _jvalue { /***********************************************************************/ typedef struct _jpair { OFFSET Key; // Offset to this pair key name - OFFSET Vlp; // To the value of the pair - OFFSET Next; // Offset to the next pair in object + BVAL Vlp; // The value of the pair } BPAIR, *PBPR; // end of struct BPAIR char* NextChr(PSZ s, char sep); char* GetJsonNull(void); -const char* GetFmt(int type, bool un); +const char* GetFmt(int type, bool un); DllExport bool IsNum(PSZ s); @@ -81,9 +81,9 @@ public: // SubAlloc functions void* BsonSubAlloc(size_t size); - PBPR SubAllocPair(OFFSET key, OFFSET val = 0); - OFFSET NewPair(PSZ key, OFFSET val = 0) - {return MOF(SubAllocPair(DupStr(key), val));} + PBPR NewPair(OFFSET key, int type = TYPE_NULL); + OFFSET NewPair(PSZ key, int type = TYPE_NULL) + {return MOF(NewPair(DupStr(key), type));} PBVAL NewVal(int type = TYPE_NULL); PBVAL NewVal(PVAL valp); PBVAL SubAllocVal(OFFSET toval, int type = TYPE_NULL, short nd = 0); @@ -110,13 +110,15 @@ public: // Object functions inline PBPR GetObject(PBVAL bop) {return MPP(bop->To_Val);} - inline PBPR GetNext(PBPR brp) { return MPP(brp->Next); } + inline PBPR GetNext(PBPR brp) { return MPP(brp->Vlp.Next); } + void SetPairValue(PBPR brp, PBVAL bvp); int GetObjectSize(PBVAL bop, bool b = false); PSZ GetObjectText(PGLOBAL g, PBVAL bop, PSTRG text); PBVAL MergeObject(PBVAL bop1, PBVAL bop2); - void AddPair(PBVAL bop, PSZ key, OFFSET val = 0); - PSZ GetKey(PBPR prp) {return MZP(prp->Key);} - PBVAL GetVal(PBPR prp) {return MVP(prp->Vlp);} + PBVAL AddPair(PBVAL bop, PSZ key, int type = TYPE_NULL); + PSZ GetKey(PBPR prp) {return prp ? MZP(prp->Key) : NULL;} + PBVAL GetTo_Val(PBPR prp) {return prp ? MVP(prp->Vlp.To_Val) : NULL;} + PBVAL GetVlp(PBPR prp) {return prp ? 
(PBVAL)&prp->Vlp : NULL;} PBVAL GetKeyValue(PBVAL bop, PSZ key); PBVAL GetKeyList(PBVAL bop); PBVAL GetObjectValList(PBVAL bop); @@ -131,7 +133,7 @@ public: PBVAL GetNext(PBVAL vlp) {return MVP(vlp->Next);} //PJSON GetJsp(void) { return (DataType == TYPE_JSON ? Jsp : NULL); } PSZ GetValueText(PGLOBAL g, PBVAL vlp, PSTRG text); - inline PBVAL GetBson(PBVAL bvp) { return IsJson(bvp) ? MVP(bvp->To_Val) : bvp; } + PBVAL GetBson(PBVAL bvp); PSZ GetString(PBVAL vp, char* buff = NULL); int GetInteger(PBVAL vp); long long GetBigint(PBVAL vp); @@ -178,7 +180,7 @@ public: protected: OFFSET ParseArray(int& i); OFFSET ParseObject(int& i); - PBVAL ParseValue(int& i); + PBVAL ParseValue(int& i, PBVAL bvp); OFFSET ParseString(int& i); void ParseNumeric(int& i, PBVAL bvp); OFFSET ParseAsArray(int& i); diff --git a/storage/connect/bsonudf.cpp b/storage/connect/bsonudf.cpp index 76ecce5133b..d43444a9bd0 100644 --- a/storage/connect/bsonudf.cpp +++ b/storage/connect/bsonudf.cpp @@ -856,7 +856,7 @@ my_bool BJNX::LocateObject(PGLOBAL g, PBVAL jobp) if (Jp->WriteStr(MZP(pair->Key))) return true; - if (LocateValue(g, MVP(pair->Vlp))) + if (LocateValue(g, GetVlp(pair))) return true; } // endfor i @@ -976,7 +976,7 @@ my_bool BJNX::LocateObjectAll(PGLOBAL g, PBVAL jobp) for (PBPR pair = GetObject(jobp); pair; pair = GetNext(pair)) { Jpnp[I].Key = MZP(pair->Key); - if (LocateValueAll(g, MVP(pair->Vlp))) + if (LocateValueAll(g, GetVlp(pair))) return true; } // endfor i @@ -1020,8 +1020,8 @@ my_bool BJNX::CompareTree(PGLOBAL g, PBVAL jp1, PBVAL jp2) PBPR p1 = GetObject(jp1), p2 = GetObject(jp2); // Keys can be differently ordered - for (; found && p1 && p2; p1 = MPP(p1->Next)) - found = CompareValues(g, MVP(p1->Vlp), GetKeyValue(jp2, MZP(p1->Key))); + for (; found && p1 && p2; p1 = GetNext(p1)) + found = CompareValues(g, GetVlp(p1), GetKeyValue(jp2, GetKey(p1))); } else if (jp1->Type == TYPE_JVAL) { found = CompareTree(g, MVP(jp1->To_Val), (MVP(jp2->To_Val))); diff --git a/storage/connect/ha_connect.cc b/storage/connect/ha_connect.cc index d6cbcbc077f..9b40b5c9a13 100644 --- a/storage/connect/ha_connect.cc +++ b/storage/connect/ha_connect.cc @@ -170,7 +170,7 @@ #define JSONMAX 10 // JSON Default max grp size extern "C" { - char version[]= "Version 1.07.0002 December 19, 2020"; + char version[]= "Version 1.07.0002 December 25, 2020"; #if defined(__WIN__) char compver[]= "Version 1.07.0002 " __DATE__ " " __TIME__; char slash= '\\'; @@ -6440,9 +6440,9 @@ int ha_connect::create(const char *name, TABLE *table_arg, // Check table type if (type == TAB_UNDEF) { options->type= (options->srcdef) ? "MYSQL" : -#if defined(BSON_SUPPORT) +#if defined(REST_SUPPORT) (options->http) ? "JSON" : -#endif // BSON_SUPPORT +#endif // REST_SUPPORT (options->tabname) ? "PROXY" : "DOS"; type= GetTypeID(options->type); sprintf(g->Message, "No table_type. 
Will be set to %s", options->type); @@ -6460,7 +6460,7 @@ int ha_connect::create(const char *name, TABLE *table_arg, DBUG_RETURN(HA_ERR_INTERNAL_ERROR); inward= IsFileType(type) && !options->filename && - (type != TAB_JSON || !cnc.length); + ((type != TAB_JSON && type != TAB_BSON) || !cnc.length); if (options->data_charset) { const CHARSET_INFO *data_charset; diff --git a/storage/connect/tabbson.cpp b/storage/connect/tabbson.cpp index 7635a6afe70..69dd5749122 100644 --- a/storage/connect/tabbson.cpp +++ b/storage/connect/tabbson.cpp @@ -342,7 +342,7 @@ int BSONDISC::GetColumns(PGLOBAL g, PCSZ db, PCSZ dsn, PTOS topt) strncpy(colname, bp->GetKey(jpp), 64); fmt[bf] = 0; - if (Find(g, bp->GetVal(jpp), colname, MY_MIN(lvl, 0))) + if (Find(g, bp->GetVlp(jpp), colname, MY_MIN(lvl, 0))) goto err; } // endfor jpp @@ -444,7 +444,7 @@ bool BSONDISC::Find(PGLOBAL g, PBVAL jvp, PCSZ key, int j) strncat(strncat(colname, "_", n), k, n - 1); } // endif Key - if (Find(g, bp->GetVal(jrp), k, j + 1)) + if (Find(g, bp->GetVlp(jrp), k, j + 1)) return true; *p = *pc = 0; @@ -691,7 +691,7 @@ PBVAL BTUTIL::MakeTopTree(PGLOBAL g, int type) } // endif Val Tp->Row = val; - Tp->Row->Type = type; + if (Tp->Row) Tp->Row->Type = type; } else top = Tp->Row = NewVal(type); @@ -1052,18 +1052,20 @@ PBVAL BCUTIL::GetRow(PGLOBAL g) } else { // Construct missing objects for (i++; row && i < nod; i++) { + int type; + if (nodes[i].Op == OP_XX) break; else if (!nodes[i].Key) // Construct intermediate array - nwr = NewVal(TYPE_JAR); + type = TYPE_JAR; else - nwr = NewVal(TYPE_JOB); + type = TYPE_JOB; if (row->Type == TYPE_JOB) { - SetKeyValue(row, MOF(nwr), nodes[i - 1].Key); + nwr = AddPair(row, nodes[i - 1].Key, type); } else if (row->Type == TYPE_JAR) { - AddArrayValue(row, nwr); + AddArrayValue(row, (nwr = NewVal(type))); } else { strcpy(g->Message, "Wrong type when writing new row"); nwr = NULL; @@ -2258,19 +2260,16 @@ int TDBBSON::MakeDocument(PGLOBAL g) Docp = jsp; else { // The table is void or is just one object or one value - Docp = Bp->NewVal(TYPE_JAR); - - if (val) - Bp->AddArrayValue(Docp, val); - else if (jsp) - Bp->AddArrayValue(Docp, Bp->DupVal(jsp)); - - if (objp) - Bp->SetKeyValue(objp, Bp->DupVal(Docp), key); - else if (arp) - Bp->SetArrayValue(arp, Bp->DupVal(Docp), i); - else - Top = Docp; + if (objp) { + Docp = Bp->GetKeyValue(objp, key); + Docp->To_Val = Bp->MOF(Bp->DupVal(Docp)); + Docp->Type = TYPE_JAR; + } else if (arp) { + Docp = Bp->NewVal(TYPE_JAR); + Bp->AddArrayValue(Docp, jsp); + Bp->SetArrayValue(arp, Docp, i); + } else + Top = Docp = Bp->NewVal(TYPE_JAR); } // endif jsp From cba46c9912b2bcd062ecc6b53082ba5eb5109e41 Mon Sep 17 00:00:00 2001 From: Olivier Bertrand Date: Thu, 31 Dec 2020 15:43:52 +0100 Subject: [PATCH 044/150] - Fix jfile_convert crash on error. modified: jsonudf.cpp (plus BSON UDF's) --- storage/connect/bson.cpp | 38 +- storage/connect/bson.h | 6 +- storage/connect/bsonudf.cpp | 2753 ++++++++++++++++++++++++++++++++-- storage/connect/bsonudf.h | 189 ++- storage/connect/filamtxt.cpp | 2 +- storage/connect/jsonudf.cpp | 25 +- 6 files changed, 2861 insertions(+), 152 deletions(-) diff --git a/storage/connect/bson.cpp b/storage/connect/bson.cpp index cdb619d07ca..2588657089f 100644 --- a/storage/connect/bson.cpp +++ b/storage/connect/bson.cpp @@ -808,17 +808,17 @@ void* BJSON::BsonSubAlloc(size_t size) /*********************************************************************************/ /* Program for SubSet re-initialization of the memory pool. 
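   In fact this routine sub-allocates a zero-terminated copy of the passed
   string in the BSON work area and returns that copy (a NULL argument is
   passed through); DupStr in bson.h now just wraps it to return the offset.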
*/ /*********************************************************************************/ -OFFSET BJSON::DupStr(PSZ str) +PSZ BJSON::NewStr(PSZ str) { if (str) { PSZ sm = (PSZ)BsonSubAlloc(strlen(str) + 1); strcpy(sm, str); - return MOF(sm); + return sm; } else return NULL; -} // end of DupStr +} // end of NewStr /*********************************************************************************/ /* Program for SubSet re-initialization of the memory pool. */ @@ -940,7 +940,7 @@ PBVAL BJSON::GetObjectValList(PBVAL bop) PBVAL arp = NewVal(TYPE_JAR); for (PBPR brp = GetObject(bop); brp; brp = GetNext(brp)) - AddArrayValue(arp, GetVlp(brp)); + AddArrayValue(arp, DupVal(GetVlp(brp))); return arp; } // end of GetObjectValList @@ -1135,24 +1135,28 @@ PBVAL BJSON::GetArrayValue(PBVAL bap, int n) /***********************************************************************/ /* Add a Value to the Array Value list. */ /***********************************************************************/ -void BJSON::AddArrayValue(PBVAL bap, OFFSET nvp, int* x) +void BJSON::AddArrayValue(PBVAL bap, OFFSET nbv, int* x) { CheckType(bap, TYPE_JAR); - if (!nvp) - nvp = MOF(NewVal()); + int i = 0; + PBVAL bvp, lbp = NULL; - if (bap->To_Val) { - int i = 0, n = (x) ? *x : INT_MAX32; + if (!nbv) + nbv = MOF(NewVal()); - for (PBVAL bvp = GetArray(bap); bvp; bvp = GetNext(bvp), i++) - if (!bvp->Next || (x && i == n)) { - MVP(nvp)->Next = bvp->Next; - bvp->Next = nvp; - break; - } // endif Next + for (bvp = GetArray(bap); bvp; bvp = GetNext(bvp), i++) + if (x && i == *x) + break; + else + lbp = bvp; - } else - bap->To_Val = nvp; + if (lbp) { + MVP(nbv)->Next = lbp->Next; + lbp->Next = nbv; + } else { + MVP(nbv)->Next = bap->To_Val; + bap->To_Val = nbv; + } // endif lbp bap->Nd++; } // end of AddArrayValue diff --git a/storage/connect/bson.h b/storage/connect/bson.h index a09f9c3ef89..dd299c7c53e 100644 --- a/storage/connect/bson.h +++ b/storage/connect/bson.h @@ -93,7 +93,8 @@ public: PBVAL SubAllocStr(PSZ str, short nd = 0) {return SubAllocStr(DupStr(str), nd);} PBVAL DupVal(PBVAL bvp); - OFFSET DupStr(PSZ str); + OFFSET DupStr(PSZ str) { return MOF(NewStr(str)); } + PSZ NewStr(PSZ str); // Array functions inline PBVAL GetArray(PBVAL vlp) {return MVP(vlp->To_Val);} @@ -150,7 +151,8 @@ public: void SetBool(PBVAL vlp, bool b); void Clear(PBVAL vlp) { vlp->N = 0; vlp->Nd = 0; vlp->Next = 0; } bool IsValueNull(PBVAL vlp); - bool IsJson(PBVAL vlp) {return (vlp->Type == TYPE_JAR || vlp->Type == TYPE_JOB);} + bool IsJson(PBVAL vlp) + {return vlp ? vlp->Type == TYPE_JAR || vlp->Type == TYPE_JOB : false;} // Members PGLOBAL G; diff --git a/storage/connect/bsonudf.cpp b/storage/connect/bsonudf.cpp index d43444a9bd0..9c80b881e52 100644 --- a/storage/connect/bsonudf.cpp +++ b/storage/connect/bsonudf.cpp @@ -27,6 +27,8 @@ #endif #define M 6 +int IsArgJson(UDF_ARGS* args, uint i); + /* --------------------------------- JSON UDF ---------------------------------- */ /*********************************************************************************/ @@ -68,6 +70,35 @@ static PBJNX BjnxNew(PGLOBAL g, PBVAL vlp, int type, int len) /* ----------------------------------- BSNX ------------------------------------ */ +/*********************************************************************************/ +/* BSNX public constructor. 
*/ +/*********************************************************************************/ +BJNX::BJNX(PGLOBAL g) : BDOC(g) +{ + Row = NULL; + Bvalp = NULL; + Jpnp = NULL; + Jp = NULL; + Nodes = NULL; + Value = NULL; + MulVal = NULL; + Jpath = NULL; + Buf_Type = TYPE_NULL; + Long = len; + Prec = 0; + Nod = 0; + Xnod = -1; + K = 0; + I = -1; + Imax = 9; + B = 0; + Xpd = false; + Parsed = false; + Found = false; + Wr = false; + Jb = false; +} // end of BJNX constructor + /*********************************************************************************/ /* BSNX public constructor. */ /*********************************************************************************/ @@ -330,6 +361,51 @@ my_bool BJNX::ParseJpath(PGLOBAL g) return false; } // end of ParseJpath +/*********************************************************************************/ +/* Make a valid key from the passed argument. */ +/*********************************************************************************/ +PSZ BJNX::MakeKey(UDF_ARGS *args, int i) +{ + if (args->arg_count > (unsigned)i) { + int j = 0, n = args->attribute_lengths[i]; + my_bool b; // true if attribute is zero terminated + PSZ p; + PCSZ s = args->attributes[i]; + + if (s && *s && (n || *s == '\'')) { + if ((b = (!n || !s[n]))) + n = strlen(s); + + if (IsArgJson(args, i)) + j = (int)(strchr(s, '_') - s + 1); + + if (j && n > j) { + s += j; + n -= j; + } else if (*s == '\'' && s[n-1] == '\'') { + s++; + n -= 2; + b = false; + } // endif *s + + if (n < 1) + return NewStr("Key"); + + if (!b) { + p = (PSZ)BsonSubAlloc(n + 1); + memcpy(p, s, n); + p[n] = 0; + return p; + } // endif b + + } // endif s + + return NewStr((PSZ)s); + } // endif count + + return NewStr("Key"); +} // end of MakeKey + /*********************************************************************************/ /* MakeJson: Serialize the json item and set value to it. */ /*********************************************************************************/ @@ -367,6 +443,7 @@ void BJNX::SetJsonValue(PGLOBAL g, PVAL vp, PBVAL vlp) vp->SetValue(GetInteger(vlp)); break; case TYPE_DBL: + case TYPE_FLOAT: if (vp->IsTypeNum()) vp->SetValue(GetDouble(vlp)); else // Get the proper number of decimals @@ -509,7 +586,6 @@ PVAL BJNX::ExpandArray(PGLOBAL g, PBVAL arp, int n) /*********************************************************************************/ PVAL BJNX::CalculateArray(PGLOBAL g, PBVAL bap, int n) { -#if 0 int i, ars = GetArraySize(bap), nv = 0; bool err; OPVAL op = Nodes[n].Op; @@ -526,45 +602,45 @@ PVAL BJNX::CalculateArray(PGLOBAL g, PBVAL bap, int n) if (!IsValueNull(bvrp) || (op == OP_CNC && GetJsonNull())) { if (IsValueNull(bvrp)) { - SetString(bvrp, GetJsonNull(), 0); + SetString(bvrp, NewStr(GetJsonNull()), 0); bvp = bvrp; - } else if (n < Nod - 1 && bvrp->GetJson()) { - bval.SetValue(g, GetColumnValue(g, jvrp->GetJson(), n + 1)); + } else if (n < Nod - 1 && IsJson(bvrp)) { + SetValue(&bval, GetColumnValue(g, bvrp, n + 1)); bvp = &bval; } else - jvp = jvrp; + bvp = bvrp; if (trace(1)) - htrc("jvp=%s null=%d\n", - jvp->GetString(g), jvp->IsNull() ? 1 : 0); + htrc("bvp=%s null=%d\n", + GetString(bvp), IsValueNull(bvp) ? 
1 : 0); if (!nv++) { - SetJsonValue(g, vp, jvp); + SetJsonValue(g, vp, bvp); continue; } else - SetJsonValue(g, MulVal, jvp); + SetJsonValue(g, MulVal, bvp); if (!MulVal->IsNull()) { switch (op) { - case OP_CNC: - if (Nodes[n].CncVal) { - val[0] = Nodes[n].CncVal; - err = vp->Compute(g, val, 1, op); - } // endif CncVal + case OP_CNC: + if (Nodes[n].CncVal) { + val[0] = Nodes[n].CncVal; + err = vp->Compute(g, val, 1, op); + } // endif CncVal - val[0] = MulVal; - err = vp->Compute(g, val, 1, op); - break; - // case OP_NUM: - case OP_SEP: - val[0] = Nodes[n].Valp; - val[1] = MulVal; - err = vp->Compute(g, val, 2, OP_ADD); - break; - default: - val[0] = Nodes[n].Valp; - val[1] = MulVal; - err = vp->Compute(g, val, 2, op); + val[0] = MulVal; + err = vp->Compute(g, val, 1, op); + break; + // case OP_NUM: + case OP_SEP: + val[0] = Nodes[n].Valp; + val[1] = MulVal; + err = vp->Compute(g, val, 2, OP_ADD); + break; + default: + val[0] = Nodes[n].Valp; + val[1] = MulVal; + err = vp->Compute(g, val, 2, op); } // endswitch Op if (err) @@ -595,53 +671,8 @@ PVAL BJNX::CalculateArray(PGLOBAL g, PBVAL bap, int n) } // endif Op return vp; -#else - strcpy(g->Message, "Calculate array NIY"); - return NULL; -#endif } // end of CalculateArray -/*********************************************************************************/ -/* CheckPath: Checks whether the path exists in the document. */ -/*********************************************************************************/ -my_bool BJNX::CheckPath(PGLOBAL g) -{ - PBVAL val = NULL; - PBVAL row = Row; - - for (int i = 0; i < Nod && row; i++) { - val = NULL; - - if (Nodes[i].Op == OP_NUM || Nodes[i].Op == OP_XX) { - } else switch (row->Type) { - case TYPE_JOB: - if (Nodes[i].Key) - val = GetKeyValue(row, Nodes[i].Key); - - break; - case TYPE_JAR: - if (!Nodes[i].Key) - if (Nodes[i].Op == OP_EQ || Nodes[i].Op == OP_LE) - val = GetArrayValue(row, Nodes[i].Rank); - - break; - case TYPE_JVAL: - val = MVP(row->To_Val); - break; - default: - sprintf(g->Message, "Invalid row JSON type %d", row->Type); - } // endswitch Type - -// if (i < Nod - 1) -// if (!(row = (val) ? val->GetJsp() : NULL)) -// val = NULL; - - row = val; - } // endfor i - - return (val != NULL); -} // end of CheckPath - /***********************************************************************/ /* GetRow: Set the complete path of the object to be set. */ /***********************************************************************/ @@ -758,6 +789,79 @@ my_bool BJNX::WriteValue(PGLOBAL g, PBVAL jvalp) return false; } // end of WriteValue +/*********************************************************************************/ +/* CheckPath: Checks whether the path exists in the document. */ +/*********************************************************************************/ +my_bool BJNX::CheckPath(PGLOBAL g) +{ + PBVAL val = NULL; + PBVAL row = Row; + + for (int i = 0; i < Nod && row; i++) { + val = NULL; + + if (Nodes[i].Op == OP_NUM || Nodes[i].Op == OP_XX) { + } else switch (row->Type) { + case TYPE_JOB: + if (Nodes[i].Key) + val = GetKeyValue(row, Nodes[i].Key); + + break; + case TYPE_JAR: + if (!Nodes[i].Key) + if (Nodes[i].Op == OP_EQ || Nodes[i].Op == OP_LE) + val = GetArrayValue(row, Nodes[i].Rank); + + break; + case TYPE_JVAL: + val = row; + break; + default: + sprintf(g->Message, "Invalid row JSON type %d", row->Type); + } // endswitch Type + + if (i < Nod-1) + if (!(row = (IsJson(val)) ? 
val : NULL)) + val = NULL; + + } // endfor i + + return (val != NULL); +} // end of CheckPath + +/*********************************************************************************/ +/* Check if a path was specified and set jvp according to it. */ +/*********************************************************************************/ +my_bool BJNX::CheckPath(PGLOBAL g, UDF_ARGS *args, PBVAL jsp, PBVAL& jvp, int n) +{ + for (uint i = n; i < args->arg_count; i++) + if (args->arg_type[i] == STRING_RESULT && args->args[i]) { + // A path to a subset of the json tree is given + char *path = MakePSZ(g, args, i); + + if (path) { + Row = jsp; + + if (SetJpath(g, path)) + return true; + + if (!(jvp = GetJson(g))) { + sprintf(g->Message, "No sub-item at '%s'", path); + return true; + } else + return false; + + } else { + strcpy(g->Message, "Path argument is null"); + return true; + } // endif path + + } // endif type + + jvp = jsp; + return false; +} // end of CheckPath + /*********************************************************************************/ /* Locate a value in a JSON tree: */ /*********************************************************************************/ @@ -1141,24 +1245,97 @@ my_bool BJNX::AddPath(void) return false; } // end of AddPath -/* -----------------------------Utility functions ------------------------------ */ +/*********************************************************************************/ +/* Make a JSON value from the passed argument. */ +/*********************************************************************************/ +PBVAL BJNX::MakeValue(PGLOBAL g, UDF_ARGS *args, uint i, PBVAL *top) +{ + char *sap = (args->arg_count > i) ? args->args[i] : NULL; + int n, len; + int ci; + long long bigint; + PBVAL jvp = NewVal(); + + if (top) + *top = NULL; + + if (sap) switch (args->arg_type[i]) { + case STRING_RESULT: + if ((len = args->lengths[i])) { + if ((n = IsArgJson(args, i)) < 3) + sap = MakePSZ(g, args, i); + + if (n) { + if (n == 3) { +// if (top) +// *top = ((PBSON)sap)->Top; + +// jvp = ((PBSON)sap)->Jsp; + } else { + if (n == 2) { + if (!(sap = GetJsonFile(g, sap))) { + PUSH_WARNING(g->Message); + return NewVal(); + } // endif sap + + len = strlen(sap); + } // endif n + + if (!(jvp = ParseJson(g, sap, strlen(sap)))) + PUSH_WARNING(g->Message); + else if (top) + *top = jvp; + + } // endif's n + + } else { + ci = (strnicmp(args->attributes[i], "ci", 2)) ? 0 : 1; + SetString(jvp, sap, ci); + } // endif n + + } // endif len + + break; + case INT_RESULT: + bigint = *(long long*)sap; + + if ((bigint == 0LL && !strcmp(args->attributes[i], "FALSE")) || + (bigint == 1LL && !strcmp(args->attributes[i], "TRUE"))) + SetBool(jvp, (char)bigint); + else + SetBigint(jvp, bigint); + + break; + case REAL_RESULT: + SetFloat(jvp, *(double*)sap); + break; + case DECIMAL_RESULT: + SetFloat(jvp, atof(MakePSZ(g, args, i))); + break; + case TIME_RESULT: + case ROW_RESULT: + default: + break; + } // endswitch arg_type + + return jvp; +} // end of MakeValue /*********************************************************************************/ /* Make a BVAL value from the passed argument. */ /*********************************************************************************/ -static PBVAL MakeBinValue(PGLOBAL g, UDF_ARGS* args, uint i) +PBVAL BJNX::MakeBinValue(PGLOBAL g, UDF_ARGS* args, uint i) { char* sap = (args->arg_count > i) ? 
args->args[i] : NULL; int n, len; int ci; longlong bigint; - BDOC doc(g); - PBVAL bp, bvp = doc.NewVal(); + PBVAL bp, bvp = NewVal(); if (sap) { if (args->arg_type[i] == STRING_RESULT) { if ((len = args->lengths[i])) { - if ((n = IsJson(args, i)) < 3) + if ((n = IsArgJson(args, i)) < 3) sap = MakePSZ(g, args, i); if (n) { @@ -1171,7 +1348,7 @@ static PBVAL MakeBinValue(PGLOBAL g, UDF_ARGS* args, uint i) len = strlen(sap); } // endif 2 - if (!(bp = doc.ParseJson(g, sap, strlen(sap)))) { + if (!(bp = ParseJson(g, sap, strlen(sap)))) { PUSH_WARNING(g->Message); return NULL; } else @@ -1181,11 +1358,11 @@ static PBVAL MakeBinValue(PGLOBAL g, UDF_ARGS* args, uint i) // Check whether this string is a valid json string JsonMemSave(g); - if (!(bp = doc.ParseJson(g, sap, strlen(sap)))) { + if (!(bp = ParseJson(g, sap, strlen(sap)))) { // Recover suballocated memory JsonSubSet(g); ci = (strnicmp(args->attributes[i], "ci", 2)) ? 0 : 1; - doc.SetString(bvp, sap, ci); + SetString(bvp, sap, ci); } else bvp = bp; @@ -1199,30 +1376,252 @@ static PBVAL MakeBinValue(PGLOBAL g, UDF_ARGS* args, uint i) bigint = *(longlong*)sap; if ((bigint == 0LL && !strcmp(args->attributes[i], "FALSE")) || - (bigint == 1LL && !strcmp(args->attributes[i], "TRUE"))) - doc.SetBool(bvp, (bool)bigint); + (bigint == 1LL && !strcmp(args->attributes[i], "TRUE"))) + SetBool(bvp, (bool)bigint); else - doc.SetBigint(bvp, bigint); + SetBigint(bvp, bigint); break; case REAL_RESULT: - doc.SetFloat(bvp, *(double*)sap); + SetFloat(bvp, *(double*)sap); break; case DECIMAL_RESULT: - doc.SetFloat(bvp, atof(MakePSZ(g, args, i))); + SetFloat(bvp, atof(MakePSZ(g, args, i))); break; case TIME_RESULT: case ROW_RESULT: default: bvp->Type = TYPE_UNKNOWN; break; - } // endswitch arg_type + } // endswitch arg_type } // endif sap return bvp; } // end of MakeBinValue +/*********************************************************************************/ +/* Try making a JSON value of the passed type from the passed argument. */ +/*********************************************************************************/ +PBVAL BJNX::MakeTypedValue(PGLOBAL g, UDF_ARGS *args, uint i, JTYP type, PBVAL *top) +{ + char *sap; + PBVAL jsp; + PBVAL jvp = MakeValue(g, args, i, top); + + //if (type == TYPE_JSON) { + // if (jvp->GetValType() >= TYPE_JSON) + // return jvp; + + //} else if (jvp->GetValType() == type) + // return jvp; + + if (jvp->Type == TYPE_STRG) { + sap = GetString(jvp); + + if ((jsp = ParseJson(g, sap, strlen(sap)))) { + if ((type == TYPE_JSON && jsp->Type != TYPE_JVAL) || jsp->Type == type) { + if (top) + *top = jvp; + + SetValueVal(jvp, jsp); + } // endif Type + + } // endif jsp + + } // endif Type + + return jvp; +} // end of MakeTypedValue + +/*********************************************************************************/ +/* Parse a json file. 
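   The file is mapped with CreateFileMap, parsed with ParseJson, and the
   detected pretty level and byte length are returned through pty and len.
   A minimal caller-side sketch, assuming a BJNX instance bnx and a file
   name fn already in scope:

     int    pty = 2;
     size_t len = 0;
     PBVAL  jsp = bnx.ParseJsonFile(g, fn, pty, len);

     if (!jsp)
       PUSH_WARNING(g->Message);      // mapping or parsing failed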
*/ +/*********************************************************************************/ +PBVAL BJNX::ParseJsonFile(PGLOBAL g, char *fn, int& pty, size_t& len) +{ + char *memory; + HANDLE hFile; + MEMMAP mm; + PBVAL jsp; + + // Create the mapping file object + hFile = CreateFileMap(g, fn, &mm, MODE_READ, false); + + if (hFile == INVALID_HANDLE_VALUE) { + DWORD rc = GetLastError(); + + if (!(*g->Message)) + sprintf(g->Message, MSG(OPEN_MODE_ERROR), "map", (int)rc, fn); + + return NULL; + } // endif hFile + + // Get the file size + len = (size_t)mm.lenL; + + if (mm.lenH) + len += ((size_t)mm.lenH * 0x000000001LL); + + memory = (char *)mm.memory; + + if (!len) { // Empty or deleted file + CloseFileHandle(hFile); + return NULL; + } // endif len + + if (!memory) { + CloseFileHandle(hFile); + sprintf(g->Message, MSG(MAP_VIEW_ERROR), fn, GetLastError()); + return NULL; + } // endif Memory + + CloseFileHandle(hFile); // Not used anymore + + // Parse the json file and allocate its tree structure + g->Message[0] = 0; + jsp = ParseJson(g, memory, len); + pty = pretty; + CloseMemMap(memory, len); + return jsp; +} // end of ParseJsonFile + +/* -----------------------------Utility functions ------------------------------ */ + +/*********************************************************************************/ +/* GetMemPtr: returns the memory pointer used by this argument. */ +/*********************************************************************************/ +static PGLOBAL GetMemPtr(PGLOBAL g, UDF_ARGS *args, uint i) +{ + return (IsArgJson(args, i) == 3) ? ((PBSON)args->args[i])->G : g; +} // end of GetMemPtr + +/*********************************************************************************/ +/* Returns a pointer to the first integer argument found from the nth argument. */ +/*********************************************************************************/ +static int *GetIntArgPtr(PGLOBAL g, UDF_ARGS *args, uint& n) +{ + int *x = NULL; + + for (uint i = n; i < args->arg_count; i++) + if (args->arg_type[i] == INT_RESULT) { + if (args->args[i]) { + if ((x = (int*)PlgDBSubAlloc(g, NULL, sizeof(int)))) + *x = (int)*(longlong*)args->args[i]; + else + PUSH_WARNING(g->Message); + + } // endif args + + n = i + 1; + break; + } // endif arg_type + + return x; +} // end of GetIntArgPtr + +/*********************************************************************************/ +/* Returns not 0 if the argument is a JSON item or file name. 
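   Return codes as used by the callers below: 0 = not a json argument;
   1 = a json item given as a string (a Bson_/Json_ named argument whose
   value, if any, starts like a json document); 2 = probably a file name
   (Bfile_/Jfile_ arguments, or a Bson_/Json_ value that does not look
   like json); 3 = a binary Bbin_ argument of size sizeof(BSON).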
*/ +/*********************************************************************************/ +int IsArgJson(UDF_ARGS *args, uint i) +{ + int n = 0; + + if (i >= args->arg_count || args->arg_type[i] != STRING_RESULT) { + } else if (!strnicmp(args->attributes[i], "Bson_", 5) || + !strnicmp(args->attributes[i], "Json_", 5)) { + if (!args->args[i] || strchr("[{ \t\r\n", *args->args[i])) + n = 1; // arg should be is a json item + else + n = 2; // A file name may have been returned + + } else if (!strnicmp(args->attributes[i], "Bbin_", 5)) { + if (args->lengths[i] == sizeof(BSON)) + n = 3; // arg is a binary json item + else + n = 2; // A file name may have been returned + + } else if (!strnicmp(args->attributes[i], "Bfile_", 6) || + !strnicmp(args->attributes[i], "Jfile_", 6)) { + n = 2; // arg is a json file name +#if 0 + } else if (args->lengths[i]) { + PGLOBAL g = PlugInit(NULL, (size_t)args->lengths[i] * M + 1024); + char *sap = MakePSZ(g, args, i); + + if (ParseJson(g, sap, strlen(sap))) + n = 4; + + JsonFreeMem(g); +#endif // 0 + } // endif's + + return n; +} // end of IsArgJson + +/*********************************************************************************/ +/* Make the result according to the first argument type. */ +/*********************************************************************************/ +static char *MakeResult(PGLOBAL g, UDF_ARGS *args, PBVAL top, uint n = 2) +{ + char *str = NULL; + BDOC doc(g); + + if (IsArgJson(args, 0) == 2) { + // Make the change in the json file + int pretty = 2; + + for (uint i = n; i < args->arg_count; i++) + if (args->arg_type[i] == INT_RESULT) { + pretty = (int)*(longlong*)args->args[i]; + break; + } // endif type + + if (!doc.Serialize(g, top, MakePSZ(g, args, 0), pretty)) + PUSH_WARNING(g->Message); + + str = NULL; + } else if (IsArgJson(args, 0) == 3) { +#if 0 + PBSON bsp = (PBSON)args->args[0]; + + if (bsp->Filename) { + // Make the change in the json file + if (!Serialize(g, top, bsp->Filename, bsp->Pretty)) + PUSH_WARNING(g->Message); + + str = bsp->Filename; + } else if (!(str = Serialize(g, top, NULL, 0))) + PUSH_WARNING(g->Message); + + SetChanged(bsp); +#endif + } else if (!(str = doc.Serialize(g, top, NULL, 0))) + PUSH_WARNING(g->Message); + + return str; +} // end of MakeResult + +/*********************************************************************************/ +/* GetFileLength: returns file size in number of bytes. 
*/ +/*********************************************************************************/ +static long GetFileLength(char *fn) +{ + int h; + long len; + + h= open(fn, _O_RDONLY); + + if (h != -1) { + if ((len = _filelength(h)) < 0) + len = 0; + + close(h); + } else + len = 0; + + return len; +} // end of GetFileLength + /* ------------------------- Now the new Bin UDF's ----------------------------- */ /*********************************************************************************/ @@ -1249,10 +1648,10 @@ char* bsonvalue(UDF_INIT* initid, UDF_ARGS* args, char* result, if (!g->Xchk) { if (!CheckMemory(g, initid, args, 1, false)) { - BDOC doc(g); - PBVAL bvp = MakeBinValue(g, args, 0); + BJNX bnx(g); + PBVAL bvp = bnx.MakeBinValue(g, args, 0); - if (!(str = doc.Serialize(g, bvp, NULL, 0))) + if (!(str = bnx.Serialize(g, bvp, NULL, 0))) str = strcpy(result, g->Message); } else @@ -1290,13 +1689,13 @@ char* bson_make_array(UDF_INIT* initid, UDF_ARGS* args, char* result, if (!g->Xchk) { if (!CheckMemory(g, initid, args, args->arg_count, false)) { - BDOC doc(g); - PBVAL bvp = NULL, arp = doc.NewVal(TYPE_JAR); + BJNX bnx(g); + PBVAL bvp = NULL, arp = bnx.NewVal(TYPE_JAR); for (uint i = 0; i < args->arg_count; i++) - doc.AddArrayValue(arp, MakeBinValue(g, args, i)); + bnx.AddArrayValue(arp, bnx.MakeBinValue(g, args, i)); - if (!(str = doc.Serialize(g, arp, NULL, 0))) + if (!(str = bnx.Serialize(g, arp, NULL, 0))) str = strcpy(result, g->Message); } else @@ -1324,7 +1723,7 @@ my_bool bson_array_add_values_init(UDF_INIT* initid, UDF_ARGS* args, char* messa if (args->arg_count < 2) { strcpy(message, "This function must have at least 2 arguments"); return true; - //} else if (!IsJson(args, 0, true)) { + //} else if (!IsArgJson(args, 0, true)) { // strcpy(message, "First argument must be a valid json string or item"); // return true; } else @@ -1337,7 +1736,7 @@ my_bool bson_array_add_values_init(UDF_INIT* initid, UDF_ARGS* args, char* messa g->N = (initid->const_item) ? 1 : 0; // This is to avoid double execution when using prepared statements - if (IsJson(args, 0) > 1) + if (IsArgJson(args, 0) > 1) initid->const_item = 0; return false; @@ -1354,19 +1753,19 @@ char* bson_array_add_values(UDF_INIT* initid, UDF_ARGS* args, char* result, if (!g->Xchk) { if (!CheckMemory(g, initid, args, args->arg_count, true)) { uint i = 0; - BDOC doc(g); - PBVAL arp, bvp = MakeBinValue(g, args, 0); + BJNX bnx(g); + PBVAL arp, bvp = bnx.MakeBinValue(g, args, 0); if (bvp->Type == TYPE_JAR) { arp = bvp; i = 1; } else // First argument is not an array - arp = doc.NewVal(TYPE_JAR); + arp = bnx.NewVal(TYPE_JAR); for (; i < args->arg_count; i++) - doc.AddArrayValue(arp, MakeBinValue(g, args, i)); + bnx.AddArrayValue(arp, bnx.MakeBinValue(g, args, i)); - str = doc.Serialize(g, arp, NULL, 0); + str = bnx.Serialize(g, arp, NULL, 0); } // endif CheckMemory if (!str) { @@ -1392,6 +1791,875 @@ void bson_array_add_values_deinit(UDF_INIT* initid) { JsonFreeMem((PGLOBAL)initid->ptr); } // end of bson_array_add_values_deinit +/*********************************************************************************/ +/* Add one value to a Json array. 
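   bson_array_add(json_item, value [, int_position] [, string_path]) adds
   one value to the array designated by the optional path, at the given
   position or at the end; when the target is not an array it is first
   wrapped in a new one. If the first argument is a file name, the modified
   document is re-serialized to that file and the file name is returned.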
*/ +/*********************************************************************************/ +my_bool bson_array_add_init(UDF_INIT *initid, UDF_ARGS *args, char *message) +{ + unsigned long reslen, memlen; + + if (args->arg_count < 2) { + strcpy(message, "This function must have at least 2 arguments"); + return true; + //} else if (!IsArgJson(args, 0, true)) { + // strcpy(message, "First argument is not a valid Json item"); + // return true; + } else + CalcLen(args, false, reslen, memlen, true); + + if (!JsonInit(initid, args, message, true, reslen, memlen)) { + PGLOBAL g = (PGLOBAL)initid->ptr; + + // This is a constant function + g->N = (initid->const_item) ? 1 : 0; + + // This is to avoid double execution when using prepared statements + if (IsArgJson(args, 0) > 1) + initid->const_item = 0; + + return false; + } else + return true; + +} // end of bson_array_add_init + +char *bson_array_add(UDF_INIT *initid, UDF_ARGS *args, char *result, + unsigned long *res_length, char *is_null, char *error) +{ + char *str = NULL; + PGLOBAL g = (PGLOBAL)initid->ptr; + + if (g->Xchk) { + // This constant function was recalled + str = (char*)g->Xchk; + goto fin; + } // endif Xchk + + if (!CheckMemory(g, initid, args, 2, false, false, true)) { + int *x; + uint n = 2; + BJNX bnx(g, NULL, TYPE_STRING); + PBVAL jsp, top; + PBVAL arp, jvp = bnx.MakeTypedValue(g, args, 0, TYPE_JAR, &top); + + jsp = jvp; + x = GetIntArgPtr(g, args, n); + + if (bnx.CheckPath(g, args, jsp, jvp, 2)) + PUSH_WARNING(g->Message); + else if (jvp) { + PGLOBAL gb = GetMemPtr(g, args, 0); + + if (jvp->Type != TYPE_JAR) { + if ((arp = bnx.NewVal(TYPE_JAR))) { + bnx.AddArrayValue(arp, jvp); + + if (!top) + top = arp; + + } // endif arp + + } else + arp = jvp; + + if (arp) { + bnx.AddArrayValue(arp, bnx.MakeValue(gb, args, 1), x); + str = MakeResult(g, args, top, n); + } else + PUSH_WARNING(gb->Message); + + } else { + PUSH_WARNING("Target is not an array"); + // if (g->Mrr) *error = 1; (only if no path) + } // endif jvp + + } // endif CheckMemory + + // In case of error or file, return unchanged argument + if (!str) + str = MakePSZ(g, args, 0); + + if (g->N) + // Keep result of constant function + g->Xchk = str; + +fin: + if (!str) { + *res_length = 0; + *is_null = 1; + *error = 1; + } else + *res_length = strlen(str); + + return str; +} // end of bson_array_add + +void bson_array_add_deinit(UDF_INIT* initid) +{ + JsonFreeMem((PGLOBAL)initid->ptr); +} // end of bson_array_add_deinit + +/*********************************************************************************/ +/* Delete a value from a Json array. */ +/*********************************************************************************/ +my_bool bson_array_delete_init(UDF_INIT *initid, UDF_ARGS *args, char *message) +{ + unsigned long reslen, memlen; + + if (args->arg_count < 2) { + strcpy(message, "This function must have at least 2 arguments"); + return true; + } else + CalcLen(args, false, reslen, memlen, true); + + if (!JsonInit(initid, args, message, true, reslen, memlen)) { + PGLOBAL g = (PGLOBAL)initid->ptr; + + // This is a constant function + g->N = (initid->const_item) ? 
1 : 0; + + // This is to avoid double execution when using prepared statements + if (IsJson(args, 0) > 1) + initid->const_item = 0; + + return false; + } else + return true; + +} // end of bson_array_delete_init + +char *bson_array_delete(UDF_INIT *initid, UDF_ARGS *args, char *result, + unsigned long *res_length, char *is_null, char *error) +{ + char *str = NULL; + PGLOBAL g = (PGLOBAL)initid->ptr; + + if (g->Xchk) { + // This constant function was recalled + str = (char*)g->Xchk; + goto fin; + } // endif Xchk + + if (!CheckMemory(g, initid, args, 1, false, false, true)) { + int *x; + uint n = 1; + BJNX bnx(g, NULL, TYPE_STRING); + PBVAL arp, top; + PBVAL jvp = bnx.MakeTypedValue(g, args, 0, TYPE_JSON, &top); + + if (!(x = GetIntArgPtr(g, args, n))) + PUSH_WARNING("Missing or null array index"); + else if (bnx.CheckPath(g, args, jvp, arp, 1)) + PUSH_WARNING(g->Message); + else if (arp && arp->Type == TYPE_JAR) { + bnx.DeleteValue(arp, *x); + str = MakeResult(g, args, top, n); + } else { + PUSH_WARNING("First argument target is not an array"); + // if (g->Mrr) *error = 1; + } // endif jvp + + } // endif CheckMemory + + // In case of error or file, return unchanged argument + if (!str) + str = MakePSZ(g, args, 0); + + if (g->N) + // Keep result of constant function + g->Xchk = str; + +fin: + if (!str) { + *is_null = 1; + *error = 1; + *res_length = 0; + } else + *res_length = strlen(str); + + return str; +} // end of bson_array_delete + +void bson_array_delete_deinit(UDF_INIT* initid) +{ + JsonFreeMem((PGLOBAL)initid->ptr); +} // end of bson_array_delete_deinit + +/*********************************************************************************/ +/* Make a Json Object containing all the parameters. */ +/*********************************************************************************/ +my_bool bson_make_object_init(UDF_INIT *initid, UDF_ARGS *args, char *message) +{ + unsigned long reslen, memlen; + + CalcLen(args, true, reslen, memlen); + return JsonInit(initid, args, message, false, reslen, memlen); +} // end of bson_make_object_init + +char *bson_make_object(UDF_INIT *initid, UDF_ARGS *args, char *result, + unsigned long *res_length, char *, char *) +{ + char *str = NULL; + PGLOBAL g = (PGLOBAL)initid->ptr; + + if (!g->Xchk) { + if (!CheckMemory(g, initid, args, args->arg_count, false, false, true)) { + BJNX bnx(g); + PBVAL objp; + + if ((objp = bnx.NewVal(TYPE_JOB))) { + for (uint i = 0; i < args->arg_count; i++) + bnx.SetKeyValue(objp, bnx.MakeValue(g, args, i), bnx.MakeKey(args, i)); + + str = bnx.Serialize(g, objp, NULL, 0); + } // endif objp + + } // endif CheckMemory + + if (!str) + str = strcpy(result, g->Message); + + // Keep result of constant function + g->Xchk = (initid->const_item) ? str : NULL; + } else + str = (char*)g->Xchk; + + *res_length = strlen(str); + return str; +} // end of bson_make_object + +void bson_make_object_deinit(UDF_INIT* initid) +{ + JsonFreeMem((PGLOBAL)initid->ptr); +} // end of bson_make_object_deinit + +/*********************************************************************************/ +/* Make a Json Object containing all not null parameters. 
*/ +/*********************************************************************************/ +my_bool bson_object_nonull_init(UDF_INIT *initid, UDF_ARGS *args, + char *message) +{ + unsigned long reslen, memlen; + + CalcLen(args, true, reslen, memlen); + return JsonInit(initid, args, message, false, reslen, memlen); +} // end of bson_object_nonull_init + +char *bson_object_nonull(UDF_INIT *initid, UDF_ARGS *args, char *result, + unsigned long *res_length, char *, char *) +{ + char *str = NULL; + PGLOBAL g = (PGLOBAL)initid->ptr; + + if (!g->Xchk) { + if (!CheckMemory(g, initid, args, args->arg_count, false, true)) { + BJNX bnx(g); + PBVAL jvp, objp; + + if ((objp = bnx.NewVal(TYPE_JOB))) { + for (uint i = 0; i < args->arg_count; i++) + if (!bnx.IsValueNull(jvp = bnx.MakeValue(g, args, i))) + bnx.SetKeyValue(objp, jvp, bnx.MakeKey(args, i)); + + str = bnx.Serialize(g, objp, NULL, 0); + } // endif objp + + } // endif CheckMemory + + if (!str) + str = strcpy(result, g->Message); + + // Keep result of constant function + g->Xchk = (initid->const_item) ? str : NULL; + } else + str = (char*)g->Xchk; + + *res_length = strlen(str); + return str; +} // end of bson_object_nonull + +void bson_object_nonull_deinit(UDF_INIT* initid) +{ + JsonFreeMem((PGLOBAL)initid->ptr); +} // end of bson_object_nonull_deinit + +/*********************************************************************************/ +/* Make a Json Object containing all the key/value parameters. */ +/*********************************************************************************/ +my_bool bson_object_key_init(UDF_INIT *initid, UDF_ARGS *args, char *message) +{ + unsigned long reslen, memlen; + + if (args->arg_count % 2) { + strcpy(message, "This function must have an even number of arguments"); + return true; + } // endif arg_count + + CalcLen(args, true, reslen, memlen); + return JsonInit(initid, args, message, false, reslen, memlen); +} // end of bson_object_key_init + +char *bson_object_key(UDF_INIT *initid, UDF_ARGS *args, char *result, + unsigned long *res_length, char *, char *) +{ + char *str = NULL; + PGLOBAL g = (PGLOBAL)initid->ptr; + + if (!g->Xchk) { + if (!CheckMemory(g, initid, args, args->arg_count, false, true)) { + BJNX bnx(g); + PBVAL objp; + + if ((objp = bnx.NewVal(TYPE_JOB))) { + for (uint i = 0; i < args->arg_count; i += 2) + bnx.SetKeyValue(objp, bnx.MakeValue(g, args, i + 1), MakePSZ(g, args, i)); + + str = bnx.Serialize(g, objp, NULL, 0); + } // endif objp + + } // endif CheckMemory + + if (!str) + str = strcpy(result, g->Message); + + // Keep result of constant function + g->Xchk = (initid->const_item) ? str : NULL; + } else + str = (char*)g->Xchk; + + *res_length = strlen(str); + return str; +} // end of bson_object_key + +void bson_object_key_deinit(UDF_INIT* initid) +{ + JsonFreeMem((PGLOBAL)initid->ptr); +} // end of bson_object_key_deinit + +/*********************************************************************************/ +/* Add or replace a value in a Json Object. 
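   bson_object_add(json_item, value [, string_path]) sets the value in the
   object designated by the optional path, under the key derived by MakeKey
   from the second argument's name or alias, replacing the pair if that key
   already exists; the first argument must be a json item (checked at init
   time with IsArgJson).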
*/ +/*********************************************************************************/ +my_bool bson_object_add_init(UDF_INIT *initid, UDF_ARGS *args, char *message) +{ + unsigned long reslen, memlen; + + if (args->arg_count < 2) { + strcpy(message, "This function must have at least 2 arguments"); + return true; + } else if (!IsArgJson(args, 0)) { + strcpy(message, "First argument must be a json item"); + return true; + } else + CalcLen(args, true, reslen, memlen, true); + + if (!JsonInit(initid, args, message, true, reslen, memlen)) { + PGLOBAL g = (PGLOBAL)initid->ptr; + + // This is a constant function + g->N = (initid->const_item) ? 1 : 0; + + // This is to avoid double execution when using prepared statements + if (IsJson(args, 0) > 1) + initid->const_item = 0; + + return false; + } else + return true; + +} // end of bson_object_add_init + +char *bson_object_add(UDF_INIT *initid, UDF_ARGS *args, char *result, + unsigned long *res_length, char *is_null, char *error) +{ + PSZ key; + char *str = NULL; + PGLOBAL g = (PGLOBAL)initid->ptr; + + if (g->Xchk) { + // This constant function was recalled + str = (char*)g->Xchk; + goto fin; + } // endif Xchk + + if (!CheckMemory(g, initid, args, 2, false, true, true)) { + BJNX bnx(g, NULL, TYPE_STRG); + PBVAL jvp, objp; + PBVAL jsp, top; + + jsp = bnx.MakeValue(g, args, 0, &top); + + if (bnx.CheckPath(g, args, jsp, jvp, 2)) + PUSH_WARNING(g->Message); + else if (jvp && jvp->Type == TYPE_JOB) { + objp = jvp; + jvp = bnx.MakeValue(g, args, 1); + key = bnx.MakeKey(args, 1); + bnx.SetKeyValue(objp, jvp, key); + str = MakeResult(g, args, top); + } else { + PUSH_WARNING("First argument target is not an object"); + // if (g->Mrr) *error = 1; (only if no path) + } // endif jvp + + } // endif CheckMemory + + // In case of error or file, return unchanged argument + if (!str) + str = MakePSZ(g, args, 0); + + if (g->N) + // Keep result of constant function + g->Xchk = str; + +fin: + if (!str) { + *is_null = 1; + *error = 1; + *res_length = 0; + } else + *res_length = strlen(str); + + return str; +} // end of bson_object_add + +void bson_object_add_deinit(UDF_INIT* initid) +{ + JsonFreeMem((PGLOBAL)initid->ptr); +} // end of bson_object_add_deinit + +/*********************************************************************************/ +/* Delete a value from a Json object. */ +/*********************************************************************************/ +my_bool bson_object_delete_init(UDF_INIT *initid, UDF_ARGS *args, char *message) +{ + unsigned long reslen, memlen; + + if (args->arg_count < 2) { + strcpy(message, "This function must have 2 or 3 arguments"); + return true; + } else if (!IsArgJson(args, 0)) { + strcpy(message, "First argument must be a json item"); + return true; + } else if (args->arg_type[1] != STRING_RESULT) { + strcpy(message, "Second argument must be a key string"); + return true; + } else + CalcLen(args, true, reslen, memlen, true); + + if (!JsonInit(initid, args, message, true, reslen, memlen)) { + PGLOBAL g = (PGLOBAL)initid->ptr; + + // This is a constant function + g->N = (initid->const_item) ? 
1 : 0; + + // This is to avoid double execution when using prepared statements + if (IsJson(args, 0) > 1) + initid->const_item = 0; + + return false; + } else + return true; + +} // end of bson_object_delete_init + +char *bson_object_delete(UDF_INIT *initid, UDF_ARGS *args, char *result, + unsigned long *res_length, char *is_null, char *error) +{ + char *str = NULL; + PGLOBAL g = (PGLOBAL)initid->ptr; + + if (g->Xchk) { + // This constant function was recalled + str = (char*)g->Xchk; + goto fin; + } // endif Xchk + + if (!CheckMemory(g, initid, args, 1, false, true, true)) { + BJNX bnx(g, NULL, TYPE_STRG); + PSZ key; + PBVAL jsp, objp, top; + PBVAL jvp = bnx.MakeValue(g, args, 0, &top); + + jsp = jvp; + + if (bnx.CheckPath(g, args, jsp, jvp, 2)) + PUSH_WARNING(g->Message); + else if (jvp && jvp->Type == TYPE_JOB) { +// key = MakeKey(GetMemPtr(g, args, 0), args, 1); + key = bnx.MakeKey(args, 1); + objp = jvp; + bnx.DeleteKey(objp, key); + str = MakeResult(g, args, top); + } else { + PUSH_WARNING("First argument target is not an object"); + // if (g->Mrr) *error = 1; (only if no path) + } // endif jvp + + } // endif CheckMemory + + // In case of error or file, return unchanged argument + if (!str) + str = MakePSZ(g, args, 0); + + if (g->N) + // Keep result of constant function + g->Xchk = str; + +fin: + if (!str) { + *is_null = 1; + *error = 1; + *res_length = 0; + } else + *res_length = strlen(str); + + return str; +} // end of bson_object_delete + +void bson_object_delete_deinit(UDF_INIT* initid) +{ + JsonFreeMem((PGLOBAL)initid->ptr); +} // end of bson_object_delete_deinit + +/*********************************************************************************/ +/* Returns an array of the Json object keys. */ +/*********************************************************************************/ +my_bool bson_object_list_init(UDF_INIT *initid, UDF_ARGS *args, char *message) +{ + unsigned long reslen, memlen; + + if (args->arg_count != 1) { + strcpy(message, "This function must have 1 argument"); + return true; + } else if (!IsArgJson(args, 0) && args->arg_type[0] != STRING_RESULT) { + strcpy(message, "Argument must be a json item"); + return true; + } else + CalcLen(args, false, reslen, memlen); + + return JsonInit(initid, args, message, true, reslen, memlen); +} // end of bson_object_list_init + +char *bson_object_list(UDF_INIT *initid, UDF_ARGS *args, char *result, + unsigned long *res_length, char *is_null, char *error) +{ + char *str = NULL; + PGLOBAL g = (PGLOBAL)initid->ptr; + + if (!g->N) { + if (!CheckMemory(g, initid, args, 1, true, true)) { + BJNX bnx(g); + char *p; + PBVAL jsp, jarp; + PBVAL jvp = bnx.MakeValue(g, args, 0); + + if ((p = bnx.GetString(jvp))) { + if (!(jsp = bnx.ParseJson(g, p, strlen(p)))) { + PUSH_WARNING(g->Message); + return NULL; + } // endif jsp + + } else + jsp = jvp; + + if (jsp->Type == TYPE_JOB) { + jarp = bnx.GetKeyList(jsp); + + if (!(str = bnx.Serialize(g, jarp, NULL, 0))) + PUSH_WARNING(g->Message); + + } else { + PUSH_WARNING("First argument is not an object"); + if (g->Mrr) *error = 1; + } // endif jvp + + } // endif CheckMemory + + if (initid->const_item) { + // Keep result of constant function + g->Xchk = str; + g->N = 1; // str can be NULL + } // endif const_item + + } else + str = (char*)g->Xchk; + + if (!str) { + *is_null = 1; + *res_length = 0; + } else + *res_length = strlen(str); + + return str; +} // end of bson_object_list + +void bson_object_list_deinit(UDF_INIT* initid) +{ + JsonFreeMem((PGLOBAL)initid->ptr); +} // end of 
bson_object_list_deinit + +/*********************************************************************************/ +/* Returns an array of the Json object values. */ +/*********************************************************************************/ +my_bool bson_object_values_init(UDF_INIT *initid, UDF_ARGS *args, char *message) +{ + unsigned long reslen, memlen; + + if (args->arg_count != 1) { + strcpy(message, "This function must have 1 argument"); + return true; + } else if (!IsJson(args, 0) && args->arg_type[0] != STRING_RESULT) { + strcpy(message, "Argument must be a json object"); + return true; + } else + CalcLen(args, false, reslen, memlen); + + return JsonInit(initid, args, message, true, reslen, memlen); +} // end of bson_object_values_init + +char *bson_object_values(UDF_INIT *initid, UDF_ARGS *args, char *result, + unsigned long *res_length, char *is_null, char *error) +{ + char *str = NULL; + PGLOBAL g = (PGLOBAL)initid->ptr; + + if (!g->N) { + if (!CheckMemory(g, initid, args, 1, true, true)) { + BJNX bnx(g); + char *p; + PBVAL jsp, jarp; + PBVAL jvp = bnx.MakeValue(g, args, 0); + + if ((p = bnx.GetString(jvp))) { + if (!(jsp = bnx.ParseJson(g, p, strlen(p)))) { + PUSH_WARNING(g->Message); + return NULL; + } // endif jsp + + } else + jsp = jvp; + + if (jsp->Type == TYPE_JOB) { + jarp = bnx.GetObjectValList(jsp); + + if (!(str = bnx.Serialize(g, jarp, NULL, 0))) + PUSH_WARNING(g->Message); + + } else { + PUSH_WARNING("First argument is not an object"); + if (g->Mrr) *error = 1; + } // endif jvp + + } // endif CheckMemory + + if (initid->const_item) { + // Keep result of constant function + g->Xchk = str; + g->N = 1; // str can be NULL + } // endif const_item + + } else + str = (char*)g->Xchk; + + if (!str) { + *is_null = 1; + *res_length = 0; + } else + *res_length = strlen(str); + + return str; +} // end of bson_object_values + +void bson_object_values_deinit(UDF_INIT* initid) +{ + JsonFreeMem((PGLOBAL)initid->ptr); +} // end of bson_object_values_deinit + +/*********************************************************************************/ +/* Set the value of JsonGrpSize. */ +/*********************************************************************************/ +my_bool bsonset_grp_size_init(UDF_INIT *initid, UDF_ARGS *args, char *message) +{ + if (args->arg_count != 1 || args->arg_type[0] != INT_RESULT) { + strcpy(message, "This function must have 1 integer argument"); + return true; + } else + return false; + +} // end of bsonset_grp_size_init + +long long bsonset_grp_size(UDF_INIT *initid, UDF_ARGS *args, char *, char *) +{ + long long n = *(long long*)args->args[0]; + + JsonGrpSize = (uint)n; + return (long long)GetJsonGroupSize(); +} // end of bsonset_grp_size + +/*********************************************************************************/ +/* Get the value of JsonGrpSize. */ +/*********************************************************************************/ +my_bool bsonget_grp_size_init(UDF_INIT *initid, UDF_ARGS *args, char *message) +{ + if (args->arg_count != 0) { + strcpy(message, "This function must have no arguments"); + return true; + } else + return false; + +} // end of bsonget_grp_size_init + +long long bsonget_grp_size(UDF_INIT *initid, UDF_ARGS *args, char *, char *) +{ + return (long long)GetJsonGroupSize(); +} // end of bsonget_grp_size + +/*********************************************************************************/ +/* Make a Json array from values coming from rows. 
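   bson_array_grp is an aggregate UDF: _init pre-allocates a work area
   sized for JsonGrpSize values plus a BJNX object, _clear starts an empty
   array in g->Activityp for each group, _add appends at most JsonGrpSize
   values, and the final function serializes the array, pushing a warning
   when the group was truncated. bson_object_grp below follows the same
   pattern to build an object from (key, value) argument pairs.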
*/ +/*********************************************************************************/ +my_bool bson_array_grp_init(UDF_INIT *initid, UDF_ARGS *args, char *message) +{ + unsigned long reslen, memlen, n = GetJsonGroupSize(); + + if (args->arg_count != 1) { + strcpy(message, "This function can only accept 1 argument"); + return true; + } else if (IsArgJson(args, 0) == 3) { + strcpy(message, "This function does not support Jbin arguments"); + return true; + } else + CalcLen(args, false, reslen, memlen); + + reslen *= n; + memlen += ((memlen - MEMFIX) * (n - 1)); + + if (JsonInit(initid, args, message, false, reslen, memlen)) + return true; + + PGLOBAL g = (PGLOBAL)initid->ptr; + PBJNX bxp = new(g) BJNX(g); + + JsonMemSave(g); + return false; +} // end of bson_array_grp_init + +void bson_array_grp_clear(UDF_INIT *initid, char*, char*) +{ + PGLOBAL g = (PGLOBAL)initid->ptr; + PBJNX bxp = (PBJNX)((char*)g->Sarea + sizeof(POOLHEADER)); + + JsonSubSet(g); + g->Activityp = (PACTIVITY)bxp->NewVal(TYPE_JAR); + g->N = GetJsonGroupSize(); +} // end of bson_array_grp_clear + +void bson_array_grp_add(UDF_INIT *initid, UDF_ARGS *args, char*, char*) +{ + PGLOBAL g = (PGLOBAL)initid->ptr; + PBJNX bxp = (PBJNX)((char*)g->Sarea + sizeof(POOLHEADER)); + PBVAL arp = (PBVAL)g->Activityp; + + if (arp && g->N-- > 0) + bxp->AddArrayValue(arp, bxp->MakeValue(g, args, 0)); + +} // end of bson_array_grp_add + +char *bson_array_grp(UDF_INIT *initid, UDF_ARGS *, char *result, + unsigned long *res_length, char *, char *) +{ + char *str; + PGLOBAL g = (PGLOBAL)initid->ptr; + PBJNX bxp = (PBJNX)((char*)g->Sarea + sizeof(POOLHEADER)); + PBVAL arp = (PBVAL)g->Activityp; + + if (g->N < 0) + PUSH_WARNING("Result truncated to json_grp_size values"); + + if (!arp || !(str = bxp->Serialize(g, arp, NULL, 0))) + str = strcpy(result, g->Message); + + *res_length = strlen(str); + return str; +} // end of bson_array_grp + +void bson_array_grp_deinit(UDF_INIT* initid) +{ + JsonFreeMem((PGLOBAL)initid->ptr); +} // end of bson_array_grp_deinit + +/*********************************************************************************/ +/* Make a Json object from values coming from rows. 
*/ +/*********************************************************************************/ +my_bool bson_object_grp_init(UDF_INIT *initid, UDF_ARGS *args, char *message) +{ + unsigned long reslen, memlen, n = GetJsonGroupSize(); + + if (args->arg_count != 2) { + strcpy(message, "This function requires 2 arguments (key, value)"); + return true; + } else if (IsJson(args, 0) == 3) { + strcpy(message, "This function does not support Jbin arguments"); + return true; + } else + CalcLen(args, true, reslen, memlen); + + reslen *= n; + memlen += ((memlen - MEMFIX) * (n - 1)); + + if (JsonInit(initid, args, message, false, reslen, memlen)) + return true; + + PGLOBAL g = (PGLOBAL)initid->ptr; + PBJNX bxp = new(g) BJNX(g); + + JsonMemSave(g); + return false; +} // end of bson_object_grp_init + +void bson_object_grp_clear(UDF_INIT *initid, char*, char*) +{ + PGLOBAL g = (PGLOBAL)initid->ptr; + PBJNX bxp = (PBJNX)((char*)g->Sarea + sizeof(POOLHEADER)); + + JsonSubSet(g); + g->Activityp = (PACTIVITY)bxp->NewVal(TYPE_JOB); + g->N = GetJsonGroupSize(); +} // end of bson_object_grp_clear + +void bson_object_grp_add(UDF_INIT *initid, UDF_ARGS *args, char*, char*) +{ + PGLOBAL g = (PGLOBAL)initid->ptr; + PBJNX bxp = (PBJNX)((char*)g->Sarea + sizeof(POOLHEADER)); + PBVAL bop = (PBVAL)g->Activityp; + + if (g->N-- > 0) + bxp->SetKeyValue(bop, bxp->MakeValue(g, args, 0), MakePSZ(g, args, 1)); + +} // end of bson_object_grp_add + +char *bson_object_grp(UDF_INIT *initid, UDF_ARGS *, char *result, + unsigned long *res_length, char *, char *) +{ + char *str; + PGLOBAL g = (PGLOBAL)initid->ptr; + PBJNX bxp = (PBJNX)((char*)g->Sarea + sizeof(POOLHEADER)); + PBVAL bop = (PBVAL)g->Activityp; + + if (g->N < 0) + PUSH_WARNING("Result truncated to json_grp_size values"); + + if (!bop || !(str = bxp->Serialize(g, bop, NULL, 0))) + str = strcpy(result, g->Message); + + *res_length = strlen(str); + return str; +} // end of bson_object_grp + +void bson_object_grp_deinit(UDF_INIT* initid) +{ + JsonFreeMem((PGLOBAL)initid->ptr); +} // end of bson_object_grp_deinit + /*********************************************************************************/ /* Test BJSON parse and serialize. 
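   Hedged usage sketch (the json literal is an arbitrary example and the exact
   output shape may differ):
      SELECT bson_test('[1, 2, {"a": 3}]');
   The first argument is parsed into a BJSON tree and re-serialized; an optional
   string argument names an output file and an optional integer selects the pretty level.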
*/ /*********************************************************************************/ @@ -1401,7 +2669,7 @@ my_bool bson_test_init(UDF_INIT* initid, UDF_ARGS* args, char* message) { if (args->arg_count == 0) { strcpy(message, "At least 1 argument required (json)"); return true; - } else if (!IsJson(args, 0) && args->arg_type[0] != STRING_RESULT) { + } else if (!IsArgJson(args, 0) && args->arg_type[0] != STRING_RESULT) { strcpy(message, "First argument must be a json item"); return true; } else @@ -1416,7 +2684,6 @@ char* bson_test(UDF_INIT* initid, UDF_ARGS* args, char* result, int pretty = 1; PBVAL bvp; PGLOBAL g = (PGLOBAL)initid->ptr; - BDOC doc(g); if (g->N) { str = (char*)g->Activityp; @@ -1425,12 +2692,14 @@ char* bson_test(UDF_INIT* initid, UDF_ARGS* args, char* result, g->N = 1; try { + BJNX bnx(g); + if (!g->Xchk) { if (CheckMemory(g, initid, args, 1, !g->Xchk)) { PUSH_WARNING("CheckMemory error"); *error = 1; goto err; - } else if (!(bvp = MakeBinValue(g, args, 0))) { + } else if (!(bvp = bnx.MakeBinValue(g, args, 0))) { PUSH_WARNING(g->Message); goto err; } // endif bvp @@ -1450,7 +2719,7 @@ char* bson_test(UDF_INIT* initid, UDF_ARGS* args, char* result, pretty = (int)*(longlong*)args->args[i]; // Serialize the parse tree - str = doc.Serialize(g, bvp, fn, pretty); + str = bnx.Serialize(g, bvp, fn, pretty); if (initid->const_item) // Keep result of constant function @@ -1491,7 +2760,7 @@ my_bool bsonlocate_init(UDF_INIT* initid, UDF_ARGS* args, char* message) { if (args->arg_count < 2) { strcpy(message, "At least 2 arguments required"); return true; - } else if (!IsJson(args, 0) && args->arg_type[0] != STRING_RESULT) { + } else if (!IsArgJson(args, 0) && args->arg_type[0] != STRING_RESULT) { strcpy(message, "First argument must be a json item"); return true; } else if (args->arg_count > 2 && args->arg_type[2] != INT_RESULT) { @@ -1502,7 +2771,7 @@ my_bool bsonlocate_init(UDF_INIT* initid, UDF_ARGS* args, char* message) { CalcLen(args, false, reslen, memlen); // TODO: calculate this - if (IsJson(args, 0) == 3) + if (IsArgJson(args, 0) == 3) more = 0; return JsonInit(initid, args, message, true, reslen, memlen, more); @@ -1513,7 +2782,6 @@ char* bsonlocate(UDF_INIT* initid, UDF_ARGS* args, char* result, char *path = NULL; int k; PBVAL bvp, bvp2; - PBJNX bnxp; PGLOBAL g = (PGLOBAL)initid->ptr; if (g->N) { @@ -1531,13 +2799,15 @@ char* bsonlocate(UDF_INIT* initid, UDF_ARGS* args, char* result, g->N = 1; try { + BJNX bnx(g); + if (!g->Xchk) { if (CheckMemory(g, initid, args, 1, !g->Xchk)) { PUSH_WARNING("CheckMemory error"); *error = 1; goto err; } else - bvp = MakeBinValue(g, args, 0); + bvp = bnx.MakeBinValue(g, args, 0); if (!bvp) { PUSH_WARNING("First argument is not a valid JSON item"); @@ -1553,15 +2823,15 @@ char* bsonlocate(UDF_INIT* initid, UDF_ARGS* args, char* result, bvp = (PBVAL)g->Xchk; // The item to locate - if (!(bvp2 = MakeBinValue(g, args, 1))) { + if (!(bvp2 = bnx.MakeBinValue(g, args, 1))) { PUSH_WARNING("Invalid second argument"); goto err; } // endif bvp k = (args->arg_count > 2) ? 
(int)*(long long*)args->args[2] : 1; - bnxp = new(g) BJNX(g, bvp, TYPE_STRING); - path = bnxp->Locate(g, bvp, bvp2, k); +// bnxp = new(g) BJNX(g, bvp, TYPE_STRING); + path = bnx.Locate(g, bvp, bvp2, k); if (initid->const_item) // Keep result of constant function @@ -1602,7 +2872,7 @@ my_bool bson_locate_all_init(UDF_INIT* initid, UDF_ARGS* args, char* message) { if (args->arg_count < 2) { strcpy(message, "At least 2 arguments required"); return true; - } else if (!IsJson(args, 0) && args->arg_type[0] != STRING_RESULT) { + } else if (!IsArgJson(args, 0) && args->arg_type[0] != STRING_RESULT) { strcpy(message, "First argument must be a json item"); return true; } else if (args->arg_count > 2 && args->arg_type[2] != INT_RESULT) { @@ -1613,7 +2883,7 @@ my_bool bson_locate_all_init(UDF_INIT* initid, UDF_ARGS* args, char* message) { CalcLen(args, false, reslen, memlen); // TODO: calculate this - if (IsJson(args, 0) == 3) + if (IsArgJson(args, 0) == 3) more = 0; return JsonInit(initid, args, message, true, reslen, memlen, more); @@ -1624,7 +2894,6 @@ char* bson_locate_all(UDF_INIT* initid, UDF_ARGS* args, char* result, char* path = NULL; int mx = 10; PBVAL bvp, bvp2; - PBJNX bnxp; PGLOBAL g = (PGLOBAL)initid->ptr; if (g->N) { @@ -1643,13 +2912,15 @@ char* bson_locate_all(UDF_INIT* initid, UDF_ARGS* args, char* result, g->N = 1; try { + BJNX bnx(g); + if (!g->Xchk) { if (CheckMemory(g, initid, args, 1, true)) { PUSH_WARNING("CheckMemory error"); *error = 1; goto err; } else - bvp = MakeBinValue(g, args, 0); + bvp = bnx.MakeBinValue(g, args, 0); if (!bvp) { PUSH_WARNING("First argument is not a valid JSON item"); @@ -1665,7 +2936,7 @@ char* bson_locate_all(UDF_INIT* initid, UDF_ARGS* args, char* result, bvp = (PBVAL)g->Xchk; // The item to locate - if (!(bvp2 = MakeBinValue(g, args, 1))) { + if (!(bvp2 = bnx.MakeBinValue(g, args, 1))) { PUSH_WARNING("Invalid second argument"); goto err; } // endif bvp @@ -1673,8 +2944,8 @@ char* bson_locate_all(UDF_INIT* initid, UDF_ARGS* args, char* result, if (args->arg_count > 2) mx = (int)*(long long*)args->args[2]; - bnxp = new(g) BJNX(g, bvp, TYPE_STRING); - path = bnxp->LocateAll(g, bvp, bvp2, mx); +// bnxp = new(g) BJNX(g, bvp, TYPE_STRING); + path = bnx.LocateAll(g, bvp, bvp2, mx); if (initid->const_item) // Keep result of constant function @@ -1706,6 +2977,1252 @@ void bson_locate_all_deinit(UDF_INIT* initid) { JsonFreeMem((PGLOBAL)initid->ptr); } // end of bson_locate_all_deinit +/*********************************************************************************/ +/* Check whether the document contains a value or item. 
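   Hedged usage sketch (values are arbitrary examples):
      SELECT bson_contains('[1, 2, 3]', 2);   -- expected 1
      SELECT bson_contains('[1, 2, 3]', 5);   -- expected 0
   The function simply calls bsonlocate and returns 1 when the item is found,
   0 otherwise; the optional third argument is the occurrence index passed to the locator.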
*/ +/*********************************************************************************/ +my_bool bson_contains_init(UDF_INIT *initid, UDF_ARGS *args, char *message) +{ + unsigned long reslen, memlen, more = 1024; + int n = IsArgJson(args, 0); + + if (args->arg_count < 2) { + strcpy(message, "At least 2 arguments required"); + return true; + } else if (!n && args->arg_type[0] != STRING_RESULT) { + strcpy(message, "First argument must be a json item"); + return true; + } else if (args->arg_count > 2 && args->arg_type[2] != INT_RESULT) { + strcpy(message, "Third argument is not an integer (index)"); + return true; + } else if (args->arg_count > 3) { + if (args->arg_type[3] == INT_RESULT && args->args[3]) + more += (unsigned long)*(long long*)args->args[3]; + else + strcpy(message, "Fourth argument is not an integer (memory)"); + + } // endif's + + CalcLen(args, false, reslen, memlen); + //memlen += more; + + // TODO: calculate this + more += (IsJson(args, 0) != 3 ? 1000 : 0); + + return JsonInit(initid, args, message, false, reslen, memlen, more); +} // end of bson contains_init + +long long bson_contains(UDF_INIT *initid, UDF_ARGS *args, char *, char *error) +{ + char isn, res[256]; + unsigned long reslen; + + isn = 0; + bsonlocate(initid, args, res, &reslen, &isn, error); + return (isn) ? 0LL : 1LL; +} // end of bson_contains + +void bson_contains_deinit(UDF_INIT* initid) +{ + JsonFreeMem((PGLOBAL)initid->ptr); +} // end of bson_contains_deinit + +/*********************************************************************************/ +/* Check whether the document contains a path. */ +/*********************************************************************************/ +my_bool bsoncontains_path_init(UDF_INIT *initid, UDF_ARGS *args, char *message) +{ + unsigned long reslen, memlen, more = 1024; + int n = IsArgJson(args, 0); + + if (args->arg_count < 2) { + strcpy(message, "At least 2 arguments required"); + return true; + } else if (!n && args->arg_type[0] != STRING_RESULT) { + strcpy(message, "First argument must be a json item"); + return true; + } else if (args->arg_type[1] != STRING_RESULT) { + strcpy(message, "Second argument is not a string (path)"); + return true; + } else if (args->arg_count > 2) { + if (args->arg_type[2] == INT_RESULT && args->args[2]) + more += (unsigned long)*(long long*)args->args[2]; + else + strcpy(message, "Third argument is not an integer (memory)"); + + } // endif's + + CalcLen(args, false, reslen, memlen); + //memlen += more; + + // TODO: calculate this + more += (IsJson(args, 0) != 3 ? 
1000 : 0); + + return JsonInit(initid, args, message, true, reslen, memlen, more); +} // end of bsoncontains_path_init + +long long bsoncontains_path(UDF_INIT *initid, UDF_ARGS *args, char *, char *error) +{ + char *p, *path; + long long n; + PBVAL jsp; + PBVAL jvp; + PBJNX bxp = NULL; + PGLOBAL g = (PGLOBAL)initid->ptr; + + if (g->N) { + if (!g->Activityp) { + return 0LL; + } else + return *(long long*)g->Activityp; + + } else if (initid->const_item) + g->N = 1; + + if (!g->Xchk) { + if (CheckMemory(g, initid, args, 1, true)) { + PUSH_WARNING("CheckMemory error"); + goto err; + } else { + BJNX bnx(g); + + jvp = bnx.MakeValue(g, args, 0); + + if ((p = bnx.GetString(jvp))) { + if (!(jsp = bnx.ParseJson(g, p, strlen(p)))) { + PUSH_WARNING(g->Message); + goto err; + } // endif jsp + + } else + jsp = jvp; + + if (g->Mrr) { // First argument is a constant + g->Xchk = jsp; + JsonMemSave(g); + } // endif Mrr + + } // endelse CheckMemory + + } else + jsp = (PBVAL)g->Xchk; + + bxp = new(g) BJNX(g, jsp, TYPE_BIGINT); + path = MakePSZ(g, args, 1); + + if (bxp->SetJpath(g, path)) { + PUSH_WARNING(g->Message); + goto err; + } // endif SetJpath + + n = (bxp->CheckPath(g)) ? 1LL : 0LL; + + if (initid->const_item) { + // Keep result of constant function + long long *np = (long long*)PlgDBSubAlloc(g, NULL, sizeof(long long)); + + if (np) { + *np = n; + g->Activityp = (PACTIVITY)np; + } else + PUSH_WARNING(g->Message); + + } // endif const_item + + return n; + +err: + if (g->Mrr) *error = 1; + return 0LL; +} // end of bsoncontains_path + +void bsoncontains_path_deinit(UDF_INIT* initid) +{ + JsonFreeMem((PGLOBAL)initid->ptr); +} // end of bsoncontains_path_deinit + +/*********************************************************************************/ +/* Merge two arrays or objects. */ +/*********************************************************************************/ +my_bool bson_item_merge_init(UDF_INIT *initid, UDF_ARGS *args, char *message) +{ + unsigned long reslen, memlen; + + if (args->arg_count < 2) { + strcpy(message, "This function must have at least 2 arguments"); + return true; + } else for (int i = 0; i < 2; i++) + if (!IsArgJson(args, i) && args->arg_type[i] != STRING_RESULT) { + sprintf(message, "Argument %d must be a json item", i); + return true; + } // endif type + + CalcLen(args, false, reslen, memlen, true); + + if (!JsonInit(initid, args, message, true, reslen, memlen)) { + PGLOBAL g = (PGLOBAL)initid->ptr; + + // This is a constant function + g->N = (initid->const_item) ? 
1 : 0; + + // This is to avoid double execution when using prepared statements + if (IsArgJson(args, 0) > 1) + initid->const_item = 0; + + return false; + } else + return true; + +} // end of bson_item_merge_init + +char *bson_item_merge(UDF_INIT *initid, UDF_ARGS *args, char *result, + unsigned long *res_length, char *is_null, char *error) +{ + char *str = NULL; + PGLOBAL g = (PGLOBAL)initid->ptr; + + if (g->Xchk) { + // This constant function was recalled + str = (char*)g->Xchk; + goto fin; + } // endif Xchk + + if (!CheckMemory(g, initid, args, 2, false, false, true)) { + JTYP type; + BJNX bnx(g); + PBVAL jvp, top = NULL; + PBVAL jsp[2] = {NULL, NULL}; + + for (int i = 0; i < 2; i++) { + jvp = bnx.MakeBinValue(g, args, i); + + if (i) { + if (jvp->Type != type) { + PUSH_WARNING("Argument types mismatch"); + goto fin; + } // endif type + + } else { + type = (JTYP)jvp->Type; + + if (type != TYPE_JAR && type != TYPE_JOB) { + PUSH_WARNING("First argument is not an array or object"); + goto fin; + } else + top = jvp; + + } // endif i + + jsp[i] = jvp; + } // endfor i + + if (type == TYPE_JAR) + bnx.MergeArray(jsp[0], jsp[1]); + else + bnx.MergeObject(jsp[0], jsp[1]); + + str = MakeResult(g, args, top); + } // endif CheckMemory + + // In case of error or file, return unchanged first argument + if (!str) + str = MakePSZ(g, args, 0); + + if (g->N) + // Keep result of constant function + g->Xchk = str; + +fin: + if (!str) { + *res_length = 0; + *error = 1; + *is_null = 1; + } else + *res_length = strlen(str); + + return str; +} // end of bson_item_merge + +void bson_item_merge_deinit(UDF_INIT* initid) +{ + JsonFreeMem((PGLOBAL)initid->ptr); +} // end of bson_item_merge_deinit + +/*********************************************************************************/ +/* Get a Json item from a Json document. 
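   Hedged usage sketch (document and path are arbitrary; the path follows the
   CONNECT jpath syntax expected by SetJpath, which may differ in older versions):
      SELECT bson_get_item('{"a": {"b": [1, 2]}}', '$.a');
   The addressed item is returned serialized as a string, or NULL when the path
   resolves to nothing.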
*/ +/*********************************************************************************/ +my_bool bson_get_item_init(UDF_INIT *initid, UDF_ARGS *args, char *message) +{ + unsigned long reslen, memlen, more; + int n = IsArgJson(args, 0); + + if (args->arg_count < 2) { + strcpy(message, "This function must have at least 2 arguments"); + return true; + } else if (!n && args->arg_type[0] != STRING_RESULT) { + strcpy(message, "First argument must be a json item"); + return true; + } else if (args->arg_type[1] != STRING_RESULT) { + strcpy(message, "Second argument is not a string (jpath)"); + return true; + } else + CalcLen(args, false, reslen, memlen); + + if (n == 2 && args->args[0]) { + char fn[_MAX_PATH]; + long fl; + + memcpy(fn, args->args[0], args->lengths[0]); + fn[args->lengths[0]] = 0; + fl = GetFileLength(fn); + more = fl * 3; + } else if (n != 3) { + more = args->lengths[0] * 3; + } else + more = 0; + + return JsonInit(initid, args, message, true, reslen, memlen, more); +} // end of bson_get_item_init + +char *bson_get_item(UDF_INIT *initid, UDF_ARGS *args, char *result, + unsigned long *res_length, char *is_null, char *) +{ + char *p, *path, *str = NULL; + PBVAL jsp, jvp; + PBJNX bxp = NULL; + PGLOBAL g = (PGLOBAL)initid->ptr; + + if (g->N) { + str = (char*)g->Activityp; + goto fin; + } else if (initid->const_item) + g->N = 1; + + if (!g->Xchk) { + if (CheckMemory(g, initid, args, 1, true, true)) { + PUSH_WARNING("CheckMemory error"); + goto fin; + } else { + BJNX bnx(g); + + jvp = bnx.MakeValue(g, args, 0); + + if ((p = bnx.GetString(jvp))) { + if (!(jsp = bnx.ParseJson(g, p, strlen(p)))) { + PUSH_WARNING(g->Message); + goto fin; + } // endif jsp + + } else + jsp = jvp; + + if (g->Mrr) { // First argument is a constant + g->Xchk = jsp; + JsonMemSave(g); + } // endif Mrr + + } // endelse CheckMemory + + } else + jsp = (PBVAL)g->Xchk; + + path = MakePSZ(g, args, 1); + bxp = new(g) BJNX(g, jsp, TYPE_STRING, initid->max_length); + + if (bxp->SetJpath(g, path, true)) { + PUSH_WARNING(g->Message); + goto fin; + } else + bxp->ReadValue(g); + + if (!bxp->GetValue()->IsNull()) + str = bxp->GetValue()->GetCharValue(); + + if (initid->const_item) + // Keep result of constant function + g->Activityp = (PACTIVITY)str; + +fin: + if (!str) { + *is_null = 1; + *res_length = 0; + } else + *res_length = strlen(str); + + return str; +} // end of bson_get_item + +void bson_get_item_deinit(UDF_INIT* initid) +{ + JsonFreeMem((PGLOBAL)initid->ptr); +} // end of bson_get_item_deinit + +/*********************************************************************************/ +/* Get a string value from a Json item. 
*/ +/*********************************************************************************/ +my_bool bsonget_string_init(UDF_INIT *initid, UDF_ARGS *args, char *message) +{ + unsigned long reslen, memlen, more = 1024; + int n = IsArgJson(args, 0); + + if (args->arg_count < 2) { + strcpy(message, "At least 2 arguments required"); + return true; + } else if (!n && args->arg_type[0] != STRING_RESULT) { + strcpy(message, "First argument must be a json item"); + return true; + } else if (args->arg_type[1] != STRING_RESULT) { + strcpy(message, "Second argument is not a string (jpath)"); + return true; + } else if (args->arg_count > 2) { + if (args->arg_type[2] == INT_RESULT && args->args[2]) + more += (unsigned long)*(long long*)args->args[2]; + else + strcpy(message, "Third argument is not an integer (memory)"); + + } // endif's + + CalcLen(args, false, reslen, memlen); + //memlen += more; + + if (n == 2 && args->args[0]) { + char fn[_MAX_PATH]; + long fl; + + memcpy(fn, args->args[0], args->lengths[0]); + fn[args->lengths[0]] = 0; + fl = GetFileLength(fn); + more += fl * 3; + } else if (n != 3) + more += args->lengths[0] * 3; + + return JsonInit(initid, args, message, true, reslen, memlen, more); +} // end of bsonget_string_init + +char *bsonget_string(UDF_INIT *initid, UDF_ARGS *args, char *result, + unsigned long *res_length, char *is_null, char *) +{ + char *p, *path, *str = NULL; + PBVAL jsp, jvp; + PBJNX bxp = NULL; + PGLOBAL g = (PGLOBAL)initid->ptr; + + if (g->N) { + str = (char*)g->Activityp; + goto err; + } else if (initid->const_item) + g->N = 1; + + try { + if (!g->Xchk) { + if (CheckMemory(g, initid, args, 1, true)) { + PUSH_WARNING("CheckMemory error"); + goto err; + } else { + BJNX bnx(g); + + jvp = bnx.MakeValue(g, args, 0); + + if ((p = bnx.GetString(jvp))) { + if (!(jsp = bnx.ParseJson(g, p, strlen(p)))) { + PUSH_WARNING(g->Message); + goto err; + } // endif jsp + + } else + jsp = jvp; + + if (g->Mrr) { // First argument is a constant + g->Xchk = jsp; + JsonMemSave(g); + } // endif Mrr + + } // endelse CheckMemory + + } else + jsp = (PBVAL)g->Xchk; + + path = MakePSZ(g, args, 1); + bxp = new(g) BJNX(g, jsp, TYPE_STRING, initid->max_length); + + if (bxp->SetJpath(g, path)) { + PUSH_WARNING(g->Message); + goto err; + } else + bxp->ReadValue(g); + + if (!bxp->GetValue()->IsNull()) + str = bxp->GetValue()->GetCharValue(); + + if (initid->const_item) + // Keep result of constant function + g->Activityp = (PACTIVITY)str; + + } catch (int n) { + if (trace(1)) + htrc("Exception %d: %s\n", n, g->Message); + + PUSH_WARNING(g->Message); + str = NULL; + } catch (const char *msg) { + strcpy(g->Message, msg); + PUSH_WARNING(g->Message); + str = NULL; + } // end catch + +err: + if (!str) { + *is_null = 1; + *res_length = 0; + } else + *res_length = strlen(str); + + return str; +} // end of bsonget_string + +void bsonget_string_deinit(UDF_INIT* initid) +{ + JsonFreeMem((PGLOBAL)initid->ptr); +} // end of bsonget_string_deinit + +/*********************************************************************************/ +/* Get an integer value from a Json item. 
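   Hedged usage sketch (document and path are arbitrary examples):
      SELECT bsonget_int('{"qty": 7, "price": 2.75}', '$.qty');   -- expected 7
   Returns the addressed value as a BIGINT, or SQL NULL when the path does not resolve.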
*/ +/*********************************************************************************/ +my_bool bsonget_int_init(UDF_INIT *initid, UDF_ARGS *args, char *message) +{ + unsigned long reslen, memlen, more; + + if (args->arg_count != 2) { + strcpy(message, "This function must have 2 arguments"); + return true; + } else if (!IsArgJson(args, 0) && args->arg_type[0] != STRING_RESULT) { + strcpy(message, "First argument must be a json item"); + return true; + } else if (args->arg_type[1] != STRING_RESULT) { + strcpy(message, "Second argument is not a (jpath) string"); + return true; + } else + CalcLen(args, false, reslen, memlen); + + // TODO: calculate this + more = (IsJson(args, 0) != 3) ? 1000 : 0; + + return JsonInit(initid, args, message, true, reslen, memlen, more); +} // end of bsonget_int_init + +long long bsonget_int(UDF_INIT *initid, UDF_ARGS *args, + char *is_null, char *error) +{ + char *p, *path; + long long n; + PBVAL jsp, jvp; + PBJNX bxp = NULL; + PGLOBAL g = (PGLOBAL)initid->ptr; + + if (g->N) { + if (!g->Activityp) { + *is_null = 1; + return 0LL; + } else + return *(long long*)g->Activityp; + + } else if (initid->const_item) + g->N = 1; + + if (!g->Xchk) { + if (CheckMemory(g, initid, args, 1, true)) { + PUSH_WARNING("CheckMemory error"); + if (g->Mrr) *error = 1; + *is_null = 1; + return 0LL; + } else { + BJNX bnx(g); + + jvp = bnx.MakeValue(g, args, 0); + + if ((p = bnx.GetString(jvp))) { + if (!(jsp = bnx.ParseJson(g, p, strlen(p)))) { + PUSH_WARNING(g->Message); + if (g->Mrr) *error = 1; + *is_null = 1; + return 0; + } // endif jsp + + } else + jsp = jvp; + + if (g->Mrr) { // First argument is a constant + g->Xchk = jsp; + JsonMemSave(g); + } // endif Mrr + + } // endelse CheckMemory + + } else + jsp = (PBVAL)g->Xchk; + + path = MakePSZ(g, args, 1); + bxp = new(g) BJNX(g, jsp, TYPE_BIGINT); + + if (bxp->SetJpath(g, path)) { + PUSH_WARNING(g->Message); + *is_null = 1; + return 0; + } else + bxp->ReadValue(g); + + if (bxp->GetValue()->IsNull()) { + *is_null = 1; + return 0; + } // endif IsNull + + n = bxp->GetValue()->GetBigintValue(); + + if (initid->const_item) { + // Keep result of constant function + long long *np = (long long*)PlgDBSubAlloc(g, NULL, sizeof(long long)); + + if (np) { + *np = n; + g->Activityp = (PACTIVITY)np; + } else + PUSH_WARNING(g->Message); + + } // endif const_item + + return n; +} // end of bsonget_int + +void bsonget_int_deinit(UDF_INIT* initid) +{ + JsonFreeMem((PGLOBAL)initid->ptr); +} // end of bsonget_int_deinit + +/*********************************************************************************/ +/* Get a double value from a Json item. 
*/ +/*********************************************************************************/ +my_bool bsonget_real_init(UDF_INIT *initid, UDF_ARGS *args, char *message) +{ + unsigned long reslen, memlen, more; + + if (args->arg_count < 2) { + strcpy(message, "At least 2 arguments required"); + return true; + } else if (!IsArgJson(args, 0) && args->arg_type[0] != STRING_RESULT) { + strcpy(message, "First argument must be a json item"); + return true; + } else if (args->arg_type[1] != STRING_RESULT) { + strcpy(message, "Second argument is not a (jpath) string"); + return true; + } else if (args->arg_count > 2) { + if (args->arg_type[2] != INT_RESULT) { + strcpy(message, "Third argument is not an integer (decimals)"); + return true; + } else + initid->decimals = (uint)*(longlong*)args->args[2]; + + } else + initid->decimals = 15; + + CalcLen(args, false, reslen, memlen); + + // TODO: calculate this + more = (IsJson(args, 0) != 3) ? 1000 : 0; + + return JsonInit(initid, args, message, true, reslen, memlen, more); +} // end of bsonget_real_init + +double bsonget_real(UDF_INIT *initid, UDF_ARGS *args, + char *is_null, char *error) +{ + char *p, *path; + double d; + PBVAL jsp, jvp; + PBJNX bxp = NULL; + PGLOBAL g = (PGLOBAL)initid->ptr; + + if (g->N) { + if (!g->Activityp) { + *is_null = 1; + return 0.0; + } else + return *(double*)g->Activityp; + + } else if (initid->const_item) + g->N = 1; + + if (!g->Xchk) { + if (CheckMemory(g, initid, args, 1, true)) { + PUSH_WARNING("CheckMemory error"); + if (g->Mrr) *error = 1; + *is_null = 1; + return 0.0; + } else { + BJNX bnx(g); + + jvp = bnx.MakeValue(g, args, 0); + + if ((p = bnx.GetString(jvp))) { + if (!(jsp = bnx.ParseJson(g, p, strlen(p)))) { + PUSH_WARNING(g->Message); + *is_null = 1; + return 0.0; + } // endif jsp + + } else + jsp = jvp; + + if (g->Mrr) { // First argument is a constant + g->Xchk = jsp; + JsonMemSave(g); + } // endif Mrr + } // endelse CheckMemory + + } else + jsp = (PBVAL)g->Xchk; + + path = MakePSZ(g, args, 1); + bxp = new(g) BJNX(g, jsp, TYPE_DOUBLE); + + if (bxp->SetJpath(g, path)) { + PUSH_WARNING(g->Message); + *is_null = 1; + return 0.0; + } else + bxp->ReadValue(g); + + if (bxp->GetValue()->IsNull()) { + *is_null = 1; + return 0.0; + } // endif IsNull + + d = bxp->GetValue()->GetFloatValue(); + + if (initid->const_item) { + // Keep result of constant function + double *dp; + + if ((dp = (double*)PlgDBSubAlloc(g, NULL, sizeof(double)))) { + *dp = d; + g->Activityp = (PACTIVITY)dp; + } else { + PUSH_WARNING(g->Message); + *is_null = 1; + return 0.0; + } // endif dp + + } // endif const_item + + return d; +} // end of jsonget_real + +void bsonget_real_deinit(UDF_INIT* initid) +{ + JsonFreeMem((PGLOBAL)initid->ptr); +} // end of bsonget_real_deinit + +/*********************************************************************************/ +/* This function is used by the json_set/insert/update_item functions. 
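   In rough terms, the wrappers below pass '$set', '$insert' or '$update' through
   the result buffer and this helper then applies each value/path pair: set always
   writes, insert writes only where the path is currently unassigned, update writes
   only where a value already exists. Hedged usage sketch (arbitrary document,
   value and path):
      SELECT bson_set_item('{"a": 1}', 2, '$.b');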
*/ +/*********************************************************************************/ +static char *bson_handle_item(UDF_INIT *initid, UDF_ARGS *args, char *result, + unsigned long *res_length, char *is_null, char *error) +{ + char *p, *path, *str = NULL; + int w; + my_bool b = true; + PBJNX bxp; + PBVAL jsp, jvp; + PGLOBAL g = (PGLOBAL)initid->ptr; +//PGLOBAL gb = GetMemPtr(g, args, 0); + PGLOBAL gb = g; + + if (g->Alchecked) { + str = (char*)g->Activityp; + goto fin; + } else if (g->N) + g->Alchecked = 1; + + if (!strcmp(result, "$set")) + w = 0; + else if (!strcmp(result, "$insert")) + w = 1; + else if (!strcmp(result, "$update")) + w = 2; + else { + PUSH_WARNING("Logical error, please contact CONNECT developer"); + goto fin; + } // endelse + + try { + if (!g->Xchk) { + if (CheckMemory(g, initid, args, 1, true, false, true)) { + PUSH_WARNING("CheckMemory error"); + throw 1; + } else { + BJNX bnx(g); + + jvp = bnx.MakeValue(g, args, 0); + + if ((p = bnx.GetString(jvp))) { + if (!(jsp = bnx.ParseJson(g, p, strlen(p)))) { + throw 2; + } // endif jsp + + } else + jsp = jvp; + + if (g->Mrr) { // First argument is a constant + g->Xchk = jsp; + JsonMemSave(g); + } // endif Mrr + } // endelse CheckMemory + + } else + jsp = (PBVAL)g->Xchk; + + bxp = new(g)BJNX(g, jsp, TYPE_STRING, initid->max_length, 0, true); + + for (uint i = 1; i + 1 < args->arg_count; i += 2) { + jvp = bxp->MakeValue(gb, args, i); + path = MakePSZ(g, args, i + 1); + + if (bxp->SetJpath(g, path, false)) { + PUSH_WARNING(g->Message); + continue; + } // endif SetJpath + + if (w) { + bxp->ReadValue(g); + b = bxp->GetValue()->IsNull(); + b = (w == 1) ? b : !b; + } // endif w + + if (b && bxp->WriteValue(gb, jvp)) + PUSH_WARNING(g->Message); + + } // endfor i + + // In case of error or file, return unchanged argument + if (!(str = MakeResult(g, args, jsp, INT_MAX32))) + str = MakePSZ(g, args, 0); + + if (g->N) + // Keep result of constant function + g->Activityp = (PACTIVITY)str; + + } catch (int n) { + if (trace(1)) + htrc("Exception %d: %s\n", n, g->Message); + + PUSH_WARNING(g->Message); + str = NULL; + } catch (const char *msg) { + strcpy(g->Message, msg); + PUSH_WARNING(g->Message); + str = NULL; + } // end catch + +fin: + if (!str) { + *is_null = 1; + *res_length = 0; + } else + *res_length = strlen(str); + + return str; +} // end of bson_handle_item + +/*********************************************************************************/ +/* Set Json items of a Json document according to path. */ +/*********************************************************************************/ +my_bool bson_set_item_init(UDF_INIT *initid, UDF_ARGS *args, char *message) +{ + unsigned long reslen, memlen, more = 0; + int n = IsArgJson(args, 0); + + if (!(args->arg_count % 2)) { + strcpy(message, "This function must have an odd number of arguments"); + return true; + } else if (!n && args->arg_type[0] != STRING_RESULT) { + strcpy(message, "First argument must be a json item"); + return true; + } else + CalcLen(args, false, reslen, memlen); + + if (n == 2 && args->args[0]) { + char fn[_MAX_PATH]; + long fl; + + memcpy(fn, args->args[0], args->lengths[0]); + fn[args->lengths[0]] = 0; + fl = GetFileLength(fn); + more += fl * 3; + } else if (n != 3) + more += args->lengths[0] * 3; + + if (!JsonInit(initid, args, message, true, reslen, memlen, more)) { + PGLOBAL g = (PGLOBAL)initid->ptr; + + // This is a constant function + g->N = (initid->const_item) ? 
1 : 0; + + // This is to avoid double execution when using prepared statements + if (IsJson(args, 0) > 1) + initid->const_item = 0; + + g->Alchecked = 0; + return false; + } else + return true; + +} // end of bson_set_item_init + +char *bson_set_item(UDF_INIT *initid, UDF_ARGS *args, char *result, + unsigned long *res_length, char *is_null, char *p) +{ + strcpy(result, "$set"); + return bson_handle_item(initid, args, result, res_length, is_null, p); +} // end of bson_set_item + +void bson_set_item_deinit(UDF_INIT* initid) +{ + JsonFreeMem((PGLOBAL)initid->ptr); +} // end of bson_set_item_deinit + +/*********************************************************************************/ +/* Insert Json items of a Json document according to path. */ +/*********************************************************************************/ +my_bool bson_insert_item_init(UDF_INIT *initid, UDF_ARGS *args, char *message) +{ + return bson_set_item_init(initid, args, message); +} // end of bson_insert_item_init + +char *bson_insert_item(UDF_INIT *initid, UDF_ARGS *args, char *result, + unsigned long *res_length, char *is_null, char *p) +{ + strcpy(result, "$insert"); + return bson_handle_item(initid, args, result, res_length, is_null, p); +} // end of bson_insert_item + +void bson_insert_item_deinit(UDF_INIT* initid) +{ + JsonFreeMem((PGLOBAL)initid->ptr); +} // end of bson_insert_item_deinit + +/*********************************************************************************/ +/* Update Json items of a Json document according to path. */ +/*********************************************************************************/ +my_bool bson_update_item_init(UDF_INIT *initid, UDF_ARGS *args, char *message) +{ + return bson_set_item_init(initid, args, message); +} // end of bson_update_item_init + +char *bson_update_item(UDF_INIT *initid, UDF_ARGS *args, char *result, + unsigned long *res_length, char *is_null, char *p) +{ + strcpy(result, "$update"); + return bson_handle_item(initid, args, result, res_length, is_null, p); +} // end of bson_update_item + +void bson_update_item_deinit(UDF_INIT* initid) +{ + JsonFreeMem((PGLOBAL)initid->ptr); +} // end of bson_update_item_deinit + +/*********************************************************************************/ +/* Returns a json file as a json string. */ +/*********************************************************************************/ +my_bool bson_file_init(UDF_INIT *initid, UDF_ARGS *args, char *message) +{ + unsigned long reslen, memlen, fl, more = 1024; + + if (args->arg_count < 1 || args->arg_count > 4) { + strcpy(message, "This function only accepts 1 to 4 arguments"); + return true; + } else if (args->arg_type[0] != STRING_RESULT) { + strcpy(message, "First argument must be a string (file name)"); + return true; + } // endif's args[0] + + for (unsigned int i = 1; i < args->arg_count; i++) { + if (!(args->arg_type[i] == INT_RESULT || args->arg_type[i] == STRING_RESULT)) { + sprintf(message, "Argument %d is not an integer or a string (pretty or path)", i); + return true; + } // endif arg_type + + // Take care of eventual memory argument + if (args->arg_type[i] == INT_RESULT && args->args[i]) + more += (ulong)*(longlong*)args->args[i]; + + } // endfor i + + initid->maybe_null = 1; + CalcLen(args, false, reslen, memlen); + + if (args->args[0]) + fl = GetFileLength(args->args[0]); + else + fl = 100; // What can be done here? 
+ + reslen += fl; + + if (initid->const_item) + more += fl; + + if (args->arg_count > 1) + more += fl * M; + + memlen += more; + return JsonInit(initid, args, message, true, reslen, memlen); +} // end of bson_file_init + +char *bson_file(UDF_INIT *initid, UDF_ARGS *args, char *result, + unsigned long *res_length, char *is_null, char *error) +{ + char *fn, *str = NULL; + PGLOBAL g = (PGLOBAL)initid->ptr; + + if (g->N) { + str = (char*)g->Xchk; + goto fin; + } else if (initid->const_item) + g->N = 1; + + PlugSubSet(g->Sarea, g->Sarea_Size); + fn = MakePSZ(g, args, 0); + + if (args->arg_count > 1) { + int pretty = 3, pty = 3; + size_t len; + PBVAL jsp, jvp = NULL; + BJNX bnx(g); + + for (unsigned int i = 1; i < args->arg_count; i++) + if (args->arg_type[i] == INT_RESULT && *(longlong*)args->args[i] < 4) { + pretty = (int) * (longlong*)args->args[i]; + break; + } // endif type + + // Parse the json file and allocate its tree structure + if (!(jsp = bnx.ParseJsonFile(g, fn, pty, len))) { + PUSH_WARNING(g->Message); + goto fin; + } // endif jsp + + if (pty == 3) + PUSH_WARNING("File pretty format cannot be determined"); + else if (pretty != 3 && pty != pretty) + PUSH_WARNING("File pretty format doesn't match the specified pretty value"); + else if (pretty == 3) + pretty = pty; + + // Check whether a path was specified + if (bnx.CheckPath(g, args, jsp, jvp, 1)) { + PUSH_WARNING(g->Message); + goto fin; + } else if (jvp) + jsp = jvp; + + if (!(str = bnx.Serialize(g, jsp, NULL, 0))) + PUSH_WARNING(g->Message); + + } else + if (!(str = GetJsonFile(g, fn))) + PUSH_WARNING(g->Message); + + if (initid->const_item) + // Keep result of constant function + g->Xchk = str; + +fin: + if (!str) { + *res_length = 0; + *is_null = 1; + } else + *res_length = strlen(str); + + return str; +} // end of bson_file + +void bson_file_deinit(UDF_INIT* initid) +{ + JsonFreeMem((PGLOBAL)initid->ptr); +} // end of bson_file_deinit + +/*********************************************************************************/ +/* Make a json file from a json item. 
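   Hedged usage sketch (file name and document are arbitrary; the file is written
   on the server side):
      SELECT bfile_make('{"a": 1, "b": [2, 3]}', 'tmp.json', 0);
   An optional string argument gives the output file name and an optional integer
   the pretty level; the function returns the name of the file it wrote.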
*/ +/*********************************************************************************/ +my_bool bfile_make_init(UDF_INIT *initid, UDF_ARGS *args, char *message) +{ + unsigned long reslen, memlen; + + if (args->arg_count < 1 || args->arg_count > 3) { + strcpy(message, "Wrong number of arguments"); + return true; + } else if (!IsArgJson(args, 0) && args->arg_type[0] != STRING_RESULT) { + strcpy(message, "First argument must be a json item"); + return true; + } // endif + + CalcLen(args, false, reslen, memlen); + memlen = memlen + 5000; // To take care of not pretty files + return JsonInit(initid, args, message, true, reslen, memlen); +} // end of bfile_make_init + +char *bfile_make(UDF_INIT *initid, UDF_ARGS *args, char *result, + unsigned long *res_length, char *is_null, char *) +{ + char *p, *str = NULL, *fn = NULL; + int n, pretty = 2; + PBVAL jsp, jvp; + PGLOBAL g = (PGLOBAL)initid->ptr; + BJNX bnx(g); + + if (g->N) { + str = (char*)g->Activityp; + goto fin; + } else if (initid->const_item) + g->N = 1; + +// if ((n = IsArgJson(args, 0)) == 3) { + // Get default file name and pretty +// PBSON bsp = (PBSON)args->args[0]; + +// fn = bsp->Filename; +// pretty = bsp->Pretty; +// } else + if ((n = IsArgJson(args, 0)) == 2) + fn = args->args[0]; + + if (!g->Xchk) { + if (CheckMemory(g, initid, args, 1, true)) { + PUSH_WARNING("CheckMemory error"); + goto fin; + } else + jvp = bnx.MakeValue(g, args, 0); + + if ((p = bnx.GetString(jvp))) { + if (!strchr("[{ \t\r\n", *p)) { + // Is this a file name? + if (!(p = GetJsonFile(g, p))) { + PUSH_WARNING(g->Message); + goto fin; + } else + fn = bnx.GetString(jvp); + + } // endif p + + if (!(jsp = bnx.ParseJson(g, p, strlen(p)))) { + PUSH_WARNING(g->Message); + goto fin; + } // endif jsp + + bnx.SetValueVal(jvp, jsp); + } // endif p + + if (g->Mrr) { // First argument is a constant + g->Xchk = jvp; + JsonMemSave(g); + } // endif Mrr + + } else + jvp = (PBVAL)g->Xchk; + + for (uint i = 1; i < args->arg_count; i++) + switch (args->arg_type[i]) { + case STRING_RESULT: + fn = MakePSZ(g, args, i); + break; + case INT_RESULT: + pretty = (int)*(longlong*)args->args[i]; + break; + default: + PUSH_WARNING("Unexpected argument type in bfile_make"); + } // endswitch arg_type + + if (fn) { + if (!bnx.Serialize(g, jvp, fn, pretty)) + PUSH_WARNING(g->Message); + } else + PUSH_WARNING("Missing file name"); + + str = fn; + + if (initid->const_item) + // Keep result of constant function + g->Activityp = (PACTIVITY)str; + +fin: + if (!str) { + *res_length = 0; + *is_null = 1; + } else + *res_length = strlen(str); + + return str; +} // end of bfile_make + +void bfile_make_deinit(UDF_INIT* initid) +{ + JsonFreeMem((PGLOBAL)initid->ptr); +} // end of bfile_make_deinit + +/*********************************************************************************/ +/* Convert a prettiest Json file to Pretty=0. 
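   Hedged usage sketch (file names and record length are arbitrary examples):
      SELECT bfile_convert('pretty.json', 'flat.json', 300);
   The input file, whatever its pretty level, is rewritten in the pretty=0
   one-record-per-line format, the third argument giving the output record
   length (LRECL).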
*/ +/*********************************************************************************/ +my_bool bfile_convert_init(UDF_INIT* initid, UDF_ARGS* args, char* message) { + unsigned long reslen, memlen; + + if (args->arg_count != 3) { + strcpy(message, "This function must have 3 arguments"); + return true; + } else if (args->arg_type[2] != INT_RESULT) { + strcpy(message, "Third Argument must be an integer (LRECL)"); + return true; + } else for (int i = 0; i < 2; i++) + if (args->arg_type[i] != STRING_RESULT) { + sprintf(message, "Arguments %d must be a string (file name)", i+1); + return true; + } // endif args + + CalcLen(args, false, reslen, memlen); + return JsonInit(initid, args, message, true, reslen, memlen); +} // end of bfile_convert_init + +char *bfile_convert(UDF_INIT* initid, UDF_ARGS* args, char* result, + unsigned long *res_length, char *is_null, char *error) { + char *str, *fn, *ofn; + int lrecl = (int)*(longlong*)args->args[2]; + PGLOBAL g = (PGLOBAL)initid->ptr; + + PlugSubSet(g->Sarea, g->Sarea_Size); + fn = MakePSZ(g, args, 0); + ofn = MakePSZ(g, args, 1); + + if (!g->Xchk) { + JUP* jup = new(g) JUP(g); + + str = jup->UnprettyJsonFile(g, fn, ofn, lrecl); + g->Xchk = str; + } else + str = (char*)g->Xchk; + + if (!str) { + PUSH_WARNING(g->Message ? g->Message : "Unexpected error"); + *is_null = 1; + *error = 1; + *res_length = 0; + } else { + strcpy(result, str); + *res_length = strlen(str); + } // endif str + + return str; +} // end of bfile_convert + +void bfile_convert_deinit(UDF_INIT* initid) { + JsonFreeMem((PGLOBAL)initid->ptr); +} // end of bfile_convert_deinit + /*********************************************************************************/ /* Convert a pretty=0 Json file to binary BJSON. */ /*********************************************************************************/ diff --git a/storage/connect/bsonudf.h b/storage/connect/bsonudf.h index 1675d36cee5..251af86a32b 100644 --- a/storage/connect/bsonudf.h +++ b/storage/connect/bsonudf.h @@ -9,7 +9,77 @@ #include "jsonudf.h" #include "bson.h" +#if 0 +#define UDF_EXEC_ARGS \ + UDF_INIT*, UDF_ARGS*, char*, unsigned long*, char*, char* + +// BSON size should be equal on Linux and Windows +#define BMX 255 +typedef struct BSON* PBSON; + +/***********************************************************************/ +/* Structure used to return binary json to Json UDF functions. */ +/***********************************************************************/ +struct BSON { + char Msg[BMX + 1]; + char *Filename; + PGLOBAL G; + int Pretty; + ulong Reslen; + my_bool Changed; + PJSON Top; + PJSON Jsp; + PBSON Bsp; +}; // end of struct BSON + +PBSON JbinAlloc(PGLOBAL g, UDF_ARGS* args, ulong len, PJSON jsp); + +/*********************************************************************************/ +/* The JSON tree node. Can be an Object or an Array. */ +/*********************************************************************************/ +typedef struct _jnode { + PSZ Key; // The key used for object + OPVAL Op; // Operator used for this node + PVAL CncVal; // To cont value used for OP_CNC + PVAL Valp; // The internal array VALUE + int Rank; // The rank in array + int Rx; // Read row number + int Nx; // Next to read row number +} JNODE, *PJNODE; + +/*********************************************************************************/ +/* The JSON utility functions. 
*/ +/*********************************************************************************/ +bool IsNum(PSZ s); +char *NextChr(PSZ s, char sep); +char *GetJsonNull(void); +uint GetJsonGrpSize(void); +my_bool JsonSubSet(PGLOBAL g, my_bool b = false); +my_bool CalcLen(UDF_ARGS* args, my_bool obj, unsigned long& reslen, + unsigned long& memlen, my_bool mod = false); +my_bool JsonInit(UDF_INIT* initid, UDF_ARGS* args, char* message, my_bool mbn, + unsigned long reslen, unsigned long memlen, + unsigned long more = 0); +my_bool CheckMemory(PGLOBAL g, UDF_INIT* initid, UDF_ARGS* args, uint n, + my_bool m, my_bool obj = false, my_bool mod = false); +PSZ MakePSZ(PGLOBAL g, UDF_ARGS* args, int i); +int IsArgJson(UDF_ARGS* args, uint i); +char *GetJsonFile(PGLOBAL g, char* fn); + +/*********************************************************************************/ +/* Structure JPN. Used to make the locate path. */ +/*********************************************************************************/ +typedef struct _jpn { + int Type; + PCSZ Key; + int N; +} JPN, *PJPN; + +#endif // 0 + /* --------------------------- New Testing BJSON Stuff --------------------------*/ +extern uint JsonGrpSize; +uint GetJsonGroupSize(void); typedef class BJNX* PBJNX; @@ -19,11 +89,13 @@ typedef class BJNX* PBJNX; class BJNX : public BDOC { public: // Constructors + BJNX(PGLOBAL g); BJNX(PGLOBAL g, PBVAL row, int type, int len = 64, int prec = 0, my_bool wr = false); // Implementation int GetPrecision(void) { return Prec; } PVAL GetValue(void) { return Value; } + void SetRow(PBVAL vp) { Row = vp; } // Methods my_bool SetJpath(PGLOBAL g, char* path, my_bool jb = false); @@ -32,9 +104,16 @@ public: PBVAL GetRowValue(PGLOBAL g, PBVAL row, int i, my_bool b = true); PBVAL GetJson(PGLOBAL g); my_bool CheckPath(PGLOBAL g); + my_bool CheckPath(PGLOBAL g, UDF_ARGS* args, PBVAL jsp, PBVAL& jvp, int n); my_bool WriteValue(PGLOBAL g, PBVAL jvalp); - char* Locate(PGLOBAL g, PBVAL jsp, PBVAL jvp, int k = 1); - char* LocateAll(PGLOBAL g, PBVAL jsp, PBVAL jvp, int mx = 10); + char *Locate(PGLOBAL g, PBVAL jsp, PBVAL jvp, int k = 1); + char *LocateAll(PGLOBAL g, PBVAL jsp, PBVAL jvp, int mx = 10); + PSZ MakeKey(UDF_ARGS* args, int i); + PBVAL MakeBinValue(PGLOBAL g, UDF_ARGS* args, uint i); + PBVAL MakeValue(PGLOBAL g, UDF_ARGS* args, uint i, PBVAL* top = NULL); + PBVAL MakeTypedValue(PGLOBAL g, UDF_ARGS* args, uint i, + JTYP type, PBVAL* top = NULL); + PBVAL ParseJsonFile(PGLOBAL g, char* fn, int& pty, size_t& len); protected: my_bool SetArrayOptions(PGLOBAL g, char* p, int i, PSZ nm); @@ -99,6 +178,14 @@ extern "C" { DllExport char* bson_array_add_values(UDF_EXEC_ARGS); DllExport void bson_array_add_values_deinit(UDF_INIT*); + DllExport my_bool bson_array_add_init(UDF_INIT*, UDF_ARGS*, char*); + DllExport char* bson_array_add(UDF_EXEC_ARGS); + DllExport void bson_array_add_deinit(UDF_INIT*); + + DllExport my_bool bson_array_delete_init(UDF_INIT*, UDF_ARGS*, char*); + DllExport char* bson_array_delete(UDF_EXEC_ARGS); + DllExport void bson_array_delete_deinit(UDF_INIT*); + DllExport my_bool bsonlocate_init(UDF_INIT*, UDF_ARGS*, char*); DllExport char* bsonlocate(UDF_EXEC_ARGS); DllExport void bsonlocate_deinit(UDF_INIT*); @@ -107,6 +194,104 @@ extern "C" { DllExport char* bson_locate_all(UDF_EXEC_ARGS); DllExport void bson_locate_all_deinit(UDF_INIT*); + DllExport my_bool bson_contains_init(UDF_INIT*, UDF_ARGS*, char*); + DllExport long long bson_contains(UDF_INIT*, UDF_ARGS*, char*, char*); + DllExport void bson_contains_deinit(UDF_INIT*); 
+ + DllExport my_bool bsoncontains_path_init(UDF_INIT*, UDF_ARGS*, char*); + DllExport long long bsoncontains_path(UDF_INIT*, UDF_ARGS*, char*, char*); + DllExport void bsoncontains_path_deinit(UDF_INIT*); + + DllExport my_bool bson_make_object_init(UDF_INIT*, UDF_ARGS*, char*); + DllExport char* bson_make_object(UDF_EXEC_ARGS); + DllExport void bson_make_object_deinit(UDF_INIT*); + + DllExport my_bool bson_object_nonull_init(UDF_INIT*, UDF_ARGS*, char*); + DllExport char* bson_object_nonull(UDF_EXEC_ARGS); + DllExport void bson_object_nonull_deinit(UDF_INIT*); + + DllExport my_bool bson_object_key_init(UDF_INIT*, UDF_ARGS*, char*); + DllExport char* bson_object_key(UDF_EXEC_ARGS); + DllExport void bson_object_key_deinit(UDF_INIT*); + + DllExport my_bool bson_object_add_init(UDF_INIT*, UDF_ARGS*, char*); + DllExport char* bson_object_add(UDF_EXEC_ARGS); + DllExport void bson_object_add_deinit(UDF_INIT*); + + DllExport my_bool bson_object_delete_init(UDF_INIT*, UDF_ARGS*, char*); + DllExport char* bson_object_delete(UDF_EXEC_ARGS); + DllExport void bson_object_delete_deinit(UDF_INIT*); + + DllExport my_bool bson_object_list_init(UDF_INIT*, UDF_ARGS*, char*); + DllExport char* bson_object_list(UDF_EXEC_ARGS); + DllExport void bson_object_list_deinit(UDF_INIT*); + + DllExport my_bool bson_object_values_init(UDF_INIT*, UDF_ARGS*, char*); + DllExport char* bson_object_values(UDF_EXEC_ARGS); + DllExport void bson_object_values_deinit(UDF_INIT*); + + DllExport my_bool bson_item_merge_init(UDF_INIT*, UDF_ARGS*, char*); + DllExport char* bson_item_merge(UDF_EXEC_ARGS); + DllExport void bson_item_merge_deinit(UDF_INIT*); + + DllExport my_bool bson_get_item_init(UDF_INIT*, UDF_ARGS*, char*); + DllExport char* bson_get_item(UDF_EXEC_ARGS); + DllExport void bson_get_item_deinit(UDF_INIT*); + + DllExport my_bool bsonget_string_init(UDF_INIT*, UDF_ARGS*, char*); + DllExport char* bsonget_string(UDF_EXEC_ARGS); + DllExport void bsonget_string_deinit(UDF_INIT*); + + DllExport my_bool bsonget_int_init(UDF_INIT*, UDF_ARGS*, char*); + DllExport long long bsonget_int(UDF_INIT*, UDF_ARGS*, char*, char*); + DllExport void bsonget_int_deinit(UDF_INIT*); + + DllExport my_bool bsonget_real_init(UDF_INIT*, UDF_ARGS*, char*); + DllExport double bsonget_real(UDF_INIT*, UDF_ARGS*, char*, char*); + DllExport void bsonget_real_deinit(UDF_INIT*); + + DllExport my_bool bsonset_grp_size_init(UDF_INIT*, UDF_ARGS*, char*); + DllExport long long bsonset_grp_size(UDF_INIT*, UDF_ARGS*, char*, char*); + + DllExport my_bool bsonget_grp_size_init(UDF_INIT*, UDF_ARGS*, char*); + DllExport long long bsonget_grp_size(UDF_INIT*, UDF_ARGS*, char*, char*); + + DllExport my_bool bson_array_grp_init(UDF_INIT*, UDF_ARGS*, char*); + DllExport void bson_array_grp_clear(UDF_INIT *, char *, char *); + DllExport void bson_array_grp_add(UDF_INIT *, UDF_ARGS *, char *, char *); + DllExport char *bson_array_grp(UDF_EXEC_ARGS); + DllExport void bson_array_grp_deinit(UDF_INIT*); + + DllExport my_bool bson_object_grp_init(UDF_INIT*, UDF_ARGS*, char*); + DllExport void bson_object_grp_clear(UDF_INIT *, char *, char *); + DllExport void bson_object_grp_add(UDF_INIT *, UDF_ARGS *, char *, char *); + DllExport char *bson_object_grp(UDF_EXEC_ARGS); + DllExport void bson_object_grp_deinit(UDF_INIT*); + + DllExport my_bool bson_set_item_init(UDF_INIT*, UDF_ARGS*, char*); + DllExport char *bson_set_item(UDF_EXEC_ARGS); + DllExport void bson_set_item_deinit(UDF_INIT*); + + DllExport my_bool bson_insert_item_init(UDF_INIT*, UDF_ARGS*, char*); + DllExport char 
*bson_insert_item(UDF_EXEC_ARGS); + DllExport void bson_insert_item_deinit(UDF_INIT*); + + DllExport my_bool bson_update_item_init(UDF_INIT*, UDF_ARGS*, char*); + DllExport char *bson_update_item(UDF_EXEC_ARGS); + DllExport void bson_update_item_deinit(UDF_INIT*); + + DllExport my_bool bson_file_init(UDF_INIT*, UDF_ARGS*, char*); + DllExport char *bson_file(UDF_EXEC_ARGS); + DllExport void bson_file_deinit(UDF_INIT*); + + DllExport my_bool bfile_make_init(UDF_INIT*, UDF_ARGS*, char*); + DllExport char* bfile_make(UDF_EXEC_ARGS); + DllExport void bfile_make_deinit(UDF_INIT*); + + DllExport my_bool bfile_convert_init(UDF_INIT*, UDF_ARGS*, char*); + DllExport char* bfile_convert(UDF_EXEC_ARGS); + DllExport void bfile_convert_deinit(UDF_INIT*); + DllExport my_bool bfile_bjson_init(UDF_INIT*, UDF_ARGS*, char*); DllExport char* bfile_bjson(UDF_EXEC_ARGS); DllExport void bfile_bjson_deinit(UDF_INIT*); diff --git a/storage/connect/filamtxt.cpp b/storage/connect/filamtxt.cpp index 35f1102cf5d..3c2c49de8b7 100644 --- a/storage/connect/filamtxt.cpp +++ b/storage/connect/filamtxt.cpp @@ -812,7 +812,7 @@ int DOSFAM::ReadBuffer(PGLOBAL g) p = To_Buf + strlen(To_Buf) - 1; if (trace(2)) - htrc(" Read: To_Buf=%p p=%c\n", To_Buf, To_Buf, p); + htrc(" Read: To_Buf=%p p=%c\n", To_Buf, p); #if defined(__WIN__) if (Bin) { diff --git a/storage/connect/jsonudf.cpp b/storage/connect/jsonudf.cpp index bbe7cba28cc..3d6de7ab3d5 100644 --- a/storage/connect/jsonudf.cpp +++ b/storage/connect/jsonudf.cpp @@ -35,7 +35,7 @@ static PJSON JsonNew(PGLOBAL g, JTYP type); static PJVAL JvalNew(PGLOBAL g, JTYP type, void *vp = NULL); static PJSNX JsnxNew(PGLOBAL g, PJSON jsp, int type, int len = 64); -static uint JsonGrpSize = 10; +uint JsonGrpSize = 10; /*********************************************************************************/ /* SubAlloc a new JSNX class with protection against memory exhaustion. */ @@ -1166,7 +1166,7 @@ static void SetChanged(PBSON bsp) /*********************************************************************************/ /* Replaces GetJsonGrpSize not usable when CONNECT is not installed. */ /*********************************************************************************/ -static uint GetJsonGroupSize(void) +uint GetJsonGroupSize(void) { return (JsonGrpSize) ? JsonGrpSize : GetJsonGrpSize(); } // end of GetJsonGroupSize @@ -5837,11 +5837,11 @@ my_bool jfile_convert_init(UDF_INIT* initid, UDF_ARGS* args, char* message) { } // endif args CalcLen(args, false, reslen, memlen); - return JsonInit(initid, args, message, false, reslen, memlen); + return JsonInit(initid, args, message, true, reslen, memlen); } // end of jfile_convert_init char *jfile_convert(UDF_INIT* initid, UDF_ARGS* args, char* result, - unsigned long *res_length, char *, char *error) { + unsigned long *res_length, char *is_null, char *error) { char *str, *fn, *ofn; int lrecl = (int)*(longlong*)args->args[2]; PGLOBAL g = (PGLOBAL)initid->ptr; @@ -5853,20 +5853,21 @@ char *jfile_convert(UDF_INIT* initid, UDF_ARGS* args, char* result, if (!g->Xchk) { JUP* jup = new(g) JUP(g); - str = strcpy(result, jup->UnprettyJsonFile(g, fn, ofn, lrecl)); + str = jup->UnprettyJsonFile(g, fn, ofn, lrecl); g->Xchk = str; } else str = (char*)g->Xchk; if (!str) { - if (g->Message) - str = strcpy(result, g->Message); - else - str = strcpy(result, "Unexpected error"); + PUSH_WARNING(g->Message ? 
g->Message : "Unexpected error"); + *is_null = 1; + *error = 1; + *res_length = 0; + } else { + strcpy(result, str); + *res_length = strlen(str); + } // endif str - } // endif str - - *res_length = strlen(str); return str; } // end of jfile_convert From 8f34d45404817a4fe63251ac2ab74da96b6849fa Mon Sep 17 00:00:00 2001 From: Olivier Bertrand Date: Fri, 8 Jan 2021 22:18:52 +0100 Subject: [PATCH 045/150] - Add the new BSON temporary type for testing modified: storage/connect/CMakeLists.txt modified: storage/connect/bson.cpp modified: storage/connect/bson.h modified: storage/connect/bsonudf.cpp modified: storage/connect/bsonudf.h modified: storage/connect/global.h modified: storage/connect/json.cpp modified: storage/connect/jsonudf.cpp modified: storage/connect/mysql-test/connect/disabled.def modified: storage/connect/mysql-test/connect/t/mongo_test.inc modified: storage/connect/plugutil.cpp modified: storage/connect/tabbson.cpp modified: storage/connect/tabjson.cpp --- storage/connect/CMakeLists.txt | 13 + storage/connect/bson.cpp | 183 +- storage/connect/bson.h | 17 +- storage/connect/bsonudf.cpp | 2132 +++++++++++++++-- storage/connect/bsonudf.h | 102 +- storage/connect/global.h | 2 +- storage/connect/json.cpp | 88 +- storage/connect/jsonudf.cpp | 2 +- .../connect/mysql-test/connect/disabled.def | 5 +- .../connect/mysql-test/connect/r/bson.result | 517 ++++ .../mysql-test/connect/r/bson_java_2.result | 385 +++ .../mysql-test/connect/r/bson_java_3.result | 385 +++ .../mysql-test/connect/r/bson_mongo_c.result | 385 +++ .../connect/mysql-test/connect/t/bson.test | 294 +++ .../mysql-test/connect/t/bson_java_2.test | 14 + .../mysql-test/connect/t/bson_java_3.test | 14 + .../mysql-test/connect/t/bson_mongo_c.test | 10 + .../mysql-test/connect/t/mongo_test.inc | 4 + storage/connect/plugutil.cpp | 11 +- storage/connect/tabbson.cpp | 44 +- storage/connect/tabjson.cpp | 46 +- 21 files changed, 4227 insertions(+), 426 deletions(-) create mode 100644 storage/connect/mysql-test/connect/r/bson.result create mode 100644 storage/connect/mysql-test/connect/r/bson_java_2.result create mode 100644 storage/connect/mysql-test/connect/r/bson_java_3.result create mode 100644 storage/connect/mysql-test/connect/r/bson_mongo_c.result create mode 100644 storage/connect/mysql-test/connect/t/bson.test create mode 100644 storage/connect/mysql-test/connect/t/bson_java_2.test create mode 100644 storage/connect/mysql-test/connect/t/bson_java_3.test create mode 100644 storage/connect/mysql-test/connect/t/bson_mongo_c.test diff --git a/storage/connect/CMakeLists.txt b/storage/connect/CMakeLists.txt index 7eedba08bee..e8ffeebafcc 100644 --- a/storage/connect/CMakeLists.txt +++ b/storage/connect/CMakeLists.txt @@ -80,6 +80,19 @@ ELSE(NOT UNIX) ENDIF() ENDIF(UNIX) +# +# BSON: this the new version of JSON that is temporarily included here for testing +# When fully tested, it will replace the old support (and be renamed to JSON) +# + +OPTION(CONNECT_WITH_BSON "Compile CONNECT storage engine with BSON support" ON) + +IF(CONNECT_WITH_BSON) + SET(CONNECT_SOURCES ${CONNECT_SOURCES} + bson.cpp tabbson.cpp bsonudf.cpp bson.h tabbson.h bsonudf.h) + add_definitions(-DBSON_SUPPORT) +ENDIF(CONNECT_WITH_BSON) + # # VCT: the VEC format might be not supported in future versions diff --git a/storage/connect/bson.cpp b/storage/connect/bson.cpp index 2588657089f..f3ad919993f 100644 --- a/storage/connect/bson.cpp +++ b/storage/connect/bson.cpp @@ -544,9 +544,9 @@ fin: buf[n] = 0; if (has_dot || has_e) { - double dv = strtod(buf, NULL); + double 
dv = atof(buf); - if (nd > 5 || dv > FLT_MAX || dv < FLT_MIN) { + if (nd >= 6 || dv > FLT_MAX || dv < FLT_MIN) { double* dvp = (double*)PlugSubAlloc(G, NULL, sizeof(double)); *dvp = dv; @@ -557,7 +557,7 @@ fin: vlp->Type = TYPE_FLOAT; } // endif nd - vlp->Nd = nd; + vlp->Nd = MY_MIN(nd, 16); } else { longlong iv = strtoll(buf, NULL, 10); @@ -765,6 +765,8 @@ bool BDOC::SerializeValue(PBVAL jvp) return jp->WriteStr(buf); case TYPE_NULL: return jp->WriteStr("null"); + case TYPE_JVAL: + return SerializeValue(MVP(jvp->To_Val)); default: return jp->WriteStr("???"); // TODO } // endswitch Type @@ -793,7 +795,12 @@ void* BJSON::BsonSubAlloc(size_t size) "Not enough memory for request of %zd (used=%zd free=%zd)", size, pph->To_Free, pph->FreeBlk); xtrc(1, "BsonSubAlloc: %s\n", G->Message); - throw(1234); + + if (Throw) + throw(1234); + else + return NULL; + } /* endif size OS32 code */ // Do the suballocation the simplest way @@ -1066,7 +1073,7 @@ PBVAL BJSON::MergeObject(PBVAL bop1, PBVAL bop2) /***********************************************************************/ /* Delete a value corresponding to the given key. */ /***********************************************************************/ -void BJSON::DeleteKey(PBVAL bop, PCSZ key) +bool BJSON::DeleteKey(PBVAL bop, PCSZ key) { CheckType(bop, TYPE_JOB); PBPR brp, pbrp = NULL; @@ -1079,10 +1086,11 @@ void BJSON::DeleteKey(PBVAL bop, PCSZ key) bop->To_Val = brp->Vlp.Next; bop->Nd--; - break; + return true;; } else pbrp = brp; + return false; } // end of DeleteKey /***********************************************************************/ @@ -1247,24 +1255,25 @@ PSZ BJSON::GetArrayText(PGLOBAL g, PBVAL bap, PSTRG text) /***********************************************************************/ /* Delete a Value from the Arrays Value list. */ /***********************************************************************/ -void BJSON::DeleteValue(PBVAL bap, int n) +bool BJSON::DeleteValue(PBVAL bap, int n) { CheckType(bap, TYPE_JAR); int i = 0; PBVAL bvp, pvp = NULL; - for (bvp = GetArray(bap); bvp; i++, bvp = GetNext(bvp)) - if (i == n) { - if (pvp) - pvp->Next = bvp->Next; - else - bap->To_Val = bvp->Next; + for (bvp = GetArray(bap); bvp; i++, bvp = GetNext(bvp)) + if (i == n) { + if (pvp) + pvp->Next = bvp->Next; + else + bap->To_Val = bvp->Next; - bap->Nd--; - break; - } else - pvp = bvp; + bap->Nd--; + return true;; + } else + pvp = bvp; + return false; } // end of DeleteValue /***********************************************************************/ @@ -1510,7 +1519,7 @@ double BJSON::GetDouble(PBVAL vp) } // endswitch Type return d; -} // end of GetFloat +} // end of GetDouble /***********************************************************************/ /* Return the Value's String value. */ @@ -1603,61 +1612,64 @@ PBVAL BJSON::SetValue(PBVAL vlp, PVAL valp) if (!valp || valp->IsNull()) { vlp->Type = TYPE_NULL; } else switch (valp->GetType()) { - case TYPE_DATE: - if (((DTVAL*)valp)->IsFormatted()) + case TYPE_DATE: + if (((DTVAL*)valp)->IsFormatted()) + vlp->To_Val = DupStr(valp->GetCharValue()); + else { + char buf[32]; + + vlp->To_Val = DupStr(valp->GetCharString(buf)); + } // endif Formatted + + vlp->Type = TYPE_DTM; + break; + case TYPE_STRING: vlp->To_Val = DupStr(valp->GetCharValue()); - else { - char buf[32]; + vlp->Type = TYPE_STRG; + break; + case TYPE_DOUBLE: + case TYPE_DECIM: + { double d = valp->GetFloatValue(); + int nd = (IsTypeNum(valp->GetType())) ? 
valp->GetValPrec() : 0; - vlp->To_Val = DupStr(valp->GetCharString(buf)); - } // endif Formatted + if (nd <= 6 && d >= FLT_MIN && d <= FLT_MAX) { + vlp->F = (float)valp->GetFloatValue(); + vlp->Type = TYPE_FLOAT; + } else { + double* dp = (double*)BsonSubAlloc(sizeof(double)); - vlp->Type = TYPE_DTM; - break; - case TYPE_STRING: - vlp->To_Val = DupStr(valp->GetCharValue()); - vlp->Type = TYPE_STRG; - break; - case TYPE_DOUBLE: - case TYPE_DECIM: - vlp->Nd = (IsTypeNum(valp->GetType())) ? valp->GetValPrec() : 0; + *dp = d; + vlp->To_Val = MOF(dp); + vlp->Type = TYPE_DBL; + } // endif Nd - if (vlp->Nd <= 6) { - vlp->F = (float)valp->GetFloatValue(); - vlp->Type = TYPE_FLOAT; - } else { - double *dp = (double*)BsonSubAlloc(sizeof(double)); - - *dp = valp->GetFloatValue(); - vlp->To_Val = MOF(dp); - vlp->Type = TYPE_DBL; - } // endif Nd - - break; - case TYPE_TINY: - vlp->B = valp->GetTinyValue() != 0; - vlp->Type = TYPE_BOOL; - case TYPE_INT: - vlp->N = valp->GetIntValue(); - vlp->Type = TYPE_INTG; - break; - case TYPE_BIGINT: - if (valp->GetBigintValue() >= INT_MIN32 && - valp->GetBigintValue() <= INT_MAX32) { + vlp->Nd = MY_MIN(nd, 16); + } break; + case TYPE_TINY: + vlp->B = valp->GetTinyValue() != 0; + vlp->Type = TYPE_BOOL; + break; + case TYPE_INT: vlp->N = valp->GetIntValue(); vlp->Type = TYPE_INTG; - } else { - longlong* llp = (longlong*)BsonSubAlloc(sizeof(longlong)); + break; + case TYPE_BIGINT: + if (valp->GetBigintValue() >= INT_MIN32 && + valp->GetBigintValue() <= INT_MAX32) { + vlp->N = valp->GetIntValue(); + vlp->Type = TYPE_INTG; + } else { + longlong* llp = (longlong*)BsonSubAlloc(sizeof(longlong)); - *llp = valp->GetBigintValue(); - vlp->To_Val = MOF(llp); - vlp->Type = TYPE_BINT; - } // endif BigintValue + *llp = valp->GetBigintValue(); + vlp->To_Val = MOF(llp); + vlp->Type = TYPE_BINT; + } // endif BigintValue - break; - default: - sprintf(G->Message, "Unsupported typ %d\n", valp->GetType()); - throw(777); + break; + default: + sprintf(G->Message, "Unsupported typ %d\n", valp->GetType()); + throw(777); } // endswitch Type return vlp; @@ -1702,16 +1714,44 @@ void BJSON::SetBigint(PBVAL vlp, longlong ll) /***********************************************************************/ /* Set the Value's value as the given DOUBLE. */ /***********************************************************************/ -void BJSON::SetFloat(PBVAL vlp, double f) { - vlp->F = (float)f; - vlp->Nd = 6; - vlp->Type = TYPE_FLOAT; +void BJSON::SetFloat(PBVAL vlp, double d, int nd) +{ + double* dp = (double*)BsonSubAlloc(sizeof(double)); + + *dp = d; + vlp->To_Val = MOF(dp); + vlp->Nd = MY_MIN(nd, 16); + vlp->Type = TYPE_DBL; } // end of SetFloat /***********************************************************************/ +/* Set the Value's value as the given DOUBLE representation. */ +/***********************************************************************/ +void BJSON::SetFloat(PBVAL vlp, PSZ s) +{ + char *p = strchr(s, '.'); + int nd = 0; + double d = atof(s); + + if (p) { + for (++p; isdigit(*p); nd++, p++); + for (--p; *p == '0'; nd--, p--); + } // endif p + + if (nd < 6 && d >= FLT_MIN && d <= FLT_MAX) { + vlp->F = (float)d; + vlp->Nd = nd; + vlp->Type = TYPE_FLOAT; + } else + SetFloat(vlp, d, nd); + +} // end of SetFloat + + /***********************************************************************/ /* Set the Value's value as the given string. 
*/ /***********************************************************************/ -void BJSON::SetString(PBVAL vlp, PSZ s, int ci) { +void BJSON::SetString(PBVAL vlp, PSZ s, int ci) +{ vlp->To_Val = MOF(s); vlp->Nd = ci; vlp->Type = TYPE_STRG; @@ -1720,7 +1760,8 @@ void BJSON::SetString(PBVAL vlp, PSZ s, int ci) { /***********************************************************************/ /* True when its JSON or normal value is null. */ /***********************************************************************/ -bool BJSON::IsValueNull(PBVAL vlp) { +bool BJSON::IsValueNull(PBVAL vlp) +{ bool b; switch (vlp->Type) { diff --git a/storage/connect/bson.h b/storage/connect/bson.h index dd299c7c53e..235168a36ce 100644 --- a/storage/connect/bson.h +++ b/storage/connect/bson.h @@ -62,7 +62,8 @@ DllExport bool IsNum(PSZ s); class BJSON : public BLOCK { public: // Constructor - BJSON(PGLOBAL g, PBVAL vp = NULL) { G = g, Base = G->Sarea; Bvp = vp; } + BJSON(PGLOBAL g, PBVAL vp = NULL) + { G = g, Base = G->Sarea; Bvp = vp; Throw = true; } // Utility functions inline OFFSET MOF(void *p) {return MakeOff(Base, p);} @@ -73,6 +74,7 @@ public: inline longlong LLN(OFFSET o) {return *(longlong*)MakePtr(Base, o);} inline double DBL(OFFSET o) {return *(double*)MakePtr(Base, o);} + void Reset(void) {Base = G->Sarea;} void* GetBase(void) { return Base; } void SubSet(bool b = false); void MemSave(void) {G->Saved_Size = ((PPOOLHEADER)G->Sarea)->To_Free;} @@ -102,7 +104,7 @@ public: PBVAL GetArrayValue(PBVAL bap, int i); PSZ GetArrayText(PGLOBAL g, PBVAL bap, PSTRG text); void MergeArray(PBVAL bap1,PBVAL bap2); - void DeleteValue(PBVAL bap, int n); + bool DeleteValue(PBVAL bap, int n); void AddArrayValue(PBVAL bap, OFFSET nvp = NULL, int* x = NULL); inline void AddArrayValue(PBVAL bap, PBVAL nvp = NULL, int* x = NULL) {AddArrayValue(bap, MOF(nvp), x);} @@ -126,7 +128,7 @@ public: void SetKeyValue(PBVAL bop, OFFSET bvp, PSZ key); inline void SetKeyValue(PBVAL bop, PBVAL vlp, PSZ key) {SetKeyValue(bop, MOF(vlp), key);} - void DeleteKey(PBVAL bop, PCSZ k); + bool DeleteKey(PBVAL bop, PCSZ k); bool IsObjectNull(PBVAL bop); // Value functions @@ -147,17 +149,20 @@ public: void SetString(PBVAL vlp, PSZ s, int ci = 0); void SetInteger(PBVAL vlp, int n); void SetBigint(PBVAL vlp, longlong ll); - void SetFloat(PBVAL vlp, double f); + void SetFloat(PBVAL vlp, double f, int nd = 16); + void SetFloat(PBVAL vlp, PSZ s); void SetBool(PBVAL vlp, bool b); void Clear(PBVAL vlp) { vlp->N = 0; vlp->Nd = 0; vlp->Next = 0; } bool IsValueNull(PBVAL vlp); - bool IsJson(PBVAL vlp) - {return vlp ? vlp->Type == TYPE_JAR || vlp->Type == TYPE_JOB : false;} + bool IsJson(PBVAL vlp) {return vlp ? vlp->Type == TYPE_JAR || + vlp->Type == TYPE_JOB || + vlp->Type == TYPE_JVAL : false;} // Members PGLOBAL G; PBVAL Bvp; void *Base; + bool Throw; protected: // Default constructor not to be used diff --git a/storage/connect/bsonudf.cpp b/storage/connect/bsonudf.cpp index 9c80b881e52..4bdeafa0c33 100644 --- a/storage/connect/bsonudf.cpp +++ b/storage/connect/bsonudf.cpp @@ -1,6 +1,6 @@ /****************** bsonudf C++ Program Source Code File (.CPP) ******************/ /* PROGRAM NAME: bsonudf Version 1.0 */ -/* (C) Copyright to the author Olivier BERTRAND 2020 */ +/* (C) Copyright to the author Olivier BERTRAND 2020 - 2021 */ /* This program are the BSON User Defined Functions. 
*/ /*********************************************************************************/ @@ -28,6 +28,7 @@ #define M 6 int IsArgJson(UDF_ARGS* args, uint i); +void SetChanged(PBSON bsp); /* --------------------------------- JSON UDF ---------------------------------- */ @@ -46,6 +47,29 @@ inline void JsonFreeMem(PGLOBAL g) { g = PlugExit(g); } /* end of JsonFreeMem */ +/*********************************************************************************/ +/* Allocate and initialize a BSON structure. */ +/*********************************************************************************/ +PBSON BbinAlloc(PGLOBAL g, ulong len, PBVAL jsp) +{ + PBSON bsp = (PBSON)PlgDBSubAlloc(g, NULL, sizeof(BSON)); + + if (bsp) { + strcpy(bsp->Msg, "Binary Json"); + bsp->Msg[BMX] = 0; + bsp->Filename = NULL; + bsp->G = g; + bsp->Pretty = 2; + bsp->Reslen = len; + bsp->Changed = false; + bsp->Top = bsp->Jsp = (PJSON)jsp; + bsp->Bsp = NULL; + } else + PUSH_WARNING(g->Message); + + return bsp; +} /* end of BbinAlloc */ + /* --------------------------- New Testing BJSON Stuff --------------------------*/ /*********************************************************************************/ @@ -97,6 +121,8 @@ BJNX::BJNX(PGLOBAL g) : BDOC(g) Found = false; Wr = false; Jb = false; + Changed = false; + Throw = false; } // end of BJNX constructor /*********************************************************************************/ @@ -126,6 +152,8 @@ BJNX::BJNX(PGLOBAL g, PBVAL row, int type, int len, int prec, my_bool wr) : BDOC Found = false; Wr = wr; Jb = false; + Changed = false; + Throw = false; } // end of BJNX constructor /*********************************************************************************/ @@ -209,9 +237,7 @@ my_bool BJNX::SetArrayOptions(PGLOBAL g, char* p, int i, PSZ nm) case '<': jnp->Op = OP_MIN; break; case '!': jnp->Op = OP_SEP; break; // Average case '#': jnp->Op = OP_NUM; break; - case '*': // Expand this array - strcpy(g->Message, "Expand not supported by this function"); - return true; + case '*': jnp->Op = OP_EXP; break; default: sprintf(g->Message, "Invalid function specification %c", *p); return true; @@ -521,6 +547,9 @@ PBVAL BJNX::GetRowValue(PGLOBAL g, PBVAL row, int i, my_bool b) Jb = b; // return DupVal(g, row); return row; // or last line ??? 
+ } else if (Nodes[i].Op == OP_EXP) { + PUSH_WARNING("Expand not supported by this function"); + return NULL; } else switch (row->Type) { case TYPE_JOB: if (!Nodes[i].Key) { @@ -685,7 +714,10 @@ PBVAL BJNX::GetRow(PGLOBAL g) for (int i = 0; i < Nod - 1 && row; i++) { if (Nodes[i].Op == OP_XX) break; - else switch (row->Type) { + else if (Nodes[i].Op == OP_EXP) { + PUSH_WARNING("Expand not supported by this function"); + return NULL; + } else switch (row->Type) { case TYPE_JOB: if (!Nodes[i].Key) // Expected Array was not there, wrap the value @@ -789,6 +821,90 @@ my_bool BJNX::WriteValue(PGLOBAL g, PBVAL jvalp) return false; } // end of WriteValue +/*********************************************************************************/ +/* GetRowValue: */ +/*********************************************************************************/ +my_bool BJNX::DeleteItem(PGLOBAL g, PBVAL row) +{ + int n = -1; + my_bool b = false; + bool loop; + PBVAL vlp, pvp, rwp; + + do { + loop = false; + vlp = NULL; + pvp = rwp = row; + + for (int i = 0; i < Nod && rwp; i++) { + if (Nodes[i].Op == OP_XX) + break; + else switch (rwp->Type) { + case TYPE_JOB: + if (!Nodes[i].Key) { + vlp = NULL; + } else + vlp = GetKeyValue(rwp, Nodes[i].Key); + + break; + case TYPE_JAR: + if (!Nodes[i].Key) { + if (Nodes[i].Op == OP_EXP) { + if (loop) { + PUSH_WARNING("Only one expand can be handled"); + return b; + } // endif loop + n++; + loop = true; + } else + n = Nodes[i].Rank; + + vlp = GetArrayValue(rwp, n); + } else + vlp = NULL; + + break; + case TYPE_JVAL: + vlp = rwp; + break; + default: + vlp = NULL; + } // endswitch Type + + pvp = rwp; + rwp = vlp; + vlp = NULL; + } // endfor i + + if (rwp) { + if (Nodes[Nod - 1].Op == OP_XX) { + if (!IsJson(rwp)) + rwp->Type = TYPE_NULL; + + rwp->To_Val = 0; + } else switch (pvp->Type) { + case TYPE_JOB: + b = DeleteKey(pvp, Nodes[Nod - 1].Key); + break; + case TYPE_JAR: + if (Nodes[Nod - 1].Op == OP_EXP) { + pvp->To_Val = 0; + loop = false; + } else + b = DeleteValue(pvp, n); + + break; + default: + break; + } // endswitch Type + + } // endif rwp + + } while (loop); + + return b; +} // end of DeleteItem + /*********************************************************************************/ /* CheckPath: Checks whether the path exists in the document. */ /*********************************************************************************/ @@ -1248,12 +1364,13 @@ my_bool BJNX::AddPath(void) /*********************************************************************************/ /* Make a JSON value from the passed argument. */ /*********************************************************************************/ -PBVAL BJNX::MakeValue(PGLOBAL g, UDF_ARGS *args, uint i, PBVAL *top) +PBVAL BJNX::MakeValue(UDF_ARGS *args, uint i, bool b, PBVAL *top) { char *sap = (args->arg_count > i) ? 
args->args[i] : NULL; int n, len; int ci; long long bigint; + PGLOBAL& g = G; PBVAL jvp = NewVal(); if (top) @@ -1267,15 +1384,25 @@ PBVAL BJNX::MakeValue(PGLOBAL g, UDF_ARGS *args, uint i, PBVAL *top) if (n) { if (n == 3) { -// if (top) -// *top = ((PBSON)sap)->Top; + if (i == 0) { + PBSON bsp = (PBSON)sap; + + if (top) + *top = (PBVAL)bsp->Top; + + jvp = (PBVAL)bsp->Jsp; + G = bsp->G; + Base = G->Sarea; + } else { + PUSH_WARNING("Only first argument can be binary"); + return jvp; + } // endelse i -// jvp = ((PBSON)sap)->Jsp; } else { if (n == 2) { if (!(sap = GetJsonFile(g, sap))) { PUSH_WARNING(g->Message); - return NewVal(); + return jvp; } // endif sap len = strlen(sap); @@ -1289,8 +1416,38 @@ PBVAL BJNX::MakeValue(PGLOBAL g, UDF_ARGS *args, uint i, PBVAL *top) } // endif's n } else { - ci = (strnicmp(args->attributes[i], "ci", 2)) ? 0 : 1; - SetString(jvp, sap, ci); + PBVAL bp = NULL; + + if (b) { + if (strchr("[{ \t\r\n", *sap)) { + // Check whether this string is a valid json string + JsonMemSave(g); + + if (!(bp = ParseJson(g, sap, strlen(sap)))) + JsonSubSet(g); // Recover suballocated memory + + g->Saved_Size = 0; + } else { + // Perhaps a file name + char* s = GetJsonFile(g, sap); + + if (s) + bp = ParseJson(g, s, strlen(s)); + + } // endif's + + } // endif b + + if (!bp) { + ci = (strnicmp(args->attributes[i], "ci", 2)) ? 0 : 1; + SetString(jvp, sap, ci); + } else { + if (top) + *top = bp; + + jvp = bp; + } // endif bp + } // endif n } // endif len @@ -1310,7 +1467,7 @@ PBVAL BJNX::MakeValue(PGLOBAL g, UDF_ARGS *args, uint i, PBVAL *top) SetFloat(jvp, *(double*)sap); break; case DECIMAL_RESULT: - SetFloat(jvp, atof(MakePSZ(g, args, i))); + SetFloat(jvp, MakePSZ(g, args, i)); break; case TIME_RESULT: case ROW_RESULT: @@ -1321,85 +1478,6 @@ PBVAL BJNX::MakeValue(PGLOBAL g, UDF_ARGS *args, uint i, PBVAL *top) return jvp; } // end of MakeValue -/*********************************************************************************/ -/* Make a BVAL value from the passed argument. */ -/*********************************************************************************/ -PBVAL BJNX::MakeBinValue(PGLOBAL g, UDF_ARGS* args, uint i) -{ - char* sap = (args->arg_count > i) ? args->args[i] : NULL; - int n, len; - int ci; - longlong bigint; - PBVAL bp, bvp = NewVal(); - - if (sap) { - if (args->arg_type[i] == STRING_RESULT) { - if ((len = args->lengths[i])) { - if ((n = IsArgJson(args, i)) < 3) - sap = MakePSZ(g, args, i); - - if (n) { - if (n == 2) { - if (!(sap = GetJsonFile(g, sap))) { - PUSH_WARNING(g->Message); - return NULL; - } // endif sap - - len = strlen(sap); - } // endif 2 - - if (!(bp = ParseJson(g, sap, strlen(sap)))) { - PUSH_WARNING(g->Message); - return NULL; - } else - bvp = bp; - - } else { - // Check whether this string is a valid json string - JsonMemSave(g); - - if (!(bp = ParseJson(g, sap, strlen(sap)))) { - // Recover suballocated memory - JsonSubSet(g); - ci = (strnicmp(args->attributes[i], "ci", 2)) ? 
0 : 1; - SetString(bvp, sap, ci); - } else - bvp = bp; - - g->Saved_Size = 0; - } // endif n - - } // endif len - - } else switch (args->arg_type[i]) { - case INT_RESULT: - bigint = *(longlong*)sap; - - if ((bigint == 0LL && !strcmp(args->attributes[i], "FALSE")) || - (bigint == 1LL && !strcmp(args->attributes[i], "TRUE"))) - SetBool(bvp, (bool)bigint); - else - SetBigint(bvp, bigint); - - break; - case REAL_RESULT: - SetFloat(bvp, *(double*)sap); - break; - case DECIMAL_RESULT: - SetFloat(bvp, atof(MakePSZ(g, args, i))); - break; - case TIME_RESULT: - case ROW_RESULT: - default: - bvp->Type = TYPE_UNKNOWN; - break; - } // endswitch arg_type - - } // endif sap - - return bvp; -} // end of MakeBinValue - /*********************************************************************************/ /* Try making a JSON value of the passed type from the passed argument. */ /*********************************************************************************/ @@ -1407,7 +1485,7 @@ PBVAL BJNX::MakeTypedValue(PGLOBAL g, UDF_ARGS *args, uint i, JTYP type, PBVAL * { char *sap; PBVAL jsp; - PBVAL jvp = MakeValue(g, args, i, top); + PBVAL jvp = MakeValue(args, i, false, top); //if (type == TYPE_JSON) { // if (jvp->GetValType() >= TYPE_JSON) @@ -1485,15 +1563,94 @@ PBVAL BJNX::ParseJsonFile(PGLOBAL g, char *fn, int& pty, size_t& len) return jsp; } // end of ParseJsonFile -/* -----------------------------Utility functions ------------------------------ */ +/*********************************************************************************/ +/* Make the result according to the first argument type. */ +/*********************************************************************************/ +char *BJNX::MakeResult(UDF_ARGS *args, PBVAL top, uint n) +{ + char *str = NULL; + PGLOBAL& g = G; + + if (IsArgJson(args, 0) == 2) { + // Make the change in the json file + PSZ fn = MakePSZ(g, args, 0); + + if (Changed) { + int pretty = 2; + + for (uint i = n; i < args->arg_count; i++) + if (args->arg_type[i] == INT_RESULT) { + pretty = (int)*(longlong*)args->args[i]; + break; + } // endif type + + if (!Serialize(g, top, fn, pretty)) + PUSH_WARNING(g->Message); + + Changed = false; + } // endif Changed + + str = fn; + } else if (IsArgJson(args, 0) == 3) { + PBSON bsp = (PBSON)args->args[0]; + + if (bsp->Filename) { + if (Changed) { + // Make the change in the json file + if (!Serialize(g, (PBVAL)top, bsp->Filename, bsp->Pretty)) + PUSH_WARNING(g->Message); + + Changed = false; + } // endif Changed + + str = bsp->Filename; + } else if (!(str = Serialize(g, (PBVAL)top, NULL, 0))) + PUSH_WARNING(g->Message); + + } else if (!(str = Serialize(g, top, NULL, 0))) + PUSH_WARNING(g->Message); + + return str; +} // end of MakeResult /*********************************************************************************/ -/* GetMemPtr: returns the memory pointer used by this argument. */ +/* Make the binary result according to the first argument type. */ /*********************************************************************************/ -static PGLOBAL GetMemPtr(PGLOBAL g, UDF_ARGS *args, uint i) +PBSON BJNX::MakeBinResult(PGLOBAL g, UDF_ARGS *args, PBVAL top, ulong len, int n) { - return (IsArgJson(args, i) == 3) ? 
((PBSON)args->args[i])->G : g; -} // end of GetMemPtr + char* filename = NULL; + int pretty = 2; + PBSON bnp = NULL; + + if (IsArgJson(args, 0) == 3) { + bnp = (PBSON)args->args[0]; + + if (bnp->Top != (PJSON)top) + bnp->Top = bnp->Jsp = (PJSON)top; + + return bnp; + } // endif 3 + + if (IsArgJson(args, 0) == 2) { + for (uint i = n; i < args->arg_count; i++) + if (args->arg_type[i] == INT_RESULT) { + pretty = (int)*(longlong*)args->args[i]; + break; + } // endif type + + filename = (char*)args->args[0]; + } // endif 2 + + if ((bnp = BbinAlloc(g, len, top))) { + bnp->Filename = filename; + bnp->Pretty = pretty; + strcpy(bnp->Msg, "Json Binary item"); + } //endif bnp + + return bnp; +} // end of MakeBinResult + +/* -----------------------------Utility functions ------------------------------ */ /*********************************************************************************/ /* Returns a pointer to the first integer argument found from the nth argument. */ @@ -1558,49 +1715,6 @@ int IsArgJson(UDF_ARGS *args, uint i) return n; } // end of IsArgJson -/*********************************************************************************/ -/* Make the result according to the first argument type. */ -/*********************************************************************************/ -static char *MakeResult(PGLOBAL g, UDF_ARGS *args, PBVAL top, uint n = 2) -{ - char *str = NULL; - BDOC doc(g); - - if (IsArgJson(args, 0) == 2) { - // Make the change in the json file - int pretty = 2; - - for (uint i = n; i < args->arg_count; i++) - if (args->arg_type[i] == INT_RESULT) { - pretty = (int)*(longlong*)args->args[i]; - break; - } // endif type - - if (!doc.Serialize(g, top, MakePSZ(g, args, 0), pretty)) - PUSH_WARNING(g->Message); - - str = NULL; - } else if (IsArgJson(args, 0) == 3) { -#if 0 - PBSON bsp = (PBSON)args->args[0]; - - if (bsp->Filename) { - // Make the change in the json file - if (!Serialize(g, top, bsp->Filename, bsp->Pretty)) - PUSH_WARNING(g->Message); - - str = bsp->Filename; - } else if (!(str = Serialize(g, top, NULL, 0))) - PUSH_WARNING(g->Message); - - SetChanged(bsp); -#endif - } else if (!(str = doc.Serialize(g, top, NULL, 0))) - PUSH_WARNING(g->Message); - - return str; -} // end of MakeResult - /*********************************************************************************/ /* GetFileLength: returns file size in number of bytes. 
*/ /*********************************************************************************/ @@ -1649,7 +1763,7 @@ char* bsonvalue(UDF_INIT* initid, UDF_ARGS* args, char* result, if (!g->Xchk) { if (!CheckMemory(g, initid, args, 1, false)) { BJNX bnx(g); - PBVAL bvp = bnx.MakeBinValue(g, args, 0); + PBVAL bvp = bnx.MakeValue(args, 0, true); if (!(str = bnx.Serialize(g, bvp, NULL, 0))) str = strcpy(result, g->Message); @@ -1693,7 +1807,7 @@ char* bson_make_array(UDF_INIT* initid, UDF_ARGS* args, char* result, PBVAL bvp = NULL, arp = bnx.NewVal(TYPE_JAR); for (uint i = 0; i < args->arg_count; i++) - bnx.AddArrayValue(arp, bnx.MakeBinValue(g, args, i)); + bnx.AddArrayValue(arp, bnx.MakeValue(args, i, true)); if (!(str = bnx.Serialize(g, arp, NULL, 0))) str = strcpy(result, g->Message); @@ -1752,20 +1866,19 @@ char* bson_array_add_values(UDF_INIT* initid, UDF_ARGS* args, char* result, if (!g->Xchk) { if (!CheckMemory(g, initid, args, args->arg_count, true)) { - uint i = 0; BJNX bnx(g); - PBVAL arp, bvp = bnx.MakeBinValue(g, args, 0); + PBVAL arp = bnx.MakeValue(args, 0, true); - if (bvp->Type == TYPE_JAR) { - arp = bvp; - i = 1; - } else // First argument is not an array - arp = bnx.NewVal(TYPE_JAR); + if (arp->Type != TYPE_JAR) { + PUSH_WARNING("First argument is not an array"); + goto fin; + } // endif arp - for (; i < args->arg_count; i++) - bnx.AddArrayValue(arp, bnx.MakeBinValue(g, args, i)); + for (uint i = 1; i < args->arg_count; i++) + bnx.AddArrayValue(arp, bnx.MakeValue(args, i)); - str = bnx.Serialize(g, arp, NULL, 0); + bnx.SetChanged(true); + str = bnx.MakeResult(args, arp, INT_MAX); } // endif CheckMemory if (!str) { @@ -1778,6 +1891,7 @@ char* bson_array_add_values(UDF_INIT* initid, UDF_ARGS* args, char* result, } else str = (char*)g->Xchk; + fin: if (!str) { *res_length = 0; *is_null = 1; @@ -1840,7 +1954,7 @@ char *bson_array_add(UDF_INIT *initid, UDF_ARGS *args, char *result, uint n = 2; BJNX bnx(g, NULL, TYPE_STRING); PBVAL jsp, top; - PBVAL arp, jvp = bnx.MakeTypedValue(g, args, 0, TYPE_JAR, &top); + PBVAL arp, jvp = bnx.MakeValue(args, 0, true, &top); jsp = jvp; x = GetIntArgPtr(g, args, n); @@ -1848,8 +1962,6 @@ char *bson_array_add(UDF_INIT *initid, UDF_ARGS *args, char *result, if (bnx.CheckPath(g, args, jsp, jvp, 2)) PUSH_WARNING(g->Message); else if (jvp) { - PGLOBAL gb = GetMemPtr(g, args, 0); - if (jvp->Type != TYPE_JAR) { if ((arp = bnx.NewVal(TYPE_JAR))) { bnx.AddArrayValue(arp, jvp); @@ -1863,10 +1975,11 @@ char *bson_array_add(UDF_INIT *initid, UDF_ARGS *args, char *result, arp = jvp; if (arp) { - bnx.AddArrayValue(arp, bnx.MakeValue(gb, args, 1), x); - str = MakeResult(g, args, top, n); + bnx.AddArrayValue(arp, bnx.MakeValue(args, 1), x); + bnx.SetChanged(true); + str = bnx.MakeResult(args, top, n); } else - PUSH_WARNING(gb->Message); + PUSH_WARNING(g->Message); } else { PUSH_WARNING("Target is not an array"); @@ -1919,7 +2032,7 @@ my_bool bson_array_delete_init(UDF_INIT *initid, UDF_ARGS *args, char *message) g->N = (initid->const_item) ? 
1 : 0; // This is to avoid double execution when using prepared statements - if (IsJson(args, 0) > 1) + if (IsArgJson(args, 0) > 1) initid->const_item = 0; return false; @@ -1943,9 +2056,9 @@ char *bson_array_delete(UDF_INIT *initid, UDF_ARGS *args, char *result, if (!CheckMemory(g, initid, args, 1, false, false, true)) { int *x; uint n = 1; - BJNX bnx(g, NULL, TYPE_STRING); + BJNX bnx(g); PBVAL arp, top; - PBVAL jvp = bnx.MakeTypedValue(g, args, 0, TYPE_JSON, &top); + PBVAL jvp = bnx.MakeValue(args, 0, true, &top); if (!(x = GetIntArgPtr(g, args, n))) PUSH_WARNING("Missing or null array index"); @@ -1953,7 +2066,8 @@ char *bson_array_delete(UDF_INIT *initid, UDF_ARGS *args, char *result, PUSH_WARNING(g->Message); else if (arp && arp->Type == TYPE_JAR) { bnx.DeleteValue(arp, *x); - str = MakeResult(g, args, top, n); + bnx.SetChanged(true); + str = bnx.MakeResult(args, top, n); } else { PUSH_WARNING("First argument target is not an array"); // if (g->Mrr) *error = 1; @@ -2009,7 +2123,7 @@ char *bson_make_object(UDF_INIT *initid, UDF_ARGS *args, char *result, if ((objp = bnx.NewVal(TYPE_JOB))) { for (uint i = 0; i < args->arg_count; i++) - bnx.SetKeyValue(objp, bnx.MakeValue(g, args, i), bnx.MakeKey(args, i)); + bnx.SetKeyValue(objp, bnx.MakeValue(args, i), bnx.MakeKey(args, i)); str = bnx.Serialize(g, objp, NULL, 0); } // endif objp @@ -2058,7 +2172,7 @@ char *bson_object_nonull(UDF_INIT *initid, UDF_ARGS *args, char *result, if ((objp = bnx.NewVal(TYPE_JOB))) { for (uint i = 0; i < args->arg_count; i++) - if (!bnx.IsValueNull(jvp = bnx.MakeValue(g, args, i))) + if (!bnx.IsValueNull(jvp = bnx.MakeValue(args, i))) bnx.SetKeyValue(objp, jvp, bnx.MakeKey(args, i)); str = bnx.Serialize(g, objp, NULL, 0); @@ -2112,7 +2226,7 @@ char *bson_object_key(UDF_INIT *initid, UDF_ARGS *args, char *result, if ((objp = bnx.NewVal(TYPE_JOB))) { for (uint i = 0; i < args->arg_count; i += 2) - bnx.SetKeyValue(objp, bnx.MakeValue(g, args, i + 1), MakePSZ(g, args, i)); + bnx.SetKeyValue(objp, bnx.MakeValue(args, i + 1), MakePSZ(g, args, i)); str = bnx.Serialize(g, objp, NULL, 0); } // endif objp @@ -2159,7 +2273,7 @@ my_bool bson_object_add_init(UDF_INIT *initid, UDF_ARGS *args, char *message) g->N = (initid->const_item) ? 1 : 0; // This is to avoid double execution when using prepared statements - if (IsJson(args, 0) > 1) + if (IsArgJson(args, 0) > 1) initid->const_item = 0; return false; @@ -2182,20 +2296,21 @@ char *bson_object_add(UDF_INIT *initid, UDF_ARGS *args, char *result, } // endif Xchk if (!CheckMemory(g, initid, args, 2, false, true, true)) { - BJNX bnx(g, NULL, TYPE_STRG); + BJNX bnx(g, NULL, TYPE_STRING); PBVAL jvp, objp; PBVAL jsp, top; - jsp = bnx.MakeValue(g, args, 0, &top); + jsp = bnx.MakeValue(args, 0, true, &top); if (bnx.CheckPath(g, args, jsp, jvp, 2)) PUSH_WARNING(g->Message); else if (jvp && jvp->Type == TYPE_JOB) { objp = jvp; - jvp = bnx.MakeValue(g, args, 1); + jvp = bnx.MakeValue(args, 1); key = bnx.MakeKey(args, 1); bnx.SetKeyValue(objp, jvp, key); - str = MakeResult(g, args, top); + bnx.SetChanged(true); + str = bnx.MakeResult(args, top); } else { PUSH_WARNING("First argument target is not an object"); // if (g->Mrr) *error = 1; (only if no path) @@ -2253,7 +2368,7 @@ my_bool bson_object_delete_init(UDF_INIT *initid, UDF_ARGS *args, char *message) g->N = (initid->const_item) ? 
1 : 0; // This is to avoid double execution when using prepared statements - if (IsJson(args, 0) > 1) + if (IsArgJson(args, 0) > 1) initid->const_item = 0; return false; @@ -2275,21 +2390,22 @@ char *bson_object_delete(UDF_INIT *initid, UDF_ARGS *args, char *result, } // endif Xchk if (!CheckMemory(g, initid, args, 1, false, true, true)) { + bool chg; BJNX bnx(g, NULL, TYPE_STRG); PSZ key; PBVAL jsp, objp, top; - PBVAL jvp = bnx.MakeValue(g, args, 0, &top); + PBVAL jvp = bnx.MakeValue(args, 0, false, &top); jsp = jvp; if (bnx.CheckPath(g, args, jsp, jvp, 2)) PUSH_WARNING(g->Message); else if (jvp && jvp->Type == TYPE_JOB) { -// key = MakeKey(GetMemPtr(g, args, 0), args, 1); key = bnx.MakeKey(args, 1); objp = jvp; - bnx.DeleteKey(objp, key); - str = MakeResult(g, args, top); + chg = bnx.DeleteKey(objp, key); + bnx.SetChanged(chg); + str = bnx.MakeResult(args, top); } else { PUSH_WARNING("First argument target is not an object"); // if (g->Mrr) *error = 1; (only if no path) @@ -2349,18 +2465,8 @@ char *bson_object_list(UDF_INIT *initid, UDF_ARGS *args, char *result, if (!g->N) { if (!CheckMemory(g, initid, args, 1, true, true)) { BJNX bnx(g); - char *p; - PBVAL jsp, jarp; - PBVAL jvp = bnx.MakeValue(g, args, 0); - - if ((p = bnx.GetString(jvp))) { - if (!(jsp = bnx.ParseJson(g, p, strlen(p)))) { - PUSH_WARNING(g->Message); - return NULL; - } // endif jsp - - } else - jsp = jvp; + PBVAL jarp; + PBVAL jsp = bnx.MakeValue(args, 0, true); if (jsp->Type == TYPE_JOB) { jarp = bnx.GetKeyList(jsp); @@ -2408,7 +2514,7 @@ my_bool bson_object_values_init(UDF_INIT *initid, UDF_ARGS *args, char *message) if (args->arg_count != 1) { strcpy(message, "This function must have 1 argument"); return true; - } else if (!IsJson(args, 0) && args->arg_type[0] != STRING_RESULT) { + } else if (!IsArgJson(args, 0) && args->arg_type[0] != STRING_RESULT) { strcpy(message, "Argument must be a json object"); return true; } else @@ -2428,7 +2534,7 @@ char *bson_object_values(UDF_INIT *initid, UDF_ARGS *args, char *result, BJNX bnx(g); char *p; PBVAL jsp, jarp; - PBVAL jvp = bnx.MakeValue(g, args, 0); + PBVAL jvp = bnx.MakeValue(args, 0); if ((p = bnx.GetString(jvp))) { if (!(jsp = bnx.ParseJson(g, p, strlen(p)))) { @@ -2560,7 +2666,7 @@ void bson_array_grp_add(UDF_INIT *initid, UDF_ARGS *args, char*, char*) PBVAL arp = (PBVAL)g->Activityp; if (arp && g->N-- > 0) - bxp->AddArrayValue(arp, bxp->MakeValue(g, args, 0)); + bxp->AddArrayValue(arp, bxp->MakeValue(args, 0)); } // end of bson_array_grp_add @@ -2597,7 +2703,7 @@ my_bool bson_object_grp_init(UDF_INIT *initid, UDF_ARGS *args, char *message) if (args->arg_count != 2) { strcpy(message, "This function requires 2 arguments (key, value)"); return true; - } else if (IsJson(args, 0) == 3) { + } else if (IsArgJson(args, 0) == 3) { strcpy(message, "This function does not support Jbin arguments"); return true; } else @@ -2633,7 +2739,7 @@ void bson_object_grp_add(UDF_INIT *initid, UDF_ARGS *args, char*, char*) PBVAL bop = (PBVAL)g->Activityp; if (g->N-- > 0) - bxp->SetKeyValue(bop, bxp->MakeValue(g, args, 0), MakePSZ(g, args, 1)); + bxp->SetKeyValue(bop, bxp->MakeValue(args, 0), MakePSZ(g, args, 1)); } // end of bson_object_grp_add @@ -2699,7 +2805,12 @@ char* bson_test(UDF_INIT* initid, UDF_ARGS* args, char* result, PUSH_WARNING("CheckMemory error"); *error = 1; goto err; - } else if (!(bvp = bnx.MakeBinValue(g, args, 0))) { + } else // Sarea may have been reallocated + bnx.Reset(); + + bvp = bnx.MakeValue(args, 0, true); + + if (bvp->Type == TYPE_NULL) { 
PUSH_WARNING(g->Message); goto err; } // endif bvp @@ -2806,24 +2917,33 @@ char* bsonlocate(UDF_INIT* initid, UDF_ARGS* args, char* result, PUSH_WARNING("CheckMemory error"); *error = 1; goto err; - } else - bvp = bnx.MakeBinValue(g, args, 0); + } else { + bnx.Reset(); // Sarea may have been re-allocated + bvp = bnx.MakeValue(args, 0, true); - if (!bvp) { - PUSH_WARNING("First argument is not a valid JSON item"); - goto err; - } // endif bvp + if (!bvp) { + bnx.GetMsg(g); + PUSH_WARNING(g->Message); + goto err; + } else if (bvp->Type == TYPE_NULL) { + PUSH_WARNING("First argument is not a valid JSON item"); + goto err; + } // endif bvp - if (g->Mrr) { // First argument is a constant - g->Xchk = bvp; - JsonMemSave(g); - } // endif Mrr + if (g->Mrr) { // First argument is a constant + g->Xchk = bvp; + JsonMemSave(g); + } // endif Mrr + + } // endif CheckMemory } else bvp = (PBVAL)g->Xchk; // The item to locate - if (!(bvp2 = bnx.MakeBinValue(g, args, 1))) { + bvp2 = bnx.MakeValue(args, 1, true); + + if (bvp2->Type == TYPE_NULL) { PUSH_WARNING("Invalid second argument"); goto err; } // endif bvp @@ -2920,9 +3040,11 @@ char* bson_locate_all(UDF_INIT* initid, UDF_ARGS* args, char* result, *error = 1; goto err; } else - bvp = bnx.MakeBinValue(g, args, 0); + bnx.Reset(); - if (!bvp) { + bvp = bnx.MakeValue(args, 0, true); + + if (bvp->Type == TYPE_NULL) { PUSH_WARNING("First argument is not a valid JSON item"); goto err; } // endif bvp @@ -2936,7 +3058,9 @@ char* bson_locate_all(UDF_INIT* initid, UDF_ARGS* args, char* result, bvp = (PBVAL)g->Xchk; // The item to locate - if (!(bvp2 = bnx.MakeBinValue(g, args, 1))) { + bvp2 = bnx.MakeValue(args, 1, true); + + if (bvp2->Type == TYPE_NULL) { PUSH_WARNING("Invalid second argument"); goto err; } // endif bvp @@ -3006,7 +3130,7 @@ my_bool bson_contains_init(UDF_INIT *initid, UDF_ARGS *args, char *message) //memlen += more; // TODO: calculate this - more += (IsJson(args, 0) != 3 ? 1000 : 0); + more += (IsArgJson(args, 0) != 3 ? 1000 : 0); return JsonInit(initid, args, message, false, reslen, memlen, more); } // end of bson contains_init @@ -3055,7 +3179,7 @@ my_bool bsoncontains_path_init(UDF_INIT *initid, UDF_ARGS *args, char *message) //memlen += more; // TODO: calculate this - more += (IsJson(args, 0) != 3 ? 1000 : 0); + more += (IsArgJson(args, 0) != 3 ? 
1000 : 0); return JsonInit(initid, args, message, true, reslen, memlen, more); } // end of bsoncontains_path_init @@ -3085,7 +3209,7 @@ long long bsoncontains_path(UDF_INIT *initid, UDF_ARGS *args, char *, char *erro } else { BJNX bnx(g); - jvp = bnx.MakeValue(g, args, 0); + jvp = bnx.MakeValue(args, 0); if ((p = bnx.GetString(jvp))) { if (!(jsp = bnx.ParseJson(g, p, strlen(p)))) { @@ -3193,7 +3317,7 @@ char *bson_item_merge(UDF_INIT *initid, UDF_ARGS *args, char *result, PBVAL jsp[2] = {NULL, NULL}; for (int i = 0; i < 2; i++) { - jvp = bnx.MakeBinValue(g, args, i); + jvp = bnx.MakeValue(args, i, true); if (i) { if (jvp->Type != type) { @@ -3220,7 +3344,8 @@ char *bson_item_merge(UDF_INIT *initid, UDF_ARGS *args, char *result, else bnx.MergeObject(jsp[0], jsp[1]); - str = MakeResult(g, args, top); + bnx.SetChanged(true); + str = bnx.MakeResult(args, top); } // endif CheckMemory // In case of error or file, return unchanged first argument @@ -3286,8 +3411,8 @@ my_bool bson_get_item_init(UDF_INIT *initid, UDF_ARGS *args, char *message) char *bson_get_item(UDF_INIT *initid, UDF_ARGS *args, char *result, unsigned long *res_length, char *is_null, char *) { - char *p, *path, *str = NULL; - PBVAL jsp, jvp; + char *path, *str = NULL; + PBVAL jvp; PBJNX bxp = NULL; PGLOBAL g = (PGLOBAL)initid->ptr; @@ -3304,29 +3429,20 @@ char *bson_get_item(UDF_INIT *initid, UDF_ARGS *args, char *result, } else { BJNX bnx(g); - jvp = bnx.MakeValue(g, args, 0); - - if ((p = bnx.GetString(jvp))) { - if (!(jsp = bnx.ParseJson(g, p, strlen(p)))) { - PUSH_WARNING(g->Message); - goto fin; - } // endif jsp - - } else - jsp = jvp; + jvp = bnx.MakeValue(args, 0, true); if (g->Mrr) { // First argument is a constant - g->Xchk = jsp; + g->Xchk = jvp; JsonMemSave(g); } // endif Mrr } // endelse CheckMemory } else - jsp = (PBVAL)g->Xchk; + jvp = (PBVAL)g->Xchk; path = MakePSZ(g, args, 1); - bxp = new(g) BJNX(g, jsp, TYPE_STRING, initid->max_length); + bxp = new(g) BJNX(g, jvp, TYPE_STRING, initid->max_length); if (bxp->SetJpath(g, path, true)) { PUSH_WARNING(g->Message); @@ -3420,7 +3536,7 @@ char *bsonget_string(UDF_INIT *initid, UDF_ARGS *args, char *result, } else { BJNX bnx(g); - jvp = bnx.MakeValue(g, args, 0); + jvp = bnx.MakeValue(args, 0); if ((p = bnx.GetString(jvp))) { if (!(jsp = bnx.ParseJson(g, p, strlen(p)))) { @@ -3504,7 +3620,7 @@ my_bool bsonget_int_init(UDF_INIT *initid, UDF_ARGS *args, char *message) CalcLen(args, false, reslen, memlen); // TODO: calculate this - more = (IsJson(args, 0) != 3) ? 1000 : 0; + more = (IsArgJson(args, 0) != 3) ? 1000 : 0; return JsonInit(initid, args, message, true, reslen, memlen, more); } // end of bsonget_int_init @@ -3537,7 +3653,7 @@ long long bsonget_int(UDF_INIT *initid, UDF_ARGS *args, } else { BJNX bnx(g); - jvp = bnx.MakeValue(g, args, 0); + jvp = bnx.MakeValue(args, 0); if ((p = bnx.GetString(jvp))) { if (!(jsp = bnx.ParseJson(g, p, strlen(p)))) { @@ -3626,7 +3742,7 @@ my_bool bsonget_real_init(UDF_INIT *initid, UDF_ARGS *args, char *message) CalcLen(args, false, reslen, memlen); // TODO: calculate this - more = (IsJson(args, 0) != 3) ? 1000 : 0; + more = (IsArgJson(args, 0) != 3) ? 
1000 : 0; return JsonInit(initid, args, message, true, reslen, memlen, more); } // end of bsonget_real_init @@ -3659,7 +3775,7 @@ double bsonget_real(UDF_INIT *initid, UDF_ARGS *args, } else { BJNX bnx(g); - jvp = bnx.MakeValue(g, args, 0); + jvp = bnx.MakeValue(args, 0); if ((p = bnx.GetString(jvp))) { if (!(jsp = bnx.ParseJson(g, p, strlen(p)))) { @@ -3720,20 +3836,131 @@ void bsonget_real_deinit(UDF_INIT* initid) JsonFreeMem((PGLOBAL)initid->ptr); } // end of bsonget_real_deinit +/*********************************************************************************/ +/* Delete items from a Json document. */ +/*********************************************************************************/ +my_bool bson_delete_item_init(UDF_INIT *initid, UDF_ARGS *args, char *message) +{ + unsigned long reslen, memlen; + + if (args->arg_count < 2) { + if (IsArgJson(args, 0) != 3) { + strcpy(message, "This function must have at least 2 arguments or one binary"); + return true; + } // endif args + + } // endif count + + CalcLen(args, false, reslen, memlen, true); + + if (!JsonInit(initid, args, message, true, reslen, memlen)) { + PGLOBAL g = (PGLOBAL)initid->ptr; + + // Is this a constant function + g->N = (initid->const_item) ? 1 : 0; + + // This is to avoid double execution when using prepared statements + if (IsArgJson(args, 0) > 1) + initid->const_item = 0; + + return false; + } else + return true; + +} // end of bson_delete_item_init + +char *bson_delete_item(UDF_INIT *initid, UDF_ARGS *args, char *result, + unsigned long *res_length, char *is_null, char *error) +{ + char *path, *str = NULL; + PGLOBAL g = (PGLOBAL)initid->ptr; + + if (g->Xchk) { + // This constant function was recalled + str = (char*)g->Xchk; + goto fin; + } // endif Xchk + + if (!CheckMemory(g, initid, args, 1, false, false, true)) { + BJNX bnx(g, NULL, TYPE_STRING); + PBVAL top, jar = NULL; + PBVAL jvp = bnx.MakeValue(args, 0, true, &top); + + if (args->arg_count == 1) { + // This should be coming from bbin_locate_all + jar = jvp; // This is the array of paths + jvp = top; // And this is the document + } else if(!bnx.IsJson(jvp)) { + PUSH_WARNING("First argument is not a JSON document"); + goto fin; + } else if (args->arg_count == 2) { + // Check whether this is an array of paths + jar = bnx.MakeValue(args, 1, true); + + if (jar && jar->Type != TYPE_JAR) + jar = NULL; + + } // endif arg_count + + if (jar) { + // Do the deletion in reverse order + for(int i = bnx.GetArraySize(jar) - 1; i >= 0; i--) { + path = bnx.GetString(bnx.GetArrayValue(jar, i)); + + if (bnx.SetJpath(g, path, false)) { + PUSH_WARNING(g->Message); + continue; + } // endif SetJpath + + bnx.SetChanged(bnx.DeleteItem(g, jvp)); + } // endfor i + + } else for (uint i = 1; i < args->arg_count; i++) { + path = MakePSZ(g, args, i); + + if (bnx.SetJpath(g, path, false)) { + PUSH_WARNING(g->Message); + continue; + } // endif SetJpath + + bnx.SetChanged(bnx.DeleteItem(g, jvp)); + } // endfor i + + str = bnx.MakeResult(args, top, INT_MAX); + } // endif CheckMemory + + if (g->N) + // Keep result of constant function + g->Xchk = str; + +fin: + if (!str) { + *is_null = 1; + *error = 1; + *res_length = 0; + } else + *res_length = strlen(str); + + return str; +} // end of bson_delete_item + +void bson_delete_item_deinit(UDF_INIT* initid) +{ + JsonFreeMem((PGLOBAL)initid->ptr); +} // end of bson_delete_item_deinit + /*********************************************************************************/ /* This function is used by the json_set/insert/update_item functions. 
*/ /*********************************************************************************/ static char *bson_handle_item(UDF_INIT *initid, UDF_ARGS *args, char *result, unsigned long *res_length, char *is_null, char *error) { - char *p, *path, *str = NULL; + char *path, *str = NULL; int w; my_bool b = true; PBJNX bxp; PBVAL jsp, jvp; PGLOBAL g = (PGLOBAL)initid->ptr; -//PGLOBAL gb = GetMemPtr(g, args, 0); - PGLOBAL gb = g; if (g->Alchecked) { str = (char*)g->Activityp; @@ -3760,21 +3987,14 @@ static char *bson_handle_item(UDF_INIT *initid, UDF_ARGS *args, char *result, } else { BJNX bnx(g); - jvp = bnx.MakeValue(g, args, 0); - - if ((p = bnx.GetString(jvp))) { - if (!(jsp = bnx.ParseJson(g, p, strlen(p)))) { - throw 2; - } // endif jsp - - } else - jsp = jvp; + jsp = bnx.MakeValue(args, 0, true); if (g->Mrr) { // First argument is a constant g->Xchk = jsp; JsonMemSave(g); } // endif Mrr - } // endelse CheckMemory + + } // endif CheckMemory } else jsp = (PBVAL)g->Xchk; @@ -3782,7 +4002,7 @@ static char *bson_handle_item(UDF_INIT *initid, UDF_ARGS *args, char *result, bxp = new(g)BJNX(g, jsp, TYPE_STRING, initid->max_length, 0, true); for (uint i = 1; i + 1 < args->arg_count; i += 2) { - jvp = bxp->MakeValue(gb, args, i); + jvp = bxp->MakeValue(args, i); path = MakePSZ(g, args, i + 1); if (bxp->SetJpath(g, path, false)) { @@ -3796,13 +4016,16 @@ static char *bson_handle_item(UDF_INIT *initid, UDF_ARGS *args, char *result, b = (w == 1) ? b : !b; } // endif w - if (b && bxp->WriteValue(gb, jvp)) + if (b && bxp->WriteValue(g, jvp)) { PUSH_WARNING(g->Message); + continue; + } // endif SetJpath + bxp->SetChanged(true); } // endfor i - // In case of error or file, return unchanged argument - if (!(str = MakeResult(g, args, jsp, INT_MAX32))) + // In case of error or file, return unchanged argument + if (!(str = bxp->MakeResult(args, jsp, INT_MAX32))) str = MakePSZ(g, args, 0); if (g->N) @@ -3866,7 +4089,7 @@ my_bool bson_set_item_init(UDF_INIT *initid, UDF_ARGS *args, char *message) g->N = (initid->const_item) ? 1 : 0; // This is to avoid double execution when using prepared statements - if (IsJson(args, 0) > 1) + if (IsArgJson(args, 0) > 1) initid->const_item = 0; g->Alchecked = 0; @@ -4083,24 +4306,25 @@ char *bfile_make(UDF_INIT *initid, UDF_ARGS *args, char *result, } else if (initid->const_item) g->N = 1; -// if ((n = IsArgJson(args, 0)) == 3) { + if ((n = IsArgJson(args, 0)) == 3) { // Get default file name and pretty -// PBSON bsp = (PBSON)args->args[0]; + PBSON bsp = (PBSON)args->args[0]; -// fn = bsp->Filename; -// pretty = bsp->Pretty; -// } else - if ((n = IsArgJson(args, 0)) == 2) + fn = bsp->Filename; + pretty = bsp->Pretty; + } else if ((n = IsArgJson(args, 0)) == 2) fn = args->args[0]; if (!g->Xchk) { if (CheckMemory(g, initid, args, 1, true)) { PUSH_WARNING("CheckMemory error"); goto fin; - } else - jvp = bnx.MakeValue(g, args, 0); + } else + bnx.Reset(); - if ((p = bnx.GetString(jvp))) { + jvp = bnx.MakeValue(args, 0); + + if (!n && (p = bnx.GetString(jvp))) { if (!strchr("[{ \t\r\n", *p)) { // Is this a file name? if (!(p = GetJsonFile(g, p))) { @@ -4345,4 +4569,1392 @@ void bfile_bjson_deinit(UDF_INIT* initid) { JsonFreeMem((PGLOBAL)initid->ptr); } // end of bfile_bjson_deinit +/*********************************************************************************/ +/* Serialize a Json document. . 
*/ +/*********************************************************************************/ +my_bool bson_serialize_init(UDF_INIT *initid, UDF_ARGS *args, char *message) +{ + unsigned long reslen, memlen; + + if (args->args[0] && IsArgJson(args, 0) != 3) { + strcpy(message, "Argument must be a Jbin tree"); + return true; + } else + CalcLen(args, false, reslen, memlen); + + return JsonInit(initid, args, message, false, reslen, memlen); +} // end of bson_serialize_init + +char *bson_serialize(UDF_INIT *initid, UDF_ARGS *args, char *result, + unsigned long *res_length, char *, char *error) +{ + char *str; + PGLOBAL g = (PGLOBAL)initid->ptr; + + if (!g->Xchk) { + if (IsArgJson(args, 0) == 3) { + PBSON bsp = (PBSON)args->args[0]; + BJNX bnx(bsp->G); + PBVAL bvp = (args->arg_count == 1) ? (PBVAL)bsp->Jsp : (PBVAL)bsp->Top; + + if (!(str = bnx.Serialize(g, bvp, bsp->Filename, bsp->Pretty))) + str = strcpy(result, g->Message); + + // Keep result of constant function + g->Xchk = (initid->const_item) ? str : NULL; + } else { + // *error = 1; + str = strcpy(result, "Argument is not a Jbin tree"); + } // endif + + } else + str = (char*)g->Xchk; + + *res_length = strlen(str); + return str; +} // end of bson_serialize + +void bson_serialize_deinit(UDF_INIT* initid) +{ + JsonFreeMem((PGLOBAL)initid->ptr); +} // end of bson_serialize_deinit + +/*********************************************************************************/ +/* Make and return a binary Json array containing all the parameters. */ +/*********************************************************************************/ +my_bool bbin_make_array_init(UDF_INIT *initid, UDF_ARGS *args, char *message) +{ + unsigned long reslen, memlen; + + CalcLen(args, false, reslen, memlen); + return JsonInit(initid, args, message, true, reslen, memlen); +} // end of bbin_make_array_init + +char *bbin_make_array(UDF_INIT *initid, UDF_ARGS *args, char *result, + unsigned long *res_length, char *is_null, char *error) +{ + PGLOBAL g = (PGLOBAL)initid->ptr; + PBSON bsp = NULL; + + if (!g->Xchk) { + if (!CheckMemory(g, initid, args, args->arg_count, false)) { + BJNX bnx(g); + PBVAL arp; + + if ((arp = bnx.NewVal(TYPE_JAR))) { + for (uint i = 0; i < args->arg_count; i++) + bnx.AddArrayValue(arp, bnx.MakeValue(args, i)); + + if ((bsp = BbinAlloc(g, initid->max_length, arp))) { + strcat(bsp->Msg, " array"); + + // Keep result of constant function + g->Xchk = (initid->const_item) ? bsp : NULL; + } // endif bsp + + } // endif arp + + } // endif CheckMemory + + } else + bsp = (PBSON)g->Xchk; + + if (!bsp) { + *is_null = 1; + *error = 1; + *res_length = 0; + } else + *res_length = sizeof(BSON); + + return (char*)bsp; +} // end of bbin_make_array + +void bbin_make_array_deinit(UDF_INIT* initid) +{ + JsonFreeMem((PGLOBAL)initid->ptr); +} // end of bbin_make_array_deinit + +/*********************************************************************************/ +/* Add one value to a Json array. */ +/*********************************************************************************/ +my_bool bbin_array_add_init(UDF_INIT *initid, UDF_ARGS *args, char *message) +{ + unsigned long reslen, memlen; + + if (args->arg_count < 2) { + strcpy(message, "This function must have at least 2 arguments"); + return true; + } else + CalcLen(args, false, reslen, memlen, true); + + if (!JsonInit(initid, args, message, true, reslen, memlen)) { + PGLOBAL g = (PGLOBAL)initid->ptr; + + // This is a constant function + g->N = (initid->const_item) ? 
1 : 0; + + // This is to avoid double execution when using prepared statements + if (IsArgJson(args, 0) > 1) + initid->const_item = 0; + + return false; + } else + return true; + +} // end of bbin_array_add_init + +char *bbin_array_add(UDF_INIT *initid, UDF_ARGS *args, char *result, + unsigned long *res_length, char *is_null, char *error) +{ + PGLOBAL g = (PGLOBAL)initid->ptr; + PBSON bsp = NULL; + + if (g->Xchk) { + // This constant function was recalled + bsp = (PBSON)g->Xchk; + *res_length = sizeof(BSON); + return (char*)bsp; + } else if (!CheckMemory(g, initid, args, 2, false, false, true)) { + uint n = 2; + int* x = GetIntArgPtr(g, args, n); + BJNX bnx(g, NULL, TYPE_STRING); + PBVAL jarp, top, jvp = NULL; + PBVAL jsp = bnx.MakeValue(args, 0, true, &top); + + if (bnx.CheckPath(g, args, jsp, jvp, 2)) + PUSH_WARNING(g->Message); + else if (jvp && jvp->Type != TYPE_JAR) { + if ((jarp = bnx.NewVal(TYPE_JAR))) { + bnx.AddArrayValue(jarp, jvp); + + if (!top) + top = jarp; + + } // endif jarp + + } else + jarp = jvp; + + if (jarp) { + bnx.AddArrayValue(jarp, bnx.MakeValue(args, 1), x); + bnx.SetChanged(true); + bsp = bnx.MakeBinResult(g, args, top, initid->max_length); + + if (initid->const_item) + // Keep result of constant function + g->Xchk = bsp; + + } else + PUSH_WARNING(g->Message); + + } // endif CheckMemory + + if (!bsp) { + *res_length = 0; + *is_null = 1; + *error = 1; + } else + *res_length = sizeof(BSON); + + return (char*)bsp; +} // end of bbin_array_add + +void bbin_array_add_deinit(UDF_INIT* initid) +{ + JsonFreeMem((PGLOBAL)initid->ptr); +} // end of bbin_array_add_deinit + +/*********************************************************************************/ +/* Add one or several values to a Bson array. */ +/*********************************************************************************/ +my_bool bbin_array_add_values_init(UDF_INIT* initid, UDF_ARGS* args, char* message) +{ + return bson_array_add_values_init(initid, args, message); +} // end of bbin_array_add_values_init + +char* bbin_array_add_values(UDF_INIT* initid, UDF_ARGS* args, char* result, + unsigned long* res_length, char* is_null, char* error) +{ + PGLOBAL g = (PGLOBAL)initid->ptr; + PBSON bsp = NULL; + + if (!g->Xchk) { + if (!CheckMemory(g, initid, args, args->arg_count, true)) { + uint i = 0; + BJNX bnx(g); + PBVAL arp, top, jvp = NULL; + PBVAL bvp = bnx.MakeValue(args, 0, true, &top); + + if (bvp->Type == TYPE_JAR) { + arp = bvp; + i = 1; + } else // First argument is not an array + arp = bnx.NewVal(TYPE_JAR); + + for (; i < args->arg_count; i++) + bnx.AddArrayValue(arp, bnx.MakeValue(args, i)); + + bnx.SetChanged(true); + bsp = bnx.MakeBinResult(g, args, top, initid->max_length); + } // endif CheckMemory + + // Keep result of constant function + g->Xchk = (g->N) ? bsp : NULL; + } else + bsp = (PBSON)g->Xchk; + + if (!bsp) { + *res_length = 0; + *is_null = 1; + *error = 1; + } else + *res_length = sizeof(BSON); + + return (char*)bsp; +} // end of bbin_array_add_values + +void bbin_array_add_values_deinit(UDF_INIT* initid) { + JsonFreeMem((PGLOBAL)initid->ptr); +} // end of bbin_array_add_values_deinit + +/*********************************************************************************/ +/* Make a Json array from values coming from rows. 
*/ +/*********************************************************************************/ +my_bool bbin_array_grp_init(UDF_INIT *initid, UDF_ARGS *args, char *message) +{ + return bson_array_grp_init(initid, args, message); +} // end of bbin_array_grp_init + +void bbin_array_grp_clear(UDF_INIT *initid, char *a, char *b) +{ + bson_array_grp_clear(initid, a, b); +} // end of bbin_array_grp_clear + +void bbin_array_grp_add(UDF_INIT *initid, UDF_ARGS *args, char *a, char *b) +{ + bson_array_grp_add(initid, args, a, b); +} // end of bbin_array_grp_add + +char *bbin_array_grp(UDF_INIT *initid, UDF_ARGS *, char *result, + unsigned long *res_length, char *is_null, char *error) +{ + PBSON bsp = NULL; + PGLOBAL g = (PGLOBAL)initid->ptr; + PBVAL arp = (PBVAL)g->Activityp; + + if (g->N < 0) + PUSH_WARNING("Result truncated to json_grp_size values"); + + if (arp) + if ((bsp = BbinAlloc(g, initid->max_length, arp))) + strcat(bsp->Msg, " array"); + + if (!bsp) { + *res_length = 0; + *is_null = 1; + *error = 1; + } else + *res_length = sizeof(BSON); + + return (char*)bsp; +} // end of bbin_array_grp + +void bbin_array_grp_deinit(UDF_INIT* initid) +{ + JsonFreeMem((PGLOBAL)initid->ptr); +} // end of bbin_array_grp_deinit + +/*********************************************************************************/ +/* Make a Json object from values coming from rows. */ +/*********************************************************************************/ +my_bool bbin_object_grp_init(UDF_INIT *initid, UDF_ARGS *args, char *message) +{ + return bson_object_grp_init(initid, args, message); +} // end of bbin_object_grp_init + +void bbin_object_grp_clear(UDF_INIT *initid, char *a, char *b) +{ + bson_object_grp_clear(initid, a, b); +} // end of bbin_object_grp_clear + +void bbin_object_grp_add(UDF_INIT *initid, UDF_ARGS *args, char *a, char *b) +{ + bson_object_grp_add(initid, args, a, b); +} // end of bbin_object_grp_add + +char *bbin_object_grp(UDF_INIT *initid, UDF_ARGS *, char *result, + unsigned long *res_length, char *is_null, char *error) +{ + PBSON bsp = NULL; + PGLOBAL g = (PGLOBAL)initid->ptr; + PBVAL bop = (PBVAL)g->Activityp; + + if (g->N < 0) + PUSH_WARNING("Result truncated to json_grp_size values"); + + if (bop) + if ((bsp = BbinAlloc(g, initid->max_length, bop))) + strcat(bsp->Msg, " object"); + + if (!bsp) { + *res_length = 0; + *is_null = 1; + *error = 1; + } else + *res_length = sizeof(BSON); + + return (char*)bsp; +} // end of bbin_object_grp + +void bbin_object_grp_deinit(UDF_INIT* initid) +{ + JsonFreeMem((PGLOBAL)initid->ptr); +} // end of bbin_object_grp_deinit + +/*********************************************************************************/ +/* Make a Json Object containing all the parameters. 
*/ +/*********************************************************************************/ +my_bool bbin_make_object_init(UDF_INIT *initid, UDF_ARGS *args, char *message) +{ + unsigned long reslen, memlen; + + CalcLen(args, true, reslen, memlen); + return JsonInit(initid, args, message, false, reslen, memlen); +} // end of bbin_make_object_init + +char *bbin_make_object(UDF_INIT *initid, UDF_ARGS *args, char *result, + unsigned long *res_length, char *is_null, char *error) +{ + PGLOBAL g = (PGLOBAL)initid->ptr; + PBSON bsp = (PBSON)g->Xchk; + + if (!bsp) { + if (!CheckMemory(g, initid, args, args->arg_count, true)) { + BJNX bnx(g); + PBVAL objp; + + if ((objp = bnx.NewVal(TYPE_JOB))) { + for (uint i = 0; i < args->arg_count; i++) + bnx.SetKeyValue(objp, bnx.MakeValue(args, i), bnx.MakeKey(args, i)); + + if ((bsp = BbinAlloc(g, initid->max_length, objp))) { + strcat(bsp->Msg, " object"); + + // Keep result of constant function + g->Xchk = (initid->const_item) ? bsp : NULL; + } // endif bsp + + } // endif objp + + } // endif CheckMemory + + } // endif Xchk + + if (!bsp) { + *is_null = 1; + *error = 1; + *res_length = 0; + } else + *res_length = sizeof(BSON); + + return (char*)bsp; +} // end of bbin_make_object + +void bbin_make_object_deinit(UDF_INIT* initid) +{ + JsonFreeMem((PGLOBAL)initid->ptr); +} // end of bbin_make_object_deinit + +/*********************************************************************************/ +/* Make a Json Object containing all not null parameters. */ +/*********************************************************************************/ +my_bool bbin_object_nonull_init(UDF_INIT *initid, UDF_ARGS *args, char *message) +{ + unsigned long reslen, memlen; + + CalcLen(args, true, reslen, memlen); + return JsonInit(initid, args, message, true, reslen, memlen); +} // end of bbin_object_nonull_init + +char *bbin_object_nonull(UDF_INIT *initid, UDF_ARGS *args, char *result, + unsigned long *res_length, char *is_null, char *error) +{ + PGLOBAL g = (PGLOBAL)initid->ptr; + PBSON bsp = (PBSON)g->Xchk; + + if (!bsp) { + if (!CheckMemory(g, initid, args, args->arg_count, false, true)) { + BJNX bnx(g); + PBVAL jvp, objp; + + if ((objp = bnx.NewVal(TYPE_JOB))) { + for (uint i = 0; i < args->arg_count; i++) + if (!bnx.IsValueNull(jvp = bnx.MakeValue(args, i))) + bnx.SetKeyValue(objp, jvp, bnx.MakeKey(args, i)); + + if ((bsp = BbinAlloc(g, initid->max_length, objp))) { + strcat(bsp->Msg, " object"); + + // Keep result of constant function + g->Xchk = (initid->const_item) ? bsp : NULL; + } // endif bsp + + } // endif objp + + } // endif CheckMemory + + } // endif Xchk + + if (!bsp) { + *is_null = 1; + *error = 1; + *res_length = 0; + } else + *res_length = sizeof(BSON); + + return (char*)bsp; +} // end of bbin_object_nonull + +void bbin_object_nonull_deinit(UDF_INIT* initid) +{ + JsonFreeMem((PGLOBAL)initid->ptr); +} // end of bbin_object_nonull_deinit + +/*********************************************************************************/ +/* Make a Json Object containing all the key/value parameters. 
*/ +/*********************************************************************************/ +my_bool bbin_object_key_init(UDF_INIT *initid, UDF_ARGS *args, char *message) +{ + unsigned long reslen, memlen; + + if (args->arg_count % 2) { + strcpy(message, "This function must have an even number of arguments"); + return true; + } // endif arg_count + + CalcLen(args, true, reslen, memlen); + return JsonInit(initid, args, message, true, reslen, memlen); +} // end of bbin_object_key_init + +char *bbin_object_key(UDF_INIT *initid, UDF_ARGS *args, char *result, + unsigned long *res_length, char *is_null, char *error) +{ + PGLOBAL g = (PGLOBAL)initid->ptr; + PBSON bsp = (PBSON)g->Xchk; + + if (!bsp) { + if (!CheckMemory(g, initid, args, args->arg_count, false, true)) { + BJNX bnx(g); + PBVAL objp; + + if ((objp = bnx.NewVal(TYPE_JOB))) { + for (uint i = 0; i < args->arg_count; i += 2) + bnx.SetKeyValue(objp, bnx.MakeValue(args, i + 1), MakePSZ(g, args, i)); + + if ((bsp = BbinAlloc(g, initid->max_length, objp))) { + strcat(bsp->Msg, " object"); + + // Keep result of constant function + g->Xchk = (initid->const_item) ? bsp : NULL; + } // endif bsp + + } // endif objp + + } // endif CheckMemory + + } // endif Xchk + + if (!bsp) { + *is_null = 1; + *error = 1; + *res_length = 0; + } else + *res_length = sizeof(BSON); + + return (char*)bsp; +} // end of bbin_object_key + +void bbin_object_key_deinit(UDF_INIT* initid) +{ + JsonFreeMem((PGLOBAL)initid->ptr); +} // end of bbin_object_key_deinit + +/*********************************************************************************/ +/* Add or replace a value in a Json Object. */ +/*********************************************************************************/ +my_bool bbin_object_add_init(UDF_INIT *initid, UDF_ARGS *args, char *message) +{ + unsigned long reslen, memlen; + + if (args->arg_count < 2) { + strcpy(message, "This function must have at least 2 arguments"); + return true; + } else if (!IsArgJson(args, 0)) { + strcpy(message, "First argument must be a json item"); + return true; + } else + CalcLen(args, true, reslen, memlen, true); + + return JsonInit(initid, args, message, true, reslen, memlen); +} // end of bbin_object_add_init + +char *bbin_object_add(UDF_INIT *initid, UDF_ARGS *args, char *result, + unsigned long *res_length, char *is_null, char *error) +{ + PGLOBAL g = (PGLOBAL)initid->ptr; + PBSON bsp = NULL; + + if (g->Xchk) { + // This constant function was recalled + bsp = (PBSON)g->Xchk; + *res_length = sizeof(BSON); + return (char*)bsp; + } else if (!CheckMemory(g, initid, args, 2, false, true, true)) { + PSZ key; + BJNX bnx(g, NULL, TYPE_STRING); + PBVAL top; + PBVAL jobp = bnx.MakeValue(args, 0, true, &top); + PBVAL jvp = jobp; + + if (bnx.CheckPath(g, args, jvp, jobp, 2)) + PUSH_WARNING(g->Message); + else if (jobp && jobp->Type == TYPE_JOB) { + jvp = bnx.MakeValue(args, 1); + key = bnx.MakeKey(args, 1); + bnx.SetKeyValue(jobp, jvp, key); + bnx.SetChanged(true); + } else { + PUSH_WARNING("First argument target is not an object"); + // if (g->Mrr) *error = 1; (only if no path) + } // endif jobp + + // In case of error unchanged argument will be returned + bsp = bnx.MakeBinResult(g, args, top, initid->max_length); + + if (initid->const_item) + // Keep result of constant function + g->Xchk = bsp; + + } // endif CheckMemory + + if (!bsp) { + *is_null = 1; + *error = 1; + *res_length = 0; + } else + *res_length = sizeof(BSON); + + return (char*)bsp; +} // end of bbin_object_add + +void bbin_object_add_deinit(UDF_INIT* initid) +{ + 
JsonFreeMem((PGLOBAL)initid->ptr); +} // end of bbin_object_add_deinit + +/*********************************************************************************/ +/* Delete a value from a Json array. */ +/*********************************************************************************/ +my_bool bbin_array_delete_init(UDF_INIT *initid, UDF_ARGS *args, char *message) +{ + return bson_array_delete_init(initid, args, message); +} // end of bbin_array_delete_init + +char *bbin_array_delete(UDF_INIT *initid, UDF_ARGS *args, char *result, + unsigned long *res_length, char *is_null, char *error) +{ + PGLOBAL g = (PGLOBAL)initid->ptr; + PBSON bsp = NULL; + + if (g->Xchk) { + // This constant function was recalled + bsp = (PBSON)g->Xchk; + } else if (!CheckMemory(g, initid, args, 1, false, false, true)) { + int* x; + uint n = 1; + BJNX bnx(g); + PBVAL arp, top; + PBVAL jvp = bnx.MakeValue(args, 0, true, &top); + + if (!(x = GetIntArgPtr(g, args, n))) + PUSH_WARNING("Missing or null array index"); + else if (bnx.CheckPath(g, args, jvp, arp, 1)) + PUSH_WARNING(g->Message); + else if (arp && arp->Type == TYPE_JAR) { + bnx.SetChanged(bnx.DeleteValue(arp, *x)); + bsp = bnx.MakeBinResult(g, args, top, initid->max_length); + } else { + PUSH_WARNING("First argument target is not an array"); + // if (g->Mrr) *error = 1; + } // endif jvp + + if (g->N) + // Keep result of constant function + g->Xchk = bsp; + + } // endif CheckMemory + + if (!bsp) { + *is_null = 1; + *error = 1; + *res_length = 0; + } else + *res_length = sizeof(BSON); + + return (char*)bsp; +} // end of bbin_array_delete + +void bbin_array_delete_deinit(UDF_INIT* initid) +{ + JsonFreeMem((PGLOBAL)initid->ptr); +} // end of bbin_array_delete_deinit + +/*********************************************************************************/ +/* Delete a value from a Json object. 
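The second argument is the key to remove; an illustrative call (string arguments being parsed as json items) could be:
SELECT bson_serialize(bbin_object_delete('{"qty":56,"price":3.1416}', 'price'));
expected to return something like {"qty":56}.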
*/ +/*********************************************************************************/ +my_bool bbin_object_delete_init(UDF_INIT *initid, UDF_ARGS *args, char *message) +{ + unsigned long reslen, memlen; + + if (args->arg_count < 2) { + strcpy(message, "This function must have 2 or 3 arguments"); + return true; + } else if (!IsArgJson(args, 0)) { + strcpy(message, "First argument must be a json item"); + return true; + } else if (args->arg_type[1] != STRING_RESULT) { + strcpy(message, "Second argument must be a key string"); + return true; + } else + CalcLen(args, true, reslen, memlen, true); + + return JsonInit(initid, args, message, true, reslen, memlen); +} // end of bbin_object_delete_init + +char *bbin_object_delete(UDF_INIT *initid, UDF_ARGS *args, char *result, + unsigned long *res_length, char *is_null, char *error) +{ + PGLOBAL g = (PGLOBAL)initid->ptr; + PBSON bsp = NULL; + + if (g->Xchk) { + // This constant function was recalled + bsp = (PBSON)g->Xchk; + *res_length = sizeof(BSON); + return (char*)bsp; + } else if (!CheckMemory(g, initid, args, 1, false, true, true)) { + PCSZ key; + BJNX bnx(g, NULL, TYPE_STRING); + PBVAL top; + PBVAL jobp = bnx.MakeValue(args, 0, true, &top); + + if (bnx.CheckPath(g, args, top, jobp, 2)) + PUSH_WARNING(g->Message); + else if (jobp && jobp->Type == TYPE_JOB) { + key = bnx.MakeKey(args, 1); + bnx.SetChanged(bnx.DeleteKey(jobp, key)); + } else { + PUSH_WARNING("First argument target is not an object"); + // if (g->Mrr) *error = 1; (only if no path) + } // endif jvp + + // In case of error unchanged argument will be returned + bsp = bnx.MakeBinResult(g, args, top, initid->max_length); + + if (initid->const_item) + // Keep result of constant function + g->Xchk = bsp; + + } // endif CheckMemory + + if (!bsp) { + *is_null = 1; + *error = 1; + *res_length = 0; + } else + *res_length = sizeof(BSON); + + return (char*)bsp; +} // end of bbin_object_delete + +void bbin_object_delete_deinit(UDF_INIT* initid) +{ + JsonFreeMem((PGLOBAL)initid->ptr); +} // end of bbin_object_delete_deinit + +/*********************************************************************************/ +/* Returns an array of the Json object keys. */ +/*********************************************************************************/ +my_bool bbin_object_list_init(UDF_INIT *initid, UDF_ARGS *args, char *message) +{ + return bson_object_list_init(initid, args, message); +} // end of bbin_object_list_init + +char *bbin_object_list(UDF_INIT *initid, UDF_ARGS *args, char *result, + unsigned long *res_length, char *is_null, char *error) +{ + PGLOBAL g = (PGLOBAL)initid->ptr; + PBSON bsp = (PBSON)g->Xchk; + + if (!bsp) { + if (!CheckMemory(g, initid, args, 1, true, true)) { + BJNX bnx(g); + PBVAL top, jarp = NULL; + PBVAL jsp = bnx.MakeValue(args, 0, true, &top); + + if (jsp->Type == TYPE_JOB) { + jarp = bnx.GetKeyList(jsp); + } else { + PUSH_WARNING("First argument is not an object"); + if (g->Mrr) *error = 1; + } // endif jsp type + + // In case of error unchanged argument will be returned + bsp = bnx.MakeBinResult(g, args, top, initid->max_length); + bsp->Jsp = (PJSON)jarp; + + } // endif CheckMemory + + // Keep result of constant function + g->Xchk = (initid->const_item) ? 
bsp : NULL; + } // endif bsp + + if (!bsp) { + *is_null = 1; + *error = 1; + *res_length = 0; + } else + *res_length = sizeof(BSON); + + return (char*)bsp; +} // end of bbin_object_list + +void bbin_object_list_deinit(UDF_INIT* initid) +{ + JsonFreeMem((PGLOBAL)initid->ptr); +} // end of bbin_object_list_deinit + +/*********************************************************************************/ +/* Returns an array of the Json object values. */ +/*********************************************************************************/ +my_bool bbin_object_values_init(UDF_INIT *initid, UDF_ARGS *args, char *message) +{ + return bson_object_values_init(initid, args, message); +} // end of bbin_object_values_init + +char *bbin_object_values(UDF_INIT *initid, UDF_ARGS *args, char *result, + unsigned long *res_length, char *is_null, char *error) +{ + PGLOBAL g = (PGLOBAL)initid->ptr; + PBSON bsp = (PBSON)g->Xchk; + + if (!bsp) { + if (!CheckMemory(g, initid, args, 1, true, true)) { + BJNX bnx(g); + PBVAL top, jarp; + PBVAL jvp = bnx.MakeValue(args, 0, true, &top); + + if (jvp->Type == TYPE_JOB) { + jarp = bnx.GetObjectValList(jvp); + } else { + PUSH_WARNING("First argument is not an object"); + if (g->Mrr) *error = 1; + } // endif jvp + + // In case of error unchanged argument will be returned + bsp = bnx.MakeBinResult(g, args, top, initid->max_length); + bsp->Jsp = (PJSON)jarp; + + } // endif CheckMemory + + if (initid->const_item) { + // Keep result of constant function + g->Xchk = bsp; + } // endif const_item + + } // endif bsp + + if (!bsp) { + *is_null = 1; + *error = 1; + *res_length = 0; + } else + *res_length = sizeof(BSON); + + return (char*)bsp; +} // end of bbin_object_values + +void bbin_object_values_deinit(UDF_INIT* initid) +{ + JsonFreeMem((PGLOBAL)initid->ptr); +} // end of bbin_object_values_deinit + +/*********************************************************************************/ +/* Get a Json item from a Json document. */ +/*********************************************************************************/ +my_bool bbin_get_item_init(UDF_INIT *initid, UDF_ARGS *args, char *message) +{ + return bson_get_item_init(initid, args, message); +} // end of bbin_get_item_init + +char *bbin_get_item(UDF_INIT *initid, UDF_ARGS *args, char *result, + unsigned long *res_length, char *is_null, char *error) +{ + PBSON bsp = NULL; + PGLOBAL g = (PGLOBAL)initid->ptr; + + if (g->Xchk) { + bsp = (PBSON)g->Xchk; + } else if (!CheckMemory(g, initid, args, 1, true, true)) { + char *path = MakePSZ(g, args, 1); + BJNX bnx(g, NULL, TYPE_STRING, initid->max_length); + PBVAL top, jvp = NULL; + PBVAL jsp = bnx.MakeValue(args, 0, true, &top); + + if (bnx.CheckPath(g, args, jsp, jvp, 1)) + PUSH_WARNING(g->Message); + else if (jvp) { + bsp = bnx.MakeBinResult(g, args, top, initid->max_length); + bsp->Jsp = (PJSON)jvp; + + if (initid->const_item) + // Keep result of constant function + g->Xchk = bsp; + + } // endif jvp + + } else + PUSH_WARNING("CheckMemory error"); + + if (!bsp) { + *is_null = 1; + *res_length = 0; + } else + *res_length = sizeof(BSON); + + return (char*)bsp; +} // end of bbin_get_item + +void bbin_get_item_deinit(UDF_INIT* initid) +{ + JsonFreeMem((PGLOBAL)initid->ptr); +} // end of bbin_get_item_deinit + +/*********************************************************************************/ +/* Merge two arrays or objects. 
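Both arguments must be of the same type (two arrays or two objects); an illustrative array merge could be:
SELECT bson_serialize(bbin_item_merge('[1,2]', '[3,4]'));
expected to return something like [1,2,3,4].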
*/ +/*********************************************************************************/ +my_bool bbin_item_merge_init(UDF_INIT *initid, UDF_ARGS *args, char *message) +{ + return bson_item_merge_init(initid, args, message); +} // end of bbin_item_merge_init + +char *bbin_item_merge(UDF_INIT *initid, UDF_ARGS *args, char *result, + unsigned long *res_length, char *is_null, char *error) +{ + PBSON bsp = NULL; + PGLOBAL g = (PGLOBAL)initid->ptr; + + if (g->Xchk) { + // This constant function was recalled + bsp = (PBSON)g->Xchk; + goto fin; + } // endif Xchk + + if (!CheckMemory(g, initid, args, 2, false, false, true)) { + JTYP type; + BJNX bnx(g); + PBVAL jvp, top = NULL; + PBVAL jsp[2] = {NULL, NULL}; + + for (int i = 0; i < 2; i++) { + if (i) { + jvp = bnx.MakeValue(args, i, true); + + if (jvp->Type != type) { + PUSH_WARNING("Argument types mismatch"); + goto fin; + } // endif type + + } else { + jvp = bnx.MakeValue(args, i, true, &top); + type = (JTYP)jvp->Type; + + if (type != TYPE_JAR && type != TYPE_JOB) { + PUSH_WARNING("First argument is not an array or object"); + goto fin; + } // endif type + + } // endif i + + jsp[i] = jvp; + } // endfor i + + if (type == TYPE_JAR) + bnx.MergeArray(jsp[0], jsp[1]); + else + bnx.MergeObject(jsp[0], jsp[1]); + + bnx.SetChanged(true); + bsp = bnx.MakeBinResult(g, args, top, initid->max_length); + } // endif CheckMemory + + if (g->N) + // Keep result of constant function + g->Xchk = bsp; + +fin: + if (!bsp) { + *res_length = 0; + *error = 1; + *is_null = 1; + } else + *res_length = sizeof(BSON); + + return (char*)bsp; +} // end of bbin_item_merge + +void bbin_item_merge_deinit(UDF_INIT* initid) +{ + JsonFreeMem((PGLOBAL)initid->ptr); +} // end of bbin_item_merge_deinit + +/*********************************************************************************/ +/* This function is used by the jbin_set/insert/update_item functions. */ +/*********************************************************************************/ +static char *bbin_handle_item(UDF_INIT *initid, UDF_ARGS *args, char *result, + unsigned long *res_length, char *is_null, char *error) +{ + char *path; + int w; + my_bool b = true; + PBJNX bxp; + PBVAL jsp, jvp, top; + PBSON bsp = NULL; + PGLOBAL g = (PGLOBAL)initid->ptr; + + if (g->Alchecked) { + bsp = (PBSON)g->Activityp; + goto fin; + } else if (g->N) + g->Alchecked = 1; + + if (!strcmp(result, "$set")) + w = 0; + else if (!strcmp(result, "$insert")) + w = 1; + else if (!strcmp(result, "$update")) + w = 2; + else { + PUSH_WARNING("Logical error, please contact CONNECT developer"); + goto fin; + } // endelse + + try { + if (!g->Xchk) { + if (CheckMemory(g, initid, args, 1, true, false, true)) { + throw 1; + } else { + BJNX bnx(g); + + jsp = bnx.MakeValue(args, 0, true, &top); + + if (g->Mrr) { // First argument is a constant + g->Xchk = jsp; + g->More = (size_t)top; + JsonMemSave(g); + } // endif Mrr + + } // endif CheckMemory + + } else { + jsp = (PBVAL)g->Xchk; + top = (PBVAL)g->More; + } // endif Xchk + + bxp = new(g)BJNX(g, jsp, TYPE_STRING, initid->max_length, 0, true); + + for (uint i = 1; i + 1 < args->arg_count; i += 2) { + jvp = bxp->MakeValue(args, i); + path = MakePSZ(g, args, i + 1); + + if (bxp->SetJpath(g, path, false)) + throw 2; + + if (w) { + bxp->ReadValue(g); + b = bxp->GetValue()->IsNull(); + b = (w == 1) ? 
b : !b; + } // endif w + + if (b && bxp->WriteValue(g, jvp)) + throw 3; + + bxp->SetChanged(true); + } // endfor i + + if (!(bsp = bxp->MakeBinResult(g, args, top, initid->max_length))) + throw 4; + + if (g->N) + // Keep result of constant function + g->Activityp = (PACTIVITY)bsp; + + } catch (int n) { + if (trace(1)) + htrc("Exception %d: %s\n", n, g->Message); + + PUSH_WARNING(g->Message); + } catch (const char *msg) { + strcpy(g->Message, msg); + PUSH_WARNING(g->Message); + } // end catch + +fin: + if (!bsp) { + *is_null = 1; + *res_length = 0; + } else + *res_length = sizeof(BSON); + + return (char*)bsp; +} // end of bbin_handle_item + +/*********************************************************************************/ +/* Set Json items of a Json document according to path. */ +/*********************************************************************************/ +my_bool bbin_set_item_init(UDF_INIT *initid, UDF_ARGS *args, char *message) +{ + return bson_set_item_init(initid, args, message); +} // end of bbin_set_item_init + +char *bbin_set_item(UDF_INIT *initid, UDF_ARGS *args, char *result, + unsigned long *res_length, char *is_null, char *p) +{ + strcpy(result, "$set"); + return bbin_handle_item(initid, args, result, res_length, is_null, p); +} // end of bbin_set_item + +void bbin_set_item_deinit(UDF_INIT* initid) +{ + JsonFreeMem((PGLOBAL)initid->ptr); +} // end of bbin_set_item_deinit + +/*********************************************************************************/ +/* Insert Json items of a Json document according to path. */ +/*********************************************************************************/ +my_bool bbin_insert_item_init(UDF_INIT *initid, UDF_ARGS *args, char *message) +{ + return bson_set_item_init(initid, args, message); +} // end of bbin_insert_item_init + +char *bbin_insert_item(UDF_INIT *initid, UDF_ARGS *args, char *result, + unsigned long *res_length, char *is_null, char *p) +{ + strcpy(result, "$insert"); + return bbin_handle_item(initid, args, result, res_length, is_null, p); +} // end of bbin_insert_item + +void bbin_insert_item_deinit(UDF_INIT* initid) +{ + JsonFreeMem((PGLOBAL)initid->ptr); +} // end of bbin_insert_item_deinit + +/*********************************************************************************/ +/* Update Json items of a Json document according to path. */ +/*********************************************************************************/ +my_bool bbin_update_item_init(UDF_INIT *initid, UDF_ARGS *args, char *message) +{ + return bson_set_item_init(initid, args, message); +} // end of bbin_update_item_init + +char *bbin_update_item(UDF_INIT *initid, UDF_ARGS *args, char *result, + unsigned long *res_length, char *is_null, char *p) +{ + strcpy(result, "$update"); + return bbin_handle_item(initid, args, result, res_length, is_null, p); +} // end of bbin_update_item + +void bbin_update_item_deinit(UDF_INIT* initid) +{ + JsonFreeMem((PGLOBAL)initid->ptr); +} // end of bbin_update_item_deinit + +/*********************************************************************************/ +/* Delete items from a Json document. 
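Items are designated by JPATH expressions; an illustrative call could be:
SELECT bson_serialize(bbin_delete_item('{"a":1,"b":2,"c":3}', '$.b', '$.c'));
expected to return something like {"a":1}. When only two arguments are given and the second one is an array (for instance the array of paths returned by bbin_locate_all), its elements are used as the paths to delete.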
*/ +/*********************************************************************************/ +my_bool bbin_delete_item_init(UDF_INIT *initid, UDF_ARGS *args, char *message) +{ + return bson_delete_item_init(initid, args, message); +} // end of bbin_delete_item_init + +char *bbin_delete_item(UDF_INIT *initid, UDF_ARGS *args, char *result, + unsigned long *res_length, char *is_null, char *error) +{ + char *path; + PBSON bsp = NULL; + PGLOBAL g = (PGLOBAL)initid->ptr; + + if (g->Xchk) { + // This constant function was recalled + bsp = (PBSON)g->Xchk; + goto fin; + } // endif Xchk + + if (!CheckMemory(g, initid, args, 1, false, false, true)) { + BJNX bnx(g, NULL, TYPE_STRING); + PBVAL top, jar = NULL; + PBVAL jvp = bnx.MakeValue(args, 0, true, &top); + + if (args->arg_count == 1) { + // This should be coming from bbin_locate_all + jar = jvp; // This is the array of paths + jvp = top; // And this is the document + } else if(!bnx.IsJson(jvp)) { + PUSH_WARNING("First argument is not a JSON document"); + goto fin; + } else if (args->arg_count == 2) { + // Check whether this is an array of paths + jar = bnx.MakeValue(args, 1, true); + + if (jar && jar->Type != TYPE_JAR) + jar = NULL; + + } // endif arg_count + + if (jar) { + // Do the deletion in reverse order + for(int i = bnx.GetArraySize(jar) - 1; i >= 0; i--) { + path = bnx.GetString(bnx.GetArrayValue(jar, i)); + + if (bnx.SetJpath(g, path, false)) { + PUSH_WARNING(g->Message); + continue; + } // endif SetJpath + + bnx.SetChanged(bnx.DeleteItem(g, jvp)); + } // endfor i + + } else for (uint i = 1; i < args->arg_count; i++) { + path = MakePSZ(g, args, i); + + if (bnx.SetJpath(g, path, false)) { + PUSH_WARNING(g->Message); + continue; + } // endif SetJpath + + bnx.SetChanged(bnx.DeleteItem(g, jvp)); + } // endfor i + + bsp = bnx.MakeBinResult(g, args, top, initid->max_length); + + if (args->arg_count == 1) + // Here Jsp was not a sub-item of top + bsp->Jsp = (PJSON)top; + + } // endif CheckMemory + + if (g->N) + // Keep result of constant function + g->Xchk = bsp; + +fin: + if (!bsp) { + *is_null = 1; + *error = 1; + *res_length = 0; + } else + *res_length = sizeof(BSON); + + return (char*)bsp; +} // end of bbin_delete_item + +void bbin_delete_item_deinit(UDF_INIT* initid) +{ + JsonFreeMem((PGLOBAL)initid->ptr); +} // end of bbin_delete_item_deinit + +/*********************************************************************************/ +/* Returns a json file as a json binary tree. 
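Illustrative usage (file name shown only as an example):
SELECT bson_serialize(bbin_file('biblio.json'));
An optional integer argument gives the pretty format of the file, and an optional path argument restricts the result to the sub-item it designates.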
*/ +/*********************************************************************************/ +my_bool bbin_file_init(UDF_INIT *initid, UDF_ARGS *args, char *message) +{ + return bson_file_init(initid, args, message); +} // end of bbin_file_init + +char *bbin_file(UDF_INIT *initid, UDF_ARGS *args, char *result, + unsigned long *res_length, char *is_null, char *error) +{ + char *fn; + int pretty = 3; + size_t len = 0; + PBVAL jsp, jvp = NULL; + PGLOBAL g = (PGLOBAL)initid->ptr; + BJNX bnx(g); + PBSON bsp = (PBSON)g->Xchk; + + if (bsp) + goto fin; + + fn = MakePSZ(g, args, 0); + + for (unsigned int i = 1; i < args->arg_count; i++) + if (args->arg_type[i] == INT_RESULT && *(longlong*)args->args[i] < 4) { + pretty = (int) * (longlong*)args->args[i]; + break; + } // endif type + + // Parse the json file and allocate its tree structure + if (!(jsp = bnx.ParseJsonFile(g, fn, pretty, len))) { + PUSH_WARNING(g->Message); + *error = 1; + goto fin; + } // endif jsp + +// if (pretty == 3) +// PUSH_WARNING("File pretty format cannot be determined"); +// else if (pretty == 3) +// pretty = pty; + + if ((bsp = BbinAlloc(g, len, jsp))) { + strcat(bsp->Msg, " file"); + bsp->Filename = fn; + bsp->Pretty = pretty; + } else { + *error = 1; + goto fin; + } // endif bsp + + // Check whether a path was specified + if (bnx.CheckPath(g, args, jsp, jvp, 1)) { + PUSH_WARNING(g->Message); + bsp = NULL; + goto fin; + } else if (jvp) + bsp->Jsp = (PJSON)jvp; + + if (initid->const_item) + // Keep result of constant function + g->Xchk = bsp; + +fin: + if (!bsp) { + *res_length = 0; + *is_null = 1; + } else + *res_length = sizeof(BSON); + + return (char*)bsp; +} // end of bbin_file + +void bbin_file_deinit(UDF_INIT* initid) +{ + JsonFreeMem((PGLOBAL)initid->ptr); +} // end of bbin_file_deinit + +/*********************************************************************************/ +/* Locate all occurences of a value in a Json tree. 
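The result is a binary array of the paths where the value was found; an illustrative call could be:
SELECT bson_serialize(bbin_locate_all('[1,2,1,[1,3]]', 1));
expected to return something like ["$[0]","$[2]","$[3][0]"], the optional third argument limiting the number of occurrences returned (10 by default).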
*/ +/*********************************************************************************/ +my_bool bbin_locate_all_init(UDF_INIT* initid, UDF_ARGS* args, char* message) { + return bson_locate_all_init(initid, args, message); +} // end of bbin_locate_all_init + +char* bbin_locate_all(UDF_INIT* initid, UDF_ARGS* args, char* result, + unsigned long* res_length, char* is_null, char* error) { + char *path = NULL; + int mx = 10; + PBVAL bvp, bvp2; + PGLOBAL g = (PGLOBAL)initid->ptr; + PBSON bsp = NULL; + + if (g->N) { + if (g->Activityp) { + bsp = (PBSON)g->Activityp; + *res_length = sizeof(BSON); + return (char*)bsp; + } else { + *error = 1; + *res_length = 0; + *is_null = 1; + return NULL; + } // endif Activityp + + } else if (initid->const_item) + g->N = 1; + + try { + PBVAL top = NULL; + BJNX bnx(g); + + if (!g->Xchk) { + if (CheckMemory(g, initid, args, 1, true)) { + PUSH_WARNING("CheckMemory error"); + *error = 1; + goto err; + } else + bnx.Reset(); + + bvp = bnx.MakeValue(args, 0, true, &top); + + if (bvp->Type == TYPE_NULL) { + PUSH_WARNING("First argument is not a valid JSON item"); + goto err; + } // endif bvp + + if (g->Mrr) { // First argument is a constant + g->Xchk = bvp; + g->More = (size_t)top; + JsonMemSave(g); + } // endif Mrr + + } else { + bvp = (PBVAL)g->Xchk; + top = (PBVAL)g->More; + } // endif Xchk + + // The item to locate + bvp2 = bnx.MakeValue(args, 1, true); + + if (bvp2->Type == TYPE_NULL) { + PUSH_WARNING("Invalid second argument"); + goto err; + } // endif bvp2 + + if (args->arg_count > 2) + mx = (int)*(long long*)args->args[2]; + + if ((path = bnx.LocateAll(g, bvp, bvp2, mx))) { + bsp = bnx.MakeBinResult(g, args, top, initid->max_length); + bsp->Jsp = (PJSON)bnx.ParseJson(g, path, strlen(path)); + } // endif path + + if (initid->const_item) + // Keep result of constant function + g->Activityp = (PACTIVITY)bsp; + + } catch (int n) { + xtrc(1, "Exception %d: %s\n", n, g->Message); + PUSH_WARNING(g->Message); + *error = 1; + path = NULL; + } catch (const char* msg) { + strcpy(g->Message, msg); + PUSH_WARNING(g->Message); + *error = 1; + path = NULL; + } // end catch + +err: + if (!bsp) { + *res_length = 0; + *is_null = 1; + } else + *res_length = sizeof(BSON); + + return (char*)bsp; +} // end of bbin_locate_all + +void bbin_locate_all_deinit(UDF_INIT* initid) { + JsonFreeMem((PGLOBAL)initid->ptr); +} // end of bbin_locate_all_deinit + diff --git a/storage/connect/bsonudf.h b/storage/connect/bsonudf.h index 251af86a32b..7e743c8a72a 100644 --- a/storage/connect/bsonudf.h +++ b/storage/connect/bsonudf.h @@ -1,7 +1,7 @@ /******************** tabjson H Declares Source Code File (.H) *******************/ /* Name: bsonudf.h Version 1.0 */ /* */ -/* (C) Copyright to the author Olivier BERTRAND 2020 */ +/* (C) Copyright to the author Olivier BERTRAND 2020 - 2021 */ /* */ /* This file contains the BSON UDF function and class declares. 
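Each bbin_xxx function generally takes the same arguments as the corresponding bson_xxx one (most of their init functions simply call the bson_xxx ones) but returns its result as a binary BSON tree, usable as argument of other bbin/bson UDFs, rather than as a serialized string.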
*/ /*********************************************************************************/ @@ -96,6 +96,7 @@ public: int GetPrecision(void) { return Prec; } PVAL GetValue(void) { return Value; } void SetRow(PBVAL vp) { Row = vp; } + void SetChanged(my_bool b) { Changed = b; } // Methods my_bool SetJpath(PGLOBAL g, char* path, my_bool jb = false); @@ -106,14 +107,16 @@ public: my_bool CheckPath(PGLOBAL g); my_bool CheckPath(PGLOBAL g, UDF_ARGS* args, PBVAL jsp, PBVAL& jvp, int n); my_bool WriteValue(PGLOBAL g, PBVAL jvalp); + my_bool DeleteItem(PGLOBAL g, PBVAL vlp); char *Locate(PGLOBAL g, PBVAL jsp, PBVAL jvp, int k = 1); char *LocateAll(PGLOBAL g, PBVAL jsp, PBVAL jvp, int mx = 10); PSZ MakeKey(UDF_ARGS* args, int i); - PBVAL MakeBinValue(PGLOBAL g, UDF_ARGS* args, uint i); - PBVAL MakeValue(PGLOBAL g, UDF_ARGS* args, uint i, PBVAL* top = NULL); + PBVAL MakeValue(UDF_ARGS* args, uint i, bool b = false, PBVAL* top = NULL); PBVAL MakeTypedValue(PGLOBAL g, UDF_ARGS* args, uint i, JTYP type, PBVAL* top = NULL); PBVAL ParseJsonFile(PGLOBAL g, char* fn, int& pty, size_t& len); + char *MakeResult(UDF_ARGS* args, PBVAL top, uint n = 2); + PBSON MakeBinResult(PGLOBAL g, UDF_ARGS* args, PBVAL top, ulong len, int n = 2); protected: my_bool SetArrayOptions(PGLOBAL g, char* p, int i, PSZ nm); @@ -159,6 +162,7 @@ protected: my_bool Found; // Item found by locate my_bool Wr; // Write mode my_bool Jb; // Must return json item + my_bool Changed; // True when contains was modified }; // end of class BJNX extern "C" { @@ -268,6 +272,10 @@ extern "C" { DllExport char *bson_object_grp(UDF_EXEC_ARGS); DllExport void bson_object_grp_deinit(UDF_INIT*); + DllExport my_bool bson_delete_item_init(UDF_INIT*, UDF_ARGS*, char*); + DllExport char *bson_delete_item(UDF_EXEC_ARGS); + DllExport void bson_delete_item_deinit(UDF_INIT*); + DllExport my_bool bson_set_item_init(UDF_INIT*, UDF_ARGS*, char*); DllExport char *bson_set_item(UDF_EXEC_ARGS); DllExport void bson_set_item_deinit(UDF_INIT*); @@ -295,4 +303,92 @@ extern "C" { DllExport my_bool bfile_bjson_init(UDF_INIT*, UDF_ARGS*, char*); DllExport char* bfile_bjson(UDF_EXEC_ARGS); DllExport void bfile_bjson_deinit(UDF_INIT*); + + DllExport my_bool bson_serialize_init(UDF_INIT*, UDF_ARGS*, char*); + DllExport char *bson_serialize(UDF_EXEC_ARGS); + DllExport void bson_serialize_deinit(UDF_INIT*); + + DllExport my_bool bbin_make_array_init(UDF_INIT*, UDF_ARGS*, char*); + DllExport char* bbin_make_array(UDF_EXEC_ARGS); + DllExport void bbin_make_array_deinit(UDF_INIT*); + + DllExport my_bool bbin_array_add_init(UDF_INIT*, UDF_ARGS*, char*); + DllExport char* bbin_array_add(UDF_EXEC_ARGS); + DllExport void bbin_array_add_deinit(UDF_INIT*); + + DllExport my_bool bbin_array_add_values_init(UDF_INIT*, UDF_ARGS*, char*); + DllExport char* bbin_array_add_values(UDF_EXEC_ARGS); + DllExport void bbin_array_add_values_deinit(UDF_INIT*); + + DllExport my_bool bbin_array_delete_init(UDF_INIT*, UDF_ARGS*, char*); + DllExport char* bbin_array_delete(UDF_EXEC_ARGS); + DllExport void bbin_array_delete_deinit(UDF_INIT*); + + DllExport my_bool bbin_array_grp_init(UDF_INIT*, UDF_ARGS*, char*); + DllExport void bbin_array_grp_clear(UDF_INIT *, char *, char *); + DllExport void bbin_array_grp_add(UDF_INIT *, UDF_ARGS *, char *, char *); + DllExport char *bbin_array_grp(UDF_EXEC_ARGS); + DllExport void bbin_array_grp_deinit(UDF_INIT*); + + DllExport my_bool bbin_object_grp_init(UDF_INIT*, UDF_ARGS*, char*); + DllExport void bbin_object_grp_clear(UDF_INIT *, char *, char *); + DllExport void 
bbin_object_grp_add(UDF_INIT *, UDF_ARGS *, char *, char *); + DllExport char *bbin_object_grp(UDF_EXEC_ARGS); + DllExport void bbin_object_grp_deinit(UDF_INIT*); + + DllExport my_bool bbin_make_object_init(UDF_INIT*, UDF_ARGS*, char*); + DllExport char* bbin_make_object(UDF_EXEC_ARGS); + DllExport void bbin_make_object_deinit(UDF_INIT*); + + DllExport my_bool bbin_object_nonull_init(UDF_INIT*, UDF_ARGS*, char*); + DllExport char* bbin_object_nonull(UDF_EXEC_ARGS); + DllExport void bbin_object_nonull_deinit(UDF_INIT*); + + DllExport my_bool bbin_object_key_init(UDF_INIT*, UDF_ARGS*, char*); + DllExport char* bbin_object_key(UDF_EXEC_ARGS); + DllExport void bbin_object_key_deinit(UDF_INIT*); + + DllExport my_bool bbin_object_add_init(UDF_INIT*, UDF_ARGS*, char*); + DllExport char *bbin_object_add(UDF_EXEC_ARGS); + DllExport void bbin_object_add_deinit(UDF_INIT*); + + DllExport my_bool bbin_object_delete_init(UDF_INIT*, UDF_ARGS*, char*); + DllExport char *bbin_object_delete(UDF_EXEC_ARGS); + DllExport void bbin_object_delete_deinit(UDF_INIT*); + + DllExport my_bool bbin_object_list_init(UDF_INIT*, UDF_ARGS*, char*); + DllExport char *bbin_object_list(UDF_EXEC_ARGS); + DllExport void bbin_object_list_deinit(UDF_INIT*); + + DllExport my_bool bbin_object_values_init(UDF_INIT*, UDF_ARGS*, char*); + DllExport char* bbin_object_values(UDF_EXEC_ARGS); + DllExport void bbin_object_values_deinit(UDF_INIT*); + + DllExport my_bool bbin_get_item_init(UDF_INIT*, UDF_ARGS*, char*); + DllExport char *bbin_get_item(UDF_EXEC_ARGS); + DllExport void bbin_get_item_deinit(UDF_INIT*); + + DllExport my_bool bbin_set_item_init(UDF_INIT*, UDF_ARGS*, char*); + DllExport char *bbin_set_item(UDF_EXEC_ARGS); + DllExport void bbin_set_item_deinit(UDF_INIT*); + + DllExport my_bool bbin_insert_item_init(UDF_INIT*, UDF_ARGS*, char*); + DllExport char *bbin_insert_item(UDF_EXEC_ARGS); + DllExport void bbin_insert_item_deinit(UDF_INIT*); + + DllExport my_bool bbin_update_item_init(UDF_INIT*, UDF_ARGS*, char*); + DllExport char *bbin_update_item(UDF_EXEC_ARGS); + DllExport void bbin_update_item_deinit(UDF_INIT*); + + DllExport my_bool bbin_delete_item_init(UDF_INIT*, UDF_ARGS*, char*); + DllExport char *bbin_delete_item(UDF_EXEC_ARGS); + DllExport void bbin_delete_item_deinit(UDF_INIT*); + + DllExport my_bool bbin_locate_all_init(UDF_INIT*, UDF_ARGS*, char*); + DllExport char* bbin_locate_all(UDF_EXEC_ARGS); + DllExport void bbin_locate_all_deinit(UDF_INIT*); + + DllExport my_bool bbin_file_init(UDF_INIT*, UDF_ARGS*, char*); + DllExport char *bbin_file(UDF_EXEC_ARGS); + DllExport void bbin_file_deinit(UDF_INIT*); } // extern "C" diff --git a/storage/connect/global.h b/storage/connect/global.h index d4a46e1c862..8774285e54b 100644 --- a/storage/connect/global.h +++ b/storage/connect/global.h @@ -185,7 +185,7 @@ typedef struct _global { /* Global structure */ size_t Sarea_Size; /* Work area size */ PACTIVITY Activityp; char Message[MAX_STR]; /* Message (result, error, trace) */ - ulong More; /* Used by jsonudf */ + size_t More; /* Used by jsonudf */ size_t Saved_Size; /* Saved work area to_free */ bool Createas; /* To pass multi to ext tables */ void *Xchk; /* indexes in create/alter */ diff --git a/storage/connect/json.cpp b/storage/connect/json.cpp index bcbd71b5031..0152a44fffa 100644 --- a/storage/connect/json.cpp +++ b/storage/connect/json.cpp @@ -333,25 +333,30 @@ bool JOUTSTR::WriteChr(const char c) { /***********************************************************************/ /* Escape and Concatenate a string to the 
Serialize string. */ /***********************************************************************/ -bool JOUTSTR::Escape(const char* s) { - WriteChr('"'); +bool JOUTSTR::Escape(const char* s) +{ + if (s) { + WriteChr('"'); - for (unsigned int i = 0; s[i]; i++) - switch (s[i]) { - case '"': - case '\\': - case '\t': - case '\n': - case '\r': - case '\b': - case '\f': WriteChr('\\'); - // fall through - default: - WriteChr(s[i]); - break; - } // endswitch s[i] + for (unsigned int i = 0; s[i]; i++) + switch (s[i]) { + case '"': + case '\\': + case '\t': + case '\n': + case '\r': + case '\b': + case '\f': WriteChr('\\'); + // fall through + default: + WriteChr(s[i]); + break; + } // endswitch s[i] + + WriteChr('"'); + } else + WriteStr("null"); - WriteChr('"'); return false; } // end of Escape @@ -360,7 +365,8 @@ bool JOUTSTR::Escape(const char* s) { /***********************************************************************/ /* Write a string to the Serialize file. */ /***********************************************************************/ -bool JOUTFILE::WriteStr(const char* s) { +bool JOUTFILE::WriteStr(const char* s) +{ // This is temporary fputs(s, Stream); return false; @@ -369,7 +375,8 @@ bool JOUTFILE::WriteStr(const char* s) { /***********************************************************************/ /* Write a character to the Serialize file. */ /***********************************************************************/ -bool JOUTFILE::WriteChr(const char c) { +bool JOUTFILE::WriteChr(const char c) +{ // This is temporary fputc(c, Stream); return false; @@ -378,25 +385,30 @@ bool JOUTFILE::WriteChr(const char c) { /***********************************************************************/ /* Escape and Concatenate a string to the Serialize string. */ /***********************************************************************/ -bool JOUTFILE::Escape(const char* s) { +bool JOUTFILE::Escape(const char* s) +{ // This is temporary - fputc('"', Stream); + if (s) { + fputc('"', Stream); - for (unsigned int i = 0; s[i]; i++) - switch (s[i]) { - case '"': fputs("\\\"", Stream); break; - case '\\': fputs("\\\\", Stream); break; - case '\t': fputs("\\t", Stream); break; - case '\n': fputs("\\n", Stream); break; - case '\r': fputs("\\r", Stream); break; - case '\b': fputs("\\b", Stream); break; - case '\f': fputs("\\f", Stream); break; - default: - fputc(s[i], Stream); - break; - } // endswitch s[i] + for (unsigned int i = 0; s[i]; i++) + switch (s[i]) { + case '"': fputs("\\\"", Stream); break; + case '\\': fputs("\\\\", Stream); break; + case '\t': fputs("\\t", Stream); break; + case '\n': fputs("\\n", Stream); break; + case '\r': fputs("\\r", Stream); break; + case '\b': fputs("\\b", Stream); break; + case '\f': fputs("\\f", Stream); break; + default: + fputc(s[i], Stream); + break; + } // endswitch s[i] + + fputc('"', Stream); + } else + fputs("null", Stream); - fputc('"', Stream); return false; } // end of Escape @@ -405,7 +417,8 @@ bool JOUTFILE::Escape(const char* s) { /***********************************************************************/ /* Write a string to the Serialize pretty file. */ /***********************************************************************/ -bool JOUTPRT::WriteStr(const char* s) { +bool JOUTPRT::WriteStr(const char* s) +{ // This is temporary if (B) { fputs(EL, Stream); @@ -424,7 +437,8 @@ bool JOUTPRT::WriteStr(const char* s) { /***********************************************************************/ /* Write a character to the Serialize pretty file. 
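For instance, a ':' separator is written as a colon followed by a space, so that the pretty output reads like "NAME": value.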
*/ /***********************************************************************/ -bool JOUTPRT::WriteChr(const char c) { +bool JOUTPRT::WriteChr(const char c) +{ switch (c) { case ':': fputs(": ", Stream); diff --git a/storage/connect/jsonudf.cpp b/storage/connect/jsonudf.cpp index 3d6de7ab3d5..53818cbe00b 100644 --- a/storage/connect/jsonudf.cpp +++ b/storage/connect/jsonudf.cpp @@ -1155,7 +1155,7 @@ PBSON JbinAlloc(PGLOBAL g, UDF_ARGS *args, ulong len, PJSON jsp) /*********************************************************************************/ /* Set the BSON chain as changed. */ /*********************************************************************************/ -static void SetChanged(PBSON bsp) +void SetChanged(PBSON bsp) { if (bsp->Bsp) SetChanged(bsp->Bsp); diff --git a/storage/connect/mysql-test/connect/disabled.def b/storage/connect/mysql-test/connect/disabled.def index e1f6219f89f..5107de7a930 100644 --- a/storage/connect/mysql-test/connect/disabled.def +++ b/storage/connect/mysql-test/connect/disabled.def @@ -16,9 +16,12 @@ jdbc_postgresql : Variable settings depend on machine configuration json_mongo_c : Need MongoDB running and its C Driver installed json_java_2 : Need MongoDB running and its Java Driver installed json_java_3 : Need MongoDB running and its Java Driver installed +bson_mongo_c : Need MongoDB running and its C Driver installed +bson_java_2 : Need MongoDB running and its Java Driver installed +bson_java_3 : Need MongoDB running and its Java Driver installed mongo_c : Need MongoDB running and its C Driver installed mongo_java_2 : Need MongoDB running and its Java Driver installed mongo_java_3 : Need MongoDB running and its Java Driver installed tbl_thread : Bug MDEV-9844,10179,14214 03/01/2018 OB Option THREAD removed -bson : Development +#bson : Development #vcol : Different error code on different versions diff --git a/storage/connect/mysql-test/connect/r/bson.result b/storage/connect/mysql-test/connect/r/bson.result new file mode 100644 index 00000000000..fd15e020aac --- /dev/null +++ b/storage/connect/mysql-test/connect/r/bson.result @@ -0,0 +1,517 @@ +# +# Testing doc samples +# +CREATE TABLE t1 +( +ISBN CHAR(15), +LANG CHAR(2), +SUBJECT CHAR(32), +AUTHOR CHAR(64), +TITLE CHAR(32), +TRANSLATION CHAR(32), +TRANSLATOR CHAR(80), +PUBLISHER CHAR(32), +DATEPUB int(4) +) ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='biblio.json'; +SELECT * FROM t1; +ISBN LANG SUBJECT AUTHOR TITLE TRANSLATION TRANSLATOR PUBLISHER DATEPUB +9782212090819 fr applications Jean-Christophe Bernadac, Franois Knab Construire une application XML NULL NULL Eyrolles Paris 1999 +9782840825685 fr applications William J. Pardi XML en Action adapt de l'anglais par James Guerin Microsoft Press Paris 1999 +DROP TABLE t1; +# +# Testing Jpath. 
Get the number of authors +# +CREATE TABLE t1 +( +ISBN CHAR(15), +Language CHAR(2) JPATH='$.LANG', +Subject CHAR(32) JPATH='$.SUBJECT', +Authors INT(2) JPATH='$.AUTHOR[#]', +Title CHAR(32) JPATH='$.TITLE', +Translation CHAR(32) JPATH='$.TRANSLATION', +Translator CHAR(80) JPATH='$.TRANSLATOR', +Publisher CHAR(20) JPATH='$.PUBLISHER.NAME', +Location CHAR(16) JPATH='$.PUBLISHER.PLACE', +Year int(4) JPATH='$.DATEPUB' +) +ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='biblio.json'; +SELECT * FROM t1; +ISBN Language Subject Authors Title Translation Translator Publisher Location Year +9782212090819 fr applications 2 Construire une application XML NULL NULL Eyrolles Paris 1999 +9782840825685 fr applications 1 XML en Action adapt de l'anglais par James Guerin Microsoft Press Paris 1999 +DROP TABLE t1; +# +# Concatenates the authors +# +CREATE TABLE t1 +( +ISBN CHAR(15), +Language CHAR(2) JPATH='$.LANG', +Subject CHAR(32) JPATH='$.SUBJECT', +AuthorFN CHAR(128) JPATH='$.AUTHOR[" and "].FIRSTNAME', +AuthorLN CHAR(128) JPATH='$.AUTHOR[" and "].LASTNAME', +Title CHAR(32) JPATH='$.TITLE', +Translation CHAR(32) JPATH='$.TRANSLATION', +Translator CHAR(80) JPATH='$.TRANSLATOR', +Publisher CHAR(20) JPATH='$.PUBLISHER.NAME', +Location CHAR(16) JPATH='$.PUBLISHER.PLACE', +Year int(4) JPATH='$.DATEPUB' +) +ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='biblio.json'; +SELECT * FROM t1; +ISBN Language Subject AuthorFN AuthorLN Title Translation Translator Publisher Location Year +9782212090819 fr applications Jean-Christophe and Franois Bernadac and Knab Construire une application XML NULL NULL Eyrolles Paris 1999 +9782840825685 fr applications William J. Pardi XML en Action adapt de l'anglais par James Guerin Microsoft Press Paris 1999 +DROP TABLE t1; +# +# Testing expanding authors +# +CREATE TABLE t1 +( +ISBN CHAR(15), +Language CHAR(2) JPATH='$.LANG', +Subject CHAR(32) JPATH='$.SUBJECT', +AuthorFN CHAR(128) JPATH='$.AUTHOR[*].FIRSTNAME', +AuthorLN CHAR(128) JPATH='$.AUTHOR[*].LASTNAME', +Title CHAR(32) JPATH='$.TITLE', +Translation CHAR(32) JPATH='$.TRANSLATION', +Translator CHAR(80) JPATH='$.TRANSLATOR', +Publisher CHAR(20) JPATH='$.PUBLISHER.NAME', +Location CHAR(16) JPATH='$.PUBLISHER.PLACE', +Year int(4) JPATH='$.DATEPUB' +) +ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='biblio.json'; +SELECT * FROM t1; +ISBN Language Subject AuthorFN AuthorLN Title Translation Translator Publisher Location Year +9782212090819 fr applications Jean-Christophe Bernadac Construire une application XML NULL NULL Eyrolles Paris 1999 +9782212090819 fr applications Franois Knab Construire une application XML NULL NULL Eyrolles Paris 1999 +9782840825685 fr applications William J. Pardi XML en Action adapt de l'anglais par James Guerin Microsoft Press Paris 1999 +UPDATE t1 SET AuthorFN = 'Philippe' WHERE AuthorLN = 'Knab'; +SELECT * FROM t1 WHERE ISBN = '9782212090819'; +ISBN Language Subject AuthorFN AuthorLN Title Translation Translator Publisher Location Year +9782212090819 fr applications Jean-Christophe Bernadac Construire une application XML NULL NULL Eyrolles Paris 1999 +9782212090819 fr applications Philippe Knab Construire une application XML NULL NULL Eyrolles Paris 1999 +# +# To add an author a new table must be created +# +CREATE TABLE t2 ( +FIRSTNAME CHAR(32), +LASTNAME CHAR(32)) +ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='biblio.json' OPTION_LIST='Object=$[1].AUTHOR'; +SELECT * FROM t2; +FIRSTNAME LASTNAME +William J. 
Pardi +INSERT INTO t2 VALUES('Charles','Dickens'); +SELECT * FROM t1; +ISBN Language Subject AuthorFN AuthorLN Title Translation Translator Publisher Location Year +9782212090819 fr applications Jean-Christophe Bernadac Construire une application XML NULL NULL Eyrolles Paris 1999 +9782212090819 fr applications Philippe Knab Construire une application XML NULL NULL Eyrolles Paris 1999 +9782840825685 fr applications William J. Pardi XML en Action adapt de l'anglais par James Guerin Microsoft Press Paris 1999 +9782840825685 fr applications Charles Dickens XML en Action adapt de l'anglais par James Guerin Microsoft Press Paris 1999 +DROP TABLE t1; +DROP TABLE t2; +# +# Check the biblio file has the good format +# +CREATE TABLE t1 +( +line char(255) +) +ENGINE=CONNECT TABLE_TYPE=DOS FILE_NAME='biblio.json'; +SELECT * FROM t1; +line +[ + { + "ISBN": "9782212090819", + "LANG": "fr", + "SUBJECT": "applications", + "AUTHOR": [ + { + "FIRSTNAME": "Jean-Christophe", + "LASTNAME": "Bernadac" + }, + { + "FIRSTNAME": "Philippe", + "LASTNAME": "Knab" + } + ], + "TITLE": "Construire une application XML", + "PUBLISHER": { + "NAME": "Eyrolles", + "PLACE": "Paris" + }, + "DATEPUB": 1999 + }, + { + "ISBN": "9782840825685", + "LANG": "fr", + "SUBJECT": "applications", + "AUTHOR": [ + { + "FIRSTNAME": "William J.", + "LASTNAME": "Pardi" + }, + { + "FIRSTNAME": "Charles", + "LASTNAME": "Dickens" + } + ], + "TITLE": "XML en Action", + "TRANSLATION": "adapt de l'anglais par", + "TRANSLATOR": { + "FIRSTNAME": "James", + "LASTNAME": "Guerin" + }, + "PUBLISHER": { + "NAME": "Microsoft Press", + "PLACE": "Paris" + }, + "DATEPUB": 1999 + } +] +DROP TABLE t1; +# +# Testing a pretty=0 file +# +CREATE TABLE t1 +( +ISBN CHAR(15) NOT NULL, +Language CHAR(2) JPATH='$.LANG', +Subject CHAR(32) JPATH='$.SUBJECT', +AuthorFN CHAR(128) JPATH='$.AUTHOR[*].FIRSTNAME', +AuthorLN CHAR(128) JPATH='$.AUTHOR[*].LASTNAME', +Title CHAR(32) JPATH='$.TITLE', +Translation CHAR(32) JPATH='$.TRANSLATED.PREFIX', +TranslatorFN CHAR(80) JPATH='$.TRANSLATED.TRANSLATOR.FIRSTNAME', +TranslatorLN CHAR(80) JPATH='$.TRANSLATED.TRANSLATOR.LASTNAME', +Publisher CHAR(20) JPATH='$.PUBLISHER.NAME', +Location CHAR(16) JPATH='$.PUBLISHER.PLACE', +Year int(4) JPATH='$.DATEPUB', +INDEX IX(ISBN) +) +ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='bib0.json' LRECL=320 OPTION_LIST='Pretty=0'; +SHOW INDEX FROM t1; +Table Non_unique Key_name Seq_in_index Column_name Collation Cardinality Sub_part Packed Null Index_type Comment Index_comment +t1 1 IX 1 ISBN A NULL NULL NULL XINDEX +SELECT * FROM t1; +ISBN Language Subject AuthorFN AuthorLN Title Translation TranslatorFN TranslatorLN Publisher Location Year +9782212090819 fr applications Jean-Michel Bernadac Construire une application XML NULL NULL NULL Eyrolles Paris 1999 +9782212090819 fr applications Franois Knab Construire une application XML NULL NULL NULL Eyrolles Paris 1999 +9782840825685 fr applications William J. 
Pardi XML en Action adapt de l'anglais par James Guerin Microsoft Press Paris 2001 +DESCRIBE SELECT * FROM t1 WHERE ISBN = '9782212090819'; +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t1 ref IX IX 15 const 1 Using where +UPDATE t1 SET AuthorFN = 'Philippe' WHERE ISBN = '9782212090819'; +ERROR HY000: Got error 122 'Cannot write expanded column when Pretty is not 2' from CONNECT +DROP TABLE t1; +# +# A file with 2 arrays +# +CREATE TABLE t1 ( +WHO CHAR(12), +WEEK INT(2) JPATH='$.WEEK[*].NUMBER', +WHAT CHAR(32) JPATH='$.WEEK[].EXPENSE["+"].WHAT', +AMOUNT DOUBLE(8,2) JPATH='$.WEEK[].EXPENSE[+].AMOUNT') +ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='expense.json'; +SELECT * FROM t1; +WHO WEEK WHAT AMOUNT +Joe 3 Beer+Food+Food+Car 69.00 +Joe 4 Beer+Beer+Food+Food+Beer 83.00 +Joe 5 Beer+Food 26.00 +Beth 3 Beer 16.00 +Beth 4 Food+Beer 32.00 +Beth 5 Food+Beer 32.00 +Janet 3 Car+Food+Beer 55.00 +Janet 4 Car 17.00 +Janet 5 Beer+Car+Beer+Food 57.00 +DROP TABLE t1; +# +# Now it can be fully expanded +# +CREATE TABLE t1 ( +WHO CHAR(12), +WEEK INT(2) JPATH='$.WEEK[*].NUMBER', +WHAT CHAR(32) JPATH='$.WEEK[*].EXPENSE[*].WHAT', +AMOUNT DOUBLE(8,2) JPATH='$.WEEK[*].EXPENSE[*].AMOUNT') +ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='expense.json'; +SELECT * FROM t1; +WHO WEEK WHAT AMOUNT +Joe 3 Beer 18.00 +Joe 3 Food 12.00 +Joe 3 Food 19.00 +Joe 3 Car 20.00 +Joe 4 Beer 19.00 +Joe 4 Beer 16.00 +Joe 4 Food 17.00 +Joe 4 Food 17.00 +Joe 4 Beer 14.00 +Joe 5 Beer 14.00 +Joe 5 Food 12.00 +Beth 3 Beer 16.00 +Beth 4 Food 17.00 +Beth 4 Beer 15.00 +Beth 5 Food 12.00 +Beth 5 Beer 20.00 +Janet 3 Car 19.00 +Janet 3 Food 18.00 +Janet 3 Beer 18.00 +Janet 4 Car 17.00 +Janet 5 Beer 14.00 +Janet 5 Car 12.00 +Janet 5 Beer 19.00 +Janet 5 Food 12.00 +DROP TABLE t1; +# +# A table showing many calculated results +# +CREATE TABLE t1 ( +WHO CHAR(12) NOT NULL, +WEEKS CHAR(12) NOT NULL JPATH='$.WEEK[", "].NUMBER', +SUMS CHAR(64) NOT NULL JPATH='$.WEEK["+"].EXPENSE[+].AMOUNT', +SUM DOUBLE(8,2) NOT NULL JPATH='$.WEEK[+].EXPENSE[+].AMOUNT', +AVGS CHAR(64) NOT NULL JPATH='$.WEEK["+"].EXPENSE[!].AMOUNT', +SUMAVG DOUBLE(8,2) NOT NULL JPATH='$.WEEK[+].EXPENSE[!].AMOUNT', +AVGSUM DOUBLE(8,2) NOT NULL JPATH='$.WEEK[!].EXPENSE[+].AMOUNT', +AVGAVG DOUBLE(8,2) NOT NULL JPATH='$.WEEK[!].EXPENSE[!].AMOUNT', +AVERAGE DOUBLE(8,2) NOT NULL JPATH='$.WEEK[!].EXPENSE[*].AMOUNT') +ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='expense.json'; +SELECT * FROM t1; +WHO WEEKS SUMS SUM AVGS SUMAVG AVGSUM AVGAVG AVERAGE +Joe 3, 4, 5 69.00+83.00+26.00 178.00 17.25+16.60+13.00 46.85 59.33 15.62 16.18 +Beth 3, 4, 5 16.00+32.00+32.00 80.00 16.00+16.00+16.00 48.00 26.67 16.00 16.00 +Janet 3, 4, 5 55.00+17.00+57.00 129.00 18.33+17.00+14.25 49.58 43.00 16.53 16.12 +DROP TABLE t1; +# +# Expand expense in 3 one week tables +# +CREATE TABLE t2 ( +WHO CHAR(12), +WEEK INT(2) JPATH='$.WEEK[0].NUMBER', +WHAT CHAR(32) JPATH='$.WEEK[0].EXPENSE[*].WHAT', +AMOUNT DOUBLE(8,2) JPATH='$.WEEK[0].EXPENSE[*].AMOUNT') +ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='expense.json'; +SELECT * FROM t2; +WHO WEEK WHAT AMOUNT +Joe 3 Beer 18.00 +Joe 3 Food 12.00 +Joe 3 Food 19.00 +Joe 3 Car 20.00 +Beth 3 Beer 16.00 +Janet 3 Car 19.00 +Janet 3 Food 18.00 +Janet 3 Beer 18.00 +CREATE TABLE t3 ( +WHO CHAR(12), +WEEK INT(2) JPATH='$.WEEK[1].NUMBER', +WHAT CHAR(32) JPATH='$.WEEK[1].EXPENSE[*].WHAT', +AMOUNT DOUBLE(8,2) JPATH='$.WEEK[1].EXPENSE[*].AMOUNT') +ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='expense.json'; +SELECT * FROM t3; +WHO WEEK WHAT AMOUNT +Joe 4 Beer 19.00 +Joe 4 Beer 
16.00 +Joe 4 Food 17.00 +Joe 4 Food 17.00 +Joe 4 Beer 14.00 +Beth 4 Food 17.00 +Beth 4 Beer 15.00 +Janet 4 Car 17.00 +CREATE TABLE t4 ( +WHO CHAR(12), +WEEK INT(2) JPATH='$.WEEK[2].NUMBER', +WHAT CHAR(32) JPATH='$.WEEK[2].EXPENSE[*].WHAT', +AMOUNT DOUBLE(8,2) JPATH='$.WEEK[2].EXPENSE[*].AMOUNT') +ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='expense.json'; +SELECT * FROM t4; +WHO WEEK WHAT AMOUNT +Joe 5 Beer 14.00 +Joe 5 Food 12.00 +Beth 5 Food 12.00 +Beth 5 Beer 20.00 +Janet 5 Beer 14.00 +Janet 5 Car 12.00 +Janet 5 Beer 19.00 +Janet 5 Food 12.00 +# +# The expanded table is made as a TBL table +# +CREATE TABLE t1 ( +WHO CHAR(12), +WEEK INT(2), +WHAT CHAR(32), +AMOUNT DOUBLE(8,2)) +ENGINE=CONNECT TABLE_TYPE=TBL TABLE_LIST='t2,t3,t4'; +SELECT * FROM t1; +WHO WEEK WHAT AMOUNT +Joe 3 Beer 18.00 +Joe 3 Food 12.00 +Joe 3 Food 19.00 +Joe 3 Car 20.00 +Beth 3 Beer 16.00 +Janet 3 Car 19.00 +Janet 3 Food 18.00 +Janet 3 Beer 18.00 +Joe 4 Beer 19.00 +Joe 4 Beer 16.00 +Joe 4 Food 17.00 +Joe 4 Food 17.00 +Joe 4 Beer 14.00 +Beth 4 Food 17.00 +Beth 4 Beer 15.00 +Janet 4 Car 17.00 +Joe 5 Beer 14.00 +Joe 5 Food 12.00 +Beth 5 Food 12.00 +Beth 5 Beer 20.00 +Janet 5 Beer 14.00 +Janet 5 Car 12.00 +Janet 5 Beer 19.00 +Janet 5 Food 12.00 +DROP TABLE t1, t2, t3, t4; +# +# Three partial JSON tables +# +CREATE TABLE t2 ( +WHO CHAR(12), +WEEK INT(2), +WHAT CHAR(32) JPATH='$.EXPENSE[*].WHAT', +AMOUNT DOUBLE(8,2) JPATH='$.EXPENSE.[*].AMOUNT') +ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='mulexp3.json'; +SELECT * FROM t2; +WHO WEEK WHAT AMOUNT +Joe 3 Beer 18.00 +Joe 3 Food 12.00 +Joe 3 Food 19.00 +Joe 3 Car 20.00 +Beth 3 Beer 16.00 +Janet 3 Car 19.00 +Janet 3 Food 18.00 +Janet 3 Beer 18.00 +CREATE TABLE t3 ( +WHO CHAR(12), +WEEK INT(2), +WHAT CHAR(32) JPATH='$.EXPENSE[*].WHAT', +AMOUNT DOUBLE(8,2) JPATH='$.EXPENSE.[*].AMOUNT') +ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='mulexp4.json'; +SELECT * FROM t3; +WHO WEEK WHAT AMOUNT +Joe 4 Beer 19.00 +Joe 4 Beer 16.00 +Joe 4 Food 17.00 +Joe 4 Food 17.00 +Joe 4 Beer 14.00 +Beth 4 Food 17.00 +Beth 4 Beer 15.00 +Janet 4 Car 17.00 +CREATE TABLE t4 ( +WHO CHAR(12), +WEEK INT(2), +WHAT CHAR(32) JPATH='$.EXPENSE[*].WHAT', +AMOUNT DOUBLE(8,2) JPATH='$.EXPENSE.[*].AMOUNT') +ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='mulexp5.json'; +SELECT * FROM t4; +WHO WEEK WHAT AMOUNT +Joe 5 Beer 14.00 +Joe 5 Food 12.00 +Beth 5 Food 12.00 +Beth 5 Beer 20.00 +Janet 5 Beer 14.00 +Janet 5 Car 12.00 +Janet 5 Beer 19.00 +Janet 5 Food 12.00 +# +# The complete table can be a multiple JSON table +# +CREATE TABLE t1 ( +WHO CHAR(12), +WEEK INT(2), +WHAT CHAR(32) JPATH='$.EXPENSE[*].WHAT', +AMOUNT DOUBLE(8,2) JPATH='$.EXPENSE.[*].AMOUNT') +ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='mulexp*.json' MULTIPLE=1; +SELECT * FROM t1 ORDER BY WHO, WEEK, WHAT, AMOUNT; +WHO WEEK WHAT AMOUNT +Beth 3 Beer 16.00 +Beth 4 Beer 15.00 +Beth 4 Food 17.00 +Beth 5 Beer 20.00 +Beth 5 Food 12.00 +Janet 3 Beer 18.00 +Janet 3 Car 19.00 +Janet 3 Food 18.00 +Janet 4 Car 17.00 +Janet 5 Beer 14.00 +Janet 5 Beer 19.00 +Janet 5 Car 12.00 +Janet 5 Food 12.00 +Joe 3 Beer 18.00 +Joe 3 Car 20.00 +Joe 3 Food 12.00 +Joe 3 Food 19.00 +Joe 4 Beer 14.00 +Joe 4 Beer 16.00 +Joe 4 Beer 19.00 +Joe 4 Food 17.00 +Joe 4 Food 17.00 +Joe 5 Beer 14.00 +Joe 5 Food 12.00 +DROP TABLE t1; +# +# Or also a partition JSON table +# +CREATE TABLE t1 ( +WHO CHAR(12), +WEEK INT(2), +WHAT CHAR(32) JPATH='$.EXPENSE[*].WHAT', +AMOUNT DOUBLE(8,2) JPATH='$.EXPENSE.[*].AMOUNT') +ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='mulexp%s.json'; +ALTER TABLE t1 +PARTITION BY LIST COLUMNS(WEEK) ( 
+PARTITION `3` VALUES IN(3), +PARTITION `4` VALUES IN(4), +PARTITION `5` VALUES IN(5)); +Warnings: +Warning 1105 Data repartition in 3 is unchecked +Warning 1105 Data repartition in 4 is unchecked +Warning 1105 Data repartition in 5 is unchecked +SHOW WARNINGS; +Level Code Message +Warning 1105 Data repartition in 3 is unchecked +Warning 1105 Data repartition in 4 is unchecked +Warning 1105 Data repartition in 5 is unchecked +SELECT * FROM t1; +WHO WEEK WHAT AMOUNT +Joe 3 Beer 18.00 +Joe 3 Food 12.00 +Joe 3 Food 19.00 +Joe 3 Car 20.00 +Beth 3 Beer 16.00 +Janet 3 Car 19.00 +Janet 3 Food 18.00 +Janet 3 Beer 18.00 +Joe 4 Beer 19.00 +Joe 4 Beer 16.00 +Joe 4 Food 17.00 +Joe 4 Food 17.00 +Joe 4 Beer 14.00 +Beth 4 Food 17.00 +Beth 4 Beer 15.00 +Janet 4 Car 17.00 +Joe 5 Beer 14.00 +Joe 5 Food 12.00 +Beth 5 Food 12.00 +Beth 5 Beer 20.00 +Janet 5 Beer 14.00 +Janet 5 Car 12.00 +Janet 5 Beer 19.00 +Janet 5 Food 12.00 +SELECT * FROM t1 WHERE WEEK = 4; +WHO WEEK WHAT AMOUNT +Joe 4 Beer 19.00 +Joe 4 Beer 16.00 +Joe 4 Food 17.00 +Joe 4 Food 17.00 +Joe 4 Beer 14.00 +Beth 4 Food 17.00 +Beth 4 Beer 15.00 +Janet 4 Car 17.00 +DROP TABLE t1, t2, t3, t4; diff --git a/storage/connect/mysql-test/connect/r/bson_java_2.result b/storage/connect/mysql-test/connect/r/bson_java_2.result new file mode 100644 index 00000000000..1c21fc7c54f --- /dev/null +++ b/storage/connect/mysql-test/connect/r/bson_java_2.result @@ -0,0 +1,385 @@ +set connect_enable_mongo=1; +set connect_json_all_path=0; +# +# Test the MONGO table type +# +CREATE TABLE t1 (Document varchar(1024) JPATH='*') +ENGINE=CONNECT TABLE_TYPE=BSON TABNAME=restaurants CONNECTION='mongodb://localhost:27017' LRECL=4096 +OPTION_LIST='Driver=Java,Version=2' DATA_CHARSET=utf8; +SELECT * from t1 limit 3; +Document +{"_id":{"$oid":"58ada47de5a51ddfcd5ed51c"},"address":{"building":"1007","coord":[-73.856077,40.848447],"street":"Morris Park Ave","zipcode":"10462"},"borough":"Bronx","cuisine":"Bakery","grades":[{"date":{"$date":"2014-03-03T00:00:00.000Z"},"grade":"A","score":2},{"date":{"$date":"2013-09-11T00:00:00.000Z"},"grade":"A","score":6},{"date":{"$date":"2013-01-24T00:00:00.000Z"},"grade":"A","score":10},{"date":{"$date":"2011-11-23T00:00:00.000Z"},"grade":"A","score":9},{"date":{"$date":"2011-03-10T00:00:00.000Z"},"grade":"B","score":14}],"name":"Morris Park Bake Shop","restaurant_id":"30075445"} +{"_id":{"$oid":"58ada47de5a51ddfcd5ed51d"},"address":{"building":"469","coord":[-73.961704,40.662942],"street":"Flatbush Avenue","zipcode":"11225"},"borough":"Brooklyn","cuisine":"Hamburgers","grades":[{"date":{"$date":"2014-12-30T00:00:00.000Z"},"grade":"A","score":8},{"date":{"$date":"2014-07-01T00:00:00.000Z"},"grade":"B","score":23},{"date":{"$date":"2013-04-30T00:00:00.000Z"},"grade":"A","score":12},{"date":{"$date":"2012-05-08T00:00:00.000Z"},"grade":"A","score":12}],"name":"Wendy'S","restaurant_id":"30112340"} +{"_id":{"$oid":"58ada47de5a51ddfcd5ed51e"},"address":{"building":"351","coord":[-73.98513559999999,40.7676919],"street":"West 57 Street","zipcode":"10019"},"borough":"Manhattan","cuisine":"Irish","grades":[{"date":{"$date":"2014-09-06T00:00:00.000Z"},"grade":"A","score":2},{"date":{"$date":"2013-07-22T00:00:00.000Z"},"grade":"A","score":11},{"date":{"$date":"2012-07-31T00:00:00.000Z"},"grade":"A","score":12},{"date":{"$date":"2011-12-29T00:00:00.000Z"},"grade":"A","score":12}],"name":"Dj Reynolds Pub And Restaurant","restaurant_id":"30191841"} +DROP TABLE t1; +# +# Test catfunc +# +CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=BSON TABNAME=restaurants 
CATFUNC=columns +OPTION_LIST='Depth=1,Driver=Java,Version=2' DATA_CHARSET=utf8 CONNECTION='mongodb://localhost:27017' LRECL=4096; +SELECT * from t1; +Column_Name Data_Type Type_Name Column_Size Buffer_Length Decimal_Digits Nullable Jpath +_id 1 CHAR 24 24 0 0 _id +address_building 1 CHAR 10 10 0 0 address.building +address_coord 1 CHAR 1024 1024 0 1 address.coord +address_street 1 CHAR 38 38 0 0 address.street +address_zipcode 1 CHAR 5 5 0 0 address.zipcode +borough 1 CHAR 13 13 0 0 +cuisine 1 CHAR 64 64 0 0 +grades_date 1 CHAR 1024 1024 0 1 grades.0.date +grades_grade 1 CHAR 14 14 0 1 grades.0.grade +grades_score 7 INTEGER 2 2 0 1 grades.0.score +name 1 CHAR 98 98 0 0 +restaurant_id 1 CHAR 8 8 0 0 +DROP TABLE t1; +# +# Explicit columns +# +CREATE TABLE t1 ( +_id VARCHAR(24) NOT NULL, +name VARCHAR(255) NOT NULL, +cuisine VARCHAR(255) NOT NULL, +borough VARCHAR(255) NOT NULL, +restaurant_id VARCHAR(255) NOT NULL) +ENGINE=CONNECT TABLE_TYPE=BSON TABNAME=restaurants +CONNECTION='mongodb://localhost:27017' LRECL=1024 DATA_CHARSET=utf8 +OPTION_LIST='Driver=Java,Version=2'; +SELECT * FROM t1 LIMIT 10; +_id name cuisine borough restaurant_id +58ada47de5a51ddfcd5ed51c Morris Park Bake Shop Bakery Bronx 30075445 +58ada47de5a51ddfcd5ed51d Wendy'S Hamburgers Brooklyn 30112340 +58ada47de5a51ddfcd5ed51e Dj Reynolds Pub And Restaurant Irish Manhattan 30191841 +58ada47de5a51ddfcd5ed51f Riviera Caterer American Brooklyn 40356018 +58ada47de5a51ddfcd5ed520 Tov Kosher Kitchen Jewish/Kosher Queens 40356068 +58ada47de5a51ddfcd5ed521 Brunos On The Boulevard American Queens 40356151 +58ada47de5a51ddfcd5ed522 Kosher Island Jewish/Kosher Staten Island 40356442 +58ada47de5a51ddfcd5ed523 Wilken'S Fine Food Delicatessen Brooklyn 40356483 +58ada47de5a51ddfcd5ed524 Regina Caterers American Brooklyn 40356649 +58ada47de5a51ddfcd5ed525 Taste The Tropics Ice Cream Ice Cream, Gelato, Yogurt, Ices Brooklyn 40356731 +DROP TABLE t1; +# +# Test discovery +# +CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=BSON TABNAME=restaurants +OPTION_LIST='Depth=1,Driver=Java,Version=2' CONNECTION='mongodb://localhost:27017' LRECL=4096 DATA_CHARSET=utf8; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `_id` char(24) NOT NULL `JPATH`='_id', + `address_building` char(10) NOT NULL `JPATH`='address.building', + `address_coord` varchar(1024) DEFAULT NULL `JPATH`='address.coord', + `address_street` char(38) NOT NULL `JPATH`='address.street', + `address_zipcode` char(5) NOT NULL `JPATH`='address.zipcode', + `borough` char(13) NOT NULL, + `cuisine` char(64) NOT NULL, + `grades_date` varchar(1024) DEFAULT NULL `JPATH`='grades.0.date', + `grades_grade` char(14) DEFAULT NULL `JPATH`='grades.0.grade', + `grades_score` int(2) DEFAULT NULL `JPATH`='grades.0.score', + `name` char(98) NOT NULL, + `restaurant_id` char(8) NOT NULL +) ENGINE=CONNECT DEFAULT CHARSET=latin1 CONNECTION='mongodb://localhost:27017' `TABLE_TYPE`='BSON' `TABNAME`='restaurants' `OPTION_LIST`='Depth=1,Driver=Java,Version=2' `DATA_CHARSET`='utf8' `LRECL`=4096 +SELECT * FROM t1 LIMIT 5; +_id address_building address_coord address_street address_zipcode borough cuisine grades_date grades_grade grades_score name restaurant_id +58ada47de5a51ddfcd5ed51c 1007 -73.856077, 40.848447 Morris Park Ave 10462 Bronx Bakery 2014-03-03T00:00:00.000Z A 2 Morris Park Bake Shop 30075445 +58ada47de5a51ddfcd5ed51d 469 -73.961704, 40.662942 Flatbush Avenue 11225 Brooklyn Hamburgers 2014-12-30T00:00:00.000Z A 8 Wendy'S 30112340 +58ada47de5a51ddfcd5ed51e 351 -73.98513559999999, 40.7676919 West 
57 Street 10019 Manhattan Irish 2014-09-06T00:00:00.000Z A 2 Dj Reynolds Pub And Restaurant 30191841 +58ada47de5a51ddfcd5ed51f 2780 -73.98241999999999, 40.579505 Stillwell Avenue 11224 Brooklyn American 2014-06-10T00:00:00.000Z A 5 Riviera Caterer 40356018 +58ada47de5a51ddfcd5ed520 97-22 -73.8601152, 40.7311739 63 Road 11374 Queens Jewish/Kosher 2014-11-24T00:00:00.000Z Z 20 Tov Kosher Kitchen 40356068 +DROP TABLE t1; +# +# Dropping a column +# +CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=BSON TABNAME=restaurants DATA_CHARSET=utf8 +COLIST='{"grades":0}' OPTION_LIST='Driver=Java,Version=2,level=0' CONNECTION='mongodb://localhost:27017' LRECL=4096; +SELECT * FROM t1 LIMIT 10; +_id address borough cuisine name restaurant_id +58ada47de5a51ddfcd5ed51c 1007 (-73.856077, 40.848447) Morris Park Ave 10462 Bronx Bakery Morris Park Bake Shop 30075445 +58ada47de5a51ddfcd5ed51d 469 (-73.961704, 40.662942) Flatbush Avenue 11225 Brooklyn Hamburgers Wendy'S 30112340 +58ada47de5a51ddfcd5ed51e 351 (-73.98513559999999, 40.7676919) West 57 Street 10019 Manhattan Irish Dj Reynolds Pub And Restaurant 30191841 +58ada47de5a51ddfcd5ed51f 2780 (-73.98241999999999, 40.579505) Stillwell Avenue 11224 Brooklyn American Riviera Caterer 40356018 +58ada47de5a51ddfcd5ed520 97-22 (-73.8601152, 40.7311739) 63 Road 11374 Queens Jewish/Kosher Tov Kosher Kitchen 40356068 +58ada47de5a51ddfcd5ed521 8825 (-73.8803827, 40.7643124) Astoria Boulevard 11369 Queens American Brunos On The Boulevard 40356151 +58ada47de5a51ddfcd5ed522 2206 (-74.1377286, 40.6119572) Victory Boulevard 10314 Staten Island Jewish/Kosher Kosher Island 40356442 +58ada47de5a51ddfcd5ed523 7114 (-73.9068506, 40.6199034) Avenue U 11234 Brooklyn Delicatessen Wilken'S Fine Food 40356483 +58ada47de5a51ddfcd5ed524 6409 (-74.00528899999999, 40.628886) 11 Avenue 11219 Brooklyn American Regina Caterers 40356649 +58ada47de5a51ddfcd5ed525 1839 (-73.9482609, 40.6408271) Nostrand Avenue 11226 Brooklyn Ice Cream, Gelato, Yogurt, Ices Taste The Tropics Ice Cream 40356731 +DROP TABLE t1; +# +# Specifying Jpath +# +CREATE TABLE t1 ( +_id VARCHAR(24) NOT NULL, +name VARCHAR(64) NOT NULL, +cuisine CHAR(200) NOT NULL, +borough CHAR(16) NOT NULL, +street VARCHAR(65) JPATH='address.street', +building CHAR(16) JPATH='address.building', +zipcode CHAR(5) JPATH='address.zipcode', +grade CHAR(1) JPATH='grades.0.grade', +score INT(4) NOT NULL JPATH='grades.0.score', +`date` DATE JPATH='grades.0.date', +restaurant_id VARCHAR(255) NOT NULL) +ENGINE=CONNECT TABLE_TYPE=BSON TABNAME='restaurants' DATA_CHARSET=utf8 +OPTION_LIST='Driver=Java,Version=2' CONNECTION='mongodb://localhost:27017' LRECL=4096; +SELECT * FROM t1 LIMIT 1; +_id 58ada47de5a51ddfcd5ed51c +name Morris Park Bake Shop +cuisine Bakery +borough Bronx +street Morris Park Ave +building 1007 +zipcode 10462 +grade A +score 2 +date 1970-01-01 +restaurant_id 30075445 +SELECT name, street, score, date FROM t1 LIMIT 5; +name street score date +Morris Park Bake Shop Morris Park Ave 2 1970-01-01 +Wendy'S Flatbush Avenue 8 1970-01-01 +Dj Reynolds Pub And Restaurant West 57 Street 2 1970-01-01 +Riviera Caterer Stillwell Avenue 5 1970-01-01 +Tov Kosher Kitchen 63 Road 20 1970-01-01 +SELECT name, cuisine, borough FROM t1 WHERE grade = 'A' LIMIT 10; +name cuisine borough +Morris Park Bake Shop Bakery Bronx +Wendy'S Hamburgers Brooklyn +Dj Reynolds Pub And Restaurant Irish Manhattan +Riviera Caterer American Brooklyn +Kosher Island Jewish/Kosher Staten Island +Wilken'S Fine Food Delicatessen Brooklyn +Regina Caterers American Brooklyn +Taste The 
Tropics Ice Cream Ice Cream, Gelato, Yogurt, Ices Brooklyn +Wild Asia American Bronx +C & C Catering Service American Brooklyn +SELECT COUNT(*) FROM t1 WHERE grade = 'A'; +COUNT(*) +20687 +SELECT * FROM t1 WHERE cuisine = 'English'; +_id name cuisine borough street building zipcode grade score date restaurant_id +58ada47de5a51ddfcd5ed83d Tea And Sympathy English Manhattan Greenwich Avenue 108 10011 A 8 1970-01-01 40391531 +58ada47de5a51ddfcd5ed85c Tartine English Manhattan West 11 Street 253 10014 A 11 1970-01-01 40392496 +58ada47de5a51ddfcd5ee1f3 The Park Slope Chipshop English Brooklyn 5 Avenue 383 11215 B 17 1970-01-01 40816202 +58ada47de5a51ddfcd5ee7e4 Pound And Pence English Manhattan Liberty Street 55 10005 A 7 1970-01-01 41022701 +58ada47de5a51ddfcd5ee999 Chip Shop English Brooklyn Atlantic Avenue 129 11201 A 9 1970-01-01 41076583 +58ada47ee5a51ddfcd5efe3f The Breslin Bar & Dining Room English Manhattan West 29 Street 16 10001 A 13 1970-01-01 41443706 +58ada47ee5a51ddfcd5efe99 Highlands Restaurant English Manhattan West 10 Street 150 10014 A 12 1970-01-01 41448559 +58ada47ee5a51ddfcd5f0413 The Fat Radish English Manhattan Orchard Street 17 10002 A 12 1970-01-01 41513545 +58ada47ee5a51ddfcd5f0777 Jones Wood Foundry English Manhattan East 76 Street 401 10021 A 12 1970-01-01 41557377 +58ada47ee5a51ddfcd5f0ea2 Whitehall English Manhattan Greenwich Avenue 19 10014 Z 15 1970-01-01 41625263 +58ada47ee5a51ddfcd5f1004 The Churchill Tavern English Manhattan East 28 Street 45 10016 A 13 1970-01-01 41633327 +58ada47ee5a51ddfcd5f13d5 The Monro English Brooklyn 5 Avenue 481 11215 A 7 1970-01-01 41660253 +58ada47ee5a51ddfcd5f1454 The Cock & Bull English Manhattan West 45 Street 23 10036 A 7 1970-01-01 41664704 +58ada47ee5a51ddfcd5f176e Dear Bushwick English Brooklyn Wilson Avenue 41 11237 A 12 1970-01-01 41690534 +58ada47ee5a51ddfcd5f1e91 Snowdonia Pub English Queens 32 Street 34-55 11106 A 12 1970-01-01 50000290 +58ada47ee5a51ddfcd5f2ddc Oscar'S Place English Manhattan Hudson Street 466 10014 A 10 1970-01-01 50011097 +SELECT * FROM t1 WHERE score = building; +_id name cuisine borough street building zipcode grade score date restaurant_id +DROP TABLE t1; +# +# Specifying Filter +# +CREATE TABLE t1 ( +_id CHAR(24) NOT NULL, +name CHAR(64) NOT NULL, +borough CHAR(16) NOT NULL, +restaurant_id CHAR(8) NOT NULL) +ENGINE=CONNECT TABLE_TYPE=BSON TABNAME=restaurants DATA_CHARSET=utf8 +FILTER='{"cuisine":"French","borough":{"$ne":"Manhattan"}}' +OPTION_LIST='Driver=Java,Version=2' CONNECTION='mongodb://localhost:27017' LRECL=4096; +SELECT name FROM t1 WHERE borough = 'Queens'; +name +La Baraka Restaurant +Air France Lounge +Tournesol +Winegasm +Cafe Henri +Bistro 33 +Domaine Wine Bar +Cafe Triskell +Cannelle Patisserie +La Vie +Dirty Pierres Bistro +Fresca La Crepe +Bliss 46 Bistro +Bear +Cuisine By Claudette +Paris Baguette +The Baroness Bar +Francis Cafe +Madame Sou Sou +Crepe 'N' Tearia +Aperitif Bayside Llc +DROP TABLE t1; +# +# Testing pipeline +# +CREATE TABLE t1 ( +name VARCHAR(64) NOT NULL, +borough CHAR(16) NOT NULL, +date DATETIME NOT NULL, +grade CHAR(1) NOT NULL, +score INT(4) NOT NULL) +ENGINE=CONNECT TABLE_TYPE=BSON TABNAME='restaurants' DATA_CHARSET=utf8 +COLIST='{"pipeline":[{"$match":{"cuisine":"French"}},{"$unwind":"$grades"},{"$project":{"_id":0,"name":1,"borough":1,"date":"$grades.date","grade":"$grades.grade","score":"$grades.score"}}]}' +OPTION_LIST='Driver=Java,Version=2,Pipeline=1' CONNECTION='mongodb://localhost:27017' LRECL=4096; +SELECT * FROM t1 LIMIT 10; +name borough date 
grade score +Tout Va Bien Manhattan 1970-01-01 01:33:34 B 15 +Tout Va Bien Manhattan 1970-01-01 01:33:34 A 13 +Tout Va Bien Manhattan 1970-01-01 01:33:33 C 36 +Tout Va Bien Manhattan 1970-01-01 01:33:33 B 22 +Tout Va Bien Manhattan 1970-01-01 01:33:32 C 36 +Tout Va Bien Manhattan 1970-01-01 01:33:32 C 7 +La Grenouille Manhattan 1970-01-01 01:33:34 A 10 +La Grenouille Manhattan 1970-01-01 01:33:33 A 9 +La Grenouille Manhattan 1970-01-01 01:33:32 A 13 +Le Perigord Manhattan 1970-01-01 01:33:34 B 14 +SELECT name, grade, score, date FROM t1 WHERE borough = 'Bronx'; +name grade score date +Bistro Sk A 10 1970-01-01 01:33:34 +Bistro Sk A 12 1970-01-01 01:33:34 +Bistro Sk B 18 1970-01-01 01:33:33 +DROP TABLE t1; +# +# try level 2 discovery +# +CREATE TABLE t1 +ENGINE=CONNECT TABLE_TYPE=BSON TABNAME=restaurants +FILTER='{"cuisine":"French","borough":{"$ne":"Manhattan"}}' +COLIST='{"cuisine":0}' CONNECTION='mongodb://localhost:27017' LRECL=4096 +OPTION_LIST='Driver=Java,level=2,version=2'; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `_id` char(24) NOT NULL `JPATH`='_id', + `address_building` char(10) NOT NULL `JPATH`='address.building', + `address_coord` double(18,16) DEFAULT NULL `JPATH`='address.coord.0', + `address_street` char(38) NOT NULL `JPATH`='address.street', + `address_zipcode` char(5) NOT NULL `JPATH`='address.zipcode', + `borough` char(13) NOT NULL, + `grades_date` char(24) DEFAULT NULL `JPATH`='grades.0.date', + `grades_grade` char(14) DEFAULT NULL `JPATH`='grades.0.grade', + `grades_score` int(2) DEFAULT NULL `JPATH`='grades.0.score', + `name` char(98) NOT NULL, + `restaurant_id` char(8) NOT NULL +) ENGINE=CONNECT DEFAULT CHARSET=latin1 CONNECTION='mongodb://localhost:27017' `TABLE_TYPE`='BSON' `TABNAME`='restaurants' `COLIST`='{"cuisine":0}' `FILTER`='{"cuisine":"French","borough":{"$ne":"Manhattan"}}' `OPTION_LIST`='Driver=Java,level=2,version=2' `LRECL`=4096 +SELECT name, borough, address_street, grades_score AS score FROM t1 WHERE grades_grade = 'B'; +name borough address_street score +Le Gamin Brooklyn Vanderbilt Avenue 24 +Bistro 33 Queens Ditmars Boulevard 15 +Dirty Pierres Bistro Queens Station Square 22 +Santos Anne Brooklyn Union Avenue 26 +Le Paddock Brooklyn Prospect Avenue 17 +La Crepe Et La Vie Brooklyn Foster Avenue 24 +Francis Cafe Queens Ditmars Boulevard 19 +DROP TABLE t1; +# +# try CRUD operations +# +false +CREATE TABLE t1 (_id INT(4) NOT NULL, msg CHAR(64)) +ENGINE=CONNECT TABLE_TYPE=BSON TABNAME='testcoll' +OPTION_LIST='Driver=Java,Version=2' CONNECTION='mongodb://localhost:27017' LRECL=4096; +DELETE FROM t1; +INSERT INTO t1 VALUES(0,NULL),(1,'One'),(2,'Two'),(3,'Three'); +SELECT * FROM t1; +_id msg +0 NULL +1 One +2 Two +3 Three +UPDATE t1 SET msg = 'Deux' WHERE _id = 2; +DELETE FROM t1 WHERE msg IS NULL; +SELECT * FROM t1; +_id msg +1 One +2 Deux +3 Three +DELETE FROM t1; +DROP TABLE t1; +true +# +# List states whose population is equal or more than 10 millions +# +false +CREATE TABLE t1 ( +_id char(5) NOT NULL, +city char(16) NOT NULL, +loc_0 double(12,6) NOT NULL `JPATH`='loc.0', +loc_1 char(12) NOT NULL `JPATH`='loc.1', +pop int(11) NOT NULL, +state char(2) NOT NULL) +ENGINE=CONNECT CONNECTION='mongodb://localhost:27017' TABLE_TYPE=BSON TABNAME='cities' +OPTION_LIST='Driver=Java,Version=2' CONNECTION='mongodb://localhost:27017' LRECL=4096 DATA_CHARSET='utf8'; +# Using SQL for grouping +SELECT state, sum(pop) AS totalPop FROM t1 GROUP BY state HAVING totalPop >= 10000000 ORDER BY totalPop DESC; +state totalPop +CA 29754890 +NY 
17990402 +TX 16984601 +FL 12686644 +PA 11881643 +IL 11427576 +OH 10846517 +DROP TABLE t1; +# Using a pipeline for grouping +CREATE TABLE t1 (_id CHAR(2) NOT NULL, totalPop INT(11) NOT NULL) +ENGINE=CONNECT TABLE_TYPE=BSON TABNAME='cities' DATA_CHARSET=utf8 +COLIST='{"pipeline":[{"$group":{"_id":"$state","totalPop":{"$sum":"$pop"}}},{"$match":{"totalPop":{"$gte":10000000}}},{"$sort":{"totalPop":-1}}]}' +OPTION_LIST='Driver=Java,Version=2,Pipeline=1' CONNECTION='mongodb://localhost:27017' LRECL=4096; +SELECT * FROM t1; +_id totalPop +CA 29754890 +NY 17990402 +TX 16984601 +FL 12686644 +PA 11881643 +IL 11427576 +OH 10846517 +DROP TABLE t1; +true +# +# Test making array +# +CREATE TABLE t1 ( +_id int(4) NOT NULL, +item CHAR(8) NOT NULL, +prices_0 INT(6) JPATH='prices.0', +prices_1 INT(6) JPATH='prices.1', +prices_2 INT(6) JPATH='prices.2', +prices_3 INT(6) JPATH='prices.3', +prices_4 INT(6) JPATH='prices.4') +ENGINE=CONNECT TABLE_TYPE=BSON TABNAME='testcoll' DATA_CHARSET=utf8 +OPTION_LIST='Driver=Java,Version=2' CONNECTION='mongodb://localhost:27017' LRECL=4096; +INSERT INTO t1 VALUES +(1,'journal',87,45,63,12,78), +(2,'notebook',123,456,789,NULL,NULL), +(3,'paper',5,7,3,8,NULL), +(4,'planner',25,71,NULL,44,27), +(5,'postcard',5,7,3,8,NULL); +SELECT * FROM t1; +_id item prices_0 prices_1 prices_2 prices_3 prices_4 +1 journal 87 45 63 12 78 +2 notebook 123 456 789 NULL NULL +3 paper 5 7 3 8 NULL +4 planner 25 71 NULL 44 27 +5 postcard 5 7 3 8 NULL +DROP TABLE t1; +# +# Test array aggregation +# +CREATE TABLE t1 +ENGINE=CONNECT TABLE_TYPE=BSON TABNAME='testcoll' +COLIST='{"pipeline":[{"$project":{"_id":0,"item":1,"total":{"$sum":"$prices"},"average":{"$avg":"$prices"}}}]}' +OPTION_LIST='Driver=Java,Version=2,Pipeline=YES' CONNECTION='mongodb://localhost:27017' LRECL=4096; +SELECT * FROM t1; +item total average +journal 285 57.00 +notebook 1368 456.00 +paper 23 5.75 +planner 167 41.75 +postcard 23 5.75 +DROP TABLE t1; +true +set connect_enable_mongo=0; diff --git a/storage/connect/mysql-test/connect/r/bson_java_3.result b/storage/connect/mysql-test/connect/r/bson_java_3.result new file mode 100644 index 00000000000..d198ee3faa4 --- /dev/null +++ b/storage/connect/mysql-test/connect/r/bson_java_3.result @@ -0,0 +1,385 @@ +set connect_enable_mongo=1; +set connect_json_all_path=0; +# +# Test the MONGO table type +# +CREATE TABLE t1 (Document varchar(1024) JPATH='*') +ENGINE=CONNECT TABLE_TYPE=BSON TABNAME=restaurants CONNECTION='mongodb://localhost:27017' LRECL=4096 +OPTION_LIST='Driver=Java,Version=3' DATA_CHARSET=utf8; +SELECT * from t1 limit 3; +Document +{"_id":{"$oid":"58ada47de5a51ddfcd5ed51c"},"address":{"building":"1007","coord":[-73.856077,40.848447],"street":"Morris Park Ave","zipcode":"10462"},"borough":"Bronx","cuisine":"Bakery","grades":[{"date":{"$date":1393804800000},"grade":"A","score":2},{"date":{"$date":1378857600000},"grade":"A","score":6},{"date":{"$date":1358985600000},"grade":"A","score":10},{"date":{"$date":1322006400000},"grade":"A","score":9},{"date":{"$date":1299715200000},"grade":"B","score":14}],"name":"Morris Park Bake Shop","restaurant_id":"30075445"} +{"_id":{"$oid":"58ada47de5a51ddfcd5ed51d"},"address":{"building":"469","coord":[-73.961704,40.662942],"street":"Flatbush 
Avenue","zipcode":"11225"},"borough":"Brooklyn","cuisine":"Hamburgers","grades":[{"date":{"$date":1419897600000},"grade":"A","score":8},{"date":{"$date":1404172800000},"grade":"B","score":23},{"date":{"$date":1367280000000},"grade":"A","score":12},{"date":{"$date":1336435200000},"grade":"A","score":12}],"name":"Wendy'S","restaurant_id":"30112340"} +{"_id":{"$oid":"58ada47de5a51ddfcd5ed51e"},"address":{"building":"351","coord":[-73.98513559999999,40.7676919],"street":"West 57 Street","zipcode":"10019"},"borough":"Manhattan","cuisine":"Irish","grades":[{"date":{"$date":1409961600000},"grade":"A","score":2},{"date":{"$date":1374451200000},"grade":"A","score":11},{"date":{"$date":1343692800000},"grade":"A","score":12},{"date":{"$date":1325116800000},"grade":"A","score":12}],"name":"Dj Reynolds Pub And Restaurant","restaurant_id":"30191841"} +DROP TABLE t1; +# +# Test catfunc +# +CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=BSON TABNAME=restaurants CATFUNC=columns +OPTION_LIST='Depth=1,Driver=Java,Version=3' DATA_CHARSET=utf8 CONNECTION='mongodb://localhost:27017' LRECL=4096; +SELECT * from t1; +Column_Name Data_Type Type_Name Column_Size Buffer_Length Decimal_Digits Nullable Jpath +_id 1 CHAR 24 24 0 0 _id +address_building 1 CHAR 10 10 0 0 address.building +address_coord 1 CHAR 1024 1024 0 1 address.coord +address_street 1 CHAR 38 38 0 0 address.street +address_zipcode 1 CHAR 5 5 0 0 address.zipcode +borough 1 CHAR 13 13 0 0 +cuisine 1 CHAR 64 64 0 0 +grades_date 1 CHAR 1024 1024 0 1 grades.0.date +grades_grade 1 CHAR 14 14 0 1 grades.0.grade +grades_score 7 INTEGER 2 2 0 1 grades.0.score +name 1 CHAR 98 98 0 0 +restaurant_id 1 CHAR 8 8 0 0 +DROP TABLE t1; +# +# Explicit columns +# +CREATE TABLE t1 ( +_id VARCHAR(24) NOT NULL, +name VARCHAR(255) NOT NULL, +cuisine VARCHAR(255) NOT NULL, +borough VARCHAR(255) NOT NULL, +restaurant_id VARCHAR(255) NOT NULL) +ENGINE=CONNECT TABLE_TYPE=BSON TABNAME=restaurants +CONNECTION='mongodb://localhost:27017' LRECL=1024 DATA_CHARSET=utf8 +OPTION_LIST='Driver=Java,Version=3'; +SELECT * FROM t1 LIMIT 10; +_id name cuisine borough restaurant_id +58ada47de5a51ddfcd5ed51c Morris Park Bake Shop Bakery Bronx 30075445 +58ada47de5a51ddfcd5ed51d Wendy'S Hamburgers Brooklyn 30112340 +58ada47de5a51ddfcd5ed51e Dj Reynolds Pub And Restaurant Irish Manhattan 30191841 +58ada47de5a51ddfcd5ed51f Riviera Caterer American Brooklyn 40356018 +58ada47de5a51ddfcd5ed520 Tov Kosher Kitchen Jewish/Kosher Queens 40356068 +58ada47de5a51ddfcd5ed521 Brunos On The Boulevard American Queens 40356151 +58ada47de5a51ddfcd5ed522 Kosher Island Jewish/Kosher Staten Island 40356442 +58ada47de5a51ddfcd5ed523 Wilken'S Fine Food Delicatessen Brooklyn 40356483 +58ada47de5a51ddfcd5ed524 Regina Caterers American Brooklyn 40356649 +58ada47de5a51ddfcd5ed525 Taste The Tropics Ice Cream Ice Cream, Gelato, Yogurt, Ices Brooklyn 40356731 +DROP TABLE t1; +# +# Test discovery +# +CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=BSON TABNAME=restaurants +OPTION_LIST='Depth=1,Driver=Java,Version=3' CONNECTION='mongodb://localhost:27017' LRECL=4096 DATA_CHARSET=utf8; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `_id` char(24) NOT NULL `JPATH`='_id', + `address_building` char(10) NOT NULL `JPATH`='address.building', + `address_coord` varchar(1024) DEFAULT NULL `JPATH`='address.coord', + `address_street` char(38) NOT NULL `JPATH`='address.street', + `address_zipcode` char(5) NOT NULL `JPATH`='address.zipcode', + `borough` char(13) NOT NULL, + `cuisine` char(64) NOT NULL, + `grades_date` varchar(1024) 
DEFAULT NULL `JPATH`='grades.0.date', + `grades_grade` char(14) DEFAULT NULL `JPATH`='grades.0.grade', + `grades_score` int(2) DEFAULT NULL `JPATH`='grades.0.score', + `name` char(98) NOT NULL, + `restaurant_id` char(8) NOT NULL +) ENGINE=CONNECT DEFAULT CHARSET=latin1 CONNECTION='mongodb://localhost:27017' `TABLE_TYPE`='BSON' `TABNAME`='restaurants' `OPTION_LIST`='Depth=1,Driver=Java,Version=3' `DATA_CHARSET`='utf8' `LRECL`=4096 +SELECT * FROM t1 LIMIT 5; +_id address_building address_coord address_street address_zipcode borough cuisine grades_date grades_grade grades_score name restaurant_id +58ada47de5a51ddfcd5ed51c 1007 -73.856077, 40.848447 Morris Park Ave 10462 Bronx Bakery 1393804800 A 2 Morris Park Bake Shop 30075445 +58ada47de5a51ddfcd5ed51d 469 -73.961704, 40.662942 Flatbush Avenue 11225 Brooklyn Hamburgers 1419897600 A 8 Wendy'S 30112340 +58ada47de5a51ddfcd5ed51e 351 -73.98513559999999, 40.7676919 West 57 Street 10019 Manhattan Irish 1409961600 A 2 Dj Reynolds Pub And Restaurant 30191841 +58ada47de5a51ddfcd5ed51f 2780 -73.98241999999999, 40.579505 Stillwell Avenue 11224 Brooklyn American 1402358400 A 5 Riviera Caterer 40356018 +58ada47de5a51ddfcd5ed520 97-22 -73.8601152, 40.7311739 63 Road 11374 Queens Jewish/Kosher 1416787200 Z 20 Tov Kosher Kitchen 40356068 +DROP TABLE t1; +# +# Dropping a column +# +CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=BSON TABNAME=restaurants DATA_CHARSET=utf8 +COLIST='{"grades":0}' OPTION_LIST='Driver=Java,Version=3,level=0' CONNECTION='mongodb://localhost:27017' LRECL=4096; +SELECT * FROM t1 LIMIT 10; +_id address borough cuisine name restaurant_id +58ada47de5a51ddfcd5ed51c 1007 (-73.856077, 40.848447) Morris Park Ave 10462 Bronx Bakery Morris Park Bake Shop 30075445 +58ada47de5a51ddfcd5ed51d 469 (-73.961704, 40.662942) Flatbush Avenue 11225 Brooklyn Hamburgers Wendy'S 30112340 +58ada47de5a51ddfcd5ed51e 351 (-73.98513559999999, 40.7676919) West 57 Street 10019 Manhattan Irish Dj Reynolds Pub And Restaurant 30191841 +58ada47de5a51ddfcd5ed51f 2780 (-73.98241999999999, 40.579505) Stillwell Avenue 11224 Brooklyn American Riviera Caterer 40356018 +58ada47de5a51ddfcd5ed520 97-22 (-73.8601152, 40.7311739) 63 Road 11374 Queens Jewish/Kosher Tov Kosher Kitchen 40356068 +58ada47de5a51ddfcd5ed521 8825 (-73.8803827, 40.7643124) Astoria Boulevard 11369 Queens American Brunos On The Boulevard 40356151 +58ada47de5a51ddfcd5ed522 2206 (-74.1377286, 40.6119572) Victory Boulevard 10314 Staten Island Jewish/Kosher Kosher Island 40356442 +58ada47de5a51ddfcd5ed523 7114 (-73.9068506, 40.6199034) Avenue U 11234 Brooklyn Delicatessen Wilken'S Fine Food 40356483 +58ada47de5a51ddfcd5ed524 6409 (-74.00528899999999, 40.628886) 11 Avenue 11219 Brooklyn American Regina Caterers 40356649 +58ada47de5a51ddfcd5ed525 1839 (-73.9482609, 40.6408271) Nostrand Avenue 11226 Brooklyn Ice Cream, Gelato, Yogurt, Ices Taste The Tropics Ice Cream 40356731 +DROP TABLE t1; +# +# Specifying Jpath +# +CREATE TABLE t1 ( +_id VARCHAR(24) NOT NULL, +name VARCHAR(64) NOT NULL, +cuisine CHAR(200) NOT NULL, +borough CHAR(16) NOT NULL, +street VARCHAR(65) JPATH='address.street', +building CHAR(16) JPATH='address.building', +zipcode CHAR(5) JPATH='address.zipcode', +grade CHAR(1) JPATH='grades.0.grade', +score INT(4) NOT NULL JPATH='grades.0.score', +`date` DATE JPATH='grades.0.date', +restaurant_id VARCHAR(255) NOT NULL) +ENGINE=CONNECT TABLE_TYPE=BSON TABNAME='restaurants' DATA_CHARSET=utf8 +OPTION_LIST='Driver=Java,Version=3' CONNECTION='mongodb://localhost:27017' LRECL=4096; +SELECT * FROM t1 LIMIT 1; 
+_id 58ada47de5a51ddfcd5ed51c +name Morris Park Bake Shop +cuisine Bakery +borough Bronx +street Morris Park Ave +building 1007 +zipcode 10462 +grade A +score 2 +date 2014-03-03 +restaurant_id 30075445 +SELECT name, street, score, date FROM t1 LIMIT 5; +name street score date +Morris Park Bake Shop Morris Park Ave 2 2014-03-03 +Wendy'S Flatbush Avenue 8 2014-12-30 +Dj Reynolds Pub And Restaurant West 57 Street 2 2014-09-06 +Riviera Caterer Stillwell Avenue 5 2014-06-10 +Tov Kosher Kitchen 63 Road 20 2014-11-24 +SELECT name, cuisine, borough FROM t1 WHERE grade = 'A' LIMIT 10; +name cuisine borough +Morris Park Bake Shop Bakery Bronx +Wendy'S Hamburgers Brooklyn +Dj Reynolds Pub And Restaurant Irish Manhattan +Riviera Caterer American Brooklyn +Kosher Island Jewish/Kosher Staten Island +Wilken'S Fine Food Delicatessen Brooklyn +Regina Caterers American Brooklyn +Taste The Tropics Ice Cream Ice Cream, Gelato, Yogurt, Ices Brooklyn +Wild Asia American Bronx +C & C Catering Service American Brooklyn +SELECT COUNT(*) FROM t1 WHERE grade = 'A'; +COUNT(*) +20687 +SELECT * FROM t1 WHERE cuisine = 'English'; +_id name cuisine borough street building zipcode grade score date restaurant_id +58ada47de5a51ddfcd5ed83d Tea And Sympathy English Manhattan Greenwich Avenue 108 10011 A 8 2014-10-23 40391531 +58ada47de5a51ddfcd5ed85c Tartine English Manhattan West 11 Street 253 10014 A 11 2014-08-14 40392496 +58ada47de5a51ddfcd5ee1f3 The Park Slope Chipshop English Brooklyn 5 Avenue 383 11215 B 17 2014-09-29 40816202 +58ada47de5a51ddfcd5ee7e4 Pound And Pence English Manhattan Liberty Street 55 10005 A 7 2014-02-11 41022701 +58ada47de5a51ddfcd5ee999 Chip Shop English Brooklyn Atlantic Avenue 129 11201 A 9 2014-10-08 41076583 +58ada47ee5a51ddfcd5efe3f The Breslin Bar & Dining Room English Manhattan West 29 Street 16 10001 A 13 2014-06-09 41443706 +58ada47ee5a51ddfcd5efe99 Highlands Restaurant English Manhattan West 10 Street 150 10014 A 12 2014-10-22 41448559 +58ada47ee5a51ddfcd5f0413 The Fat Radish English Manhattan Orchard Street 17 10002 A 12 2014-07-26 41513545 +58ada47ee5a51ddfcd5f0777 Jones Wood Foundry English Manhattan East 76 Street 401 10021 A 12 2014-12-03 41557377 +58ada47ee5a51ddfcd5f0ea2 Whitehall English Manhattan Greenwich Avenue 19 10014 Z 15 2015-01-16 41625263 +58ada47ee5a51ddfcd5f1004 The Churchill Tavern English Manhattan East 28 Street 45 10016 A 13 2014-08-27 41633327 +58ada47ee5a51ddfcd5f13d5 The Monro English Brooklyn 5 Avenue 481 11215 A 7 2014-06-03 41660253 +58ada47ee5a51ddfcd5f1454 The Cock & Bull English Manhattan West 45 Street 23 10036 A 7 2014-08-07 41664704 +58ada47ee5a51ddfcd5f176e Dear Bushwick English Brooklyn Wilson Avenue 41 11237 A 12 2014-12-27 41690534 +58ada47ee5a51ddfcd5f1e91 Snowdonia Pub English Queens 32 Street 34-55 11106 A 12 2014-10-28 50000290 +58ada47ee5a51ddfcd5f2ddc Oscar'S Place English Manhattan Hudson Street 466 10014 A 10 2014-08-18 50011097 +SELECT * FROM t1 WHERE score = building; +_id name cuisine borough street building zipcode grade score date restaurant_id +DROP TABLE t1; +# +# Specifying Filter +# +CREATE TABLE t1 ( +_id CHAR(24) NOT NULL, +name CHAR(64) NOT NULL, +borough CHAR(16) NOT NULL, +restaurant_id CHAR(8) NOT NULL) +ENGINE=CONNECT TABLE_TYPE=BSON TABNAME=restaurants DATA_CHARSET=utf8 +FILTER='{"cuisine":"French","borough":{"$ne":"Manhattan"}}' +OPTION_LIST='Driver=Java,Version=3' CONNECTION='mongodb://localhost:27017' LRECL=4096; +SELECT name FROM t1 WHERE borough = 'Queens'; +name +La Baraka Restaurant +Air France Lounge +Tournesol 
+Winegasm +Cafe Henri +Bistro 33 +Domaine Wine Bar +Cafe Triskell +Cannelle Patisserie +La Vie +Dirty Pierres Bistro +Fresca La Crepe +Bliss 46 Bistro +Bear +Cuisine By Claudette +Paris Baguette +The Baroness Bar +Francis Cafe +Madame Sou Sou +Crepe 'N' Tearia +Aperitif Bayside Llc +DROP TABLE t1; +# +# Testing pipeline +# +CREATE TABLE t1 ( +name VARCHAR(64) NOT NULL, +borough CHAR(16) NOT NULL, +date DATETIME NOT NULL, +grade CHAR(1) NOT NULL, +score INT(4) NOT NULL) +ENGINE=CONNECT TABLE_TYPE=BSON TABNAME='restaurants' DATA_CHARSET=utf8 +COLIST='{"pipeline":[{"$match":{"cuisine":"French"}},{"$unwind":"$grades"},{"$project":{"_id":0,"name":1,"borough":1,"date":"$grades.date","grade":"$grades.grade","score":"$grades.score"}}]}' +OPTION_LIST='Driver=Java,Version=3,Pipeline=1' CONNECTION='mongodb://localhost:27017' LRECL=4096; +SELECT * FROM t1 LIMIT 10; +name borough date grade score +Tout Va Bien Manhattan 2014-11-10 01:00:00 B 15 +Tout Va Bien Manhattan 2014-04-03 02:00:00 A 13 +Tout Va Bien Manhattan 2013-07-17 02:00:00 C 36 +Tout Va Bien Manhattan 2013-02-06 01:00:00 B 22 +Tout Va Bien Manhattan 2012-07-16 02:00:00 C 36 +Tout Va Bien Manhattan 2012-03-08 01:00:00 C 7 +La Grenouille Manhattan 2014-04-09 02:00:00 A 10 +La Grenouille Manhattan 2013-03-05 01:00:00 A 9 +La Grenouille Manhattan 2012-02-02 01:00:00 A 13 +Le Perigord Manhattan 2014-07-14 02:00:00 B 14 +SELECT name, grade, score, date FROM t1 WHERE borough = 'Bronx'; +name grade score date +Bistro Sk A 10 2014-11-21 01:00:00 +Bistro Sk A 12 2014-02-19 01:00:00 +Bistro Sk B 18 2013-06-12 02:00:00 +DROP TABLE t1; +# +# try level 2 discovery +# +CREATE TABLE t1 +ENGINE=CONNECT TABLE_TYPE=BSON TABNAME=restaurants +FILTER='{"cuisine":"French","borough":{"$ne":"Manhattan"}}' +COLIST='{"cuisine":0}' CONNECTION='mongodb://localhost:27017' LRECL=4096 +OPTION_LIST='Driver=Java,level=2,version=3'; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `_id` char(24) NOT NULL `JPATH`='_id', + `address_building` char(10) NOT NULL `JPATH`='address.building', + `address_coord` double(18,16) DEFAULT NULL `JPATH`='address.coord.0', + `address_street` char(38) NOT NULL `JPATH`='address.street', + `address_zipcode` char(5) NOT NULL `JPATH`='address.zipcode', + `borough` char(13) NOT NULL, + `grades_date` bigint(13) DEFAULT NULL `JPATH`='grades.0.date', + `grades_grade` char(14) DEFAULT NULL `JPATH`='grades.0.grade', + `grades_score` int(2) DEFAULT NULL `JPATH`='grades.0.score', + `name` char(98) NOT NULL, + `restaurant_id` char(8) NOT NULL +) ENGINE=CONNECT DEFAULT CHARSET=latin1 CONNECTION='mongodb://localhost:27017' `TABLE_TYPE`='BSON' `TABNAME`='restaurants' `COLIST`='{"cuisine":0}' `FILTER`='{"cuisine":"French","borough":{"$ne":"Manhattan"}}' `OPTION_LIST`='Driver=Java,level=2,version=3' `LRECL`=4096 +SELECT name, borough, address_street, grades_score AS score FROM t1 WHERE grades_grade = 'B'; +name borough address_street score +Le Gamin Brooklyn Vanderbilt Avenue 24 +Bistro 33 Queens Ditmars Boulevard 15 +Dirty Pierres Bistro Queens Station Square 22 +Santos Anne Brooklyn Union Avenue 26 +Le Paddock Brooklyn Prospect Avenue 17 +La Crepe Et La Vie Brooklyn Foster Avenue 24 +Francis Cafe Queens Ditmars Boulevard 19 +DROP TABLE t1; +# +# try CRUD operations +# +false +CREATE TABLE t1 (_id INT(4) NOT NULL, msg CHAR(64)) +ENGINE=CONNECT TABLE_TYPE=BSON TABNAME='testcoll' +OPTION_LIST='Driver=Java,Version=3' CONNECTION='mongodb://localhost:27017' LRECL=4096; +DELETE FROM t1; +INSERT INTO t1 
VALUES(0,NULL),(1,'One'),(2,'Two'),(3,'Three'); +SELECT * FROM t1; +_id msg +0 NULL +1 One +2 Two +3 Three +UPDATE t1 SET msg = 'Deux' WHERE _id = 2; +DELETE FROM t1 WHERE msg IS NULL; +SELECT * FROM t1; +_id msg +1 One +2 Deux +3 Three +DELETE FROM t1; +DROP TABLE t1; +true +# +# List states whose population is equal or more than 10 millions +# +false +CREATE TABLE t1 ( +_id char(5) NOT NULL, +city char(16) NOT NULL, +loc_0 double(12,6) NOT NULL `JPATH`='loc.0', +loc_1 char(12) NOT NULL `JPATH`='loc.1', +pop int(11) NOT NULL, +state char(2) NOT NULL) +ENGINE=CONNECT CONNECTION='mongodb://localhost:27017' TABLE_TYPE=BSON TABNAME='cities' +OPTION_LIST='Driver=Java,Version=3' CONNECTION='mongodb://localhost:27017' LRECL=4096 DATA_CHARSET='utf8'; +# Using SQL for grouping +SELECT state, sum(pop) AS totalPop FROM t1 GROUP BY state HAVING totalPop >= 10000000 ORDER BY totalPop DESC; +state totalPop +CA 29754890 +NY 17990402 +TX 16984601 +FL 12686644 +PA 11881643 +IL 11427576 +OH 10846517 +DROP TABLE t1; +# Using a pipeline for grouping +CREATE TABLE t1 (_id CHAR(2) NOT NULL, totalPop INT(11) NOT NULL) +ENGINE=CONNECT TABLE_TYPE=BSON TABNAME='cities' DATA_CHARSET=utf8 +COLIST='{"pipeline":[{"$group":{"_id":"$state","totalPop":{"$sum":"$pop"}}},{"$match":{"totalPop":{"$gte":10000000}}},{"$sort":{"totalPop":-1}}]}' +OPTION_LIST='Driver=Java,Version=3,Pipeline=1' CONNECTION='mongodb://localhost:27017' LRECL=4096; +SELECT * FROM t1; +_id totalPop +CA 29754890 +NY 17990402 +TX 16984601 +FL 12686644 +PA 11881643 +IL 11427576 +OH 10846517 +DROP TABLE t1; +true +# +# Test making array +# +CREATE TABLE t1 ( +_id int(4) NOT NULL, +item CHAR(8) NOT NULL, +prices_0 INT(6) JPATH='prices.0', +prices_1 INT(6) JPATH='prices.1', +prices_2 INT(6) JPATH='prices.2', +prices_3 INT(6) JPATH='prices.3', +prices_4 INT(6) JPATH='prices.4') +ENGINE=CONNECT TABLE_TYPE=BSON TABNAME='testcoll' DATA_CHARSET=utf8 +OPTION_LIST='Driver=Java,Version=3' CONNECTION='mongodb://localhost:27017' LRECL=4096; +INSERT INTO t1 VALUES +(1,'journal',87,45,63,12,78), +(2,'notebook',123,456,789,NULL,NULL), +(3,'paper',5,7,3,8,NULL), +(4,'planner',25,71,NULL,44,27), +(5,'postcard',5,7,3,8,NULL); +SELECT * FROM t1; +_id item prices_0 prices_1 prices_2 prices_3 prices_4 +1 journal 87 45 63 12 78 +2 notebook 123 456 789 NULL NULL +3 paper 5 7 3 8 NULL +4 planner 25 71 NULL 44 27 +5 postcard 5 7 3 8 NULL +DROP TABLE t1; +# +# Test array aggregation +# +CREATE TABLE t1 +ENGINE=CONNECT TABLE_TYPE=BSON TABNAME='testcoll' +COLIST='{"pipeline":[{"$project":{"_id":0,"item":1,"total":{"$sum":"$prices"},"average":{"$avg":"$prices"}}}]}' +OPTION_LIST='Driver=Java,Version=3,Pipeline=YES' CONNECTION='mongodb://localhost:27017' LRECL=4096; +SELECT * FROM t1; +item total average +journal 285 57.00 +notebook 1368 456.00 +paper 23 5.75 +planner 167 41.75 +postcard 23 5.75 +DROP TABLE t1; +true +set connect_enable_mongo=0; diff --git a/storage/connect/mysql-test/connect/r/bson_mongo_c.result b/storage/connect/mysql-test/connect/r/bson_mongo_c.result new file mode 100644 index 00000000000..83bf7cd1974 --- /dev/null +++ b/storage/connect/mysql-test/connect/r/bson_mongo_c.result @@ -0,0 +1,385 @@ +set connect_enable_mongo=1; +set connect_json_all_path=0; +# +# Test the MONGO table type +# +CREATE TABLE t1 (Document varchar(1024) JPATH='*') +ENGINE=CONNECT TABLE_TYPE=BSON TABNAME=restaurants CONNECTION='mongodb://localhost:27017' LRECL=1024 +OPTION_LIST='Driver=C,Version=0' DATA_CHARSET=utf8; +SELECT * from t1 limit 3; +Document 
+{"_id":{"$oid":"58ada47de5a51ddfcd5ed51c"},"address":{"building":"1007","coord":[-73.8560769999999991,40.8484470000000002],"street":"Morris Park Ave","zipcode":"10462"},"borough":"Bronx","cuisine":"Bakery","grades":[{"date":{"$date":1393804800000},"grade":"A","score":2},{"date":{"$date":1378857600000},"grade":"A","score":6},{"date":{"$date":1358985600000},"grade":"A","score":10},{"date":{"$date":1322006400000},"grade":"A","score":9},{"date":{"$date":1299715200000},"grade":"B","score":14}],"name":"Morris Park Bake Shop","restaurant_id":"30075445"} +{"_id":{"$oid":"58ada47de5a51ddfcd5ed51d"},"address":{"building":"469","coord":[-73.9617039999999974,40.6629420000000010],"street":"Flatbush Avenue","zipcode":"11225"},"borough":"Brooklyn","cuisine":"Hamburgers","grades":[{"date":{"$date":1419897600000},"grade":"A","score":8},{"date":{"$date":1404172800000},"grade":"B","score":23},{"date":{"$date":1367280000000},"grade":"A","score":12},{"date":{"$date":1336435200000},"grade":"A","score":12}],"name":"Wendy'S","restaurant_id":"30112340"} +{"_id":{"$oid":"58ada47de5a51ddfcd5ed51e"},"address":{"building":"351","coord":[-73.9851355999999925,40.7676919000000026],"street":"West 57 Street","zipcode":"10019"},"borough":"Manhattan","cuisine":"Irish","grades":[{"date":{"$date":1409961600000},"grade":"A","score":2},{"date":{"$date":1374451200000},"grade":"A","score":11},{"date":{"$date":1343692800000},"grade":"A","score":12},{"date":{"$date":1325116800000},"grade":"A","score":12}],"name":"Dj Reynolds Pub And Restaurant","restaurant_id":"30191841"} +DROP TABLE t1; +# +# Test catfunc +# +CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=BSON TABNAME=restaurants CATFUNC=columns +OPTION_LIST='Depth=1,Driver=C,Version=0' DATA_CHARSET=utf8 CONNECTION='mongodb://localhost:27017' LRECL=1024; +SELECT * from t1; +Column_Name Data_Type Type_Name Column_Size Buffer_Length Decimal_Digits Nullable Jpath +_id 1 CHAR 24 24 0 0 _id +address_building 1 CHAR 10 10 0 0 address.building +address_coord 1 CHAR 1024 1024 0 1 address.coord +address_street 1 CHAR 38 38 0 0 address.street +address_zipcode 1 CHAR 5 5 0 0 address.zipcode +borough 1 CHAR 13 13 0 0 +cuisine 1 CHAR 64 64 0 0 +grades_date 1 CHAR 1024 1024 0 1 grades.0.date +grades_grade 1 CHAR 14 14 0 1 grades.0.grade +grades_score 7 INTEGER 2 2 0 1 grades.0.score +name 1 CHAR 98 98 0 0 +restaurant_id 1 CHAR 8 8 0 0 +DROP TABLE t1; +# +# Explicit columns +# +CREATE TABLE t1 ( +_id VARCHAR(24) NOT NULL, +name VARCHAR(255) NOT NULL, +cuisine VARCHAR(255) NOT NULL, +borough VARCHAR(255) NOT NULL, +restaurant_id VARCHAR(255) NOT NULL) +ENGINE=CONNECT TABLE_TYPE=BSON TABNAME=restaurants +CONNECTION='mongodb://localhost:27017' LRECL=1024 DATA_CHARSET=utf8 +OPTION_LIST='Driver=C,Version=0'; +SELECT * FROM t1 LIMIT 10; +_id name cuisine borough restaurant_id +58ada47de5a51ddfcd5ed51c Morris Park Bake Shop Bakery Bronx 30075445 +58ada47de5a51ddfcd5ed51d Wendy'S Hamburgers Brooklyn 30112340 +58ada47de5a51ddfcd5ed51e Dj Reynolds Pub And Restaurant Irish Manhattan 30191841 +58ada47de5a51ddfcd5ed51f Riviera Caterer American Brooklyn 40356018 +58ada47de5a51ddfcd5ed520 Tov Kosher Kitchen Jewish/Kosher Queens 40356068 +58ada47de5a51ddfcd5ed521 Brunos On The Boulevard American Queens 40356151 +58ada47de5a51ddfcd5ed522 Kosher Island Jewish/Kosher Staten Island 40356442 +58ada47de5a51ddfcd5ed523 Wilken'S Fine Food Delicatessen Brooklyn 40356483 +58ada47de5a51ddfcd5ed524 Regina Caterers American Brooklyn 40356649 +58ada47de5a51ddfcd5ed525 Taste The Tropics Ice Cream Ice Cream, Gelato, Yogurt, Ices 
Brooklyn 40356731 +DROP TABLE t1; +# +# Test discovery +# +CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=BSON TABNAME=restaurants +OPTION_LIST='Depth=1,Driver=C,Version=0' CONNECTION='mongodb://localhost:27017' LRECL=1024 DATA_CHARSET=utf8; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `_id` char(24) NOT NULL `JPATH`='_id', + `address_building` char(10) NOT NULL `JPATH`='address.building', + `address_coord` varchar(1024) DEFAULT NULL `JPATH`='address.coord', + `address_street` char(38) NOT NULL `JPATH`='address.street', + `address_zipcode` char(5) NOT NULL `JPATH`='address.zipcode', + `borough` char(13) NOT NULL, + `cuisine` char(64) NOT NULL, + `grades_date` varchar(1024) DEFAULT NULL `JPATH`='grades.0.date', + `grades_grade` char(14) DEFAULT NULL `JPATH`='grades.0.grade', + `grades_score` int(2) DEFAULT NULL `JPATH`='grades.0.score', + `name` char(98) NOT NULL, + `restaurant_id` char(8) NOT NULL +) ENGINE=CONNECT DEFAULT CHARSET=latin1 CONNECTION='mongodb://localhost:27017' `TABLE_TYPE`='BSON' `TABNAME`='restaurants' `OPTION_LIST`='Depth=1,Driver=C,Version=0' `DATA_CHARSET`='utf8' `LRECL`=1024 +SELECT * FROM t1 LIMIT 5; +_id address_building address_coord address_street address_zipcode borough cuisine grades_date grades_grade grades_score name restaurant_id +58ada47de5a51ddfcd5ed51c 1007 -73.8560769999999991, 40.8484470000000002 Morris Park Ave 10462 Bronx Bakery 1393804800 A 2 Morris Park Bake Shop 30075445 +58ada47de5a51ddfcd5ed51d 469 -73.9617039999999974, 40.6629420000000010 Flatbush Avenue 11225 Brooklyn Hamburgers 1419897600 A 8 Wendy'S 30112340 +58ada47de5a51ddfcd5ed51e 351 -73.9851355999999925, 40.7676919000000026 West 57 Street 10019 Manhattan Irish 1409961600 A 2 Dj Reynolds Pub And Restaurant 30191841 +58ada47de5a51ddfcd5ed51f 2780 -73.9824199999999905, 40.5795049999999975 Stillwell Avenue 11224 Brooklyn American 1402358400 A 5 Riviera Caterer 40356018 +58ada47de5a51ddfcd5ed520 97-22 -73.8601151999999956, 40.7311739000000017 63 Road 11374 Queens Jewish/Kosher 1416787200 Z 20 Tov Kosher Kitchen 40356068 +DROP TABLE t1; +# +# Dropping a column +# +CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=BSON TABNAME=restaurants DATA_CHARSET=utf8 +COLIST='{"projection":{"grades":0}}' OPTION_LIST='Driver=C,Version=0,level=0' CONNECTION='mongodb://localhost:27017' LRECL=1024; +SELECT * FROM t1 LIMIT 10; +_id address borough cuisine name restaurant_id +58ada47de5a51ddfcd5ed51c 1007 (-73.8560769999999991, 40.8484470000000002) Morris Park Ave 10462 Bronx Bakery Morris Park Bake Shop 30075445 +58ada47de5a51ddfcd5ed51d 469 (-73.9617039999999974, 40.6629420000000010) Flatbush Avenue 11225 Brooklyn Hamburgers Wendy'S 30112340 +58ada47de5a51ddfcd5ed51e 351 (-73.9851355999999925, 40.7676919000000026) West 57 Street 10019 Manhattan Irish Dj Reynolds Pub And Restaurant 30191841 +58ada47de5a51ddfcd5ed51f 2780 (-73.9824199999999905, 40.5795049999999975) Stillwell Avenue 11224 Brooklyn American Riviera Caterer 40356018 +58ada47de5a51ddfcd5ed520 97-22 (-73.8601151999999956, 40.7311739000000017) 63 Road 11374 Queens Jewish/Kosher Tov Kosher Kitchen 40356068 +58ada47de5a51ddfcd5ed521 8825 (-73.8803826999999984, 40.7643124000000014) Astoria Boulevard 11369 Queens American Brunos On The Boulevard 40356151 +58ada47de5a51ddfcd5ed522 2206 (-74.1377286000000026, 40.6119571999999991) Victory Boulevard 10314 Staten Island Jewish/Kosher Kosher Island 40356442 +58ada47de5a51ddfcd5ed523 7114 (-73.9068505999999985, 40.6199033999999983) Avenue U 11234 Brooklyn Delicatessen Wilken'S Fine Food 40356483 
+58ada47de5a51ddfcd5ed524 6409 (-74.0052889999999906, 40.6288860000000014) 11 Avenue 11219 Brooklyn American Regina Caterers 40356649 +58ada47de5a51ddfcd5ed525 1839 (-73.9482608999999940, 40.6408271000000028) Nostrand Avenue 11226 Brooklyn Ice Cream, Gelato, Yogurt, Ices Taste The Tropics Ice Cream 40356731 +DROP TABLE t1; +# +# Specifying Jpath +# +CREATE TABLE t1 ( +_id VARCHAR(24) NOT NULL, +name VARCHAR(64) NOT NULL, +cuisine CHAR(200) NOT NULL, +borough CHAR(16) NOT NULL, +street VARCHAR(65) JPATH='address.street', +building CHAR(16) JPATH='address.building', +zipcode CHAR(5) JPATH='address.zipcode', +grade CHAR(1) JPATH='grades.0.grade', +score INT(4) NOT NULL JPATH='grades.0.score', +`date` DATE JPATH='grades.0.date', +restaurant_id VARCHAR(255) NOT NULL) +ENGINE=CONNECT TABLE_TYPE=BSON TABNAME='restaurants' DATA_CHARSET=utf8 +OPTION_LIST='Driver=C,Version=0' CONNECTION='mongodb://localhost:27017' LRECL=1024; +SELECT * FROM t1 LIMIT 1; +_id 58ada47de5a51ddfcd5ed51c +name Morris Park Bake Shop +cuisine Bakery +borough Bronx +street Morris Park Ave +building 1007 +zipcode 10462 +grade A +score 2 +date 2014-03-03 +restaurant_id 30075445 +SELECT name, street, score, date FROM t1 LIMIT 5; +name street score date +Morris Park Bake Shop Morris Park Ave 2 2014-03-03 +Wendy'S Flatbush Avenue 8 2014-12-30 +Dj Reynolds Pub And Restaurant West 57 Street 2 2014-09-06 +Riviera Caterer Stillwell Avenue 5 2014-06-10 +Tov Kosher Kitchen 63 Road 20 2014-11-24 +SELECT name, cuisine, borough FROM t1 WHERE grade = 'A' LIMIT 10; +name cuisine borough +Morris Park Bake Shop Bakery Bronx +Wendy'S Hamburgers Brooklyn +Dj Reynolds Pub And Restaurant Irish Manhattan +Riviera Caterer American Brooklyn +Kosher Island Jewish/Kosher Staten Island +Wilken'S Fine Food Delicatessen Brooklyn +Regina Caterers American Brooklyn +Taste The Tropics Ice Cream Ice Cream, Gelato, Yogurt, Ices Brooklyn +Wild Asia American Bronx +C & C Catering Service American Brooklyn +SELECT COUNT(*) FROM t1 WHERE grade = 'A'; +COUNT(*) +20687 +SELECT * FROM t1 WHERE cuisine = 'English'; +_id name cuisine borough street building zipcode grade score date restaurant_id +58ada47de5a51ddfcd5ed83d Tea And Sympathy English Manhattan Greenwich Avenue 108 10011 A 8 2014-10-23 40391531 +58ada47de5a51ddfcd5ed85c Tartine English Manhattan West 11 Street 253 10014 A 11 2014-08-14 40392496 +58ada47de5a51ddfcd5ee1f3 The Park Slope Chipshop English Brooklyn 5 Avenue 383 11215 B 17 2014-09-29 40816202 +58ada47de5a51ddfcd5ee7e4 Pound And Pence English Manhattan Liberty Street 55 10005 A 7 2014-02-11 41022701 +58ada47de5a51ddfcd5ee999 Chip Shop English Brooklyn Atlantic Avenue 129 11201 A 9 2014-10-08 41076583 +58ada47ee5a51ddfcd5efe3f The Breslin Bar & Dining Room English Manhattan West 29 Street 16 10001 A 13 2014-06-09 41443706 +58ada47ee5a51ddfcd5efe99 Highlands Restaurant English Manhattan West 10 Street 150 10014 A 12 2014-10-22 41448559 +58ada47ee5a51ddfcd5f0413 The Fat Radish English Manhattan Orchard Street 17 10002 A 12 2014-07-26 41513545 +58ada47ee5a51ddfcd5f0777 Jones Wood Foundry English Manhattan East 76 Street 401 10021 A 12 2014-12-03 41557377 +58ada47ee5a51ddfcd5f0ea2 Whitehall English Manhattan Greenwich Avenue 19 10014 Z 15 2015-01-16 41625263 +58ada47ee5a51ddfcd5f1004 The Churchill Tavern English Manhattan East 28 Street 45 10016 A 13 2014-08-27 41633327 +58ada47ee5a51ddfcd5f13d5 The Monro English Brooklyn 5 Avenue 481 11215 A 7 2014-06-03 41660253 +58ada47ee5a51ddfcd5f1454 The Cock & Bull English Manhattan West 45 Street 23 10036 A 7 
2014-08-07 41664704 +58ada47ee5a51ddfcd5f176e Dear Bushwick English Brooklyn Wilson Avenue 41 11237 A 12 2014-12-27 41690534 +58ada47ee5a51ddfcd5f1e91 Snowdonia Pub English Queens 32 Street 34-55 11106 A 12 2014-10-28 50000290 +58ada47ee5a51ddfcd5f2ddc Oscar'S Place English Manhattan Hudson Street 466 10014 A 10 2014-08-18 50011097 +SELECT * FROM t1 WHERE score = building; +_id name cuisine borough street building zipcode grade score date restaurant_id +DROP TABLE t1; +# +# Specifying Filter +# +CREATE TABLE t1 ( +_id CHAR(24) NOT NULL, +name CHAR(64) NOT NULL, +borough CHAR(16) NOT NULL, +restaurant_id CHAR(8) NOT NULL) +ENGINE=CONNECT TABLE_TYPE=BSON TABNAME=restaurants DATA_CHARSET=utf8 +FILTER='{"cuisine":"French","borough":{"$ne":"Manhattan"}}' +OPTION_LIST='Driver=C,Version=0' CONNECTION='mongodb://localhost:27017' LRECL=1024; +SELECT name FROM t1 WHERE borough = 'Queens'; +name +La Baraka Restaurant +Air France Lounge +Tournesol +Winegasm +Cafe Henri +Bistro 33 +Domaine Wine Bar +Cafe Triskell +Cannelle Patisserie +La Vie +Dirty Pierres Bistro +Fresca La Crepe +Bliss 46 Bistro +Bear +Cuisine By Claudette +Paris Baguette +The Baroness Bar +Francis Cafe +Madame Sou Sou +Crepe 'N' Tearia +Aperitif Bayside Llc +DROP TABLE t1; +# +# Testing pipeline +# +CREATE TABLE t1 ( +name VARCHAR(64) NOT NULL, +borough CHAR(16) NOT NULL, +date DATETIME NOT NULL, +grade CHAR(1) NOT NULL, +score INT(4) NOT NULL) +ENGINE=CONNECT TABLE_TYPE=BSON TABNAME='restaurants' DATA_CHARSET=utf8 +COLIST='{"pipeline":[{"$match":{"cuisine":"French"}},{"$unwind":"$grades"},{"$project":{"_id":0,"name":1,"borough":1,"date":"$grades.date","grade":"$grades.grade","score":"$grades.score"}}]}' +OPTION_LIST='Driver=C,Version=0,Pipeline=1' CONNECTION='mongodb://localhost:27017' LRECL=1024; +SELECT * FROM t1 LIMIT 10; +name borough date grade score +Tout Va Bien Manhattan 2014-11-10 01:00:00 B 15 +Tout Va Bien Manhattan 2014-04-03 02:00:00 A 13 +Tout Va Bien Manhattan 2013-07-17 02:00:00 C 36 +Tout Va Bien Manhattan 2013-02-06 01:00:00 B 22 +Tout Va Bien Manhattan 2012-07-16 02:00:00 C 36 +Tout Va Bien Manhattan 2012-03-08 01:00:00 C 7 +La Grenouille Manhattan 2014-04-09 02:00:00 A 10 +La Grenouille Manhattan 2013-03-05 01:00:00 A 9 +La Grenouille Manhattan 2012-02-02 01:00:00 A 13 +Le Perigord Manhattan 2014-07-14 02:00:00 B 14 +SELECT name, grade, score, date FROM t1 WHERE borough = 'Bronx'; +name grade score date +Bistro Sk A 10 2014-11-21 01:00:00 +Bistro Sk A 12 2014-02-19 01:00:00 +Bistro Sk B 18 2013-06-12 02:00:00 +DROP TABLE t1; +# +# try level 2 discovery +# +CREATE TABLE t1 +ENGINE=CONNECT TABLE_TYPE=BSON TABNAME=restaurants +FILTER='{"cuisine":"French","borough":{"$ne":"Manhattan"}}' +COLIST='{"projection":{"cuisine":0}}' CONNECTION='mongodb://localhost:27017' LRECL=1024 +OPTION_LIST='Driver=C,level=2,version=0'; +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `_id` char(24) NOT NULL `JPATH`='_id', + `address_building` char(10) NOT NULL `JPATH`='address.building', + `address_coord` double(21,16) DEFAULT NULL `JPATH`='address.coord.0', + `address_street` char(38) NOT NULL `JPATH`='address.street', + `address_zipcode` char(5) NOT NULL `JPATH`='address.zipcode', + `borough` char(13) NOT NULL, + `grades_date` bigint(13) DEFAULT NULL `JPATH`='grades.0.date', + `grades_grade` char(14) DEFAULT NULL `JPATH`='grades.0.grade', + `grades_score` int(2) DEFAULT NULL `JPATH`='grades.0.score', + `name` char(98) NOT NULL, + `restaurant_id` char(8) NOT NULL +) ENGINE=CONNECT DEFAULT CHARSET=latin1 
CONNECTION='mongodb://localhost:27017' `TABLE_TYPE`='BSON' `TABNAME`='restaurants' `COLIST`='{"projection":{"cuisine":0}}' `FILTER`='{"cuisine":"French","borough":{"$ne":"Manhattan"}}' `OPTION_LIST`='Driver=C,level=2,version=0' `LRECL`=1024 +SELECT name, borough, address_street, grades_score AS score FROM t1 WHERE grades_grade = 'B'; +name borough address_street score +Le Gamin Brooklyn Vanderbilt Avenue 24 +Bistro 33 Queens Ditmars Boulevard 15 +Dirty Pierres Bistro Queens Station Square 22 +Santos Anne Brooklyn Union Avenue 26 +Le Paddock Brooklyn Prospect Avenue 17 +La Crepe Et La Vie Brooklyn Foster Avenue 24 +Francis Cafe Queens Ditmars Boulevard 19 +DROP TABLE t1; +# +# try CRUD operations +# +false +CREATE TABLE t1 (_id INT(4) NOT NULL, msg CHAR(64)) +ENGINE=CONNECT TABLE_TYPE=BSON TABNAME='testcoll' +OPTION_LIST='Driver=C,Version=0' CONNECTION='mongodb://localhost:27017' LRECL=1024; +DELETE FROM t1; +INSERT INTO t1 VALUES(0,NULL),(1,'One'),(2,'Two'),(3,'Three'); +SELECT * FROM t1; +_id msg +0 NULL +1 One +2 Two +3 Three +UPDATE t1 SET msg = 'Deux' WHERE _id = 2; +DELETE FROM t1 WHERE msg IS NULL; +SELECT * FROM t1; +_id msg +1 One +2 Deux +3 Three +DELETE FROM t1; +DROP TABLE t1; +true +# +# List states whose population is equal or more than 10 millions +# +false +CREATE TABLE t1 ( +_id char(5) NOT NULL, +city char(16) NOT NULL, +loc_0 double(12,6) NOT NULL `JPATH`='loc.0', +loc_1 char(12) NOT NULL `JPATH`='loc.1', +pop int(11) NOT NULL, +state char(2) NOT NULL) +ENGINE=CONNECT CONNECTION='mongodb://localhost:27017' TABLE_TYPE=BSON TABNAME='cities' +OPTION_LIST='Driver=C,Version=0' CONNECTION='mongodb://localhost:27017' LRECL=1024 DATA_CHARSET='utf8'; +# Using SQL for grouping +SELECT state, sum(pop) AS totalPop FROM t1 GROUP BY state HAVING totalPop >= 10000000 ORDER BY totalPop DESC; +state totalPop +CA 29754890 +NY 17990402 +TX 16984601 +FL 12686644 +PA 11881643 +IL 11427576 +OH 10846517 +DROP TABLE t1; +# Using a pipeline for grouping +CREATE TABLE t1 (_id CHAR(2) NOT NULL, totalPop INT(11) NOT NULL) +ENGINE=CONNECT TABLE_TYPE=BSON TABNAME='cities' DATA_CHARSET=utf8 +COLIST='{"pipeline":[{"$group":{"_id":"$state","totalPop":{"$sum":"$pop"}}},{"$match":{"totalPop":{"$gte":10000000}}},{"$sort":{"totalPop":-1}}]}' +OPTION_LIST='Driver=C,Version=0,Pipeline=1' CONNECTION='mongodb://localhost:27017' LRECL=1024; +SELECT * FROM t1; +_id totalPop +CA 29754890 +NY 17990402 +TX 16984601 +FL 12686644 +PA 11881643 +IL 11427576 +OH 10846517 +DROP TABLE t1; +true +# +# Test making array +# +CREATE TABLE t1 ( +_id int(4) NOT NULL, +item CHAR(8) NOT NULL, +prices_0 INT(6) JPATH='prices.0', +prices_1 INT(6) JPATH='prices.1', +prices_2 INT(6) JPATH='prices.2', +prices_3 INT(6) JPATH='prices.3', +prices_4 INT(6) JPATH='prices.4') +ENGINE=CONNECT TABLE_TYPE=BSON TABNAME='testcoll' DATA_CHARSET=utf8 +OPTION_LIST='Driver=C,Version=0' CONNECTION='mongodb://localhost:27017' LRECL=1024; +INSERT INTO t1 VALUES +(1,'journal',87,45,63,12,78), +(2,'notebook',123,456,789,NULL,NULL), +(3,'paper',5,7,3,8,NULL), +(4,'planner',25,71,NULL,44,27), +(5,'postcard',5,7,3,8,NULL); +SELECT * FROM t1; +_id item prices_0 prices_1 prices_2 prices_3 prices_4 +1 journal 87 45 63 12 78 +2 notebook 123 456 789 NULL NULL +3 paper 5 7 3 8 NULL +4 planner 25 71 44 27 NULL +5 postcard 5 7 3 8 NULL +DROP TABLE t1; +# +# Test array aggregation +# +CREATE TABLE t1 +ENGINE=CONNECT TABLE_TYPE=BSON TABNAME='testcoll' +COLIST='{"pipeline":[{"$project":{"_id":0,"item":1,"total":{"$sum":"$prices"},"average":{"$avg":"$prices"}}}]}' 
+OPTION_LIST='Driver=C,Version=0,Pipeline=YES' CONNECTION='mongodb://localhost:27017' LRECL=1024; +SELECT * FROM t1; +item total average +journal 285 57.00 +notebook 1368 456.00 +paper 23 5.75 +planner 167 41.75 +postcard 23 5.75 +DROP TABLE t1; +true +set connect_enable_mongo=0; diff --git a/storage/connect/mysql-test/connect/t/bson.test b/storage/connect/mysql-test/connect/t/bson.test new file mode 100644 index 00000000000..ab38cab73fc --- /dev/null +++ b/storage/connect/mysql-test/connect/t/bson.test @@ -0,0 +1,294 @@ +--source include/not_embedded.inc +--source include/have_partition.inc + +let $MYSQLD_DATADIR= `select @@datadir`; + +--copy_file $MTR_SUITE_DIR/std_data/biblio.json $MYSQLD_DATADIR/test/biblio.json +--copy_file $MTR_SUITE_DIR/std_data/bib0.json $MYSQLD_DATADIR/test/bib0.json +--copy_file $MTR_SUITE_DIR/std_data/expense.json $MYSQLD_DATADIR/test/expense.json +--copy_file $MTR_SUITE_DIR/std_data/mulexp3.json $MYSQLD_DATADIR/test/mulexp3.json +--copy_file $MTR_SUITE_DIR/std_data/mulexp4.json $MYSQLD_DATADIR/test/mulexp4.json +--copy_file $MTR_SUITE_DIR/std_data/mulexp5.json $MYSQLD_DATADIR/test/mulexp5.json + +--echo # +--echo # Testing doc samples +--echo # +CREATE TABLE t1 +( + ISBN CHAR(15), + LANG CHAR(2), + SUBJECT CHAR(32), + AUTHOR CHAR(64), + TITLE CHAR(32), + TRANSLATION CHAR(32), + TRANSLATOR CHAR(80), + PUBLISHER CHAR(32), + DATEPUB int(4) +) ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='biblio.json'; +SELECT * FROM t1; +DROP TABLE t1; + + +--echo # +--echo # Testing Jpath. Get the number of authors +--echo # +CREATE TABLE t1 +( + ISBN CHAR(15), + Language CHAR(2) JPATH='$.LANG', + Subject CHAR(32) JPATH='$.SUBJECT', + Authors INT(2) JPATH='$.AUTHOR[#]', + Title CHAR(32) JPATH='$.TITLE', + Translation CHAR(32) JPATH='$.TRANSLATION', + Translator CHAR(80) JPATH='$.TRANSLATOR', + Publisher CHAR(20) JPATH='$.PUBLISHER.NAME', + Location CHAR(16) JPATH='$.PUBLISHER.PLACE', + Year int(4) JPATH='$.DATEPUB' +) +ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='biblio.json'; +SELECT * FROM t1; +DROP TABLE t1; + +--echo # +--echo # Concatenates the authors +--echo # +CREATE TABLE t1 +( + ISBN CHAR(15), + Language CHAR(2) JPATH='$.LANG', + Subject CHAR(32) JPATH='$.SUBJECT', + AuthorFN CHAR(128) JPATH='$.AUTHOR[" and "].FIRSTNAME', + AuthorLN CHAR(128) JPATH='$.AUTHOR[" and "].LASTNAME', + Title CHAR(32) JPATH='$.TITLE', + Translation CHAR(32) JPATH='$.TRANSLATION', + Translator CHAR(80) JPATH='$.TRANSLATOR', + Publisher CHAR(20) JPATH='$.PUBLISHER.NAME', + Location CHAR(16) JPATH='$.PUBLISHER.PLACE', + Year int(4) JPATH='$.DATEPUB' +) +ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='biblio.json'; +SELECT * FROM t1; +DROP TABLE t1; + +--echo # +--echo # Testing expanding authors +--echo # +CREATE TABLE t1 +( + ISBN CHAR(15), + Language CHAR(2) JPATH='$.LANG', + Subject CHAR(32) JPATH='$.SUBJECT', + AuthorFN CHAR(128) JPATH='$.AUTHOR[*].FIRSTNAME', + AuthorLN CHAR(128) JPATH='$.AUTHOR[*].LASTNAME', + Title CHAR(32) JPATH='$.TITLE', + Translation CHAR(32) JPATH='$.TRANSLATION', + Translator CHAR(80) JPATH='$.TRANSLATOR', + Publisher CHAR(20) JPATH='$.PUBLISHER.NAME', + Location CHAR(16) JPATH='$.PUBLISHER.PLACE', + Year int(4) JPATH='$.DATEPUB' +) +ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='biblio.json'; +SELECT * FROM t1; +UPDATE t1 SET AuthorFN = 'Philippe' WHERE AuthorLN = 'Knab'; +SELECT * FROM t1 WHERE ISBN = '9782212090819'; + +--echo # +--echo # To add an author a new table must be created +--echo # +CREATE TABLE t2 ( +FIRSTNAME CHAR(32), +LASTNAME CHAR(32)) +ENGINE=CONNECT 
TABLE_TYPE=BSON FILE_NAME='biblio.json' OPTION_LIST='Object=$[1].AUTHOR'; +SELECT * FROM t2; +INSERT INTO t2 VALUES('Charles','Dickens'); +SELECT * FROM t1; +DROP TABLE t1; +DROP TABLE t2; + +--echo # +--echo # Check the biblio file has the good format +--echo # +CREATE TABLE t1 +( + line char(255) +) +ENGINE=CONNECT TABLE_TYPE=DOS FILE_NAME='biblio.json'; +SELECT * FROM t1; +DROP TABLE t1; + +--echo # +--echo # Testing a pretty=0 file +--echo # +CREATE TABLE t1 +( + ISBN CHAR(15) NOT NULL, + Language CHAR(2) JPATH='$.LANG', + Subject CHAR(32) JPATH='$.SUBJECT', + AuthorFN CHAR(128) JPATH='$.AUTHOR[*].FIRSTNAME', + AuthorLN CHAR(128) JPATH='$.AUTHOR[*].LASTNAME', + Title CHAR(32) JPATH='$.TITLE', + Translation CHAR(32) JPATH='$.TRANSLATED.PREFIX', + TranslatorFN CHAR(80) JPATH='$.TRANSLATED.TRANSLATOR.FIRSTNAME', + TranslatorLN CHAR(80) JPATH='$.TRANSLATED.TRANSLATOR.LASTNAME', + Publisher CHAR(20) JPATH='$.PUBLISHER.NAME', + Location CHAR(16) JPATH='$.PUBLISHER.PLACE', + Year int(4) JPATH='$.DATEPUB', + INDEX IX(ISBN) +) +ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='bib0.json' LRECL=320 OPTION_LIST='Pretty=0'; +SHOW INDEX FROM t1; +SELECT * FROM t1; +DESCRIBE SELECT * FROM t1 WHERE ISBN = '9782212090819'; +--error ER_GET_ERRMSG +UPDATE t1 SET AuthorFN = 'Philippe' WHERE ISBN = '9782212090819'; +DROP TABLE t1; + +--echo # +--echo # A file with 2 arrays +--echo # +CREATE TABLE t1 ( +WHO CHAR(12), +WEEK INT(2) JPATH='$.WEEK[*].NUMBER', +WHAT CHAR(32) JPATH='$.WEEK[].EXPENSE["+"].WHAT', +AMOUNT DOUBLE(8,2) JPATH='$.WEEK[].EXPENSE[+].AMOUNT') +ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='expense.json'; +SELECT * FROM t1; +DROP TABLE t1; + +--echo # +--echo # Now it can be fully expanded +--echo # +CREATE TABLE t1 ( +WHO CHAR(12), +WEEK INT(2) JPATH='$.WEEK[*].NUMBER', +WHAT CHAR(32) JPATH='$.WEEK[*].EXPENSE[*].WHAT', +AMOUNT DOUBLE(8,2) JPATH='$.WEEK[*].EXPENSE[*].AMOUNT') +ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='expense.json'; +#--error ER_GET_ERRMSG +SELECT * FROM t1; +DROP TABLE t1; + +--echo # +--echo # A table showing many calculated results +--echo # +CREATE TABLE t1 ( +WHO CHAR(12) NOT NULL, +WEEKS CHAR(12) NOT NULL JPATH='$.WEEK[", "].NUMBER', +SUMS CHAR(64) NOT NULL JPATH='$.WEEK["+"].EXPENSE[+].AMOUNT', +SUM DOUBLE(8,2) NOT NULL JPATH='$.WEEK[+].EXPENSE[+].AMOUNT', +AVGS CHAR(64) NOT NULL JPATH='$.WEEK["+"].EXPENSE[!].AMOUNT', +SUMAVG DOUBLE(8,2) NOT NULL JPATH='$.WEEK[+].EXPENSE[!].AMOUNT', +AVGSUM DOUBLE(8,2) NOT NULL JPATH='$.WEEK[!].EXPENSE[+].AMOUNT', +AVGAVG DOUBLE(8,2) NOT NULL JPATH='$.WEEK[!].EXPENSE[!].AMOUNT', +AVERAGE DOUBLE(8,2) NOT NULL JPATH='$.WEEK[!].EXPENSE[*].AMOUNT') +ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='expense.json'; +SELECT * FROM t1; +DROP TABLE t1; + +--echo # +--echo # Expand expense in 3 one week tables +--echo # +CREATE TABLE t2 ( +WHO CHAR(12), +WEEK INT(2) JPATH='$.WEEK[0].NUMBER', +WHAT CHAR(32) JPATH='$.WEEK[0].EXPENSE[*].WHAT', +AMOUNT DOUBLE(8,2) JPATH='$.WEEK[0].EXPENSE[*].AMOUNT') +ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='expense.json'; +SELECT * FROM t2; + +CREATE TABLE t3 ( +WHO CHAR(12), +WEEK INT(2) JPATH='$.WEEK[1].NUMBER', +WHAT CHAR(32) JPATH='$.WEEK[1].EXPENSE[*].WHAT', +AMOUNT DOUBLE(8,2) JPATH='$.WEEK[1].EXPENSE[*].AMOUNT') +ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='expense.json'; +SELECT * FROM t3; + +CREATE TABLE t4 ( +WHO CHAR(12), +WEEK INT(2) JPATH='$.WEEK[2].NUMBER', +WHAT CHAR(32) JPATH='$.WEEK[2].EXPENSE[*].WHAT', +AMOUNT DOUBLE(8,2) JPATH='$.WEEK[2].EXPENSE[*].AMOUNT') +ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='expense.json'; 
+SELECT * FROM t4; + +--echo # +--echo # The expanded table is made as a TBL table +--echo # +CREATE TABLE t1 ( +WHO CHAR(12), +WEEK INT(2), +WHAT CHAR(32), +AMOUNT DOUBLE(8,2)) +ENGINE=CONNECT TABLE_TYPE=TBL TABLE_LIST='t2,t3,t4'; +SELECT * FROM t1; +DROP TABLE t1, t2, t3, t4; + +--echo # +--echo # Three partial JSON tables +--echo # +CREATE TABLE t2 ( +WHO CHAR(12), +WEEK INT(2), +WHAT CHAR(32) JPATH='$.EXPENSE[*].WHAT', +AMOUNT DOUBLE(8,2) JPATH='$.EXPENSE.[*].AMOUNT') +ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='mulexp3.json'; +SELECT * FROM t2; + +CREATE TABLE t3 ( +WHO CHAR(12), +WEEK INT(2), +WHAT CHAR(32) JPATH='$.EXPENSE[*].WHAT', +AMOUNT DOUBLE(8,2) JPATH='$.EXPENSE.[*].AMOUNT') +ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='mulexp4.json'; +SELECT * FROM t3; + +CREATE TABLE t4 ( +WHO CHAR(12), +WEEK INT(2), +WHAT CHAR(32) JPATH='$.EXPENSE[*].WHAT', +AMOUNT DOUBLE(8,2) JPATH='$.EXPENSE.[*].AMOUNT') +ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='mulexp5.json'; +SELECT * FROM t4; + +--echo # +--echo # The complete table can be a multiple JSON table +--echo # +CREATE TABLE t1 ( +WHO CHAR(12), +WEEK INT(2), +WHAT CHAR(32) JPATH='$.EXPENSE[*].WHAT', +AMOUNT DOUBLE(8,2) JPATH='$.EXPENSE.[*].AMOUNT') +ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='mulexp*.json' MULTIPLE=1; +SELECT * FROM t1 ORDER BY WHO, WEEK, WHAT, AMOUNT; +DROP TABLE t1; + +--echo # +--echo # Or also a partition JSON table +--echo # +CREATE TABLE t1 ( +WHO CHAR(12), +WEEK INT(2), +WHAT CHAR(32) JPATH='$.EXPENSE[*].WHAT', +AMOUNT DOUBLE(8,2) JPATH='$.EXPENSE.[*].AMOUNT') +ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='mulexp%s.json'; +ALTER TABLE t1 +PARTITION BY LIST COLUMNS(WEEK) ( +PARTITION `3` VALUES IN(3), +PARTITION `4` VALUES IN(4), +PARTITION `5` VALUES IN(5)); +SHOW WARNINGS; +SELECT * FROM t1; +SELECT * FROM t1 WHERE WEEK = 4; +DROP TABLE t1, t2, t3, t4; + +# +# Clean up +# +--remove_file $MYSQLD_DATADIR/test/biblio.json +--remove_file $MYSQLD_DATADIR/test/bib0.dnx +--remove_file $MYSQLD_DATADIR/test/bib0.json +--remove_file $MYSQLD_DATADIR/test/expense.json +--remove_file $MYSQLD_DATADIR/test/mulexp3.json +--remove_file $MYSQLD_DATADIR/test/mulexp4.json +--remove_file $MYSQLD_DATADIR/test/mulexp5.json diff --git a/storage/connect/mysql-test/connect/t/bson_java_2.test b/storage/connect/mysql-test/connect/t/bson_java_2.test new file mode 100644 index 00000000000..2188d9c2c91 --- /dev/null +++ b/storage/connect/mysql-test/connect/t/bson_java_2.test @@ -0,0 +1,14 @@ +-- source jdbconn.inc +-- source mongo.inc + +--disable_query_log +eval SET GLOBAL connect_class_path='$MTR_SUITE_DIR/std_data/Mongo2.jar'; +set connect_json_all_path=0; +--enable_query_log +let $DRV= Java; +let $VERS= 2; +let $TYPE= BSON; +let $CONN= CONNECTION='mongodb://localhost:27017' LRECL=4096; + +-- source mongo_test.inc +-- source jdbconn_cleanup.inc diff --git a/storage/connect/mysql-test/connect/t/bson_java_3.test b/storage/connect/mysql-test/connect/t/bson_java_3.test new file mode 100644 index 00000000000..e7dd90b3563 --- /dev/null +++ b/storage/connect/mysql-test/connect/t/bson_java_3.test @@ -0,0 +1,14 @@ +-- source jdbconn.inc +-- source mongo.inc + +--disable_query_log +eval SET GLOBAL connect_class_path='$MTR_SUITE_DIR/std_data/Mongo3.jar'; +set connect_json_all_path=0; +--enable_query_log +let $DRV= Java; +let $VERS= 3; +let $TYPE= BSON; +let $CONN= CONNECTION='mongodb://localhost:27017' LRECL=4096; + +-- source mongo_test.inc +-- source jdbconn_cleanup.inc diff --git a/storage/connect/mysql-test/connect/t/bson_mongo_c.test 
b/storage/connect/mysql-test/connect/t/bson_mongo_c.test new file mode 100644 index 00000000000..938d77c7c95 --- /dev/null +++ b/storage/connect/mysql-test/connect/t/bson_mongo_c.test @@ -0,0 +1,10 @@ +-- source mongo.inc + +let $DRV= C; +let $VERS= 0; +let $PROJ= {"projection":; +let $ENDP= }; +let $TYPE= BSON; +let $CONN= CONNECTION='mongodb://localhost:27017' LRECL=1024; + +-- source mongo_test.inc diff --git a/storage/connect/mysql-test/connect/t/mongo_test.inc b/storage/connect/mysql-test/connect/t/mongo_test.inc index 0a9c80f5ba5..6e7c78e81ac 100644 --- a/storage/connect/mysql-test/connect/t/mongo_test.inc +++ b/storage/connect/mysql-test/connect/t/mongo_test.inc @@ -126,6 +126,10 @@ IF ($TYPE == JSON) { SELECT name, borough, address_street, grades_score AS score FROM t1 WHERE grades_grade = 'B'; } +IF ($TYPE == BSON) +{ +SELECT name, borough, address_street, grades_score AS score FROM t1 WHERE grades_grade = 'B'; +} DROP TABLE t1; --echo # diff --git a/storage/connect/plugutil.cpp b/storage/connect/plugutil.cpp index c33639bf744..69d258d9fd3 100644 --- a/storage/connect/plugutil.cpp +++ b/storage/connect/plugutil.cpp @@ -474,8 +474,10 @@ bool AllocSarea(PGLOBAL g, size_t size) if (!g->Sarea) { sprintf(g->Message, MSG(MALLOC_ERROR), "malloc"); g->Sarea_Size = 0; - } else - g->Sarea_Size = size; + } else { + g->Sarea_Size = size; + PlugSubSet(g->Sarea, size); + } // endif Sarea #if defined(DEVELOPMENT) if (true) { @@ -484,7 +486,6 @@ bool AllocSarea(PGLOBAL g, size_t size) #endif if (g->Sarea) { htrc("Work area of %zd allocated at %p\n", size, g->Sarea); - PlugSubSet(g->Sarea, size); } else htrc("SareaAlloc: %s\n", g->Message); @@ -624,7 +625,7 @@ size_t MakeOff(void* memp, void* ptr) #if defined(_DEBUG) || defined(DEVELOPMENT) if (ptr <= memp) { fprintf(stderr, "ptr %p <= memp %p", ptr, memp); - throw 999; + DoThrow(999); } // endif ptr #endif // _DEBUG || DEVELOPMENT return (size_t)((char*)ptr - (size_t)memp); @@ -633,4 +634,4 @@ size_t MakeOff(void* memp, void* ptr) } /* end of MakeOff */ - /*--------------------- End of PLUGUTIL program -----------------------*/ +/*---------------------- End of PLUGUTIL program ------------------------*/ diff --git a/storage/connect/tabbson.cpp b/storage/connect/tabbson.cpp index 69dd5749122..309eef2e292 100644 --- a/storage/connect/tabbson.cpp +++ b/storage/connect/tabbson.cpp @@ -246,7 +246,8 @@ int BSONDISC::GetColumns(PGLOBAL g, PCSZ db, PCSZ dsn, PTOS topt) return 0; bp = tjsp->Bp; - bdp = tjsp->GetDoc() ? bp->GetBson(tjsp->GetDoc()) : NULL; +// bdp = tjsp->GetDoc() ? bp->GetBson(tjsp->GetDoc()) : NULL; + bdp = tjsp->GetDoc(); jsp = bdp ? 
bp->GetArrayValue(bdp, 0) : NULL; } else { if (!((tdp->Lrecl = GetIntegerTableOption(g, topt, "Lrecl", 0)))) { @@ -312,7 +313,7 @@ int BSONDISC::GetColumns(PGLOBAL g, PCSZ db, PCSZ dsn, PTOS topt) case RC_FX: goto err; default: - jsp = bp->FindRow(g); + jsp = tjnp->Row; } // endswitch ReadDB } // endif pretty @@ -362,7 +363,7 @@ int BSONDISC::GetColumns(PGLOBAL g, PCSZ db, PCSZ dsn, PTOS topt) case RC_FX: goto err; default: - jsp = bp->FindRow(g); + jsp = tjnp->Row; } // endswitch ReadDB } else @@ -400,21 +401,25 @@ bool BSONDISC::Find(PGLOBAL g, PBVAL jvp, PCSZ key, int j) jcol.Type = (JTYP)jvp->Type; switch (jvp->Type) { - case TYPE_STRG: - case TYPE_DTM: - jcol.Len = (int)strlen(bp->GetString(jvp)); - break; - case TYPE_INTG: - case TYPE_BINT: - case TYPE_DBL: - jcol.Len = (int)strlen(bp->GetString(jvp, buf)); - break; - case TYPE_BOOL: - jcol.Len = 1; - break; - default: - jcol.Len = 0; - break; + case TYPE_STRG: + case TYPE_DTM: + jcol.Len = (int)strlen(bp->GetString(jvp)); + break; + case TYPE_INTG: + case TYPE_BINT: + jcol.Len = (int)strlen(bp->GetString(jvp, buf)); + break; + case TYPE_DBL: + case TYPE_FLOAT: + jcol.Len = (int)strlen(bp->GetString(jvp, buf)); + jcol.Scale = jvp->Nd; + break; + case TYPE_BOOL: + jcol.Len = 1; + break; + default: + jcol.Len = 0; + break; } // endswitch Type jcol.Scale = jvp->Nd; @@ -513,7 +518,8 @@ bool BSONDISC::Find(PGLOBAL g, PBVAL jvp, PCSZ key, int j) return false; } // end of Find -void BSONDISC::AddColumn(PGLOBAL g) { +void BSONDISC::AddColumn(PGLOBAL g) +{ bool b = fmt[bf] != 0; // True if formatted // Check whether this column was already found diff --git a/storage/connect/tabjson.cpp b/storage/connect/tabjson.cpp index 4bddef1940e..0ef281f2aae 100644 --- a/storage/connect/tabjson.cpp +++ b/storage/connect/tabjson.cpp @@ -310,7 +310,8 @@ int JSONDISC::GetColumns(PGLOBAL g, PCSZ db, PCSZ dsn, PTOS topt) case RC_FX: goto err; default: - jsp = tjnp->FindRow(g); +// jsp = tjnp->FindRow(g); // FindRow was done in ReadDB + jsp = tjnp->Row; } // endswitch ReadDB } // endif pretty @@ -360,7 +361,8 @@ int JSONDISC::GetColumns(PGLOBAL g, PCSZ db, PCSZ dsn, PTOS topt) case RC_FX: goto err; default: - jsp = tjnp->FindRow(g); +// jsp = tjnp->FindRow(g); + jsp = tjnp->Row; } // endswitch ReadDB } else @@ -397,26 +399,26 @@ bool JSONDISC::Find(PGLOBAL g, PJVAL jvp, PCSZ key, int j) jcol.Type = jvp->DataType; - switch (jvp->DataType) { - case TYPE_STRG: - case TYPE_DTM: - jcol.Len = (int)strlen(jvp->Strp); - break; - case TYPE_INTG: - case TYPE_BINT: - jcol.Len = (int)strlen(jvp->GetString(g)); - break; - case TYPE_DBL: - jcol.Len = (int)strlen(jvp->GetString(g)); - jcol.Scale = jvp->Nd; - break; - case TYPE_BOOL: - jcol.Len = 1; - break; - default: - jcol.Len = 0; - break; - } // endswitch Type + switch (jvp->DataType) { + case TYPE_STRG: + case TYPE_DTM: + jcol.Len = (int)strlen(jvp->Strp); + break; + case TYPE_INTG: + case TYPE_BINT: + jcol.Len = (int)strlen(jvp->GetString(g)); + break; + case TYPE_DBL: + jcol.Len = (int)strlen(jvp->GetString(g)); + jcol.Scale = jvp->Nd; + break; + case TYPE_BOOL: + jcol.Len = 1; + break; + default: + jcol.Len = 0; + break; + } // endswitch Type jcol.Scale = jvp->Nd; jcol.Cbn = jvp->DataType == TYPE_NULL; From 02bb11709d7baddb74bfe9285cd542b818da1862 Mon Sep 17 00:00:00 2001 From: Olivier Bertrand Date: Sun, 10 Jan 2021 00:14:37 +0100 Subject: [PATCH 046/150] - add the test on REST --- storage/connect/CMakeLists.txt | 20 ++++++++++++------- .../connect/mysql-test/connect/r/rest.result | 19 ++++++++++++++++++ 
storage/connect/mysql-test/connect/t/rest.inc | 17 ++++++++++++++++ .../connect/mysql-test/connect/t/rest.test | 17 ++++++++++++++++ 4 files changed, 66 insertions(+), 7 deletions(-) create mode 100644 storage/connect/mysql-test/connect/r/rest.result create mode 100644 storage/connect/mysql-test/connect/t/rest.inc create mode 100644 storage/connect/mysql-test/connect/t/rest.test diff --git a/storage/connect/CMakeLists.txt b/storage/connect/CMakeLists.txt index e8ffeebafcc..c83cc584e9d 100644 --- a/storage/connect/CMakeLists.txt +++ b/storage/connect/CMakeLists.txt @@ -88,7 +88,7 @@ ENDIF(UNIX) OPTION(CONNECT_WITH_BSON "Compile CONNECT storage engine with BSON support" ON) IF(CONNECT_WITH_BSON) - SET(CONNECT_SOURCES ${CONNECT_SOURCES} + SET(CONNECT_SOURCES ${CONNECT_SOURCES} bson.cpp tabbson.cpp bsonudf.cpp bson.h tabbson.h bsonudf.h) add_definitions(-DBSON_SUPPORT) ENDIF(CONNECT_WITH_BSON) @@ -335,25 +335,31 @@ IF(CONNECT_WITH_REST) SET(CONNECT_SOURCES ${CONNECT_SOURCES} tabrest.cpp tabrest.h) add_definitions(-DREST_SUPPORT) FIND_PACKAGE(cpprestsdk QUIET) - IF (cpprestsdk_FOUND) - IF(UNIX) + IF (UNIX) + execute_process(COMMAND lsb_release -is + OUTPUT_VARIABLE LSB_RELEASE_ID_SHORT + OUTPUT_STRIP_TRAILING_WHITESPACE) + MESSAGE (STATUS ${LSB_RELEASE_ID_SHORT}) + ENDIF(UNIX) + IF (cpprestsdk_FOUND OR ("${LSB_RELEASE_ID_SHORT}" STREQUAL "Ubuntu")) + IF(UNIX) # INCLUDE_DIRECTORIES(${CPPRESTSDK_INCLUDE_DIR}) # If needed edit next line to set the path to libcpprest.so SET(REST_LIBRARY -lcpprest) MESSAGE (STATUS ${REST_LIBRARY}) - ELSE(NOT UNIX) + ELSE(NOT UNIX) # Next line sets debug compile mode matching cpprest_2_10d.dll # when it was binary installed (can be change later in Visual Studio) # Comment it out if not needed depending on your cpprestsdk installation. - SET(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} /MDd") - ENDIF(UNIX) + SET(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} /MDd") + ENDIF(UNIX) # IF(REST_LIBRARY) why this? how about Windows SET(CONNECT_SOURCES ${CONNECT_SOURCES} restget.cpp) add_definitions(-DREST_SOURCE) # ENDIF() # ELSE(NOT cpprestsdk_FOUND) # MESSAGE(STATUS "=====> cpprestsdk package not found") - ENDIF (cpprestsdk_FOUND) + ENDIF (cpprestsdk_FOUND OR ("${LSB_RELEASE_ID_SHORT}" STREQUAL "Ubuntu")) ENDIF(CONNECT_WITH_REST) # diff --git a/storage/connect/mysql-test/connect/r/rest.result b/storage/connect/mysql-test/connect/r/rest.result new file mode 100644 index 00000000000..3c4ec80ce71 --- /dev/null +++ b/storage/connect/mysql-test/connect/r/rest.result @@ -0,0 +1,19 @@ +# +# Testing REST query +# +CREATE TABLE t1 +ENGINE=CONNECT DATA_CHARSET=utf8 TABLE_TYPE=JSON FILE_NAME='users.json' +HTTP='http://jsonplaceholder.typicode.com/users'; +SELECT * FROM t1; +id name username email address_street address_suite address_city address_zipcode address_geo_lat address_geo_lng phone website company_name company_catchPhrase company_bs +1 Leanne Graham Bret Sincere@april.biz Kulas Light Apt. 
556 Gwenborough 92998-3874 -37.3159 81.1496 1-770-736-8031 x56442 hildegard.org Romaguera-Crona Multi-layered client-server neural-net harness real-time e-markets +2 Ervin Howell Antonette Shanna@melissa.tv Victor Plains Suite 879 Wisokyburgh 90566-7771 -43.9509 -34.4618 010-692-6593 x09125 anastasia.net Deckow-Crist Proactive didactic contingency synergize scalable supply-chains +3 Clementine Bauch Samantha Nathan@yesenia.net Douglas Extension Suite 847 McKenziehaven 59590-4157 -68.6102 -47.0653 1-463-123-4447 ramiro.info Romaguera-Jacobson Face to face bifurcated interface e-enable strategic applications +4 Patricia Lebsack Karianne Julianne.OConner@kory.org Hoeger Mall Apt. 692 South Elvis 53919-4257 29.4572 -164.2990 493-170-9623 x156 kale.biz Robel-Corkery Multi-tiered zero tolerance productivity transition cutting-edge web services +5 Chelsey Dietrich Kamren Lucio_Hettinger@annie.ca Skiles Walks Suite 351 Roscoeview 33263 -31.8129 62.5342 (254)954-1289 demarco.info Keebler LLC User-centric fault-tolerant solution revolutionize end-to-end systems +6 Mrs. Dennis Schulist Leopoldo_Corkery Karley_Dach@jasper.info Norberto Crossing Apt. 950 South Christy 23505-1337 -71.4197 71.7478 1-477-935-8478 x6430 ola.org Considine-Lockman Synchronised bottom-line interface e-enable innovative applications +7 Kurtis Weissnat Elwyn.Skiles Telly.Hoeger@billy.biz Rex Trail Suite 280 Howemouth 58804-1099 24.8918 21.8984 210.067.6132 elvis.io Johns Group Configurable multimedia task-force generate enterprise e-tailers +8 Nicholas Runolfsdottir V Maxime_Nienow Sherwood@rosamond.me Ellsworth Summit Suite 729 Aliyaview 45169 -14.3990 -120.7677 586.493.6943 x140 jacynthe.com Abernathy Group Implemented secondary concept e-enable extensible e-tailers +9 Glenna Reichert Delphine Chaim_McDermott@dana.io Dayna Park Suite 449 Bartholomebury 76495-3109 24.6463 -168.8889 (775)976-6794 x41206 conrad.com Yost and Sons Switchable contextually-based project aggregate real-time technologies +10 Clementina DuBuque Moriah.Stanton Rey.Padberg@karina.biz Kattie Turnpike Suite 198 Lebsackbury 31428-2261 -38.2386 57.2232 024-648-3804 ambrose.net Hoeger LLC Centralized empowering task-force target end-to-end models +DROP TABLE t1; diff --git a/storage/connect/mysql-test/connect/t/rest.inc b/storage/connect/mysql-test/connect/t/rest.inc new file mode 100644 index 00000000000..6848e4b6965 --- /dev/null +++ b/storage/connect/mysql-test/connect/t/rest.inc @@ -0,0 +1,17 @@ +--disable_query_log +--error 0,ER_UNKNOWN_ERROR +CREATE TABLE t1 +ENGINE=CONNECT TABLE_TYPE=JSON FILE_NAME='users.json' +HTTP='http://jsonplaceholder.typicode.com/users'; + +if (!`SELECT count(*) FROM INFORMATION_SCHEMA.TABLES + WHERE TABLE_SCHEMA='test' AND TABLE_NAME='t1' + AND ENGINE='CONNECT' + AND CREATE_OPTIONS LIKE "%`table_type`='JSON'%"`) +{ + DROP TABLE IF EXISTS t1; + Skip Need Curl or Casablanca; +} +DROP TABLE t1; +--enable_query_log + diff --git a/storage/connect/mysql-test/connect/t/rest.test b/storage/connect/mysql-test/connect/t/rest.test new file mode 100644 index 00000000000..67066ed4639 --- /dev/null +++ b/storage/connect/mysql-test/connect/t/rest.test @@ -0,0 +1,17 @@ +--source rest.inc + +let $MYSQLD_DATADIR= `select @@datadir`; + +--echo # +--echo # Testing REST query +--echo # +CREATE TABLE t1 +ENGINE=CONNECT DATA_CHARSET=utf8 TABLE_TYPE=JSON FILE_NAME='users.json' +HTTP='http://jsonplaceholder.typicode.com/users'; +SELECT * FROM t1; +DROP TABLE t1; + +# +# Clean up +# +--remove_file $MYSQLD_DATADIR/test/users.json From 
251a55dcb4d31821c141583255dc5239b9d06065 Mon Sep 17 00:00:00 2001 From: Olivier Bertrand Date: Sun, 10 Jan 2021 01:05:38 +0100 Subject: [PATCH 047/150] Remove changes to CMakeLists.txt that cause compile error --- storage/connect/CMakeLists.txt | 31 ++++++------------------------- 1 file changed, 6 insertions(+), 25 deletions(-) diff --git a/storage/connect/CMakeLists.txt b/storage/connect/CMakeLists.txt index c83cc584e9d..7eedba08bee 100644 --- a/storage/connect/CMakeLists.txt +++ b/storage/connect/CMakeLists.txt @@ -80,19 +80,6 @@ ELSE(NOT UNIX) ENDIF() ENDIF(UNIX) -# -# BSON: this the new version of JSON that is temporarily included here for testing -# When fully tested, it will replace the old support (and be renamed to JSON) -# - -OPTION(CONNECT_WITH_BSON "Compile CONNECT storage engine with BSON support" ON) - -IF(CONNECT_WITH_BSON) - SET(CONNECT_SOURCES ${CONNECT_SOURCES} - bson.cpp tabbson.cpp bsonudf.cpp bson.h tabbson.h bsonudf.h) - add_definitions(-DBSON_SUPPORT) -ENDIF(CONNECT_WITH_BSON) - # # VCT: the VEC format might be not supported in future versions @@ -335,31 +322,25 @@ IF(CONNECT_WITH_REST) SET(CONNECT_SOURCES ${CONNECT_SOURCES} tabrest.cpp tabrest.h) add_definitions(-DREST_SUPPORT) FIND_PACKAGE(cpprestsdk QUIET) - IF (UNIX) - execute_process(COMMAND lsb_release -is - OUTPUT_VARIABLE LSB_RELEASE_ID_SHORT - OUTPUT_STRIP_TRAILING_WHITESPACE) - MESSAGE (STATUS ${LSB_RELEASE_ID_SHORT}) - ENDIF(UNIX) - IF (cpprestsdk_FOUND OR ("${LSB_RELEASE_ID_SHORT}" STREQUAL "Ubuntu")) - IF(UNIX) + IF (cpprestsdk_FOUND) + IF(UNIX) # INCLUDE_DIRECTORIES(${CPPRESTSDK_INCLUDE_DIR}) # If needed edit next line to set the path to libcpprest.so SET(REST_LIBRARY -lcpprest) MESSAGE (STATUS ${REST_LIBRARY}) - ELSE(NOT UNIX) + ELSE(NOT UNIX) # Next line sets debug compile mode matching cpprest_2_10d.dll # when it was binary installed (can be change later in Visual Studio) # Comment it out if not needed depending on your cpprestsdk installation. - SET(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} /MDd") - ENDIF(UNIX) + SET(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} /MDd") + ENDIF(UNIX) # IF(REST_LIBRARY) why this? 
how about Windows SET(CONNECT_SOURCES ${CONNECT_SOURCES} restget.cpp) add_definitions(-DREST_SOURCE) # ENDIF() # ELSE(NOT cpprestsdk_FOUND) # MESSAGE(STATUS "=====> cpprestsdk package not found") - ENDIF (cpprestsdk_FOUND OR ("${LSB_RELEASE_ID_SHORT}" STREQUAL "Ubuntu")) + ENDIF (cpprestsdk_FOUND) ENDIF(CONNECT_WITH_REST) # From 70cfeb9bc96fd8b3fa25d2133c2f15bd16103b70 Mon Sep 17 00:00:00 2001 From: Olivier Bertrand Date: Sun, 10 Jan 2021 02:23:11 +0100 Subject: [PATCH 048/150] Re-include BSON into CMakeLists.txt --- storage/connect/CMakeLists.txt | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/storage/connect/CMakeLists.txt b/storage/connect/CMakeLists.txt index 7eedba08bee..f110a1eda04 100644 --- a/storage/connect/CMakeLists.txt +++ b/storage/connect/CMakeLists.txt @@ -81,6 +81,19 @@ ELSE(NOT UNIX) ENDIF(UNIX) +# +# BSON: the new handling of JSON data included temporarily for testing +# + +OPTION(CONNECT_WITH_BSON "Compile CONNECT storage engine with BSON support" ON) + +IF(CONNECT_WITH_BSON) + SET(CONNECT_SOURCES ${CONNECT_SOURCES} + bson.cpp bsonudf.cpp tabbson.cpp bson.h bsonudf.h tabbson.h) + add_definitions(-DBSON_SUPPORT) +ENDIF(CONNECT_WITH_BSON) + + # # VCT: the VEC format might be not supported in future versions # From 66f4900b517681da2aed3b562158ef58679961e4 Mon Sep 17 00:00:00 2001 From: Sergei Golubchik Date: Mon, 11 Jan 2021 13:16:38 +0100 Subject: [PATCH 049/150] Revert "MDEV-23536 : Race condition between KILL and transaction commit" This reverts the server part of the commit 775fccea0 but keeps InnoDB part (which reverted MDEV-17092 5530a93f4). So after this both MDEV-23536 and MDEV-17092 are reverted, and the original bug is resurrected. --- sql/sql_class.cc | 38 +++++++------------------------------- sql/sql_class.h | 2 +- 2 files changed, 8 insertions(+), 32 deletions(-) diff --git a/sql/sql_class.cc b/sql/sql_class.cc index 2321991d99f..2595717572a 100644 --- a/sql/sql_class.cc +++ b/sql/sql_class.cc @@ -1357,15 +1357,12 @@ void THD::change_user(void) /* Do operations that may take a long time */ -void THD::cleanup(bool have_mutex) +void THD::cleanup(void) { DBUG_ENTER("THD::cleanup"); DBUG_ASSERT(cleanup_done == 0); - if (have_mutex) - set_killed_no_mutex(KILL_CONNECTION,0,0); - else - set_killed(KILL_CONNECTION); + set_killed(KILL_CONNECTION); #ifdef ENABLE_WHEN_BINLOG_WILL_BE_ABLE_TO_PREPARE if (transaction.xid_state.xa_state == XA_PREPARED) { @@ -1440,28 +1437,6 @@ void THD::cleanup(bool have_mutex) void THD::free_connection() { DBUG_ASSERT(free_connection_done == 0); - /* Check that we have already called thd->unlink() */ - DBUG_ASSERT(prev == 0 && next == 0); - - /* - Other threads may have a lock on THD::LOCK_thd_data or - THD::LOCK_thd_kill to ensure that this THD is not deleted - while they access it. The following mutex_lock ensures - that no one else is using this THD and it's now safe to - continue. - - For example consider KILL-statement execution on - sql_parse.cc kill_one_thread() that will use - THD::LOCK_thd_data to protect victim thread during - THD::awake(). 
- */ - mysql_mutex_lock(&LOCK_thd_data); - mysql_mutex_lock(&LOCK_thd_kill); - -#ifdef WITH_WSREP - delete wsrep_rgi; - wsrep_rgi= 0; -#endif /* WITH_WSREP */ my_free(db); db= NULL; #ifndef EMBEDDED_LIBRARY @@ -1470,8 +1445,8 @@ void THD::free_connection() net.vio= 0; net_end(&net); #endif - if (!cleanup_done) - cleanup(true); // We have locked THD::LOCK_thd_kill + if (!cleanup_done) + cleanup(); ha_close_connection(this); plugin_thdvar_cleanup(this); mysql_audit_free_thd(this); @@ -1482,8 +1457,6 @@ void THD::free_connection() #if defined(ENABLED_PROFILING) profiling.restart(); // Reset profiling #endif - mysql_mutex_unlock(&LOCK_thd_kill); - mysql_mutex_unlock(&LOCK_thd_data); } /* @@ -1539,6 +1512,9 @@ THD::~THD() mysql_mutex_lock(&LOCK_thd_data); mysql_mutex_unlock(&LOCK_thd_data); +#ifdef WITH_WSREP + delete wsrep_rgi; +#endif if (!free_connection_done) free_connection(); diff --git a/sql/sql_class.h b/sql/sql_class.h index a97f64c13b3..b68e3553a2d 100644 --- a/sql/sql_class.h +++ b/sql/sql_class.h @@ -3194,7 +3194,7 @@ public: void update_all_stats(); void update_stats(void); void change_user(void); - void cleanup(bool have_mutex=false); + void cleanup(void); void cleanup_after_query(); void free_connection(); void reset_for_reuse(); From 9b750dcbd89ecf455211a77348a85464b282abee Mon Sep 17 00:00:00 2001 From: Sergei Golubchik Date: Mon, 11 Jan 2021 13:21:42 +0100 Subject: [PATCH 050/150] MDEV-23536 Race condition between KILL and transaction commit Server part: kill_handlerton() was accessing thd->ha_data[] for some other thd, while it could be concurrently modified by its owner thd. protect thd->ha_data[] modifications with a mutex. require this mutex when accessing thd->ha_data[] from kill_handlerton. InnoDB part: on close_connection, detach trx from thd before freeing the trx --- sql/handler.cc | 1 + sql/sql_class.cc | 3 +++ storage/innobase/handler/ha_innodb.cc | 1 + 3 files changed, 5 insertions(+) diff --git a/sql/handler.cc b/sql/handler.cc index c38c604347a..29b01763e8b 100644 --- a/sql/handler.cc +++ b/sql/handler.cc @@ -838,6 +838,7 @@ static my_bool kill_handlerton(THD *thd, plugin_ref plugin, { handlerton *hton= plugin_hton(plugin); + mysql_mutex_assert_owner(&thd->LOCK_thd_data); if (hton->state == SHOW_OPTION_YES && hton->kill_query && thd_get_ha_data(thd, hton)) hton->kill_query(hton, thd, *(enum thd_kill_levels *) level); diff --git a/sql/sql_class.cc b/sql/sql_class.cc index 2595717572a..c3274ae9b82 100644 --- a/sql/sql_class.cc +++ b/sql/sql_class.cc @@ -444,6 +444,7 @@ void thd_set_ha_data(THD *thd, const struct handlerton *hton, const void *ha_data) { plugin_ref *lock= &thd->ha_data[hton->slot].lock; + DBUG_ASSERT(thd == current_thd); if (ha_data && !*lock) *lock= ha_lock_engine(NULL, (handlerton*) hton); else if (!ha_data && *lock) @@ -451,7 +452,9 @@ void thd_set_ha_data(THD *thd, const struct handlerton *hton, plugin_unlock(NULL, *lock); *lock= NULL; } + mysql_mutex_lock(&thd->LOCK_thd_data); *thd_ha_data(thd, hton)= (void*) ha_data; + mysql_mutex_unlock(&thd->LOCK_thd_data); } diff --git a/storage/innobase/handler/ha_innodb.cc b/storage/innobase/handler/ha_innodb.cc index c79b3423b63..fb3d6637dec 100644 --- a/storage/innobase/handler/ha_innodb.cc +++ b/storage/innobase/handler/ha_innodb.cc @@ -5117,6 +5117,7 @@ innobase_close_connection( if (trx) { + thd_set_ha_data(thd, hton, NULL); if (!trx_is_registered_for_2pc(trx) && trx_is_started(trx)) { sql_print_error("Transaction not registered for MariaDB 2PC, " From 4c448836d489bd5a25c7509e8a69309c3b0a8e72 Mon Sep 
17 00:00:00 2001 From: Sergei Golubchik Date: Tue, 5 Jan 2021 18:10:04 +0100 Subject: [PATCH 051/150] MDEV-12161 Can't specify collation for virtual columns sql standard (2016) allows <collate clause> in two places in the <column definition> - as a part of the <data type> or at the very end. Let's do that too. Side effect: in column/SP declaration `COLLATE cs_coll` automatically implies `CHARACTER SET cs` (unless charset was specified explicitly). See changes in sp-ucs2.result --- mysql-test/r/sp-ucs2.result | 6 +-- .../gcol/inc/gcol_column_def_options.inc | 5 ++- .../r/gcol_column_def_options_innodb.result | 14 ++++++- .../r/gcol_column_def_options_myisam.result | 14 ++++++- mysql-test/t/sp-ucs2.test | 6 +-- sql/sql_show.cc | 7 +++- sql/sql_yacc.yy | 42 ++++++++++--------- 7 files changed, 61 insertions(+), 33 deletions(-) diff --git a/mysql-test/r/sp-ucs2.result b/mysql-test/r/sp-ucs2.result index ca448efa535..6c94a111f6f 100644 --- a/mysql-test/r/sp-ucs2.result +++ b/mysql-test/r/sp-ucs2.result @@ -100,20 +100,20 @@ RETURNS VARCHAR(64) CHARACTER SET ucs2 BEGIN RETURN 'str'; END| -ERROR 42000: COLLATION 'ucs2_unicode_ci' is not valid for CHARACTER SET 'latin1' +DROP FUNCTION f| CREATE FUNCTION f(f1 VARCHAR(64) CHARACTER SET ucs2) RETURNS VARCHAR(64) COLLATE ucs2_unicode_ci BEGIN RETURN 'str'; END| -ERROR 42000: COLLATION 'ucs2_unicode_ci' is not valid for CHARACTER SET 'latin1' +DROP FUNCTION f| CREATE FUNCTION f(f1 VARCHAR(64) CHARACTER SET ucs2) RETURNS VARCHAR(64) CHARACTER SET ucs2 BEGIN DECLARE f2 VARCHAR(64) COLLATE ucs2_unicode_ci; RETURN 'str'; END| -ERROR 42000: COLLATION 'ucs2_unicode_ci' is not valid for CHARACTER SET 'latin1' +DROP FUNCTION f| SET NAMES utf8; DROP FUNCTION IF EXISTS bug48766; CREATE FUNCTION bug48766 () diff --git a/mysql-test/suite/gcol/inc/gcol_column_def_options.inc b/mysql-test/suite/gcol/inc/gcol_column_def_options.inc index 28c854c44f4..f4350d25ae9 100644 --- a/mysql-test/suite/gcol/inc/gcol_column_def_options.inc +++ b/mysql-test/suite/gcol/inc/gcol_column_def_options.inc @@ -343,11 +343,12 @@ DELETE FROM t1 WHERE c=1; DROP TABLE t1; } ---error ER_PARSE_ERROR CREATE TABLE t1 (c CHAR(10) CHARACTER SET utf8 COLLATE utf8_bin GENERATED ALWAYS AS ("foo bar")); +SHOW CREATE TABLE t1; +DROP TABLE t1; CREATE TABLE t1 (i INT); ---error ER_PARSE_ERROR ALTER TABLE t1 ADD COLUMN c CHAR(10) CHARACTER SET utf8 COLLATE utf8_bin GENERATED ALWAYS AS ("foo bar"); +SHOW CREATE TABLE t1; DROP TABLE t1; --error ER_PARSE_ERROR CREATE TABLE t1 (i INT COLLATE utf8_bin, c INT COLLATE utf8_bin GENERATED ALWAYS AS (10)); diff --git a/mysql-test/suite/gcol/r/gcol_column_def_options_innodb.result b/mysql-test/suite/gcol/r/gcol_column_def_options_innodb.result index ee80e5f605a..48c4613f6ca 100644 --- a/mysql-test/suite/gcol/r/gcol_column_def_options_innodb.result +++ b/mysql-test/suite/gcol/r/gcol_column_def_options_innodb.result @@ -426,10 +426,20 @@ INSERT INTO t1(a) VALUES(0); DELETE FROM t1 WHERE c=1; DROP TABLE t1; CREATE TABLE t1 (c CHAR(10) CHARACTER SET utf8 COLLATE utf8_bin GENERATED ALWAYS AS ("foo bar")); -ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'GENERATED ALWAYS AS ("foo bar"))' at line 1 +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `c` char(10) CHARACTER SET utf8 COLLATE utf8_bin GENERATED ALWAYS AS ('foo bar') VIRTUAL +) ENGINE=InnoDB DEFAULT CHARSET=latin1 +DROP TABLE t1; CREATE TABLE t1 (i INT); ALTER TABLE t1 ADD COLUMN c CHAR(10) CHARACTER SET utf8 COLLATE utf8_bin GENERATED
ALWAYS AS ("foo bar"); -ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'GENERATED ALWAYS AS ("foo bar")' at line 1 +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `i` int(11) DEFAULT NULL, + `c` char(10) CHARACTER SET utf8 COLLATE utf8_bin GENERATED ALWAYS AS ('foo bar') VIRTUAL +) ENGINE=InnoDB DEFAULT CHARSET=latin1 DROP TABLE t1; CREATE TABLE t1 (i INT COLLATE utf8_bin, c INT COLLATE utf8_bin GENERATED ALWAYS AS (10)); ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'GENERATED ALWAYS AS (10))' at line 1 diff --git a/mysql-test/suite/gcol/r/gcol_column_def_options_myisam.result b/mysql-test/suite/gcol/r/gcol_column_def_options_myisam.result index 09e5b617af1..b7ae6488c95 100644 --- a/mysql-test/suite/gcol/r/gcol_column_def_options_myisam.result +++ b/mysql-test/suite/gcol/r/gcol_column_def_options_myisam.result @@ -426,10 +426,20 @@ INSERT INTO t1(a) VALUES(0); DELETE FROM t1 WHERE c=1; DROP TABLE t1; CREATE TABLE t1 (c CHAR(10) CHARACTER SET utf8 COLLATE utf8_bin GENERATED ALWAYS AS ("foo bar")); -ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'GENERATED ALWAYS AS ("foo bar"))' at line 1 +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `c` char(10) CHARACTER SET utf8 COLLATE utf8_bin GENERATED ALWAYS AS ('foo bar') VIRTUAL +) ENGINE=MyISAM DEFAULT CHARSET=latin1 +DROP TABLE t1; CREATE TABLE t1 (i INT); ALTER TABLE t1 ADD COLUMN c CHAR(10) CHARACTER SET utf8 COLLATE utf8_bin GENERATED ALWAYS AS ("foo bar"); -ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'GENERATED ALWAYS AS ("foo bar")' at line 1 +SHOW CREATE TABLE t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `i` int(11) DEFAULT NULL, + `c` char(10) CHARACTER SET utf8 COLLATE utf8_bin GENERATED ALWAYS AS ('foo bar') VIRTUAL +) ENGINE=MyISAM DEFAULT CHARSET=latin1 DROP TABLE t1; CREATE TABLE t1 (i INT COLLATE utf8_bin, c INT COLLATE utf8_bin GENERATED ALWAYS AS (10)); ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near 'GENERATED ALWAYS AS (10))' at line 1 diff --git a/mysql-test/t/sp-ucs2.test b/mysql-test/t/sp-ucs2.test index a1aec8071b4..95021bcb8ef 100644 --- a/mysql-test/t/sp-ucs2.test +++ b/mysql-test/t/sp-ucs2.test @@ -114,35 +114,35 @@ DROP FUNCTION f1| # # COLLATE with no CHARACTER SET in IN param # ---error ER_COLLATION_CHARSET_MISMATCH CREATE FUNCTION f(f1 VARCHAR(64) COLLATE ucs2_unicode_ci) RETURNS VARCHAR(64) CHARACTER SET ucs2 BEGIN RETURN 'str'; END| +DROP FUNCTION f| # # COLLATE with no CHARACTER SET in RETURNS # ---error ER_COLLATION_CHARSET_MISMATCH CREATE FUNCTION f(f1 VARCHAR(64) CHARACTER SET ucs2) RETURNS VARCHAR(64) COLLATE ucs2_unicode_ci BEGIN RETURN 'str'; END| +DROP FUNCTION f| # # COLLATE with no CHARACTER SET in DECLARE # ---error ER_COLLATION_CHARSET_MISMATCH CREATE FUNCTION f(f1 VARCHAR(64) CHARACTER SET ucs2) RETURNS VARCHAR(64) CHARACTER SET ucs2 BEGIN DECLARE f2 VARCHAR(64) COLLATE ucs2_unicode_ci; RETURN 'str'; END| +DROP FUNCTION f| delimiter ;| diff --git a/sql/sql_show.cc b/sql/sql_show.cc index 49659c239bc..79897429a4f 100644 --- a/sql/sql_show.cc +++ 
b/sql/sql_show.cc @@ -2013,8 +2013,13 @@ int show_create_table(THD *thd, TABLE_LIST *table_list, String *packet, /* For string types dump collation name only if collation is not primary for the given charset + + For generated fields don't print the COLLATE clause if + the collation matches the expression's collation. */ - if (!(field->charset()->state & MY_CS_PRIMARY) && !field->vcol_info) + if (!(field->charset()->state & MY_CS_PRIMARY) && + (!field->vcol_info || + field->charset() != field->vcol_info->expr->collation.collation)) { packet->append(STRING_WITH_LEN(" COLLATE ")); packet->append(field->charset()->name); diff --git a/sql/sql_yacc.yy b/sql/sql_yacc.yy index 1f37296842c..a1616024fb8 100644 --- a/sql/sql_yacc.yy +++ b/sql/sql_yacc.yy @@ -1033,7 +1033,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize); Currently there are 98 shift/reduce conflicts. We should not introduce new conflicts any more. */ -%expect 98 +%expect 109 /* Comments for TOKENS. @@ -1774,7 +1774,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize); %type int_type real_type -%type type_with_opt_collate field_type +%type field_type %type opt_dyncol_type dyncol_type numeric_dyncol_type temporal_dyncol_type string_dyncol_type @@ -3010,11 +3010,12 @@ sp_param_name_and_type: thd->variables.collation_database); $$= spvar; } - type_with_opt_collate + field_type { LEX *lex= Lex; sp_variable *spvar= $2; + Lex->set_last_field_type($3); if (lex->sphead->fill_field_definition(thd, lex, lex->last_field)) { MYSQL_YYABORT; @@ -3096,14 +3097,15 @@ sp_decl: thd->lex->init_last_field(&spvar->field_def, spvar->name.str, thd->variables.collation_database); } - type_with_opt_collate + field_type sp_opt_default { LEX *lex= Lex; sp_pcontext *pctx= lex->spcont; uint num_vars= pctx->context_var_count(); Item *dflt_value_item= $5; - + Lex->set_last_field_type($4); + if (!dflt_value_item) { dflt_value_item= new (thd->mem_root) Item_null(thd); @@ -6665,20 +6667,6 @@ serial_attribute: ; -type_with_opt_collate: - field_type opt_collate - { - $$= $1; - - if ($2) - { - if (!(Lex->charset= merge_charset_and_collation(Lex->charset, $2))) - MYSQL_YYABORT; - } - Lex->set_last_field_type($1); - } - ; - charset: CHAR_SYM SET {} | CHARSET {} @@ -6751,12 +6739,25 @@ charset_or_alias: } ; +collate: COLLATE_SYM collation_name_or_default + { + Lex->charset= Lex->last_field->charset= $2; + } + ; + opt_binary: /* empty */ { bincmp_collation(NULL, false); } | BYTE_SYM { bincmp_collation(&my_charset_bin, false); } | charset_or_alias opt_bin_mod { bincmp_collation($1, $2); } | BINARY { bincmp_collation(NULL, true); } | BINARY charset_or_alias { bincmp_collation($2, true); } + | charset_or_alias collate + { + if (!my_charset_same(Lex->charset, $1)) + my_yyabort_error((ER_COLLATION_CHARSET_MISMATCH, MYF(0), + Lex->charset->name, $1->csname)); + } + | collate { } ; opt_bin_mod: @@ -16854,8 +16855,9 @@ sf_tail: lex->init_last_field(&lex->sphead->m_return_field_def, NULL, thd->variables.collation_database); } - type_with_opt_collate /* $11 */ + field_type /* $11 */ { /* $12 */ + Lex->set_last_field_type($11); if (Lex->sphead->fill_field_definition(thd, Lex, Lex->last_field)) MYSQL_YYABORT; } From 63f91927870b41b8965e2a2a868abcc2b3672f68 Mon Sep 17 00:00:00 2001 From: Sergei Golubchik Date: Thu, 7 Jan 2021 19:37:47 +0100 Subject: [PATCH 052/150] MDEV-17251 SHOW STATUS unnecessary calls calc_sum_of_all_status 1. only call calc_sum_of_all_status() if a global SHOW_xxx_STATUS variable is to be returned 2. 
only lock LOCK_status when copying global_status_var, but not when iterating all threads --- sql/sql_parse.cc | 1 + sql/sql_plugin.h | 5 +++-- sql/sql_show.cc | 20 +++++++++++--------- sql/sql_test.cc | 1 + 4 files changed, 16 insertions(+), 11 deletions(-) diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc index 54937116383..131ba4a86c5 100644 --- a/sql/sql_parse.cc +++ b/sql/sql_parse.cc @@ -2179,6 +2179,7 @@ bool dispatch_command(enum enum_server_command command, THD *thd, break; general_log_print(thd, command, NullS); status_var_increment(thd->status_var.com_stat[SQLCOM_SHOW_STATUS]); + *current_global_status_var= global_status_var; calc_sum_of_all_status(current_global_status_var); if (!(uptime= (ulong) (thd->start_time - server_start_time))) queries_per_second1000= 0; diff --git a/sql/sql_plugin.h b/sql/sql_plugin.h index 64194b5a1b5..13f7296e0cd 100644 --- a/sql/sql_plugin.h +++ b/sql/sql_plugin.h @@ -22,9 +22,10 @@ that is defined in plugin.h */ #define SHOW_always_last SHOW_KEY_CACHE_LONG, \ - SHOW_LONG_STATUS, SHOW_DOUBLE_STATUS, \ SHOW_HAVE, SHOW_MY_BOOL, SHOW_HA_ROWS, SHOW_SYS, \ - SHOW_LONG_NOFLUSH, SHOW_LONGLONG_STATUS, SHOW_LEX_STRING + SHOW_LONG_NOFLUSH, SHOW_LEX_STRING, \ + /* SHOW_*_STATUS must be at the end, SHOW_LONG_STATUS being first */ \ + SHOW_LONG_STATUS, SHOW_DOUBLE_STATUS, SHOW_LONGLONG_STATUS #include <mysql/plugin.h> #undef SHOW_always_last diff --git a/sql/sql_show.cc b/sql/sql_show.cc index 79897429a4f..4294157cce3 100644 --- a/sql/sql_show.cc +++ b/sql/sql_show.cc @@ -3580,6 +3580,16 @@ static bool show_status_array(THD *thd, const char *wild, if (show_type == SHOW_SYS) mysql_mutex_lock(&LOCK_global_system_variables); + else if (show_type >= SHOW_LONG_STATUS && scope == OPT_GLOBAL && + !status_var->local_memory_used) + { + mysql_mutex_lock(&LOCK_status); + *status_var= global_status_var; + mysql_mutex_unlock(&LOCK_status); + calc_sum_of_all_status(status_var); + DBUG_ASSERT(status_var->local_memory_used); + } + pos= get_one_variable(thd, var, scope, show_type, status_var, &charset, buff, &length); @@ -3620,8 +3630,6 @@ uint calc_sum_of_all_status(STATUS_VAR *to) I_List_iterator<THD> it(threads); THD *tmp; - /* Get global values as base */ - *to= global_status_var; to->local_memory_used= 0; /* Add to this status from existing threads */ @@ -7586,13 +7594,7 @@ int fill_status(THD *thd, TABLE_LIST *tables, COND *cond) if (partial_cond) partial_cond->val_int(); - if (scope == OPT_GLOBAL) - { - /* We only hold LOCK_status for summary status vars */ - mysql_mutex_lock(&LOCK_status); - calc_sum_of_all_status(&tmp); - mysql_mutex_unlock(&LOCK_status); - } + tmp.local_memory_used= 0; // meaning tmp was not populated yet mysql_mutex_lock(&LOCK_show_status); res= show_status_array(thd, wild, diff --git a/sql/sql_test.cc b/sql/sql_test.cc index 4e68ec2ec2e..c0e62227665 100644 --- a/sql/sql_test.cc +++ b/sql/sql_test.cc @@ -564,6 +564,7 @@ void mysql_print_status() STATUS_VAR tmp; uint count; + tmp= global_status_var; count= calc_sum_of_all_status(&tmp); printf("\nStatus information:\n\n"); (void) my_getwd(current_dir, sizeof(current_dir),MYF(0)); From 69f1adaa5565bdd752215774cc7e455793c25109 Mon Sep 17 00:00:00 2001 From: Sergei Golubchik Date: Sat, 9 Jan 2021 16:56:35 +0100 Subject: [PATCH 053/150] main.skip_grants cleanup --- mysql-test/r/skip_grants.result | 30 +++++++----- ...skip_grants-master.opt => skip_grants.opt} | 0 mysql-test/t/skip_grants.test | 49 ++++++++----------- 3 files changed, 38 insertions(+), 41 deletions(-) rename mysql-test/t/{skip_grants-master.opt => skip_grants.opt}
(100%) diff --git a/mysql-test/r/skip_grants.result b/mysql-test/r/skip_grants.result index 1c055ef7385..b92e62a6ff0 100644 --- a/mysql-test/r/skip_grants.result +++ b/mysql-test/r/skip_grants.result @@ -1,14 +1,4 @@ use test; -DROP VIEW IF EXISTS v1; -DROP VIEW IF EXISTS v2; -DROP VIEW IF EXISTS v3; -DROP TABLE IF EXISTS t1; -DROP PROCEDURE IF EXISTS p1; -DROP PROCEDURE IF EXISTS p2; -DROP PROCEDURE IF EXISTS p3; -DROP FUNCTION IF EXISTS f1; -DROP FUNCTION IF EXISTS f2; -DROP FUNCTION IF EXISTS f3; CREATE TABLE t1(c INT); CREATE TRIGGER t1_bi BEFORE INSERT ON t1 FOR EACH ROW @@ -58,10 +48,16 @@ DROP PROCEDURE p3; DROP FUNCTION f1; DROP FUNCTION f2; DROP FUNCTION f3; +# +# Bug #26807 "set global event_scheduler=1" and --skip-grant-tables crashes server +# set global event_scheduler=1; Warnings: Note 1408 Event Scheduler: Loaded 0 events set global event_scheduler=0; +# +# Bug#26285 Selecting information_schema crahes server +# select count(*) from information_schema.COLUMN_PRIVILEGES; count(*) 0 @@ -74,14 +70,21 @@ count(*) select count(*) from information_schema.USER_PRIVILEGES; count(*) 0 -End of 5.0 tests +# +# End of 5.0 tests +# # # Bug#29817 Queries with UDF fail with non-descriptive error # if mysql.proc is missing # select no_such_function(1); ERROR 42000: FUNCTION test.no_such_function does not exist -End of 5.1 tests +# +# End of 5.1 tests +# +# +# MDEV-8280 crash in 'show global status' with --skip-grant-tables +# show global status like 'Acl%'; Variable_name Value Acl_column_grants 0 @@ -93,3 +96,6 @@ Acl_role_grants 0 Acl_roles 0 Acl_table_grants 0 Acl_users 0 +# +# End of 10.1 tests +# diff --git a/mysql-test/t/skip_grants-master.opt b/mysql-test/t/skip_grants.opt similarity index 100% rename from mysql-test/t/skip_grants-master.opt rename to mysql-test/t/skip_grants.opt diff --git a/mysql-test/t/skip_grants.test b/mysql-test/t/skip_grants.test index 5f79404e7e4..f68734f8244 100644 --- a/mysql-test/t/skip_grants.test +++ b/mysql-test/t/skip_grants.test @@ -17,24 +17,6 @@ use test; # Prepare. ---disable_warnings - -DROP VIEW IF EXISTS v1; -DROP VIEW IF EXISTS v2; -DROP VIEW IF EXISTS v3; - -DROP TABLE IF EXISTS t1; - -DROP PROCEDURE IF EXISTS p1; -DROP PROCEDURE IF EXISTS p2; -DROP PROCEDURE IF EXISTS p3; - -DROP FUNCTION IF EXISTS f1; -DROP FUNCTION IF EXISTS f2; -DROP FUNCTION IF EXISTS f3; - ---enable_warnings - # Test case. 
CREATE TABLE t1(c INT); @@ -109,20 +91,23 @@ DROP FUNCTION f1; DROP FUNCTION f2; DROP FUNCTION f3; -# -# Bug #26807 "set global event_scheduler=1" and --skip-grant-tables crashes server -# +--echo # +--echo # Bug #26807 "set global event_scheduler=1" and --skip-grant-tables crashes server +--echo # set global event_scheduler=1; set global event_scheduler=0; -# -# Bug#26285 Selecting information_schema crahes server -# +--echo # +--echo # Bug#26285 Selecting information_schema crahes server +--echo # select count(*) from information_schema.COLUMN_PRIVILEGES; select count(*) from information_schema.SCHEMA_PRIVILEGES; select count(*) from information_schema.TABLE_PRIVILEGES; select count(*) from information_schema.USER_PRIVILEGES; ---echo End of 5.0 tests + +--echo # +--echo # End of 5.0 tests +--echo # --echo # --echo # Bug#29817 Queries with UDF fail with non-descriptive error @@ -131,9 +116,15 @@ select count(*) from information_schema.USER_PRIVILEGES; --error ER_SP_DOES_NOT_EXIST select no_such_function(1); ---echo End of 5.1 tests +--echo # +--echo # End of 5.1 tests +--echo # -# -# MDEV-8280 crash in 'show global status' with --skip-grant-tables -# +--echo # +--echo # MDEV-8280 crash in 'show global status' with --skip-grant-tables +--echo # show global status like 'Acl%'; + +--echo # +--echo # End of 10.1 tests +--echo # From fc0d9a470ce9ae2285f687e9b13f08b76527051b Mon Sep 17 00:00:00 2001 From: Sergei Golubchik Date: Sat, 9 Jan 2021 17:00:04 +0100 Subject: [PATCH 054/150] MDEV-22966 Server crashes or hangs with SET ROLE when started with skip-grant-tables --- mysql-test/r/skip_grants.result | 8 ++++++++ mysql-test/t/skip_grants.test | 11 +++++++++++ sql/sql_acl.cc | 6 ++++++ 3 files changed, 25 insertions(+) diff --git a/mysql-test/r/skip_grants.result b/mysql-test/r/skip_grants.result index b92e62a6ff0..b058c8f001e 100644 --- a/mysql-test/r/skip_grants.result +++ b/mysql-test/r/skip_grants.result @@ -99,3 +99,11 @@ Acl_users 0 # # End of 10.1 tests # +# +# MDEV-22966 Server crashes or hangs with SET ROLE when started with skip-grant-tables +# +set role x; +ERROR HY000: The MariaDB server is running with the --skip-grant-tables option so it cannot execute this statement +# +# End of 10.2 tests +# diff --git a/mysql-test/t/skip_grants.test b/mysql-test/t/skip_grants.test index f68734f8244..0ecaa022fd4 100644 --- a/mysql-test/t/skip_grants.test +++ b/mysql-test/t/skip_grants.test @@ -128,3 +128,14 @@ show global status like 'Acl%'; --echo # --echo # End of 10.1 tests --echo # + +--echo # +--echo # MDEV-22966 Server crashes or hangs with SET ROLE when started with skip-grant-tables +--echo # + +--error ER_OPTION_PREVENTS_STATEMENT +set role x; + +--echo # +--echo # End of 10.2 tests +--echo # diff --git a/sql/sql_acl.cc b/sql/sql_acl.cc index 6aa53e50bae..f1034986f22 100644 --- a/sql/sql_acl.cc +++ b/sql/sql_acl.cc @@ -2770,6 +2770,12 @@ end: int acl_check_setrole(THD *thd, char *rolename, ulonglong *access) { + if (!initialized) + { + my_error(ER_OPTION_PREVENTS_STATEMENT, MYF(0), "--skip-grant-tables"); + return 1; + } + return check_user_can_set_role(thd, thd->security_ctx->priv_user, thd->security_ctx->host, thd->security_ctx->ip, rolename, access); } From 22b171d3044675481c03b83888cffa018a502c4e Mon Sep 17 00:00:00 2001 From: Sergei Golubchik Date: Sat, 9 Jan 2021 17:56:33 +0100 Subject: [PATCH 055/150] MDEV-17852 Altered connection limits for user have no effect update mqh in struct user_conn after taking it from the cache --- mysql-test/r/user_limits.result | 27 
+++++++++++++++++++++++++++ mysql-test/t/user_limits.test | 26 ++++++++++++++++++++++++++ sql/sql_connect.cc | 2 +- 3 files changed, 54 insertions(+), 1 deletion(-) diff --git a/mysql-test/r/user_limits.result b/mysql-test/r/user_limits.result index bbf73d7c617..8658cc1497d 100644 --- a/mysql-test/r/user_limits.result +++ b/mysql-test/r/user_limits.result @@ -186,3 +186,30 @@ connection default; drop user mysqltest_1@localhost; drop table t1; set global max_user_connections= @my_max_user_connections; +# +# End of 10.1 tests +# +# +# MDEV-17852 Altered connection limits for user have no effect +# +create user foo@'%' with max_user_connections 1; +connect con1,localhost,foo; +select current_user(); +current_user() +foo@% +connect(localhost,foo,,test,MYSQL_PORT,MYSQL_SOCK); +connect con2,localhost,foo; +ERROR 42000: User 'foo' has exceeded the 'max_user_connections' resource (current value: 1) +connection default; +alter user foo with max_user_connections 2; +connect con3,localhost,foo; +select current_user(); +current_user() +foo@% +disconnect con3; +disconnect con1; +connection default; +drop user foo@'%'; +# +# End of 10.2 tests +# diff --git a/mysql-test/t/user_limits.test b/mysql-test/t/user_limits.test index ebb4fd4fb88..36524febd8d 100644 --- a/mysql-test/t/user_limits.test +++ b/mysql-test/t/user_limits.test @@ -216,3 +216,29 @@ drop table t1; --source include/wait_until_count_sessions.inc set global max_user_connections= @my_max_user_connections; + +--echo # +--echo # End of 10.1 tests +--echo # + +--echo # +--echo # MDEV-17852 Altered connection limits for user have no effect +--echo # +create user foo@'%' with max_user_connections 1; +--connect con1,localhost,foo +select current_user(); +--replace_result $MASTER_MYPORT MYSQL_PORT $MASTER_MYSOCK MYSQL_SOCK +--error ER_USER_LIMIT_REACHED +--connect con2,localhost,foo +--connection default +alter user foo with max_user_connections 2; +--connect con3,localhost,foo +select current_user(); +--disconnect con3 +--disconnect con1 +--connection default +drop user foo@'%'; + +--echo # +--echo # End of 10.2 tests +--echo # diff --git a/sql/sql_connect.cc b/sql/sql_connect.cc index aa7a877ed20..ec1bc45433a 100644 --- a/sql/sql_connect.cc +++ b/sql/sql_connect.cc @@ -85,7 +85,6 @@ int get_or_create_user_conn(THD *thd, const char *user, uc->host= uc->user + user_len + 1; uc->len= temp_len; uc->connections= uc->questions= uc->updates= uc->conn_per_hour= 0; - uc->user_resources= *mqh; uc->reset_utime= thd->thr_create_utime; if (my_hash_insert(&hash_user_connections, (uchar*) uc)) { @@ -95,6 +94,7 @@ int get_or_create_user_conn(THD *thd, const char *user, goto end; } } + uc->user_resources= *mqh; thd->user_connect=uc; uc->connections++; end: From 674be2fd8296092f246f2d89bc514f50f65dfa2c Mon Sep 17 00:00:00 2001 From: Sergei Golubchik Date: Sat, 9 Jan 2021 18:52:33 +0100 Subject: [PATCH 056/150] MDEV-18428 Memory: If transactional=0 is specified in CREATE TABLE, it is not possible to ALTER TABLE fix "engine does not support TRANSACTIONAL=1" error message to match user input --- mysql-test/r/create.result | 2 +- sql/sql_table.cc | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/mysql-test/r/create.result b/mysql-test/r/create.result index 3e5efbe74dd..2f039826209 100644 --- a/mysql-test/r/create.result +++ b/mysql-test/r/create.result @@ -1290,7 +1290,7 @@ drop table if exists t1,t2,t3; # Fix modified for MariaDB: we support this syntax create table t1 (a int) transactional=0; Warnings: -Warning 1478 Table storage engine 'MyISAM' 
does not support the create option 'TRANSACTIONAL=1' +Warning 1478 Table storage engine 'MyISAM' does not support the create option 'TRANSACTIONAL=0' create table t2 (a int) page_checksum=1; create table t3 (a int) row_format=page; drop table t1,t2,t3; diff --git a/sql/sql_table.cc b/sql/sql_table.cc index 57284272316..4b62ccb7d7c 100644 --- a/sql/sql_table.cc +++ b/sql/sql_table.cc @@ -4254,7 +4254,8 @@ mysql_prepare_create_table(THD *thd, HA_CREATE_INFO *create_info, ER_ILLEGAL_HA_CREATE_OPTION, ER_THD(thd, ER_ILLEGAL_HA_CREATE_OPTION), file->engine_name()->str, - "TRANSACTIONAL=1"); + create_info->transactional == HA_CHOICE_YES + ? "TRANSACTIONAL=1" : "TRANSACTIONAL=0"); if (parse_option_list(thd, file->partition_ht(), &create_info->option_struct, &create_info->option_list, From 4568a72ce45207a538d89449ffcff4a84cb3ea33 Mon Sep 17 00:00:00 2001 From: Sergei Golubchik Date: Sun, 10 Jan 2021 01:31:38 +0100 Subject: [PATCH 057/150] don't do a warning for bad table options in replication slave thread otherwise ALTER TABLE can break replication --- mysql-test/suite/rpl/r/rpl_table_options.result | 9 ++++++--- mysql-test/suite/rpl/t/rpl_table_options.test | 1 + sql/create_options.cc | 5 ++--- 3 files changed, 9 insertions(+), 6 deletions(-) diff --git a/mysql-test/suite/rpl/r/rpl_table_options.result b/mysql-test/suite/rpl/r/rpl_table_options.result index 87fd8c2b2fb..14af4e390c2 100644 --- a/mysql-test/suite/rpl/r/rpl_table_options.result +++ b/mysql-test/suite/rpl/r/rpl_table_options.result @@ -5,24 +5,27 @@ set storage_engine=example; connection slave; connection master; create table t1 (a int not null) ull=12340; +alter table t1 ull=12350; +Warnings: +Note 1105 EXAMPLE DEBUG: ULL 12340 -> 12350 show create table t1; Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) NOT NULL -) ENGINE=EXAMPLE DEFAULT CHARSET=latin1 `ull`=12340 +) ENGINE=EXAMPLE DEFAULT CHARSET=latin1 `ull`=12350 connection slave; connection slave; show create table t1; Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) NOT NULL -) ENGINE=MyISAM DEFAULT CHARSET=latin1 /* `ull`=12340 */ +) ENGINE=MyISAM DEFAULT CHARSET=latin1 /* `ull`=12350 */ set sql_mode=ignore_bad_table_options; show create table t1; Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) NOT NULL -) ENGINE=MyISAM DEFAULT CHARSET=latin1 `ull`=12340 +) ENGINE=MyISAM DEFAULT CHARSET=latin1 `ull`=12350 connection master; drop table t1; set storage_engine=default; diff --git a/mysql-test/suite/rpl/t/rpl_table_options.test b/mysql-test/suite/rpl/t/rpl_table_options.test index 3f52444a3c7..6dd1c9bd20d 100644 --- a/mysql-test/suite/rpl/t/rpl_table_options.test +++ b/mysql-test/suite/rpl/t/rpl_table_options.test @@ -18,6 +18,7 @@ connection master; # the option is unknown. 
# create table t1 (a int not null) ull=12340; +alter table t1 ull=12350; show create table t1; sync_slave_with_master; diff --git a/sql/create_options.cc b/sql/create_options.cc index 4049443de2a..63552e60c4b 100644 --- a/sql/create_options.cc +++ b/sql/create_options.cc @@ -97,14 +97,13 @@ static bool report_unknown_option(THD *thd, engine_option_value *val, { DBUG_ENTER("report_unknown_option"); - if (val->parsed || suppress_warning) + if (val->parsed || suppress_warning || thd->slave_thread) { DBUG_PRINT("info", ("parsed => exiting")); DBUG_RETURN(FALSE); } - if (!(thd->variables.sql_mode & MODE_IGNORE_BAD_TABLE_OPTIONS) && - !thd->slave_thread) + if (!(thd->variables.sql_mode & MODE_IGNORE_BAD_TABLE_OPTIONS)) { my_error(ER_UNKNOWN_OPTION, MYF(0), val->name.str); DBUG_RETURN(TRUE); From 6f707430e5e24aed3720e39de6cf49dc8d18d131 Mon Sep 17 00:00:00 2001 From: Sergei Golubchik Date: Sat, 9 Jan 2021 18:48:16 +0100 Subject: [PATCH 058/150] cleanup: copy RAII helpers from 10.5, cleanup test --- mysql-test/r/create.result | 16 +++-- mysql-test/t/create.test | 138 ++++++++++++++++++------------------- sql/sql_class.h | 32 +++++++++ sql/unireg.cc | 4 +- 4 files changed, 114 insertions(+), 76 deletions(-) diff --git a/mysql-test/r/create.result b/mysql-test/r/create.result index 2f039826209..d0755931bf7 100644 --- a/mysql-test/r/create.result +++ b/mysql-test/r/create.result @@ -1,7 +1,4 @@ call mtr.add_suppression("table or database name 't-1'"); -drop table if exists t1,t2,t3,t4,t5; -drop database if exists mysqltest; -drop view if exists v1; create table t1 (b char(0)); insert into t1 values (""),(null); select * from t1; @@ -2066,10 +2063,21 @@ alter table t1 add key xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx0064 (f64) comment 'yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy'; ERROR HY000: Cannot create table `t1`: index information is too long. Decrease number of indexes or use shorter index names or shorter comments. drop table t1; -End of 5.5 tests +# +# End of 5.5 tests +# +# +# MDEV-4880 Attempt to create a table without columns produces ER_ILLEGAL_HA instead of ER_TABLE_MUST_HAVE_COLUMNS +# create table t1; ERROR 42000: A table must have at least 1 column +# +# MDEV-11231 Server crashes in check_duplicate_key on CREATE TABLE ... SELECT +# create table t1 (i int, j int, key(i), key(i)) as select 1 as i, 2 as j; Warnings: Note 1831 Duplicate index `i_2`. 
This is deprecated and will be disallowed in a future release drop table t1; +# +# End of 10.0 tests +# diff --git a/mysql-test/t/create.test b/mysql-test/t/create.test index 11f641c6033..03437320ee6 100644 --- a/mysql-test/t/create.test +++ b/mysql-test/t/create.test @@ -5,12 +5,6 @@ call mtr.add_suppression("table or database name 't-1'"); # Check some special create statements. # ---disable_warnings -drop table if exists t1,t2,t3,t4,t5; -drop database if exists mysqltest; -drop view if exists v1; ---enable_warnings - create table t1 (b char(0)); insert into t1 values (""),(null); select * from t1; @@ -29,30 +23,30 @@ drop table t1; # Test of some CREATE TABLE'S that should fail # ---error 1146 +--error ER_NO_SUCH_TABLE create table t2 engine=heap select * from t1; ---error 1146 +--error ER_NO_SUCH_TABLE create table t2 select auto+1 from t1; drop table if exists t1,t2; ---error 1167 +--error ER_WRONG_KEY_COLUMN create table t1 (b char(0) not null, index(b)); ---error 1163 +--error ER_TABLE_CANT_HANDLE_BLOB create table t1 (a int not null,b text) engine=heap; drop table if exists t1; ---error 1075 +--error ER_WRONG_AUTO_KEY create table t1 (ordid int(8) not null auto_increment, ord varchar(50) not null, primary key (ord,ordid)) engine=heap; --- error 1049 +--error ER_BAD_DB_ERROR create table not_existing_database.test (a int); create table `a/a` (a int); show create table `a/a`; create table t1 like `a/a`; drop table `a/a`; drop table `t1`; ---error 1103 +--error ER_WRONG_TABLE_NAME create table `aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa` (aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa int); ---error 1059 +--error ER_TOO_LONG_IDENT create table a (`aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa` int); # @@ -62,17 +56,17 @@ create table t1 (a datetime default now()); drop table t1; create table t1 (a datetime on update now()); drop table t1; ---error 1067 +--error ER_INVALID_DEFAULT create table t1 (a int default 100 auto_increment); ---error 1067 +--error ER_INVALID_DEFAULT create table t1 (a tinyint default 1000); ---error 1067 +--error ER_INVALID_DEFAULT create table t1 (a varchar(5) default 'abcdef'); create table t1 (a varchar(5) default 'abcde'); insert into t1 values(); select * from t1; ---error 1067 +--error ER_INVALID_DEFAULT SET STATEMENT sql_mode = 'NO_ENGINE_SUBSTITUTION' FOR alter table t1 alter column a set default 'abcdef'; drop table t1; @@ -97,13 +91,13 @@ create table mysqltest.test2$ (a int); drop table mysqltest.test2$; drop database mysqltest; ---error 1103 +--error ER_WRONG_TABLE_NAME create table `` (a int); ---error 1103 +--error ER_WRONG_TABLE_NAME drop table if exists ``; ---error 1166 +--error ER_WRONG_COLUMN_NAME create table t1 (`` int); ---error 1280 +--error ER_WRONG_NAME_FOR_INDEX create table t1 (i int, index `` (i)); # @@ -158,13 +152,13 @@ create table t2 (a int) select * from t1; describe t1; describe t2; drop table if exists t2; ---error 1060 +--error ER_DUP_FIELDNAME create table t2 (a int, a float) select * from t1; drop table if exists t2; ---error 1060 +--error ER_DUP_FIELDNAME create table t2 (a int) select a as b, a+1 as b from t1; drop table if exists t2; ---error 1060 +--error ER_DUP_FIELDNAME create table t2 (b int) select a as b, a+1 as b from t1; drop table if exists t1,t2; @@ -176,7 +170,7 @@ CREATE TABLE t1 (a int not null); INSERT INTO t1 values (1),(2),(1); --error ER_DUP_ENTRY CREATE TABLE t2 (primary key(a)) SELECT * FROM t1; ---error 1146 +--error ER_NO_SUCH_TABLE 
SELECT * from t2; DROP TABLE t1; DROP TABLE IF EXISTS t2; @@ -202,7 +196,7 @@ SELECT @@storage_engine; CREATE TABLE t1 (a int not null); show create table t1; drop table t1; ---error 1286 +--error ER_UNKNOWN_STORAGE_ENGINE SET SESSION storage_engine="gemini"; SELECT @@storage_engine; CREATE TABLE t1 (a int not null); @@ -216,11 +210,11 @@ drop table t1; # create table t1 ( k1 varchar(2), k2 int, primary key(k1,k2)); insert into t1 values ("a", 1), ("b", 2); ---error 1048 +--error ER_BAD_NULL_ERROR insert into t1 values ("c", NULL); ---error 1048 +--error ER_BAD_NULL_ERROR insert into t1 values (NULL, 3); ---error 1048 +--error ER_BAD_NULL_ERROR insert into t1 values (NULL, NULL); drop table t1; @@ -262,11 +256,11 @@ drop table t1; # "Table truncated when creating another table name with Spaces" # ---error 1103 +--error ER_WRONG_TABLE_NAME create table `t1 `(a int); ---error 1102 +--error ER_WRONG_DB_NAME create database `db1 `; ---error 1166 +--error ER_WRONG_COLUMN_NAME create table t1(`a ` int); # @@ -274,11 +268,11 @@ create table t1(`a ` int); # "Parser permits multiple commas without syntax error" # ---error 1064 +--error ER_PARSE_ERROR create table t1 (a int,); ---error 1064 +--error ER_PARSE_ERROR create table t1 (a int,,b int); ---error 1064 +--error ER_PARSE_ERROR create table t1 (,b int); # @@ -320,13 +314,13 @@ create table t2 like t3; show create table t2; select * from t2; create table t3 like t1; ---error 1050 +--error ER_TABLE_EXISTS_ERROR create table t3 like mysqltest.t3; ---error 1049 +--error ER_BAD_DB_ERROR create table non_existing_database.t1 like t1; --error ER_NO_SUCH_TABLE create table t3 like non_existing_table; ---error 1050 +--error ER_TABLE_EXISTS_ERROR create temporary table t3 like t1; drop table t1, t2, t3; drop table t3; @@ -360,7 +354,7 @@ SELECT @@storage_engine; CREATE TABLE t1 (a int not null); show create table t1; drop table t1; ---error 1286 +--error ER_UNKNOWN_STORAGE_ENGINE SET SESSION storage_engine="gemini"; SELECT @@storage_engine; CREATE TABLE t1 (a int not null); @@ -464,9 +458,9 @@ use test; # Test for Bug 856 'Naming a key "Primary" causes trouble' # ---error 1280 +--error ER_WRONG_NAME_FOR_INDEX create table t1 (a int, index `primary` (a)); ---error 1280 +--error ER_WRONG_NAME_FOR_INDEX create table t1 (a int, index `PRIMARY` (a)); create table t1 (`primary` int, index(`primary`)); @@ -475,9 +469,9 @@ create table t2 (`PRIMARY` int, index(`PRIMARY`)); show create table t2; create table t3 (a int); ---error 1280 +--error ER_WRONG_NAME_FOR_INDEX alter table t3 add index `primary` (a); ---error 1280 +--error ER_WRONG_NAME_FOR_INDEX alter table t3 add index `PRIMARY` (a); create table t4 (`primary` int); @@ -532,11 +526,11 @@ drop table t1; # # Bug#10413: Invalid column name is not rejected # ---error 1103 +--error ER_WRONG_TABLE_NAME create table t1(column.name int); ---error 1103 +--error ER_WRONG_TABLE_NAME create table t1(test.column.name int); ---error 1102 +--error ER_WRONG_DB_NAME create table t1(xyz.t1.name int); create table t1(t1.name int); create table t2(test.t2.name int); @@ -575,7 +569,7 @@ drop table if exists test.t1; create database mysqltest; use mysqltest; create view v1 as select 'foo' from dual; ---error 1347 +--error ER_WRONG_OBJECT create table t1 like v1; drop view v1; drop database mysqltest; @@ -696,7 +690,7 @@ drop table t1, t2; # # Bug #15316 SET value having comma not correctly handled # ---error 1367 +--error ER_ILLEGAL_VALUE_FOR_TYPE create table t1(a set("a,b","c,d") not null); # End of 4.1 tests @@ -894,9 +888,9 
@@ INSERT IGNORE INTO t1 (b) VALUES (5); CREATE TABLE IF NOT EXISTS t2 (a INTEGER NOT NULL AUTO_INCREMENT PRIMARY KEY) SELECT a FROM t1; ---error 1062 +--error ER_DUP_ENTRY INSERT INTO t2 SELECT a FROM t1; ---error 1062 +--error ER_DUP_ENTRY INSERT INTO t2 SELECT a FROM t1; DROP TABLE t1, t2; @@ -956,24 +950,24 @@ drop table t1,t2; # Test incorrect database names # ---error 1102 +--error ER_WRONG_DB_NAME CREATE DATABASE aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa; ---error 1102 +--error ER_WRONG_DB_NAME DROP DATABASE aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa; # TODO: enable these tests when RENAME DATABASE is implemented. -# --error 1049 +# --error ER_BAD_DB_ERROR # RENAME DATABASE aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa TO a; -# --error 1102 +# --error ER_WRONG_DB_NAME # RENAME DATABASE mysqltest TO aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa; # create database mysqltest; -# --error 1102 +# --error ER_WRONG_DB_NAME # RENAME DATABASE mysqltest TO aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa; # drop database mysqltest; ---error 1102 +--error ER_WRONG_DB_NAME USE aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa; ---error 1102 +--error ER_WRONG_DB_NAME SHOW CREATE DATABASE aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa; # @@ -1025,11 +1019,11 @@ before insert on имя_таблицы_в_кодировке_утф8_длино select TRIGGER_NAME from information_schema.triggers where trigger_schema='test'; drop trigger имя_триггера_в_кодировке_утф8_длиной_больше_чем_49; ---error 1059 +--error ER_TOO_LONG_IDENT create trigger очень_очень_очень_очень_очень_очень_очень_очень_длинная_строка_66 before insert on имя_таблицы_в_кодировке_утф8_длиной_больше_чем_48 for each row set @a:=1; ---error 1059 +--error ER_TOO_LONG_IDENT drop trigger очень_очень_очень_очень_очень_очень_очень_очень_длинная_строка_66; create procedure имя_процедуры_в_кодировке_утф8_длиной_больше_чем_50() @@ -1038,7 +1032,7 @@ end; select ROUTINE_NAME from information_schema.routines where routine_schema='test'; drop procedure имя_процедуры_в_кодировке_утф8_длиной_больше_чем_50; ---error 1059 +--error ER_TOO_LONG_IDENT create procedure очень_очень_очень_очень_очень_очень_очень_очень_длинная_строка_66() begin end; @@ -1049,7 +1043,7 @@ return 0; select ROUTINE_NAME from information_schema.routines where routine_schema='test'; drop function имя_функции_в_кодировке_утф8_длиной_больше_чем_49; ---error 1059 +--error ER_TOO_LONG_IDENT create function очень_очень_очень_очень_очень_очень_очень_очень_длинная_строка_66() returns int return 0; @@ -1917,16 +1911,22 @@ alter table t1 add key xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx0064 (f64) comment 
'yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy'; drop table t1; ---echo End of 5.5 tests +--echo # +--echo # End of 5.5 tests +--echo # -# -# MDEV-4880 Attempt to create a table without columns produces ER_ILLEGAL_HA instead of ER_TABLE_MUST_HAVE_COLUMNS -# +--echo # +--echo # MDEV-4880 Attempt to create a table without columns produces ER_ILLEGAL_HA instead of ER_TABLE_MUST_HAVE_COLUMNS +--echo # --error ER_TABLE_MUST_HAVE_COLUMNS create table t1; -# -# MDEV-11231 Server crashes in check_duplicate_key on CREATE TABLE ... SELECT -# +--echo # +--echo # MDEV-11231 Server crashes in check_duplicate_key on CREATE TABLE ... SELECT +--echo # create table t1 (i int, j int, key(i), key(i)) as select 1 as i, 2 as j; drop table t1; + +--echo # +--echo # End of 10.0 tests +--echo # diff --git a/sql/sql_class.h b/sql/sql_class.h index b68e3553a2d..fe920270542 100644 --- a/sql/sql_class.h +++ b/sql/sql_class.h @@ -5997,6 +5997,38 @@ class Sql_mode_save sql_mode_t old_mode; // SQL mode saved at construction time. 
}; +class Abort_on_warning_instant_set +{ + THD *m_thd; + bool m_save_abort_on_warning; +public: + Abort_on_warning_instant_set(THD *thd, bool temporary_value) + :m_thd(thd), m_save_abort_on_warning(thd->abort_on_warning) + { + thd->abort_on_warning= temporary_value; + } + ~Abort_on_warning_instant_set() + { + m_thd->abort_on_warning= m_save_abort_on_warning; + } +}; + +class Check_level_instant_set +{ + THD *m_thd; + enum_check_fields m_check_level; +public: + Check_level_instant_set(THD *thd, enum_check_fields temporary_value) + :m_thd(thd), m_check_level(thd->count_cuted_fields) + { + thd->count_cuted_fields= temporary_value; + } + ~Check_level_instant_set() + { + m_thd->count_cuted_fields= m_check_level; + } +}; + class Switch_to_definer_security_ctx { public: diff --git a/sql/unireg.cc b/sql/unireg.cc index 92949931f77..9c22527581a 100644 --- a/sql/unireg.cc +++ b/sql/unireg.cc @@ -944,7 +944,7 @@ static bool make_empty_rec(THD *thd, uchar *buff, uint table_options, TABLE table; TABLE_SHARE share; Create_field *field; - enum_check_fields old_count_cuted_fields= thd->count_cuted_fields; + Check_level_instant_set old_count_cuted_fields(thd, CHECK_FIELD_WARN); DBUG_ENTER("make_empty_rec"); /* We need a table to generate columns for default values */ @@ -963,7 +963,6 @@ static bool make_empty_rec(THD *thd, uchar *buff, uint table_options, null_pos= buff; List_iterator it(create_fields); - thd->count_cuted_fields= CHECK_FIELD_WARN; // To find wrong default values while ((field=it++)) { /* regfield don't have to be deleted as it's allocated on THD::mem_root */ @@ -1039,6 +1038,5 @@ static bool make_empty_rec(THD *thd, uchar *buff, uint table_options, *(null_pos + null_count / 8)|= ~(((uchar) 1 << (null_count & 7)) - 1); err: - thd->count_cuted_fields= old_count_cuted_fields; DBUG_RETURN(error); } /* make_empty_rec */ From 0d8bd7cc3ac9b71450f47700320dfd3d67347a88 Mon Sep 17 00:00:00 2001 From: Sergei Golubchik Date: Sun, 10 Jan 2021 00:57:02 +0100 Subject: [PATCH 059/150] MDEV-18428 Memory: If transactional=0 is specified in CREATE TABLE, it is not possible to ALTER TABLE * be strict in CREATE TABLE, just like in ALTER TABLE, because CREATE TABLE, just like ALTER TABLE, can be rolled back for any engine * but don't auto-convert warnings into errors for engine warnings (handler::create) - this matches ALTER TABLE behavior * and not when creating a default record, these errors are handled specially (and replaced with ER_INVALID_DEFAULT) * always issue a Note when a non-unique key is truncated, because it's not a Warning that can be converted to an Error. Before this commit it was a Note for blobs and a Warning for all other data types. 
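For illustration only, a sketch of the resulting behaviour (the statements and messages below are taken from the updated test results in this patch, assuming the default strict sql_mode):

  -- CREATE TABLE now aborts on warnings that strict mode can escalate,
  -- the same way ALTER TABLE already did:
  CREATE TABLE t1 (c INT(10) UNSIGNED) ENGINE=MEMORY TRANSACTIONAL=0;
  -- ERROR HY000: Table storage engine 'MEMORY' does not support the create option 'TRANSACTIONAL=0'

  -- Truncation of a non-unique key is reported as a Note, never a Warning,
  -- so it cannot be escalated to an error and the table is still created:
  CREATE TABLE t2 (v VARCHAR(65530), KEY(v)) ENGINE=MyISAM;
  -- Note 1071 Specified key was too long; max key length is 1000 bytes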
--- mysql-test/r/create.result | 8 ++++++++ mysql-test/r/ctype_utf32.result | 2 +- mysql-test/r/ctype_utf8mb4.result | 6 +++--- mysql-test/r/ctype_utf8mb4_innodb.result | 4 ++-- mysql-test/r/ctype_utf8mb4_myisam.result | 4 ++-- mysql-test/r/mix2_myisam.result | 2 +- mysql-test/r/myisam.result | 15 ++++++++------- mysql-test/r/partition_innodb.result | 2 +- mysql-test/r/table_elim.result | 2 +- .../suite/innodb/r/file_format_defaults.result | 2 +- mysql-test/suite/innodb/r/innodb.result | 6 +++--- .../suite/innodb_zip/r/index_large_prefix.result | 6 +++--- .../innodb_zip/r/prefix_index_liftedlimit.result | 2 +- mysql-test/suite/maria/maria-ucs2.result | 4 ++-- mysql-test/suite/maria/maria.result | 14 +++++++------- mysql-test/suite/maria/maria3.result | 2 +- mysql-test/suite/maria/mrr.result | 4 ++-- mysql-test/suite/rpl/r/rpl_row_utf32.result | 4 ++-- mysql-test/t/create.test | 10 ++++++++++ mysql-test/t/myisam.test | 1 + sql/handler.cc | 1 + sql/sql_table.cc | 10 +++++++--- sql/unireg.cc | 1 + 23 files changed, 69 insertions(+), 43 deletions(-) diff --git a/mysql-test/r/create.result b/mysql-test/r/create.result index d0755931bf7..edbceba7ee5 100644 --- a/mysql-test/r/create.result +++ b/mysql-test/r/create.result @@ -2081,3 +2081,11 @@ drop table t1; # # End of 10.0 tests # +# +# MDEV-18428 Memory: If transactional=0 is specified in CREATE TABLE, it is not possible to ALTER TABLE +# +create table t1 (c int(10) unsigned) engine=memory transactional=0; +ERROR HY000: Table storage engine 'MEMORY' does not support the create option 'TRANSACTIONAL=0' +# +# End of 10.2 tests +# diff --git a/mysql-test/r/ctype_utf32.result b/mysql-test/r/ctype_utf32.result index 7598474e493..584ca12f8c3 100644 --- a/mysql-test/r/ctype_utf32.result +++ b/mysql-test/r/ctype_utf32.result @@ -1306,7 +1306,7 @@ create table t1 (a varchar(334) character set utf32 primary key); ERROR 42000: Specified key was too long; max key length is 1000 bytes create table t1 (a varchar(333) character set utf32, key(a)); Warnings: -Warning 1071 Specified key was too long; max key length is 1000 bytes +Note 1071 Specified key was too long; max key length is 1000 bytes insert into t1 values (repeat('a',333)), (repeat('b',333)); flush tables; check table t1; diff --git a/mysql-test/r/ctype_utf8mb4.result b/mysql-test/r/ctype_utf8mb4.result index bdcc07d590e..2762873b9c7 100644 --- a/mysql-test/r/ctype_utf8mb4.result +++ b/mysql-test/r/ctype_utf8mb4.result @@ -1478,7 +1478,7 @@ a varchar(255) NOT NULL default '', KEY a (a) ) ENGINE=MyISAM DEFAULT CHARSET=utf8mb4 COLLATE utf8mb4_general_ci; Warnings: -Warning 1071 Specified key was too long; max key length is 1000 bytes +Note 1071 Specified key was too long; max key length is 1000 bytes insert into t1 values (_utf8mb4 0xe880bd); insert into t1 values (_utf8mb4 0x5b); select hex(a) from t1; @@ -1526,7 +1526,7 @@ Warnings: Note 1051 Unknown table 'test.t1' CREATE TABLE t1(a VARCHAR(255), KEY(a)) ENGINE=MyISAM DEFAULT CHARSET=utf8mb4; Warnings: -Warning 1071 Specified key was too long; max key length is 1000 bytes +Note 1071 Specified key was too long; max key length is 1000 bytes INSERT INTO t1 VALUES('uuABCDEFGHIGKLMNOPRSTUVWXYZ̈bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb'); INSERT INTO t1 VALUES('uu'); check table t1; @@ -2726,7 +2726,7 @@ DEFAULT CHARACTER SET utf8, MODIFY subject varchar(255) CHARACTER 
SET utf8mb4 COLLATE utf8mb4_unicode_ci, MODIFY p varchar(255) CHARACTER SET utf8; Warnings: -Warning 1071 Specified key was too long; max key length is 1000 bytes +Note 1071 Specified key was too long; max key length is 1000 bytes SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( diff --git a/mysql-test/r/ctype_utf8mb4_innodb.result b/mysql-test/r/ctype_utf8mb4_innodb.result index c8fe1233b55..e9e54cb86fc 100644 --- a/mysql-test/r/ctype_utf8mb4_innodb.result +++ b/mysql-test/r/ctype_utf8mb4_innodb.result @@ -1438,7 +1438,7 @@ a varchar(255) NOT NULL default '', KEY a (a) ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE utf8mb4_general_ci; Warnings: -Warning 1071 Specified key was too long; max key length is 767 bytes +Note 1071 Specified key was too long; max key length is 767 bytes insert into t1 values (_utf8mb4 0xe880bd); insert into t1 values (_utf8mb4 0x5b); select hex(a) from t1; @@ -1486,7 +1486,7 @@ Warnings: Note 1051 Unknown table 'test.t1' CREATE TABLE t1(a VARCHAR(255), KEY(a)) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4; Warnings: -Warning 1071 Specified key was too long; max key length is 767 bytes +Note 1071 Specified key was too long; max key length is 767 bytes INSERT INTO t1 VALUES('uuABCDEFGHIGKLMNOPRSTUVWXYZ̈bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb'); INSERT INTO t1 VALUES('uu'); check table t1; diff --git a/mysql-test/r/ctype_utf8mb4_myisam.result b/mysql-test/r/ctype_utf8mb4_myisam.result index c975d805dfa..408b4f9c68b 100644 --- a/mysql-test/r/ctype_utf8mb4_myisam.result +++ b/mysql-test/r/ctype_utf8mb4_myisam.result @@ -1438,7 +1438,7 @@ a varchar(255) NOT NULL default '', KEY a (a) ) ENGINE=MyISAM DEFAULT CHARSET=utf8mb4 COLLATE utf8mb4_general_ci; Warnings: -Warning 1071 Specified key was too long; max key length is 1000 bytes +Note 1071 Specified key was too long; max key length is 1000 bytes insert into t1 values (_utf8mb4 0xe880bd); insert into t1 values (_utf8mb4 0x5b); select hex(a) from t1; @@ -1486,7 +1486,7 @@ Warnings: Note 1051 Unknown table 'test.t1' CREATE TABLE t1(a VARCHAR(255), KEY(a)) ENGINE=MyISAM DEFAULT CHARSET=utf8mb4; Warnings: -Warning 1071 Specified key was too long; max key length is 1000 bytes +Note 1071 Specified key was too long; max key length is 1000 bytes INSERT INTO t1 VALUES('uuABCDEFGHIGKLMNOPRSTUVWXYZ̈bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb'); INSERT INTO t1 VALUES('uu'); check table t1; diff --git a/mysql-test/r/mix2_myisam.result b/mysql-test/r/mix2_myisam.result index 34764466d2a..dbc992ece23 100644 --- a/mysql-test/r/mix2_myisam.result +++ b/mysql-test/r/mix2_myisam.result @@ -1989,7 +1989,7 @@ a b drop table t1; create table t1 (v varchar(65530), key(v)); Warnings: -Warning 1071 Specified key was too long; max key length is 1000 bytes +Note 1071 Specified key was too long; max key length is 1000 bytes drop table t1; SET STATEMENT sql_mode = 'NO_ENGINE_SUBSTITUTION' FOR create table t1 (v varchar(65536)); diff --git a/mysql-test/r/myisam.result b/mysql-test/r/myisam.result index dc99dc06b6d..1a0fb5f0c04 100644 --- a/mysql-test/r/myisam.result +++ b/mysql-test/r/myisam.result @@ -1695,7 +1695,7 @@ a b drop table t1; create table t1 (v varchar(65530), 
key(v)); Warnings: -Warning 1071 Specified key was too long; max key length is 1000 bytes +Note 1071 Specified key was too long; max key length is 1000 bytes drop table if exists t1; set statement sql_mode = 'NO_ENGINE_SUBSTITUTION' for create table t1 (v varchar(65536)); @@ -1968,7 +1968,7 @@ t1 CREATE TABLE `t1` ( drop table t1; create table t1 (a varchar(2048), key `a` (a)); Warnings: -Warning 1071 Specified key was too long; max key length is 1000 bytes +Note 1071 Specified key was too long; max key length is 1000 bytes show create table t1; Table Create Table t1 CREATE TABLE `t1` ( @@ -1978,7 +1978,7 @@ t1 CREATE TABLE `t1` ( drop table t1; create table t1 (a varchar(2048), key `a` (a) key_block_size=1024); Warnings: -Warning 1071 Specified key was too long; max key length is 1000 bytes +Note 1071 Specified key was too long; max key length is 1000 bytes show create table t1; Table Create Table t1 CREATE TABLE `t1` ( @@ -1988,7 +1988,7 @@ t1 CREATE TABLE `t1` ( drop table t1; create table t1 (a int not null, b varchar(2048), key (a), key(b)) key_block_size=1024; Warnings: -Warning 1071 Specified key was too long; max key length is 1000 bytes +Note 1071 Specified key was too long; max key length is 1000 bytes show create table t1; Table Create Table t1 CREATE TABLE `t1` ( @@ -2034,7 +2034,7 @@ t1 CREATE TABLE `t1` ( drop table t1; create table t1 (a int not null, b varchar(2048), key (a), key(b)) key_block_size=8192; Warnings: -Warning 1071 Specified key was too long; max key length is 1000 bytes +Note 1071 Specified key was too long; max key length is 1000 bytes show create table t1; Table Create Table t1 CREATE TABLE `t1` ( @@ -2046,7 +2046,7 @@ t1 CREATE TABLE `t1` ( drop table t1; create table t1 (a int not null, b varchar(2048), key (a) key_block_size=1024, key(b)) key_block_size=8192; Warnings: -Warning 1071 Specified key was too long; max key length is 1000 bytes +Note 1071 Specified key was too long; max key length is 1000 bytes show create table t1; Table Create Table t1 CREATE TABLE `t1` ( @@ -2076,7 +2076,7 @@ t1 CREATE TABLE `t1` ( drop table t1; create table t1 (a varchar(2048), key `a` (a) key_block_size=1000000000000000000); Warnings: -Warning 1071 Specified key was too long; max key length is 1000 bytes +Note 1071 Specified key was too long; max key length is 1000 bytes show create table t1; Table Create Table t1 CREATE TABLE `t1` ( @@ -2322,6 +2322,7 @@ Key Start Len Index Type 1 2 30 multip. varchar 2 33 30 multip. 
char NULL DROP TABLE t1; +set statement sql_mode='' for create table t1 (n int not null, c char(1)) transactional=1; Warnings: Warning 1478 Table storage engine 'MyISAM' does not support the create option 'TRANSACTIONAL=1' diff --git a/mysql-test/r/partition_innodb.result b/mysql-test/r/partition_innodb.result index 6770b64552f..a4405882189 100644 --- a/mysql-test/r/partition_innodb.result +++ b/mysql-test/r/partition_innodb.result @@ -853,7 +853,7 @@ PARTITION p0 VALUES IN (1,3,9,null), PARTITION p1 VALUES IN (2,4,0) ); Warnings: -Warning 1071 Specified key was too long; max key length is 767 bytes +Note 1071 Specified key was too long; max key length is 767 bytes INSERT INTO t2 VALUES ('k','s',3,'b','j'),('a','b',NULL,'v','j'),('c','m',9,'t',NULL), ('b','l',9,'b',NULL),('i','y',3,'o','w'),('c','m',NULL,'a','m'), diff --git a/mysql-test/r/table_elim.result b/mysql-test/r/table_elim.result index 764a1b2780e..8e4210c06fb 100644 --- a/mysql-test/r/table_elim.result +++ b/mysql-test/r/table_elim.result @@ -544,7 +544,7 @@ drop table t0,t1,t2,t3,t4,t5,t6; CREATE TABLE t1 (f1 int(11), PRIMARY KEY (f1)) ; CREATE TABLE t2 (f4 varchar(1024), KEY (f4)) ; Warnings: -Warning 1071 Specified key was too long; max key length is 1000 bytes +Note 1071 Specified key was too long; max key length is 1000 bytes INSERT IGNORE INTO t2 VALUES ('xcddwntkbxyorzdv'), ('cnxxcddwntkbxyor'),('r'),('r'), ('did'),('I'),('when'), ('hczkfqjeggivdvac'),('e'),('okay'),('up'); diff --git a/mysql-test/suite/innodb/r/file_format_defaults.result b/mysql-test/suite/innodb/r/file_format_defaults.result index 5ea34ceb7b5..2be4e375eb2 100644 --- a/mysql-test/suite/innodb/r/file_format_defaults.result +++ b/mysql-test/suite/innodb/r/file_format_defaults.result @@ -12,7 +12,7 @@ Warning 131 Using innodb_large_prefix is deprecated and the parameter may be rem SET SQL_MODE=strict_all_tables; CREATE TABLE tab0 (c1 VARCHAR(65530), KEY(c1(3073))) ENGINE=InnoDB ROW_FORMAT=COMPRESSED; Warnings: -Warning 1071 Specified key was too long; max key length is 3072 bytes +Note 1071 Specified key was too long; max key length is 3072 bytes SHOW CREATE TABLE tab0; Table Create Table tab0 CREATE TABLE `tab0` ( diff --git a/mysql-test/suite/innodb/r/innodb.result b/mysql-test/suite/innodb/r/innodb.result index 921f9880d47..ace226781c0 100644 --- a/mysql-test/suite/innodb/r/innodb.result +++ b/mysql-test/suite/innodb/r/innodb.result @@ -2302,7 +2302,7 @@ Warnings: Warning 131 Using innodb_large_prefix is deprecated and the parameter may be removed in future releases. See https://mariadb.com/kb/en/library/xtradbinnodb-file-format/ create table t1 (v varchar(65530), key(v)); Warnings: -Warning 1071 Specified key was too long; max key length is 767 bytes +Note 1071 Specified key was too long; max key length is 767 bytes SET GLOBAL innodb_large_prefix=default; Warnings: Warning 131 Using innodb_large_prefix is deprecated and the parameter may be removed in future releases. 
See https://mariadb.com/kb/en/library/xtradbinnodb-file-format/ @@ -2477,11 +2477,11 @@ Warning 131 Using innodb_large_prefix is deprecated and the parameter may be rem create table t1 (col1 varchar(768), index(col1)) character set = latin1 engine = innodb; Warnings: -Warning 1071 Specified key was too long; max key length is 767 bytes +Note 1071 Specified key was too long; max key length is 767 bytes create table t2 (col1 varbinary(768), index(col1)) character set = latin1 engine = innodb; Warnings: -Warning 1071 Specified key was too long; max key length is 767 bytes +Note 1071 Specified key was too long; max key length is 767 bytes create table t3 (col1 text, index(col1(768))) character set = latin1 engine = innodb; Warnings: diff --git a/mysql-test/suite/innodb_zip/r/index_large_prefix.result b/mysql-test/suite/innodb_zip/r/index_large_prefix.result index fe03586546a..d125df09ae3 100644 --- a/mysql-test/suite/innodb_zip/r/index_large_prefix.result +++ b/mysql-test/suite/innodb_zip/r/index_large_prefix.result @@ -407,10 +407,10 @@ ROW_FORMAT=DYNAMIC; SET sql_mode=''; create index idx1 on worklog5743(a2); Warnings: -Warning 1071 Specified key was too long; max key length is 3072 bytes +Note 1071 Specified key was too long; max key length is 3072 bytes create index idx2 on worklog5743(a3); Warnings: -Warning 1071 Specified key was too long; max key length is 3072 bytes +Note 1071 Specified key was too long; max key length is 3072 bytes create index idx3 on worklog5743(a4); show warnings; Level Code Message @@ -419,7 +419,7 @@ create index idx4 on worklog5743(a1, a2); ERROR 42000: Specified key was too long; max key length is 3072 bytes show warnings; Level Code Message -Error 1071 Specified key was too long; max key length is 3072 bytes +Note 1071 Specified key was too long; max key length is 3072 bytes Error 1071 Specified key was too long; max key length is 3072 bytes create index idx5 on worklog5743(a1, a5); ERROR 42000: Specified key was too long; max key length is 3072 bytes diff --git a/mysql-test/suite/innodb_zip/r/prefix_index_liftedlimit.result b/mysql-test/suite/innodb_zip/r/prefix_index_liftedlimit.result index 6cb0b09dee0..a5da38e24db 100644 --- a/mysql-test/suite/innodb_zip/r/prefix_index_liftedlimit.result +++ b/mysql-test/suite/innodb_zip/r/prefix_index_liftedlimit.result @@ -1221,7 +1221,7 @@ DROP INDEX prefix_idx ON worklog5743; SET sql_mode = 'NO_ENGINE_SUBSTITUTION'; CREATE INDEX prefix_idx ON worklog5743(col_1_varbinary (4000)); Warnings: -Warning 1071 Specified key was too long; max key length is 3072 bytes +Note 1071 Specified key was too long; max key length is 3072 bytes SET sql_mode = default; INSERT INTO worklog5743 VALUES(REPEAT("a", 4000),REPEAT("o", 4000)); SELECT col_1_varbinary = REPEAT("a", 4000) FROM worklog5743; diff --git a/mysql-test/suite/maria/maria-ucs2.result b/mysql-test/suite/maria/maria-ucs2.result index 7499b37279b..73c45759373 100644 --- a/mysql-test/suite/maria/maria-ucs2.result +++ b/mysql-test/suite/maria/maria-ucs2.result @@ -17,7 +17,7 @@ test.t1 check status OK SET STATEMENT sql_mode = 'NO_ENGINE_SUBSTITUTION' FOR ALTER TABLE t1 MODIFY a VARCHAR(800) CHARSET `ucs2`; Warnings: -Warning 1071 Specified key was too long; max key length is 1000 bytes +Note 1071 Specified key was too long; max key length is 1000 bytes CHECK TABLE t1; Table Op Msg_type Msg_text test.t1 check status OK @@ -30,7 +30,7 @@ t1 CREATE TABLE `t1` ( DROP TABLE t1; CREATE TABLE t1 (a VARCHAR(800),KEY(a)) ENGINE=Aria CHARACTER SET ucs2; Warnings: -Warning 1071 Specified 
key was too long; max key length is 1000 bytes +Note 1071 Specified key was too long; max key length is 1000 bytes INSERT INTO t1 VALUES (REPEAT('abc ',200)); CHECK TABLE t1; Table Op Msg_type Msg_text diff --git a/mysql-test/suite/maria/maria.result b/mysql-test/suite/maria/maria.result index 35ec098f7a8..1e85ee8b50c 100644 --- a/mysql-test/suite/maria/maria.result +++ b/mysql-test/suite/maria/maria.result @@ -1585,7 +1585,7 @@ a b drop table t1; create table t1 (v varchar(65530), key(v)); Warnings: -Warning 1071 Specified key was too long; max key length is 1000 bytes +Note 1071 Specified key was too long; max key length is 1000 bytes drop table if exists t1; set statement sql_mode = 'NO_ENGINE_SUBSTITUTION' for create table t1 (v varchar(65536)); @@ -1855,7 +1855,7 @@ t1 CREATE TABLE `t1` ( drop table t1; create table t1 (a varchar(2048), key `a` (a)); Warnings: -Warning 1071 Specified key was too long; max key length is 1000 bytes +Note 1071 Specified key was too long; max key length is 1000 bytes show create table t1; Table Create Table t1 CREATE TABLE `t1` ( @@ -1865,7 +1865,7 @@ t1 CREATE TABLE `t1` ( drop table t1; create table t1 (a varchar(2048), key `a` (a) key_block_size=1024); Warnings: -Warning 1071 Specified key was too long; max key length is 1000 bytes +Note 1071 Specified key was too long; max key length is 1000 bytes show create table t1; Table Create Table t1 CREATE TABLE `t1` ( @@ -1875,7 +1875,7 @@ t1 CREATE TABLE `t1` ( drop table t1; create table t1 (a int not null, b varchar(2048), key (a), key(b)) key_block_size=1024; Warnings: -Warning 1071 Specified key was too long; max key length is 1000 bytes +Note 1071 Specified key was too long; max key length is 1000 bytes show create table t1; Table Create Table t1 CREATE TABLE `t1` ( @@ -1921,7 +1921,7 @@ t1 CREATE TABLE `t1` ( drop table t1; create table t1 (a int not null, b varchar(2048), key (a), key(b)) key_block_size=8192; Warnings: -Warning 1071 Specified key was too long; max key length is 1000 bytes +Note 1071 Specified key was too long; max key length is 1000 bytes show create table t1; Table Create Table t1 CREATE TABLE `t1` ( @@ -1933,7 +1933,7 @@ t1 CREATE TABLE `t1` ( drop table t1; create table t1 (a int not null, b varchar(2048), key (a) key_block_size=1024, key(b)) key_block_size=8192; Warnings: -Warning 1071 Specified key was too long; max key length is 1000 bytes +Note 1071 Specified key was too long; max key length is 1000 bytes show create table t1; Table Create Table t1 CREATE TABLE `t1` ( @@ -1963,7 +1963,7 @@ t1 CREATE TABLE `t1` ( drop table t1; create table t1 (a varchar(2048), key `a` (a) key_block_size=1000000000000000000); Warnings: -Warning 1071 Specified key was too long; max key length is 1000 bytes +Note 1071 Specified key was too long; max key length is 1000 bytes show create table t1; Table Create Table t1 CREATE TABLE `t1` ( diff --git a/mysql-test/suite/maria/maria3.result b/mysql-test/suite/maria/maria3.result index e923039a07f..6005bedf7bb 100644 --- a/mysql-test/suite/maria/maria3.result +++ b/mysql-test/suite/maria/maria3.result @@ -17,7 +17,7 @@ t1 CREATE TABLE `t1` ( drop table t1; create table t1 (a varchar(2048), key `a` (a) key_block_size=1000000000000000000); Warnings: -Warning 1071 Specified key was too long; max key length is 1000 bytes +Note 1071 Specified key was too long; max key length is 1000 bytes show create table t1; Table Create Table t1 CREATE TABLE `t1` ( diff --git a/mysql-test/suite/maria/mrr.result b/mysql-test/suite/maria/mrr.result index 
06be64566e5..5f9fcb4f937 100644 --- a/mysql-test/suite/maria/mrr.result +++ b/mysql-test/suite/maria/mrr.result @@ -393,7 +393,7 @@ PRIMARY KEY (pk), KEY col_varchar_1024_latin1_key (col_varchar_1024_latin1_key) ) ENGINE=Aria; Warnings: -Warning 1071 Specified key was too long; max key length is 1000 bytes +Note 1071 Specified key was too long; max key length is 1000 bytes INSERT INTO t1 VALUES (1,'z'), (2,'abcdefjhjkl'), (3,'in'), (4,'abcdefjhjkl'), (6,'abcdefjhjkl'), (11,'zx'), (12,'abcdefjhjm'), (13,'jn'), (14,'abcdefjhjp'), (16,'abcdefjhjr'); @@ -430,7 +430,7 @@ f5 varchar(1024) COLLATE latin1_bin, KEY (f5) ) ENGINE=Aria TRANSACTIONAL=0 ; Warnings: -Warning 1071 Specified key was too long; max key length is 1000 bytes +Note 1071 Specified key was too long; max key length is 1000 bytes # Fill the table with some data SELECT alias2.* , alias1.f2 FROM diff --git a/mysql-test/suite/rpl/r/rpl_row_utf32.result b/mysql-test/suite/rpl/r/rpl_row_utf32.result index af6e709860e..6d177b7cda0 100644 --- a/mysql-test/suite/rpl/r/rpl_row_utf32.result +++ b/mysql-test/suite/rpl/r/rpl_row_utf32.result @@ -3,7 +3,7 @@ include/master-slave.inc SET SQL_LOG_BIN=0; CREATE TABLE t1 (c1 char(255) DEFAULT NULL, KEY c1 (c1)) DEFAULT CHARSET=utf32; Warnings: -Warning 1071 Specified key was too long; max key length is 1000 bytes +Note 1071 Specified key was too long; max key length is 1000 bytes SET SQL_LOG_BIN=1; connection slave; SET @saved_slave_type_conversions= @@global.slave_type_conversions; @@ -13,7 +13,7 @@ include/start_slave.inc SET SQL_LOG_BIN=0; CREATE TABLE t1 ( c1 varchar(255) DEFAULT NULL, KEY c1 (c1)) DEFAULT CHARSET=utf32; Warnings: -Warning 1071 Specified key was too long; max key length is 1000 bytes +Note 1071 Specified key was too long; max key length is 1000 bytes SET SQL_LOG_BIN=1; connection master; INSERT INTO t1(c1) VALUES ('insert into t1'); diff --git a/mysql-test/t/create.test b/mysql-test/t/create.test index 03437320ee6..396de147ec2 100644 --- a/mysql-test/t/create.test +++ b/mysql-test/t/create.test @@ -1930,3 +1930,13 @@ drop table t1; --echo # --echo # End of 10.0 tests --echo # + +--echo # +--echo # MDEV-18428 Memory: If transactional=0 is specified in CREATE TABLE, it is not possible to ALTER TABLE +--echo # +--error ER_ILLEGAL_HA_CREATE_OPTION +create table t1 (c int(10) unsigned) engine=memory transactional=0; + +--echo # +--echo # End of 10.2 tests +--echo # diff --git a/mysql-test/t/myisam.test b/mysql-test/t/myisam.test index 217789fccb2..d6efdea982f 100644 --- a/mysql-test/t/myisam.test +++ b/mysql-test/t/myisam.test @@ -1539,6 +1539,7 @@ DROP TABLE t1; # MariaDB: Note that the table will still have 'TRANSACTIONAL=1' attribute. # That's the intended behavior atm. 
# +set statement sql_mode='' for create table t1 (n int not null, c char(1)) transactional=1; show create table t1; drop table t1; diff --git a/sql/handler.cc b/sql/handler.cc index 29b01763e8b..87592beb5d3 100644 --- a/sql/handler.cc +++ b/sql/handler.cc @@ -4847,6 +4847,7 @@ int ha_create_table(THD *thd, const char *path, char name_buff[FN_REFLEN]; const char *name; TABLE_SHARE share; + Abort_on_warning_instant_set old_abort_on_warning(thd, 0); bool temp_table __attribute__((unused)) = create_info->options & (HA_LEX_CREATE_TMP_TABLE | HA_CREATE_TMP_ALTER); diff --git a/sql/sql_table.cc b/sql/sql_table.cc index 4b62ccb7d7c..cb28c6adcec 100644 --- a/sql/sql_table.cc +++ b/sql/sql_table.cc @@ -3983,8 +3983,7 @@ mysql_prepare_create_table(THD *thd, HA_CREATE_INFO *create_info, { /* not a critical problem */ push_warning_printf(thd, Sql_condition::WARN_LEVEL_NOTE, - ER_TOO_LONG_KEY, - ER_THD(thd, ER_TOO_LONG_KEY), + ER_TOO_LONG_KEY, ER_THD(thd, ER_TOO_LONG_KEY), key_part_length); /* Align key length to multibyte char boundary */ key_part_length-= key_part_length % sql_field->charset->mbmaxlen; @@ -4030,7 +4029,7 @@ mysql_prepare_create_table(THD *thd, HA_CREATE_INFO *create_info, if (key->type == Key::MULTIPLE) { /* not a critical problem */ - push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, + push_warning_printf(thd, Sql_condition::WARN_LEVEL_NOTE, ER_TOO_LONG_KEY, ER_THD(thd, ER_TOO_LONG_KEY), key_part_length); /* Align key length to multibyte char boundary */ @@ -5133,6 +5132,9 @@ bool mysql_create_table(THD *thd, TABLE_LIST *create_table, if (!opt_explicit_defaults_for_timestamp) promote_first_timestamp_column(&alter_info->create_list); + /* We can abort create table for any table type */ + thd->abort_on_warning= thd->is_strict_mode(); + if (mysql_create_table_no_lock(thd, db, table_name, create_info, alter_info, &is_trans, create_table_mode) > 0) { @@ -5165,6 +5167,8 @@ bool mysql_create_table(THD *thd, TABLE_LIST *create_table, } err: + thd->abort_on_warning= 0; + /* In RBR we don't need to log CREATE TEMPORARY TABLE */ if (!result && thd->is_current_stmt_binlog_format_row() && create_info->tmp_table()) DBUG_RETURN(result); diff --git a/sql/unireg.cc b/sql/unireg.cc index 9c22527581a..7974255af35 100644 --- a/sql/unireg.cc +++ b/sql/unireg.cc @@ -945,6 +945,7 @@ static bool make_empty_rec(THD *thd, uchar *buff, uint table_options, TABLE_SHARE share; Create_field *field; Check_level_instant_set old_count_cuted_fields(thd, CHECK_FIELD_WARN); + Abort_on_warning_instant_set old_abort_on_warning(thd, 0); DBUG_ENTER("make_empty_rec"); /* We need a table to generate columns for default values */ From ad9a140d9b210758b354a997572ff86e884954fc Mon Sep 17 00:00:00 2001 From: Sergei Golubchik Date: Sun, 10 Jan 2021 20:35:27 +0100 Subject: [PATCH 060/150] MDEV-14884 Failed to enable encryption of temporary files in mariadb 10.3.3 enable the encryption (and abort on failure) after printing --help, not before --- sql/mysqld.cc | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/sql/mysqld.cc b/sql/mysqld.cc index d3e124e3405..8d67bc53164 100644 --- a/sql/mysqld.cc +++ b/sql/mysqld.cc @@ -5439,12 +5439,12 @@ static int init_server_components() } } - if (init_io_cache_encryption()) - unireg_abort(1); - if (opt_abort) unireg_abort(0); + if (init_io_cache_encryption()) + unireg_abort(1); + /* if the errmsg.sys is not loaded, terminate to maintain behaviour */ if (!DEFAULT_ERRMSGS[0][0]) unireg_abort(1); From a216672dab202207a21f9d2ffdc4b00eb958060f Mon Sep 17 00:00:00 2001 
From: Sergei Golubchik Date: Sun, 10 Jan 2021 21:20:51 +0100 Subject: [PATCH 061/150] MDEV-16341 Wrong length for USER columns in performance_schema tables use USERNAME_CHAR_LENGTH and HOSTNAME_LENGTH for perfschema USER and HOST columns --- include/mysql_com.h | 2 +- mysql-test/suite/perfschema/r/schema.result | 22 +++++++++---------- .../suite/perfschema/r/table_schema.result | 22 +++++++++---------- storage/perfschema/table_accounts.cc | 4 ++-- .../table_esgs_by_account_by_event_name.cc | 4 ++-- .../table_esgs_by_host_by_event_name.cc | 2 +- .../table_esgs_by_user_by_event_name.cc | 2 +- .../table_esms_by_account_by_event_name.cc | 4 ++-- .../table_esms_by_host_by_event_name.cc | 2 +- .../table_esms_by_user_by_event_name.cc | 2 +- .../table_ews_by_account_by_event_name.cc | 4 ++-- .../table_ews_by_host_by_event_name.cc | 2 +- .../table_ews_by_user_by_event_name.cc | 2 +- storage/perfschema/table_hosts.cc | 2 +- storage/perfschema/table_setup_actors.cc | 6 ++--- storage/perfschema/table_threads.cc | 4 ++-- storage/perfschema/table_users.cc | 2 +- 17 files changed, 44 insertions(+), 44 deletions(-) diff --git a/include/mysql_com.h b/include/mysql_com.h index a63bcc9d87e..398bf9058df 100644 --- a/include/mysql_com.h +++ b/include/mysql_com.h @@ -26,7 +26,7 @@ #define HOSTNAME_LENGTH 60 #define SYSTEM_CHARSET_MBMAXLEN 3 #define NAME_CHAR_LEN 64U /* Field/table name length */ -#define USERNAME_CHAR_LENGTH 128U +#define USERNAME_CHAR_LENGTH 128 #define NAME_LEN (NAME_CHAR_LEN*SYSTEM_CHARSET_MBMAXLEN) #define USERNAME_LENGTH (USERNAME_CHAR_LENGTH*SYSTEM_CHARSET_MBMAXLEN) #define DEFINER_CHAR_LENGTH (USERNAME_CHAR_LENGTH + HOSTNAME_LENGTH + 1) diff --git a/mysql-test/suite/perfschema/r/schema.result b/mysql-test/suite/perfschema/r/schema.result index 1f331394df6..8ce4cad4f4b 100644 --- a/mysql-test/suite/perfschema/r/schema.result +++ b/mysql-test/suite/perfschema/r/schema.result @@ -62,7 +62,7 @@ users show create table accounts; Table Create Table accounts CREATE TABLE `accounts` ( - `USER` char(16) CHARACTER SET utf8 COLLATE utf8_bin DEFAULT NULL, + `USER` char(128) CHARACTER SET utf8 COLLATE utf8_bin DEFAULT NULL, `HOST` char(60) CHARACTER SET utf8 COLLATE utf8_bin DEFAULT NULL, `CURRENT_CONNECTIONS` bigint(20) NOT NULL, `TOTAL_CONNECTIONS` bigint(20) NOT NULL @@ -140,7 +140,7 @@ events_stages_summary_by_thread_by_event_name CREATE TABLE `events_stages_summar show create table events_stages_summary_by_user_by_event_name; Table Create Table events_stages_summary_by_user_by_event_name CREATE TABLE `events_stages_summary_by_user_by_event_name` ( - `USER` char(16) CHARACTER SET utf8 COLLATE utf8_bin DEFAULT NULL, + `USER` char(128) CHARACTER SET utf8 COLLATE utf8_bin DEFAULT NULL, `EVENT_NAME` varchar(128) NOT NULL, `COUNT_STAR` bigint(20) unsigned NOT NULL, `SUM_TIMER_WAIT` bigint(20) unsigned NOT NULL, @@ -151,7 +151,7 @@ events_stages_summary_by_user_by_event_name CREATE TABLE `events_stages_summary_ show create table events_stages_summary_by_account_by_event_name; Table Create Table events_stages_summary_by_account_by_event_name CREATE TABLE `events_stages_summary_by_account_by_event_name` ( - `USER` char(16) CHARACTER SET utf8 COLLATE utf8_bin DEFAULT NULL, + `USER` char(128) CHARACTER SET utf8 COLLATE utf8_bin DEFAULT NULL, `HOST` char(60) CHARACTER SET utf8 COLLATE utf8_bin DEFAULT NULL, `EVENT_NAME` varchar(128) NOT NULL, `COUNT_STAR` bigint(20) unsigned NOT NULL, @@ -398,7 +398,7 @@ events_statements_summary_by_thread_by_event_name CREATE TABLE `events_statement show create table 
events_statements_summary_by_user_by_event_name; Table Create Table events_statements_summary_by_user_by_event_name CREATE TABLE `events_statements_summary_by_user_by_event_name` ( - `USER` char(16) CHARACTER SET utf8 COLLATE utf8_bin DEFAULT NULL, + `USER` char(128) CHARACTER SET utf8 COLLATE utf8_bin DEFAULT NULL, `EVENT_NAME` varchar(128) NOT NULL, `COUNT_STAR` bigint(20) unsigned NOT NULL, `SUM_TIMER_WAIT` bigint(20) unsigned NOT NULL, @@ -428,7 +428,7 @@ events_statements_summary_by_user_by_event_name CREATE TABLE `events_statements_ show create table events_statements_summary_by_account_by_event_name; Table Create Table events_statements_summary_by_account_by_event_name CREATE TABLE `events_statements_summary_by_account_by_event_name` ( - `USER` char(16) CHARACTER SET utf8 COLLATE utf8_bin DEFAULT NULL, + `USER` char(128) CHARACTER SET utf8 COLLATE utf8_bin DEFAULT NULL, `HOST` char(60) CHARACTER SET utf8 COLLATE utf8_bin DEFAULT NULL, `EVENT_NAME` varchar(128) NOT NULL, `COUNT_STAR` bigint(20) unsigned NOT NULL, @@ -590,7 +590,7 @@ events_waits_summary_by_thread_by_event_name CREATE TABLE `events_waits_summary_ show create table events_waits_summary_by_user_by_event_name; Table Create Table events_waits_summary_by_user_by_event_name CREATE TABLE `events_waits_summary_by_user_by_event_name` ( - `USER` char(16) CHARACTER SET utf8 COLLATE utf8_bin DEFAULT NULL, + `USER` char(128) CHARACTER SET utf8 COLLATE utf8_bin DEFAULT NULL, `EVENT_NAME` varchar(128) NOT NULL, `COUNT_STAR` bigint(20) unsigned NOT NULL, `SUM_TIMER_WAIT` bigint(20) unsigned NOT NULL, @@ -601,7 +601,7 @@ events_waits_summary_by_user_by_event_name CREATE TABLE `events_waits_summary_by show create table events_waits_summary_by_account_by_event_name; Table Create Table events_waits_summary_by_account_by_event_name CREATE TABLE `events_waits_summary_by_account_by_event_name` ( - `USER` char(16) CHARACTER SET utf8 COLLATE utf8_bin DEFAULT NULL, + `USER` char(128) CHARACTER SET utf8 COLLATE utf8_bin DEFAULT NULL, `HOST` char(60) CHARACTER SET utf8 COLLATE utf8_bin DEFAULT NULL, `EVENT_NAME` varchar(128) NOT NULL, `COUNT_STAR` bigint(20) unsigned NOT NULL, @@ -762,8 +762,8 @@ show create table setup_actors; Table Create Table setup_actors CREATE TABLE `setup_actors` ( `HOST` char(60) CHARACTER SET utf8 COLLATE utf8_bin NOT NULL DEFAULT '%', - `USER` char(16) CHARACTER SET utf8 COLLATE utf8_bin NOT NULL DEFAULT '%', - `ROLE` char(16) CHARACTER SET utf8 COLLATE utf8_bin NOT NULL DEFAULT '%' + `USER` char(128) CHARACTER SET utf8 COLLATE utf8_bin NOT NULL DEFAULT '%', + `ROLE` char(128) CHARACTER SET utf8 COLLATE utf8_bin NOT NULL DEFAULT '%' ) ENGINE=PERFORMANCE_SCHEMA DEFAULT CHARSET=utf8 show create table setup_consumers; Table Create Table @@ -1028,7 +1028,7 @@ threads CREATE TABLE `threads` ( `NAME` varchar(128) NOT NULL, `TYPE` varchar(10) NOT NULL, `PROCESSLIST_ID` bigint(20) unsigned DEFAULT NULL, - `PROCESSLIST_USER` varchar(16) DEFAULT NULL, + `PROCESSLIST_USER` varchar(128) DEFAULT NULL, `PROCESSLIST_HOST` varchar(60) DEFAULT NULL, `PROCESSLIST_DB` varchar(64) DEFAULT NULL, `PROCESSLIST_COMMAND` varchar(16) DEFAULT NULL, @@ -1042,7 +1042,7 @@ threads CREATE TABLE `threads` ( show create table users; Table Create Table users CREATE TABLE `users` ( - `USER` char(16) CHARACTER SET utf8 COLLATE utf8_bin DEFAULT NULL, + `USER` char(128) CHARACTER SET utf8 COLLATE utf8_bin DEFAULT NULL, `CURRENT_CONNECTIONS` bigint(20) NOT NULL, `TOTAL_CONNECTIONS` bigint(20) NOT NULL ) ENGINE=PERFORMANCE_SCHEMA DEFAULT CHARSET=utf8 diff 
--git a/mysql-test/suite/perfschema/r/table_schema.result b/mysql-test/suite/perfschema/r/table_schema.result index 5c4cf88e9a5..a9beee31f28 100644 --- a/mysql-test/suite/perfschema/r/table_schema.result +++ b/mysql-test/suite/perfschema/r/table_schema.result @@ -1,7 +1,7 @@ select * from information_schema.columns where table_schema="performance_schema" order by table_name, ordinal_position; TABLE_CATALOG TABLE_SCHEMA TABLE_NAME COLUMN_NAME ORDINAL_POSITION COLUMN_DEFAULT IS_NULLABLE DATA_TYPE CHARACTER_MAXIMUM_LENGTH CHARACTER_OCTET_LENGTH NUMERIC_PRECISION NUMERIC_SCALE DATETIME_PRECISION CHARACTER_SET_NAME COLLATION_NAME COLUMN_TYPE COLUMN_KEY EXTRA PRIVILEGES COLUMN_COMMENT IS_GENERATED GENERATION_EXPRESSION -def performance_schema accounts USER 1 NULL YES char 16 48 NULL NULL NULL utf8 utf8_bin char(16) select,insert,update,references NEVER NULL +def performance_schema accounts USER 1 NULL YES char 128 384 NULL NULL NULL utf8 utf8_bin char(128) select,insert,update,references NEVER NULL def performance_schema accounts HOST 2 NULL YES char 60 180 NULL NULL NULL utf8 utf8_bin char(60) select,insert,update,references NEVER NULL def performance_schema accounts CURRENT_CONNECTIONS 3 NULL NO bigint NULL NULL 19 0 NULL NULL NULL bigint(20) select,insert,update,references NEVER NULL def performance_schema accounts TOTAL_CONNECTIONS 4 NULL NO bigint NULL NULL 19 0 NULL NULL NULL bigint(20) select,insert,update,references NEVER NULL @@ -37,7 +37,7 @@ def performance_schema events_stages_history_long TIMER_END 7 NULL YES bigint NU def performance_schema events_stages_history_long TIMER_WAIT 8 NULL YES bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references NEVER NULL def performance_schema events_stages_history_long NESTING_EVENT_ID 9 NULL YES bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references NEVER NULL def performance_schema events_stages_history_long NESTING_EVENT_TYPE 10 NULL YES enum 9 27 NULL NULL NULL utf8 utf8_general_ci enum('STATEMENT','STAGE','WAIT') select,insert,update,references NEVER NULL -def performance_schema events_stages_summary_by_account_by_event_name USER 1 NULL YES char 16 48 NULL NULL NULL utf8 utf8_bin char(16) select,insert,update,references NEVER NULL +def performance_schema events_stages_summary_by_account_by_event_name USER 1 NULL YES char 128 384 NULL NULL NULL utf8 utf8_bin char(128) select,insert,update,references NEVER NULL def performance_schema events_stages_summary_by_account_by_event_name HOST 2 NULL YES char 60 180 NULL NULL NULL utf8 utf8_bin char(60) select,insert,update,references NEVER NULL def performance_schema events_stages_summary_by_account_by_event_name EVENT_NAME 3 NULL NO varchar 128 384 NULL NULL NULL utf8 utf8_general_ci varchar(128) select,insert,update,references NEVER NULL def performance_schema events_stages_summary_by_account_by_event_name COUNT_STAR 4 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references NEVER NULL @@ -59,7 +59,7 @@ def performance_schema events_stages_summary_by_thread_by_event_name SUM_TIMER_W def performance_schema events_stages_summary_by_thread_by_event_name MIN_TIMER_WAIT 5 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references NEVER NULL def performance_schema events_stages_summary_by_thread_by_event_name AVG_TIMER_WAIT 6 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references NEVER NULL def performance_schema 
events_stages_summary_by_thread_by_event_name MAX_TIMER_WAIT 7 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references NEVER NULL -def performance_schema events_stages_summary_by_user_by_event_name USER 1 NULL YES char 16 48 NULL NULL NULL utf8 utf8_bin char(16) select,insert,update,references NEVER NULL +def performance_schema events_stages_summary_by_user_by_event_name USER 1 NULL YES char 128 384 NULL NULL NULL utf8 utf8_bin char(128) select,insert,update,references NEVER NULL def performance_schema events_stages_summary_by_user_by_event_name EVENT_NAME 2 NULL NO varchar 128 384 NULL NULL NULL utf8 utf8_general_ci varchar(128) select,insert,update,references NEVER NULL def performance_schema events_stages_summary_by_user_by_event_name COUNT_STAR 3 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references NEVER NULL def performance_schema events_stages_summary_by_user_by_event_name SUM_TIMER_WAIT 4 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references NEVER NULL @@ -192,7 +192,7 @@ def performance_schema events_statements_history_long NO_INDEX_USED 37 NULL NO b def performance_schema events_statements_history_long NO_GOOD_INDEX_USED 38 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references NEVER NULL def performance_schema events_statements_history_long NESTING_EVENT_ID 39 NULL YES bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references NEVER NULL def performance_schema events_statements_history_long NESTING_EVENT_TYPE 40 NULL YES enum 9 27 NULL NULL NULL utf8 utf8_general_ci enum('STATEMENT','STAGE','WAIT') select,insert,update,references NEVER NULL -def performance_schema events_statements_summary_by_account_by_event_name USER 1 NULL YES char 16 48 NULL NULL NULL utf8 utf8_bin char(16) select,insert,update,references NEVER NULL +def performance_schema events_statements_summary_by_account_by_event_name USER 1 NULL YES char 128 384 NULL NULL NULL utf8 utf8_bin char(128) select,insert,update,references NEVER NULL def performance_schema events_statements_summary_by_account_by_event_name HOST 2 NULL YES char 60 180 NULL NULL NULL utf8 utf8_bin char(60) select,insert,update,references NEVER NULL def performance_schema events_statements_summary_by_account_by_event_name EVENT_NAME 3 NULL NO varchar 128 384 NULL NULL NULL utf8 utf8_general_ci varchar(128) select,insert,update,references NEVER NULL def performance_schema events_statements_summary_by_account_by_event_name COUNT_STAR 4 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references NEVER NULL @@ -300,7 +300,7 @@ def performance_schema events_statements_summary_by_thread_by_event_name SUM_SOR def performance_schema events_statements_summary_by_thread_by_event_name SUM_SORT_SCAN 24 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references NEVER NULL def performance_schema events_statements_summary_by_thread_by_event_name SUM_NO_INDEX_USED 25 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references NEVER NULL def performance_schema events_statements_summary_by_thread_by_event_name SUM_NO_GOOD_INDEX_USED 26 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references NEVER NULL -def performance_schema events_statements_summary_by_user_by_event_name USER 1 NULL YES char 16 48 NULL NULL NULL utf8 
utf8_bin char(16) select,insert,update,references NEVER NULL +def performance_schema events_statements_summary_by_user_by_event_name USER 1 NULL YES char 128 384 NULL NULL NULL utf8 utf8_bin char(128) select,insert,update,references NEVER NULL def performance_schema events_statements_summary_by_user_by_event_name EVENT_NAME 2 NULL NO varchar 128 384 NULL NULL NULL utf8 utf8_general_ci varchar(128) select,insert,update,references NEVER NULL def performance_schema events_statements_summary_by_user_by_event_name COUNT_STAR 3 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references NEVER NULL def performance_schema events_statements_summary_by_user_by_event_name SUM_TIMER_WAIT 4 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references NEVER NULL @@ -408,7 +408,7 @@ def performance_schema events_waits_history_long NESTING_EVENT_TYPE 16 NULL YES def performance_schema events_waits_history_long OPERATION 17 NULL NO varchar 32 96 NULL NULL NULL utf8 utf8_general_ci varchar(32) select,insert,update,references NEVER NULL def performance_schema events_waits_history_long NUMBER_OF_BYTES 18 NULL YES bigint NULL NULL 19 0 NULL NULL NULL bigint(20) select,insert,update,references NEVER NULL def performance_schema events_waits_history_long FLAGS 19 NULL YES int NULL NULL 10 0 NULL NULL NULL int(10) unsigned select,insert,update,references NEVER NULL -def performance_schema events_waits_summary_by_account_by_event_name USER 1 NULL YES char 16 48 NULL NULL NULL utf8 utf8_bin char(16) select,insert,update,references NEVER NULL +def performance_schema events_waits_summary_by_account_by_event_name USER 1 NULL YES char 128 384 NULL NULL NULL utf8 utf8_bin char(128) select,insert,update,references NEVER NULL def performance_schema events_waits_summary_by_account_by_event_name HOST 2 NULL YES char 60 180 NULL NULL NULL utf8 utf8_bin char(60) select,insert,update,references NEVER NULL def performance_schema events_waits_summary_by_account_by_event_name EVENT_NAME 3 NULL NO varchar 128 384 NULL NULL NULL utf8 utf8_general_ci varchar(128) select,insert,update,references NEVER NULL def performance_schema events_waits_summary_by_account_by_event_name COUNT_STAR 4 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references NEVER NULL @@ -437,7 +437,7 @@ def performance_schema events_waits_summary_by_thread_by_event_name SUM_TIMER_WA def performance_schema events_waits_summary_by_thread_by_event_name MIN_TIMER_WAIT 5 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references NEVER NULL def performance_schema events_waits_summary_by_thread_by_event_name AVG_TIMER_WAIT 6 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references NEVER NULL def performance_schema events_waits_summary_by_thread_by_event_name MAX_TIMER_WAIT 7 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references NEVER NULL -def performance_schema events_waits_summary_by_user_by_event_name USER 1 NULL YES char 16 48 NULL NULL NULL utf8 utf8_bin char(16) select,insert,update,references NEVER NULL +def performance_schema events_waits_summary_by_user_by_event_name USER 1 NULL YES char 128 384 NULL NULL NULL utf8 utf8_bin char(128) select,insert,update,references NEVER NULL def performance_schema events_waits_summary_by_user_by_event_name EVENT_NAME 2 NULL NO varchar 128 384 NULL NULL NULL utf8 utf8_general_ci varchar(128) 
select,insert,update,references NEVER NULL def performance_schema events_waits_summary_by_user_by_event_name COUNT_STAR 3 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references NEVER NULL def performance_schema events_waits_summary_by_user_by_event_name SUM_TIMER_WAIT 4 NULL NO bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references NEVER NULL @@ -561,8 +561,8 @@ def performance_schema session_connect_attrs ATTR_NAME 2 NULL NO varchar 32 96 N def performance_schema session_connect_attrs ATTR_VALUE 3 NULL YES varchar 1024 3072 NULL NULL NULL utf8 utf8_bin varchar(1024) select,insert,update,references NEVER NULL def performance_schema session_connect_attrs ORDINAL_POSITION 4 NULL YES int NULL NULL 10 0 NULL NULL NULL int(11) select,insert,update,references NEVER NULL def performance_schema setup_actors HOST 1 '%' NO char 60 180 NULL NULL NULL utf8 utf8_bin char(60) select,insert,update,references NEVER NULL -def performance_schema setup_actors USER 2 '%' NO char 16 48 NULL NULL NULL utf8 utf8_bin char(16) select,insert,update,references NEVER NULL -def performance_schema setup_actors ROLE 3 '%' NO char 16 48 NULL NULL NULL utf8 utf8_bin char(16) select,insert,update,references NEVER NULL +def performance_schema setup_actors USER 2 '%' NO char 128 384 NULL NULL NULL utf8 utf8_bin char(128) select,insert,update,references NEVER NULL +def performance_schema setup_actors ROLE 3 '%' NO char 128 384 NULL NULL NULL utf8 utf8_bin char(128) select,insert,update,references NEVER NULL def performance_schema setup_consumers NAME 1 NULL NO varchar 64 192 NULL NULL NULL utf8 utf8_general_ci varchar(64) select,insert,update,references NEVER NULL def performance_schema setup_consumers ENABLED 2 NULL NO enum 3 9 NULL NULL NULL utf8 utf8_general_ci enum('YES','NO') select,insert,update,references NEVER NULL def performance_schema setup_instruments NAME 1 NULL NO varchar 128 384 NULL NULL NULL utf8 utf8_general_ci varchar(128) select,insert,update,references NEVER NULL @@ -783,7 +783,7 @@ def performance_schema threads THREAD_ID 1 NULL NO bigint NULL NULL 20 0 NULL NU def performance_schema threads NAME 2 NULL NO varchar 128 384 NULL NULL NULL utf8 utf8_general_ci varchar(128) select,insert,update,references NEVER NULL def performance_schema threads TYPE 3 NULL NO varchar 10 30 NULL NULL NULL utf8 utf8_general_ci varchar(10) select,insert,update,references NEVER NULL def performance_schema threads PROCESSLIST_ID 4 NULL YES bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references NEVER NULL -def performance_schema threads PROCESSLIST_USER 5 NULL YES varchar 16 48 NULL NULL NULL utf8 utf8_general_ci varchar(16) select,insert,update,references NEVER NULL +def performance_schema threads PROCESSLIST_USER 5 NULL YES varchar 128 384 NULL NULL NULL utf8 utf8_general_ci varchar(128) select,insert,update,references NEVER NULL def performance_schema threads PROCESSLIST_HOST 6 NULL YES varchar 60 180 NULL NULL NULL utf8 utf8_general_ci varchar(60) select,insert,update,references NEVER NULL def performance_schema threads PROCESSLIST_DB 7 NULL YES varchar 64 192 NULL NULL NULL utf8 utf8_general_ci varchar(64) select,insert,update,references NEVER NULL def performance_schema threads PROCESSLIST_COMMAND 8 NULL YES varchar 16 48 NULL NULL NULL utf8 utf8_general_ci varchar(16) select,insert,update,references NEVER NULL @@ -793,7 +793,7 @@ def performance_schema threads PROCESSLIST_INFO 11 NULL YES longtext 4294967295 def 
performance_schema threads PARENT_THREAD_ID 12 NULL YES bigint NULL NULL 20 0 NULL NULL NULL bigint(20) unsigned select,insert,update,references NEVER NULL def performance_schema threads ROLE 13 NULL YES varchar 64 192 NULL NULL NULL utf8 utf8_general_ci varchar(64) select,insert,update,references NEVER NULL def performance_schema threads INSTRUMENTED 14 NULL NO enum 3 9 NULL NULL NULL utf8 utf8_general_ci enum('YES','NO') select,insert,update,references NEVER NULL -def performance_schema users USER 1 NULL YES char 16 48 NULL NULL NULL utf8 utf8_bin char(16) select,insert,update,references NEVER NULL +def performance_schema users USER 1 NULL YES char 128 384 NULL NULL NULL utf8 utf8_bin char(128) select,insert,update,references NEVER NULL def performance_schema users CURRENT_CONNECTIONS 2 NULL NO bigint NULL NULL 19 0 NULL NULL NULL bigint(20) select,insert,update,references NEVER NULL def performance_schema users TOTAL_CONNECTIONS 3 NULL NO bigint NULL NULL 19 0 NULL NULL NULL bigint(20) select,insert,update,references NEVER NULL select count(*) from information_schema.columns diff --git a/storage/perfschema/table_accounts.cc b/storage/perfschema/table_accounts.cc index 708f8269a69..550f6614abb 100644 --- a/storage/perfschema/table_accounts.cc +++ b/storage/perfschema/table_accounts.cc @@ -43,8 +43,8 @@ table_accounts::m_share= sizeof(PFS_simple_index), /* ref length */ &m_table_lock, { C_STRING_WITH_LEN("CREATE TABLE accounts(" - "USER CHAR(16) collate utf8_bin default null," - "HOST CHAR(60) collate utf8_bin default null," + "USER CHAR(" STRINGIFY_ARG(USERNAME_CHAR_LENGTH) ") collate utf8_bin default null," + "HOST CHAR(" STRINGIFY_ARG(HOSTNAME_LENGTH) ") collate utf8_bin default null," "CURRENT_CONNECTIONS bigint not null," "TOTAL_CONNECTIONS bigint not null)") } }; diff --git a/storage/perfschema/table_esgs_by_account_by_event_name.cc b/storage/perfschema/table_esgs_by_account_by_event_name.cc index 22e4e0040f1..9a983eb076e 100644 --- a/storage/perfschema/table_esgs_by_account_by_event_name.cc +++ b/storage/perfschema/table_esgs_by_account_by_event_name.cc @@ -49,8 +49,8 @@ table_esgs_by_account_by_event_name::m_share= sizeof(pos_esgs_by_account_by_event_name), &m_table_lock, { C_STRING_WITH_LEN("CREATE TABLE events_stages_summary_by_account_by_event_name(" - "USER CHAR(16) collate utf8_bin default null," - "HOST CHAR(60) collate utf8_bin default null," + "USER CHAR(" STRINGIFY_ARG(USERNAME_CHAR_LENGTH) ") collate utf8_bin default null," + "HOST CHAR(" STRINGIFY_ARG(HOSTNAME_LENGTH) ") collate utf8_bin default null," "EVENT_NAME VARCHAR(128) not null," "COUNT_STAR BIGINT unsigned not null," "SUM_TIMER_WAIT BIGINT unsigned not null," diff --git a/storage/perfschema/table_esgs_by_host_by_event_name.cc b/storage/perfschema/table_esgs_by_host_by_event_name.cc index 86cc2eb1b86..5ff9faf0c1e 100644 --- a/storage/perfschema/table_esgs_by_host_by_event_name.cc +++ b/storage/perfschema/table_esgs_by_host_by_event_name.cc @@ -50,7 +50,7 @@ table_esgs_by_host_by_event_name::m_share= sizeof(pos_esgs_by_host_by_event_name), &m_table_lock, { C_STRING_WITH_LEN("CREATE TABLE events_stages_summary_by_host_by_event_name(" - "HOST CHAR(60) collate utf8_bin default null," + "HOST CHAR(" STRINGIFY_ARG(HOSTNAME_LENGTH) ") collate utf8_bin default null," "EVENT_NAME VARCHAR(128) not null," "COUNT_STAR BIGINT unsigned not null," "SUM_TIMER_WAIT BIGINT unsigned not null," diff --git a/storage/perfschema/table_esgs_by_user_by_event_name.cc b/storage/perfschema/table_esgs_by_user_by_event_name.cc index 
af73c1fc5fd..23b7b0f6689 100644 --- a/storage/perfschema/table_esgs_by_user_by_event_name.cc +++ b/storage/perfschema/table_esgs_by_user_by_event_name.cc @@ -50,7 +50,7 @@ table_esgs_by_user_by_event_name::m_share= sizeof(pos_esgs_by_user_by_event_name), &m_table_lock, { C_STRING_WITH_LEN("CREATE TABLE events_stages_summary_by_user_by_event_name(" - "USER CHAR(16) collate utf8_bin default null," + "USER CHAR(" STRINGIFY_ARG(USERNAME_CHAR_LENGTH) ") collate utf8_bin default null," "EVENT_NAME VARCHAR(128) not null," "COUNT_STAR BIGINT unsigned not null," "SUM_TIMER_WAIT BIGINT unsigned not null," diff --git a/storage/perfschema/table_esms_by_account_by_event_name.cc b/storage/perfschema/table_esms_by_account_by_event_name.cc index 7afdabcbbfe..312050aa9c9 100644 --- a/storage/perfschema/table_esms_by_account_by_event_name.cc +++ b/storage/perfschema/table_esms_by_account_by_event_name.cc @@ -49,8 +49,8 @@ table_esms_by_account_by_event_name::m_share= sizeof(pos_esms_by_account_by_event_name), &m_table_lock, { C_STRING_WITH_LEN("CREATE TABLE events_statements_summary_by_account_by_event_name(" - "USER CHAR(16) collate utf8_bin default null," - "HOST CHAR(60) collate utf8_bin default null," + "USER CHAR(" STRINGIFY_ARG(USERNAME_CHAR_LENGTH) ") collate utf8_bin default null," + "HOST CHAR(" STRINGIFY_ARG(HOSTNAME_LENGTH) ") collate utf8_bin default null," "EVENT_NAME VARCHAR(128) not null," "COUNT_STAR BIGINT unsigned not null," "SUM_TIMER_WAIT BIGINT unsigned not null," diff --git a/storage/perfschema/table_esms_by_host_by_event_name.cc b/storage/perfschema/table_esms_by_host_by_event_name.cc index 42629ab6c09..b390d1e17a4 100644 --- a/storage/perfschema/table_esms_by_host_by_event_name.cc +++ b/storage/perfschema/table_esms_by_host_by_event_name.cc @@ -50,7 +50,7 @@ table_esms_by_host_by_event_name::m_share= sizeof(pos_esms_by_host_by_event_name), &m_table_lock, { C_STRING_WITH_LEN("CREATE TABLE events_statements_summary_by_host_by_event_name(" - "HOST CHAR(60) collate utf8_bin default null," + "HOST CHAR(" STRINGIFY_ARG(HOSTNAME_LENGTH) ") collate utf8_bin default null," "EVENT_NAME VARCHAR(128) not null," "COUNT_STAR BIGINT unsigned not null," "SUM_TIMER_WAIT BIGINT unsigned not null," diff --git a/storage/perfschema/table_esms_by_user_by_event_name.cc b/storage/perfschema/table_esms_by_user_by_event_name.cc index f8708ac9a14..1fa1289aa8c 100644 --- a/storage/perfschema/table_esms_by_user_by_event_name.cc +++ b/storage/perfschema/table_esms_by_user_by_event_name.cc @@ -50,7 +50,7 @@ table_esms_by_user_by_event_name::m_share= sizeof(pos_esms_by_user_by_event_name), &m_table_lock, { C_STRING_WITH_LEN("CREATE TABLE events_statements_summary_by_user_by_event_name(" - "USER CHAR(16) collate utf8_bin default null," + "USER CHAR(" STRINGIFY_ARG(USERNAME_CHAR_LENGTH) ") collate utf8_bin default null," "EVENT_NAME VARCHAR(128) not null," "COUNT_STAR BIGINT unsigned not null," "SUM_TIMER_WAIT BIGINT unsigned not null," diff --git a/storage/perfschema/table_ews_by_account_by_event_name.cc b/storage/perfschema/table_ews_by_account_by_event_name.cc index fa6258ec9ac..40e0152f889 100644 --- a/storage/perfschema/table_ews_by_account_by_event_name.cc +++ b/storage/perfschema/table_ews_by_account_by_event_name.cc @@ -49,8 +49,8 @@ table_ews_by_account_by_event_name::m_share= sizeof(pos_ews_by_account_by_event_name), &m_table_lock, { C_STRING_WITH_LEN("CREATE TABLE events_waits_summary_by_account_by_event_name(" - "USER CHAR(16) collate utf8_bin default null," - "HOST CHAR(60) collate utf8_bin default 
null," + "USER CHAR(" STRINGIFY_ARG(USERNAME_CHAR_LENGTH) ") collate utf8_bin default null," + "HOST CHAR(" STRINGIFY_ARG(HOSTNAME_LENGTH) ") collate utf8_bin default null," "EVENT_NAME VARCHAR(128) not null," "COUNT_STAR BIGINT unsigned not null," "SUM_TIMER_WAIT BIGINT unsigned not null," diff --git a/storage/perfschema/table_ews_by_host_by_event_name.cc b/storage/perfschema/table_ews_by_host_by_event_name.cc index e3ef7ca3720..d22d6fc8d79 100644 --- a/storage/perfschema/table_ews_by_host_by_event_name.cc +++ b/storage/perfschema/table_ews_by_host_by_event_name.cc @@ -50,7 +50,7 @@ table_ews_by_host_by_event_name::m_share= sizeof(pos_ews_by_host_by_event_name), &m_table_lock, { C_STRING_WITH_LEN("CREATE TABLE events_waits_summary_by_host_by_event_name(" - "HOST CHAR(60) collate utf8_bin default null," + "HOST CHAR(" STRINGIFY_ARG(HOSTNAME_LENGTH) ") collate utf8_bin default null," "EVENT_NAME VARCHAR(128) not null," "COUNT_STAR BIGINT unsigned not null," "SUM_TIMER_WAIT BIGINT unsigned not null," diff --git a/storage/perfschema/table_ews_by_user_by_event_name.cc b/storage/perfschema/table_ews_by_user_by_event_name.cc index cb99f749a9c..b2f8e1da824 100644 --- a/storage/perfschema/table_ews_by_user_by_event_name.cc +++ b/storage/perfschema/table_ews_by_user_by_event_name.cc @@ -50,7 +50,7 @@ table_ews_by_user_by_event_name::m_share= sizeof(pos_ews_by_user_by_event_name), &m_table_lock, { C_STRING_WITH_LEN("CREATE TABLE events_waits_summary_by_user_by_event_name(" - "USER CHAR(16) collate utf8_bin default null," + "USER CHAR(" STRINGIFY_ARG(USERNAME_CHAR_LENGTH) ") collate utf8_bin default null," "EVENT_NAME VARCHAR(128) not null," "COUNT_STAR BIGINT unsigned not null," "SUM_TIMER_WAIT BIGINT unsigned not null," diff --git a/storage/perfschema/table_hosts.cc b/storage/perfschema/table_hosts.cc index 8bc5310817c..221e0664590 100644 --- a/storage/perfschema/table_hosts.cc +++ b/storage/perfschema/table_hosts.cc @@ -44,7 +44,7 @@ table_hosts::m_share= sizeof(PFS_simple_index), /* ref length */ &m_table_lock, { C_STRING_WITH_LEN("CREATE TABLE hosts(" - "HOST CHAR(60) collate utf8_bin default null," + "HOST CHAR(" STRINGIFY_ARG(HOSTNAME_LENGTH) ") collate utf8_bin default null," "CURRENT_CONNECTIONS bigint not null," "TOTAL_CONNECTIONS bigint not null)") } }; diff --git a/storage/perfschema/table_setup_actors.cc b/storage/perfschema/table_setup_actors.cc index 305cfd32169..767a7e9e6f0 100644 --- a/storage/perfschema/table_setup_actors.cc +++ b/storage/perfschema/table_setup_actors.cc @@ -49,9 +49,9 @@ table_setup_actors::m_share= sizeof(PFS_simple_index), &m_table_lock, { C_STRING_WITH_LEN("CREATE TABLE setup_actors(" - "HOST CHAR(60) collate utf8_bin default '%' not null," - "USER CHAR(16) collate utf8_bin default '%' not null," - "ROLE CHAR(16) collate utf8_bin default '%' not null)") } + "HOST CHAR(" STRINGIFY_ARG(HOSTNAME_LENGTH) ") collate utf8_bin default '%' not null," + "USER CHAR(" STRINGIFY_ARG(USERNAME_CHAR_LENGTH) ") collate utf8_bin default '%' not null," + "ROLE CHAR(" STRINGIFY_ARG(USERNAME_CHAR_LENGTH) ") collate utf8_bin default '%' not null)") } }; PFS_engine_table* table_setup_actors::create() diff --git a/storage/perfschema/table_threads.cc b/storage/perfschema/table_threads.cc index eccf41db971..00196718113 100644 --- a/storage/perfschema/table_threads.cc +++ b/storage/perfschema/table_threads.cc @@ -46,8 +46,8 @@ table_threads::m_share= "NAME VARCHAR(128) not null," "TYPE VARCHAR(10) not null," "PROCESSLIST_ID BIGINT unsigned," - "PROCESSLIST_USER VARCHAR(16)," - 
"PROCESSLIST_HOST VARCHAR(60)," + "PROCESSLIST_USER VARCHAR(" STRINGIFY_ARG(USERNAME_CHAR_LENGTH) ")," + "PROCESSLIST_HOST VARCHAR(" STRINGIFY_ARG(HOSTNAME_LENGTH) ")," "PROCESSLIST_DB VARCHAR(64)," "PROCESSLIST_COMMAND VARCHAR(16)," "PROCESSLIST_TIME BIGINT," diff --git a/storage/perfschema/table_users.cc b/storage/perfschema/table_users.cc index 883ebd36633..e9592c55f55 100644 --- a/storage/perfschema/table_users.cc +++ b/storage/perfschema/table_users.cc @@ -44,7 +44,7 @@ table_users::m_share= sizeof(PFS_simple_index), /* ref length */ &m_table_lock, { C_STRING_WITH_LEN("CREATE TABLE users(" - "USER CHAR(16) collate utf8_bin default null," + "USER CHAR(" STRINGIFY_ARG(USERNAME_CHAR_LENGTH) ") collate utf8_bin default null," "CURRENT_CONNECTIONS bigint not null," "TOTAL_CONNECTIONS bigint not null)") } }; From 3b94309a6cec8d8149c9f312229d18227036e01d Mon Sep 17 00:00:00 2001 From: Varun Gupta Date: Tue, 12 Jan 2021 11:17:37 +0530 Subject: [PATCH 062/150] MDEV-23753: SIGSEGV in Column_stat::store_stat_fields For EITS collection min and max fields are allocated for each column that is set in the read_set bitmap of a table. This allocation of min and max fields happens inside alloc_statistics_for_table. For a partitioned table ha_rnd_init is called inside the function collect_statistics_for_table which sets the read_set bitmap for the columns inside the partition expression. This happens only when there is a write lock on the partitioned table. But the allocation happens before this, so min and max fields are not allocated for the columns involved in the partition expression. This resulted in a crash, as the EITS statistics were collected but there was no min and max field to store the value to. The fix would be to call ha_rnd_init inside the function alloc_statistics_for_table that would make sure that min and max fields are allocated for the columns involved in the partition expression. 
--- mysql-test/r/stat_tables.result | 14 ++++++++++++++ mysql-test/r/stat_tables_innodb.result | 14 ++++++++++++++ mysql-test/t/stat_tables.test | 12 ++++++++++++ sql/sql_statistics.cc | 4 ++++ 4 files changed, 44 insertions(+) diff --git a/mysql-test/r/stat_tables.result b/mysql-test/r/stat_tables.result index bb3a0a80f7e..a52db46d119 100644 --- a/mysql-test/r/stat_tables.result +++ b/mysql-test/r/stat_tables.result @@ -825,5 +825,19 @@ length(a) set names latin1; set @@use_stat_tables=@save_use_stat_tables; drop table t1; +# +# MDEV-23753: SIGSEGV in Column_stat::store_stat_fields +# +CREATE TABLE t1 (a INT, b INT) PARTITION BY HASH (b) PARTITIONS 2; +LOCK TABLES t1 WRITE; +ANALYZE TABLE t1 PERSISTENT FOR COLUMNS (a) INDEXES (); +Table Op Msg_type Msg_text +test.t1 analyze status Engine-independent statistics collected +test.t1 analyze status OK +ANALYZE TABLE t1 PERSISTENT FOR COLUMNS (nonexisting) INDEXES (nonexisting); +Table Op Msg_type Msg_text +test.t1 analyze status Engine-independent statistics collected +test.t1 analyze error Invalid argument +DROP TABLE t1; # please keep this at the last set @@global.histogram_size=@save_histogram_size; diff --git a/mysql-test/r/stat_tables_innodb.result b/mysql-test/r/stat_tables_innodb.result index 23b952cc891..3be6b0bd4b3 100644 --- a/mysql-test/r/stat_tables_innodb.result +++ b/mysql-test/r/stat_tables_innodb.result @@ -852,6 +852,20 @@ length(a) set names latin1; set @@use_stat_tables=@save_use_stat_tables; drop table t1; +# +# MDEV-23753: SIGSEGV in Column_stat::store_stat_fields +# +CREATE TABLE t1 (a INT, b INT) PARTITION BY HASH (b) PARTITIONS 2; +LOCK TABLES t1 WRITE; +ANALYZE TABLE t1 PERSISTENT FOR COLUMNS (a) INDEXES (); +Table Op Msg_type Msg_text +test.t1 analyze status Engine-independent statistics collected +test.t1 analyze status OK +ANALYZE TABLE t1 PERSISTENT FOR COLUMNS (nonexisting) INDEXES (nonexisting); +Table Op Msg_type Msg_text +test.t1 analyze status Engine-independent statistics collected +test.t1 analyze error Invalid argument +DROP TABLE t1; # please keep this at the last set @@global.histogram_size=@save_histogram_size; set optimizer_switch=@save_optimizer_switch_for_stat_tables_test; diff --git a/mysql-test/t/stat_tables.test b/mysql-test/t/stat_tables.test index 4c21e21ea70..f7c18637cdd 100644 --- a/mysql-test/t/stat_tables.test +++ b/mysql-test/t/stat_tables.test @@ -1,4 +1,5 @@ --source include/have_stat_tables.inc +--source include/have_partition.inc select @@global.use_stat_tables; select @@session.use_stat_tables; @@ -568,5 +569,16 @@ set names latin1; set @@use_stat_tables=@save_use_stat_tables; drop table t1; +--echo # +--echo # MDEV-23753: SIGSEGV in Column_stat::store_stat_fields +--echo # + +CREATE TABLE t1 (a INT, b INT) PARTITION BY HASH (b) PARTITIONS 2; +LOCK TABLES t1 WRITE; +ANALYZE TABLE t1 PERSISTENT FOR COLUMNS (a) INDEXES (); +ANALYZE TABLE t1 PERSISTENT FOR COLUMNS (nonexisting) INDEXES (nonexisting); +DROP TABLE t1; + + --echo # please keep this at the last set @@global.histogram_size=@save_histogram_size; diff --git a/sql/sql_statistics.cc b/sql/sql_statistics.cc index ab8edbf584b..b63172045e6 100644 --- a/sql/sql_statistics.cc +++ b/sql/sql_statistics.cc @@ -2110,6 +2110,10 @@ int alloc_statistics_for_table(THD* thd, TABLE *table) ulonglong *idx_avg_frequency= (ulonglong*) alloc_root(&table->mem_root, sizeof(ulonglong) * key_parts); + if (table->file->ha_rnd_init(TRUE)) + DBUG_RETURN(1); + table->file->ha_rnd_end(); + uint columns= 0; for (field_ptr= table->field; *field_ptr; 
field_ptr++) { From ab271ee7e22ce1250ec36b09123bfb98bc3f8107 Mon Sep 17 00:00:00 2001 From: Varun Gupta Date: Tue, 12 Jan 2021 14:25:55 +0530 Subject: [PATCH 063/150] MDEV-23826: ORDER BY in view definition leads to wrong result with GROUP BY on query using view Introduced val_time_packed and val_datetime_packed functions for Item_direct_ref to make sure to get the value from the item it is referring to. The issue for incorrect result was that the item was getting its value from the temporary table rather than from the view. --- mysql-test/r/group_by.result | 44 ++++++++++++++++++++++++++++++++++++ mysql-test/t/group_by.test | 33 +++++++++++++++++++++++++++ sql/item.cc | 16 +++++++++++++ sql/item.h | 19 +++++++++++++++- 4 files changed, 111 insertions(+), 1 deletion(-) diff --git a/mysql-test/r/group_by.result b/mysql-test/r/group_by.result index c996627486c..55e7703a377 100644 --- a/mysql-test/r/group_by.result +++ b/mysql-test/r/group_by.result @@ -2875,3 +2875,47 @@ WHERE c.table_schema=(SELECT COUNT(*) FROM INFORMATION_SCHEMA.columns GROUP BY c GROUP BY t.table_name; ERROR HY001: Out of sort memory, consider increasing server sort buffer size SET max_sort_length= @save_max_sort_length; +# +# MDEV-23826: ORDER BY in view definition leads to wrong result with GROUP BY on query using view +# +CREATE TABLE t1 +( +id INT PRIMARY KEY AUTO_INCREMENT, +dt datetime, +INDEX(dt), +foo int +); +INSERT INTO t1 VALUES (1,'2020-09-26 12:00:00',1); +INSERT INTO t1 VALUES (2,'2020-09-26 13:00:00',1); +INSERT INTO t1 VALUES (3,'2020-09-27 13:00:00',1); +INSERT INTO t1 VALUES (4,'2020-09-27 12:00:00',1); +INSERT INTO t1 VALUES (5,'2020-09-28 12:00:00',1); +INSERT INTO t1 VALUES (6,'2020-09-28 13:00:00',1); +INSERT INTO t1 VALUES (7,'2020-09-25 12:00:00',1); +INSERT INTO t1 VALUES (8,'2020-09-25 13:00:00',1); +INSERT INTO t1 VALUES (9,'2020-09-26 13:00:00',1); +CREATE VIEW v1 AS SELECT * FROM t1; +CREATE VIEW v2 AS SELECT * FROM t1 ORDER BY dt; +SELECT dt, sum(foo) AS foo FROM v1 WHERE dt>DATE_SUB('2020-09-27 00:00:00', INTERVAL 3 DAY) GROUP BY dt; +dt foo +2020-09-25 12:00:00 1 +2020-09-25 13:00:00 1 +2020-09-26 12:00:00 1 +2020-09-26 13:00:00 2 +2020-09-27 12:00:00 1 +2020-09-27 13:00:00 1 +2020-09-28 12:00:00 1 +2020-09-28 13:00:00 1 +SELECT dt, sum(foo) AS foo FROM v2 WHERE dt>DATE_SUB('2020-09-27 00:00:00', INTERVAL 3 DAY) GROUP BY dt; +dt foo +2020-09-25 12:00:00 1 +2020-09-25 13:00:00 1 +2020-09-26 12:00:00 1 +2020-09-26 13:00:00 2 +2020-09-27 12:00:00 1 +2020-09-27 13:00:00 1 +2020-09-28 12:00:00 1 +2020-09-28 13:00:00 1 +DROP TABLE t1; +DROP VIEW v1,v2; +# End of 10.2 tests diff --git a/mysql-test/t/group_by.test b/mysql-test/t/group_by.test index 324b41ce23c..d4c6d6527d5 100644 --- a/mysql-test/t/group_by.test +++ b/mysql-test/t/group_by.test @@ -1996,3 +1996,36 @@ ON t.table_schema=c.table_schema WHERE c.table_schema=(SELECT COUNT(*) FROM INFORMATION_SCHEMA.columns GROUP BY column_type) GROUP BY t.table_name; SET max_sort_length= @save_max_sort_length; + + +--echo # +--echo # MDEV-23826: ORDER BY in view definition leads to wrong result with GROUP BY on query using view +--echo # + +CREATE TABLE t1 +( + id INT PRIMARY KEY AUTO_INCREMENT, + dt datetime, + INDEX(dt), + foo int +); + +INSERT INTO t1 VALUES (1,'2020-09-26 12:00:00',1); +INSERT INTO t1 VALUES (2,'2020-09-26 13:00:00',1); +INSERT INTO t1 VALUES (3,'2020-09-27 13:00:00',1); +INSERT INTO t1 VALUES (4,'2020-09-27 12:00:00',1); +INSERT INTO t1 VALUES (5,'2020-09-28 12:00:00',1); +INSERT INTO t1 VALUES (6,'2020-09-28 13:00:00',1); +INSERT 
INTO t1 VALUES (7,'2020-09-25 12:00:00',1); +INSERT INTO t1 VALUES (8,'2020-09-25 13:00:00',1); +INSERT INTO t1 VALUES (9,'2020-09-26 13:00:00',1); + +CREATE VIEW v1 AS SELECT * FROM t1; +CREATE VIEW v2 AS SELECT * FROM t1 ORDER BY dt; +SELECT dt, sum(foo) AS foo FROM v1 WHERE dt>DATE_SUB('2020-09-27 00:00:00', INTERVAL 3 DAY) GROUP BY dt; +SELECT dt, sum(foo) AS foo FROM v2 WHERE dt>DATE_SUB('2020-09-27 00:00:00', INTERVAL 3 DAY) GROUP BY dt; + +DROP TABLE t1; +DROP VIEW v1,v2; + +--echo # End of 10.2 tests diff --git a/sql/item.cc b/sql/item.cc index a2753caf496..e633964270b 100644 --- a/sql/item.cc +++ b/sql/item.cc @@ -8160,6 +8160,22 @@ bool Item_direct_ref::get_date(MYSQL_TIME *ltime,ulonglong fuzzydate) } +longlong Item_direct_ref::val_time_packed() +{ + longlong tmp = (*ref)->val_time_packed(); + null_value= (*ref)->null_value; + return tmp; +} + + +longlong Item_direct_ref::val_datetime_packed() +{ + longlong tmp = (*ref)->val_datetime_packed(); + null_value= (*ref)->null_value; + return tmp; +} + + Item_cache_wrapper::~Item_cache_wrapper() { DBUG_ASSERT(expr_cache == 0); diff --git a/sql/item.h b/sql/item.h index ed20074a8da..823ffd873b6 100644 --- a/sql/item.h +++ b/sql/item.h @@ -4671,13 +4671,16 @@ public: return Item_ref::fix_fields(thd, it); } void save_val(Field *to); + /* Below we should have all val() methods as in Item_ref */ double val_real(); longlong val_int(); - String *val_str(String* tmp); my_decimal *val_decimal(my_decimal *); bool val_bool(); + String *val_str(String* tmp); bool is_null(); bool get_date(MYSQL_TIME *ltime, ulonglong fuzzydate); + longlong val_datetime_packed(); + longlong val_time_packed(); virtual Ref_Type ref_type() { return DIRECT_REF; } Item *get_copy(THD *thd, MEM_ROOT *mem_root) { return get_item_copy(thd, mem_root, this); } @@ -4992,6 +4995,20 @@ public: } return Item_direct_ref::get_date(ltime, fuzzydate); } + longlong val_time_packed() + { + if (check_null_ref()) + return 0; + else + return Item_direct_ref::val_time_packed(); + } + longlong val_datetime_packed() + { + if (check_null_ref()) + return 0; + else + return Item_direct_ref::val_datetime_packed(); + } bool send(Protocol *protocol, String *buffer); void save_org_in_field(Field *field, fast_field_copier data __attribute__ ((__unused__))) From 347bce0201076d3a01ec6554bd0c8147a501af44 Mon Sep 17 00:00:00 2001 From: Olivier Bertrand Date: Tue, 12 Jan 2021 18:25:41 +0100 Subject: [PATCH 064/150] - Remove static linkage to cpprestsdk when it is installed modified: storage/connect/CMakeLists.txt - Continue BSON development modified: storage/connect/bson.cpp modified: storage/connect/bson.h modified: storage/connect/bsonudf.cpp modified: storage/connect/bsonudf.h added: storage/connect/mysql-test/connect/r/bson_udf.result added: storage/connect/mysql-test/connect/t/bson_udf.inc added: storage/connect/mysql-test/connect/t/bson_udf.test added: storage/connect/mysql-test/connect/t/bson_udf2.inc --- storage/connect/CMakeLists.txt | 40 +- storage/connect/bson.cpp | 20 +- storage/connect/bson.h | 2 +- storage/connect/bsonudf.cpp | 206 +++-- storage/connect/bsonudf.h | 11 +- .../mysql-test/connect/r/bson_udf.result | 708 ++++++++++++++++++ .../connect/mysql-test/connect/t/bson_udf.inc | 70 ++ .../mysql-test/connect/t/bson_udf.test | 281 +++++++ .../mysql-test/connect/t/bson_udf2.inc | 61 ++ 9 files changed, 1327 insertions(+), 72 deletions(-) create mode 100644 storage/connect/mysql-test/connect/r/bson_udf.result create mode 100644 storage/connect/mysql-test/connect/t/bson_udf.inc create mode 
100644 storage/connect/mysql-test/connect/t/bson_udf.test create mode 100644 storage/connect/mysql-test/connect/t/bson_udf2.inc diff --git a/storage/connect/CMakeLists.txt b/storage/connect/CMakeLists.txt index f110a1eda04..77e77227e21 100644 --- a/storage/connect/CMakeLists.txt +++ b/storage/connect/CMakeLists.txt @@ -334,26 +334,26 @@ IF(CONNECT_WITH_REST) # MESSAGE(STATUS "=====> REST support is ON") SET(CONNECT_SOURCES ${CONNECT_SOURCES} tabrest.cpp tabrest.h) add_definitions(-DREST_SUPPORT) - FIND_PACKAGE(cpprestsdk QUIET) - IF (cpprestsdk_FOUND) - IF(UNIX) -# INCLUDE_DIRECTORIES(${CPPRESTSDK_INCLUDE_DIR}) -# If needed edit next line to set the path to libcpprest.so - SET(REST_LIBRARY -lcpprest) - MESSAGE (STATUS ${REST_LIBRARY}) - ELSE(NOT UNIX) -# Next line sets debug compile mode matching cpprest_2_10d.dll -# when it was binary installed (can be change later in Visual Studio) -# Comment it out if not needed depending on your cpprestsdk installation. - SET(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} /MDd") - ENDIF(UNIX) -# IF(REST_LIBRARY) why this? how about Windows - SET(CONNECT_SOURCES ${CONNECT_SOURCES} restget.cpp) - add_definitions(-DREST_SOURCE) -# ENDIF() -# ELSE(NOT cpprestsdk_FOUND) -# MESSAGE(STATUS "=====> cpprestsdk package not found") - ENDIF (cpprestsdk_FOUND) +# FIND_PACKAGE(cpprestsdk QUIET) +# IF (cpprestsdk_FOUND) +# IF(UNIX) +## INCLUDE_DIRECTORIES(${CPPRESTSDK_INCLUDE_DIR}) +## If needed edit next line to set the path to libcpprest.so +# SET(REST_LIBRARY -lcpprest) +# MESSAGE (STATUS ${REST_LIBRARY}) +# ELSE(NOT UNIX) +## Next line sets debug compile mode matching cpprest_2_10d.dll +## when it was binary installed (can be change later in Visual Studio) +## Comment it out if not needed depending on your cpprestsdk installation. +# SET(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} /MDd") +# ENDIF(UNIX) +## IF(REST_LIBRARY) why this? how about Windows +# SET(CONNECT_SOURCES ${CONNECT_SOURCES} restget.cpp) +# add_definitions(-DREST_SOURCE) +## ENDIF() +##ELSE(NOT cpprestsdk_FOUND) +## MESSAGE(STATUS "=====> cpprestsdk package not found") +# ENDIF (cpprestsdk_FOUND) ENDIF(CONNECT_WITH_REST) # diff --git a/storage/connect/bson.cpp b/storage/connect/bson.cpp index f3ad919993f..fc58303a73f 100644 --- a/storage/connect/bson.cpp +++ b/storage/connect/bson.cpp @@ -631,7 +631,7 @@ PSZ BDOC::Serialize(PGLOBAL g, PBVAL bvp, char* fn, int pretty) err = SerializeValue(MVP(bvp->To_Val)); break; default: - err = SerializeValue(bvp); + err = SerializeValue(bvp, true); } // endswitch Type if (fs) { @@ -737,7 +737,7 @@ bool BDOC::SerializeObject(OFFSET obp) /***********************************************************************/ /* Serialize a JSON Value. */ /***********************************************************************/ -bool BDOC::SerializeValue(PBVAL jvp) +bool BDOC::SerializeValue(PBVAL jvp, bool b) { char buf[64]; @@ -750,7 +750,11 @@ bool BDOC::SerializeValue(PBVAL jvp) return jp->WriteStr(jvp->B ? "true" : "false"); case TYPE_STRG: case TYPE_DTM: - return jp->Escape(MZP(jvp->To_Val)); + if (b) { + return jp->WriteStr(MZP(jvp->To_Val)); + } else + return jp->Escape(MZP(jvp->To_Val)); + case TYPE_INTG: sprintf(buf, "%d", jvp->N); return jp->WriteStr(buf); @@ -1505,8 +1509,12 @@ double BJSON::GetDouble(PBVAL vp) d = (double)vlp->N; break; case TYPE_FLOAT: - d = (double)vlp->F; - break; + { char buf[32]; + int n = (vlp->Nd) ? 
vlp->Nd : 5; + + sprintf(buf, "%.*f", n, vlp->F); + d = atof(buf); + } break; case TYPE_DTM: case TYPE_STRG: d = atof(MZP(vlp->To_Val)); @@ -1632,7 +1640,7 @@ PBVAL BJSON::SetValue(PBVAL vlp, PVAL valp) { double d = valp->GetFloatValue(); int nd = (IsTypeNum(valp->GetType())) ? valp->GetValPrec() : 0; - if (nd <= 6 && d >= FLT_MIN && d <= FLT_MAX) { + if (nd > 0 && nd <= 6 && d >= FLT_MIN && d <= FLT_MAX) { vlp->F = (float)valp->GetFloatValue(); vlp->Type = TYPE_FLOAT; } else { diff --git a/storage/connect/bson.h b/storage/connect/bson.h index 235168a36ce..6eb6c019c1a 100644 --- a/storage/connect/bson.h +++ b/storage/connect/bson.h @@ -193,7 +193,7 @@ protected: OFFSET ParseAsArray(int& i); bool SerializeArray(OFFSET arp, bool b); bool SerializeObject(OFFSET obp); - bool SerializeValue(PBVAL vp); + bool SerializeValue(PBVAL vp, bool b = false); // Members used when parsing and serializing JOUT* jp; // Used with serialize diff --git a/storage/connect/bsonudf.cpp b/storage/connect/bsonudf.cpp index 4bdeafa0c33..719b7d7509a 100644 --- a/storage/connect/bsonudf.cpp +++ b/storage/connect/bsonudf.cpp @@ -36,7 +36,7 @@ void SetChanged(PBSON bsp); /* Program for saving the status of the memory pools. */ /*********************************************************************************/ inline void JsonMemSave(PGLOBAL g) { - g->Saved_Size = ((PPOOLHEADER)g->Sarea)->To_Free; + g->Saved_Size = ((PPOOLHEADER)g->Sarea)->To_Free; } /* end of JsonMemSave */ /*********************************************************************************/ @@ -854,12 +854,16 @@ my_bool BJNX::DeleteItem(PGLOBAL g, PBVAL row) PUSH_WARNING("Only one expand can be handled"); return b; } // endif loop + n++; - loop = true; } else n = Nodes[i].Rank; vlp = GetArrayValue(rwp, n); + + if (GetNext(vlp) && Nodes[i].Op == OP_EXP) + loop = true; + } else vlp = NULL; @@ -1384,9 +1388,9 @@ PBVAL BJNX::MakeValue(UDF_ARGS *args, uint i, bool b, PBVAL *top) if (n) { if (n == 3) { - if (i == 0) { - PBSON bsp = (PBSON)sap; + PBSON bsp = (PBSON)sap; + if (i == 0) { if (top) *top = (PBVAL)bsp->Top; @@ -1394,8 +1398,9 @@ PBVAL BJNX::MakeValue(UDF_ARGS *args, uint i, bool b, PBVAL *top) G = bsp->G; Base = G->Sarea; } else { - PUSH_WARNING("Only first argument can be binary"); - return jvp; + BJNX bnx(bsp->G); + + jvp = MoveJson(&bnx, (PBVAL)bsp->Jsp); } // endelse i } else { @@ -1616,7 +1621,7 @@ char *BJNX::MakeResult(UDF_ARGS *args, PBVAL top, uint n) /*********************************************************************************/ /* Make the binary result according to the first argument type. */ /*********************************************************************************/ -PBSON BJNX::MakeBinResult(PGLOBAL g, UDF_ARGS *args, PBVAL top, ulong len, int n) +PBSON BJNX::MakeBinResult(UDF_ARGS *args, PBVAL top, ulong len, int n) { char* filename = NULL; int pretty = 2; @@ -1641,7 +1646,7 @@ PBSON BJNX::MakeBinResult(PGLOBAL g, UDF_ARGS *args, PBVAL top, ulong len, int n filename = (char*)args->args[0]; } // endif 2 - if ((bnp = BbinAlloc(g, len, top))) { + if ((bnp = BbinAlloc(G, len, top))) { bnp->Filename = filename; bnp->Pretty = pretty; strcpy(bnp->Msg, "Json Binary item"); @@ -1650,6 +1655,114 @@ PBSON BJNX::MakeBinResult(PGLOBAL g, UDF_ARGS *args, PBVAL top, ulong len, int n return bnp; } // end of MakeBinResult +/***********************************************************************/ +/* Move a Json val block from one area to the current area. 
*/ +/***********************************************************************/ +PBVAL BJNX::MoveVal(PBVAL vlp) +{ + PBVAL nvp = NewVal(vlp->Type); + + nvp->Nd = vlp->Nd; + return nvp; +} // end of MovedVal + +/***********************************************************************/ +/* Move a Json tree from one area to current area. */ +/***********************************************************************/ +PBVAL BJNX::MoveJson(PBJNX bxp, PBVAL jvp) +{ + PBVAL res = NULL; + + if (jvp) + switch (jvp->Type) { + case TYPE_JAR: + res = MoveArray(bxp, jvp); + break; + case TYPE_JOB: + res = MoveObject(bxp, jvp); + break; + default: + res = MoveValue(bxp, jvp); + break; + } // endswitch Type + + return res; +} // end of MoveJson + +/***********************************************************************/ +/* Move an array. */ +/***********************************************************************/ +PBVAL BJNX::MoveArray(PBJNX bxp, PBVAL jap) +{ + PBVAL vlp, vmp, jvp = NULL, jarp = MoveVal(jap); + + for (vlp = bxp->GetArray(jap); vlp; vlp = bxp->GetNext(vlp)) { + vmp = MoveJson(bxp, vlp); + + if (jvp) + jvp->Next = MOF(vmp); + else + jarp->To_Val = MOF(vmp); + + jvp = vmp; + } // endfor vlp + + return jarp; +} // end of MoveArray + +/***********************************************************************/ +/* Replace all object pointers by offsets. */ +/***********************************************************************/ +PBVAL BJNX::MoveObject(PBJNX bxp, PBVAL jop) +{ + PBPR mpp, prp, ppp = NULL; + PBVAL vmp, jobp = MoveVal(jop); + + for (prp = bxp->GetObject(jop); prp; prp = bxp->GetNext(prp)) { + vmp = MoveJson(bxp, GetVlp(prp)); + mpp = NewPair(DupStr(bxp->MZP(prp->Key))); + SetPairValue(mpp, vmp); + + if (ppp) + ppp->Vlp.Next = MOF(mpp); + else + jobp->To_Val = MOF(mpp); + + ppp = mpp; + } // endfor vlp + + return jobp; +} // end of MoffObject + +/***********************************************************************/ +/* Move a non json value. 
*/ +/***********************************************************************/ +PBVAL BJNX::MoveValue(PBJNX bxp, PBVAL jvp) +{ + double *dp; + PBVAL nvp = MoveVal(jvp); + + switch (jvp->Type) { + case TYPE_STRG: + case TYPE_DTM: + nvp->To_Val = DupStr(bxp->MZP(jvp->To_Val)); + break; + case TYPE_DBL: + dp = (double*)BsonSubAlloc(sizeof(double)); + *dp = bxp->DBL(jvp->To_Val); + nvp->To_Val = MOF(dp); + break; + case TYPE_JVAL: + nvp->To_Val = MOF(MoveJson(bxp, bxp->MVP(jvp->To_Val))); + break; + default: + nvp->To_Val = jvp->To_Val; + break; + } // endswith Type + + return nvp; +} // end of MoveValue + /* -----------------------------Utility functions ------------------------------ */ /*********************************************************************************/ @@ -1688,14 +1801,14 @@ int IsArgJson(UDF_ARGS *args, uint i) !strnicmp(args->attributes[i], "Json_", 5)) { if (!args->args[i] || strchr("[{ \t\r\n", *args->args[i])) n = 1; // arg should be is a json item - else - n = 2; // A file name may have been returned +// else +// n = 2; // A file name may have been returned } else if (!strnicmp(args->attributes[i], "Bbin_", 5)) { if (args->lengths[i] == sizeof(BSON)) n = 3; // arg is a binary json item - else - n = 2; // A file name may have been returned +// else +// n = 2; // A file name may have been returned } else if (!strnicmp(args->attributes[i], "Bfile_", 6) || !strnicmp(args->attributes[i], "Jfile_", 6)) { @@ -1785,7 +1898,8 @@ void bsonvalue_deinit(UDF_INIT* initid) { } // end of bsonvalue_deinit /*********************************************************************************/ -/* Make a Bson array containing all the parameters. */ +/* Make a Json array containing all the parameters. */ +/* Note: jvp must be set before arp because it can be a binary argument. */ /*********************************************************************************/ my_bool bson_make_array_init(UDF_INIT* initid, UDF_ARGS* args, char* message) { @@ -1804,10 +1918,13 @@ char* bson_make_array(UDF_INIT* initid, UDF_ARGS* args, char* result, if (!g->Xchk) { if (!CheckMemory(g, initid, args, args->arg_count, false)) { BJNX bnx(g); - PBVAL bvp = NULL, arp = bnx.NewVal(TYPE_JAR); + PBVAL jvp = bnx.MakeValue(args, 0); + PBVAL arp = bnx.NewVal(TYPE_JAR); - for (uint i = 0; i < args->arg_count; i++) - bnx.AddArrayValue(arp, bnx.MakeValue(args, i, true)); + for (uint i = 0; i < args->arg_count;) { + bnx.AddArrayValue(arp, jvp); + jvp = bnx.MakeValue(args, ++i); + } // endfor i if (!(str = bnx.Serialize(g, arp, NULL, 0))) str = strcpy(result, g->Message); @@ -2056,7 +2173,7 @@ char *bson_array_delete(UDF_INIT *initid, UDF_ARGS *args, char *result, if (!CheckMemory(g, initid, args, 1, false, false, true)) { int *x; uint n = 1; - BJNX bnx(g); + BJNX bnx(g, NULL, TYPE_STRING); PBVAL arp, top; PBVAL jvp = bnx.MakeValue(args, 0, true, &top); @@ -4621,6 +4738,7 @@ void bson_serialize_deinit(UDF_INIT* initid) /*********************************************************************************/ /* Make and return a binary Json array containing all the parameters. */ +/* Note: jvp must be set before arp because it can be a binary argument. 
*/ /*********************************************************************************/ my_bool bbin_make_array_init(UDF_INIT *initid, UDF_ARGS *args, char *message) { @@ -4639,20 +4757,20 @@ char *bbin_make_array(UDF_INIT *initid, UDF_ARGS *args, char *result, if (!g->Xchk) { if (!CheckMemory(g, initid, args, args->arg_count, false)) { BJNX bnx(g); - PBVAL arp; + PBVAL jvp = bnx.MakeValue(args, 0); + PBVAL arp = bnx.NewVal(TYPE_JAR); - if ((arp = bnx.NewVal(TYPE_JAR))) { - for (uint i = 0; i < args->arg_count; i++) - bnx.AddArrayValue(arp, bnx.MakeValue(args, i)); + for (uint i = 0; i < args->arg_count;) { + bnx.AddArrayValue(arp, jvp); + jvp = bnx.MakeValue(args, ++i); + } // endfor i - if ((bsp = BbinAlloc(g, initid->max_length, arp))) { - strcat(bsp->Msg, " array"); + if ((bsp = BbinAlloc(bnx.G, initid->max_length, arp))) { + strcat(bsp->Msg, " array"); - // Keep result of constant function - g->Xchk = (initid->const_item) ? bsp : NULL; - } // endif bsp - - } // endif arp + // Keep result of constant function + g->Xchk = (initid->const_item) ? bsp : NULL; + } // endif bsp } // endif CheckMemory @@ -4738,7 +4856,7 @@ char *bbin_array_add(UDF_INIT *initid, UDF_ARGS *args, char *result, if (jarp) { bnx.AddArrayValue(jarp, bnx.MakeValue(args, 1), x); bnx.SetChanged(true); - bsp = bnx.MakeBinResult(g, args, top, initid->max_length); + bsp = bnx.MakeBinResult(args, top, initid->max_length); if (initid->const_item) // Keep result of constant function @@ -4795,7 +4913,7 @@ char* bbin_array_add_values(UDF_INIT* initid, UDF_ARGS* args, char* result, bnx.AddArrayValue(arp, bnx.MakeValue(args, i)); bnx.SetChanged(true); - bsp = bnx.MakeBinResult(g, args, top, initid->max_length); + bsp = bnx.MakeBinResult(args, top, initid->max_length); } // endif CheckMemory // Keep result of constant function @@ -4937,7 +5055,7 @@ char *bbin_make_object(UDF_INIT *initid, UDF_ARGS *args, char *result, for (uint i = 0; i < args->arg_count; i++) bnx.SetKeyValue(objp, bnx.MakeValue(args, i), bnx.MakeKey(args, i)); - if ((bsp = BbinAlloc(g, initid->max_length, objp))) { + if ((bsp = BbinAlloc(bnx.G, initid->max_length, objp))) { strcat(bsp->Msg, " object"); // Keep result of constant function @@ -4992,7 +5110,7 @@ char *bbin_object_nonull(UDF_INIT *initid, UDF_ARGS *args, char *result, if (!bnx.IsValueNull(jvp = bnx.MakeValue(args, i))) bnx.SetKeyValue(objp, jvp, bnx.MakeKey(args, i)); - if ((bsp = BbinAlloc(g, initid->max_length, objp))) { + if ((bsp = BbinAlloc(bnx.G, initid->max_length, objp))) { strcat(bsp->Msg, " object"); // Keep result of constant function @@ -5051,7 +5169,7 @@ char *bbin_object_key(UDF_INIT *initid, UDF_ARGS *args, char *result, for (uint i = 0; i < args->arg_count; i += 2) bnx.SetKeyValue(objp, bnx.MakeValue(args, i + 1), MakePSZ(g, args, i)); - if ((bsp = BbinAlloc(g, initid->max_length, objp))) { + if ((bsp = BbinAlloc(bnx.G, initid->max_length, objp))) { strcat(bsp->Msg, " object"); // Keep result of constant function @@ -5129,7 +5247,7 @@ char *bbin_object_add(UDF_INIT *initid, UDF_ARGS *args, char *result, } // endif jobp // In case of error unchanged argument will be returned - bsp = bnx.MakeBinResult(g, args, top, initid->max_length); + bsp = bnx.MakeBinResult(args, top, initid->max_length); if (initid->const_item) // Keep result of constant function @@ -5182,7 +5300,7 @@ char *bbin_array_delete(UDF_INIT *initid, UDF_ARGS *args, char *result, PUSH_WARNING(g->Message); else if (arp && arp->Type == TYPE_JAR) { bnx.SetChanged(bnx.DeleteValue(arp, *x)); - bsp = bnx.MakeBinResult(g, args, 
top, initid->max_length); + bsp = bnx.MakeBinResult(args, top, initid->max_length); } else { PUSH_WARNING("First argument target is not an array"); // if (g->Mrr) *error = 1; @@ -5259,7 +5377,7 @@ char *bbin_object_delete(UDF_INIT *initid, UDF_ARGS *args, char *result, } // endif jvp // In case of error unchanged argument will be returned - bsp = bnx.MakeBinResult(g, args, top, initid->max_length); + bsp = bnx.MakeBinResult(args, top, initid->max_length); if (initid->const_item) // Keep result of constant function @@ -5310,7 +5428,7 @@ char *bbin_object_list(UDF_INIT *initid, UDF_ARGS *args, char *result, } // endif jsp type // In case of error unchanged argument will be returned - bsp = bnx.MakeBinResult(g, args, top, initid->max_length); + bsp = bnx.MakeBinResult(args, top, initid->max_length); bsp->Jsp = (PJSON)jarp; } // endif CheckMemory @@ -5362,7 +5480,7 @@ char *bbin_object_values(UDF_INIT *initid, UDF_ARGS *args, char *result, } // endif jvp // In case of error unchanged argument will be returned - bsp = bnx.MakeBinResult(g, args, top, initid->max_length); + bsp = bnx.MakeBinResult(args, top, initid->max_length); bsp->Jsp = (PJSON)jarp; } // endif CheckMemory @@ -5414,7 +5532,7 @@ char *bbin_get_item(UDF_INIT *initid, UDF_ARGS *args, char *result, if (bnx.CheckPath(g, args, jsp, jvp, 1)) PUSH_WARNING(g->Message); else if (jvp) { - bsp = bnx.MakeBinResult(g, args, top, initid->max_length); + bsp = bnx.MakeBinResult(args, top, initid->max_length); bsp->Jsp = (PJSON)jvp; if (initid->const_item) @@ -5495,7 +5613,7 @@ char *bbin_item_merge(UDF_INIT *initid, UDF_ARGS *args, char *result, bnx.MergeObject(jsp[0], jsp[1]); bnx.SetChanged(true); - bsp = bnx.MakeBinResult(g, args, top, initid->max_length); + bsp = bnx.MakeBinResult(args, top, initid->max_length); } // endif CheckMemory if (g->N) @@ -5592,7 +5710,7 @@ static char *bbin_handle_item(UDF_INIT *initid, UDF_ARGS *args, char *result, bxp->SetChanged(true); } // endfor i - if (!(bsp = bxp->MakeBinResult(g, args, top, initid->max_length))) + if (!(bsp = bxp->MakeBinResult(args, top, initid->max_length))) throw 4; if (g->N) @@ -5745,7 +5863,7 @@ char *bbin_delete_item(UDF_INIT *initid, UDF_ARGS *args, char *result, bnx.SetChanged(bnx.DeleteItem(g, jvp)); } // endfor i - bsp = bnx.MakeBinResult(g, args, top, initid->max_length); + bsp = bnx.MakeBinResult(args, top, initid->max_length); if (args->arg_count == 1) // Here Jsp was not a sub-item of top @@ -5815,7 +5933,7 @@ char *bbin_file(UDF_INIT *initid, UDF_ARGS *args, char *result, // else if (pretty == 3) // pretty = pty; - if ((bsp = BbinAlloc(g, len, jsp))) { + if ((bsp = BbinAlloc(bnx.G, len, jsp))) { strcat(bsp->Msg, " file"); bsp->Filename = fn; bsp->Pretty = pretty; @@ -5923,7 +6041,7 @@ char* bbin_locate_all(UDF_INIT* initid, UDF_ARGS* args, char* result, mx = (int)*(long long*)args->args[2]; if ((path = bnx.LocateAll(g, bvp, bvp2, mx))) { - bsp = bnx.MakeBinResult(g, args, top, initid->max_length); + bsp = bnx.MakeBinResult(args, top, initid->max_length); bsp->Jsp = (PJSON)bnx.ParseJson(g, path, strlen(path)); } // endif path diff --git a/storage/connect/bsonudf.h b/storage/connect/bsonudf.h index 7e743c8a72a..01b9b9d55d5 100644 --- a/storage/connect/bsonudf.h +++ b/storage/connect/bsonudf.h @@ -116,7 +116,7 @@ public: JTYP type, PBVAL* top = NULL); PBVAL ParseJsonFile(PGLOBAL g, char* fn, int& pty, size_t& len); char *MakeResult(UDF_ARGS* args, PBVAL top, uint n = 2); - PBSON MakeBinResult(PGLOBAL g, UDF_ARGS* args, PBVAL top, ulong len, int n = 2); + PBSON 
MakeBinResult(UDF_ARGS* args, PBVAL top, ulong len, int n = 2); protected: my_bool SetArrayOptions(PGLOBAL g, char* p, int i, PSZ nm); @@ -126,6 +126,11 @@ protected: PVAL MakeJson(PGLOBAL g, PBVAL bvp); void SetJsonValue(PGLOBAL g, PVAL vp, PBVAL vlp); PBVAL GetRow(PGLOBAL g); + PBVAL MoveVal(PBVAL vlp); + PBVAL MoveJson(PBJNX bxp, PBVAL jvp); + PBVAL MoveArray(PBJNX bxp, PBVAL jvp); + PBVAL MoveObject(PBJNX bxp, PBVAL jvp); + PBVAL MoveValue(PBJNX bxp, PBVAL jvp); my_bool CompareValues(PGLOBAL g, PBVAL v1, PBVAL v2); my_bool LocateArray(PGLOBAL g, PBVAL jarp); my_bool LocateObject(PGLOBAL g, PBVAL jobp); @@ -368,6 +373,10 @@ extern "C" { DllExport char *bbin_get_item(UDF_EXEC_ARGS); DllExport void bbin_get_item_deinit(UDF_INIT*); + DllExport my_bool bbin_item_merge_init(UDF_INIT*, UDF_ARGS*, char*); + DllExport char* bbin_item_merge(UDF_EXEC_ARGS); + DllExport void bbin_item_merge_deinit(UDF_INIT*); + DllExport my_bool bbin_set_item_init(UDF_INIT*, UDF_ARGS*, char*); DllExport char *bbin_set_item(UDF_EXEC_ARGS); DllExport void bbin_set_item_deinit(UDF_INIT*); diff --git a/storage/connect/mysql-test/connect/r/bson_udf.result b/storage/connect/mysql-test/connect/r/bson_udf.result new file mode 100644 index 00000000000..ea3e0e28f68 --- /dev/null +++ b/storage/connect/mysql-test/connect/r/bson_udf.result @@ -0,0 +1,708 @@ +CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=VIR BLOCK_SIZE=5; +# +# Test UDF's with constant arguments +# +SELECT BsonValue(56, 3.1416, 'foo', NULL); +ERROR HY000: Can't initialize function 'bsonvalue'; Cannot accept more than 1 argument +SELECT BsonValue(3.1416); +BsonValue(3.1416) +3.1416 +SELECT BsonValue(-80); +BsonValue(-80) +-80 +SELECT BsonValue('foo'); +BsonValue('foo') +foo +SELECT BsonValue(9223372036854775807); +BsonValue(9223372036854775807) +9223372036854775807 +SELECT BsonValue(NULL); +BsonValue(NULL) +null +SELECT BsonValue(TRUE); +BsonValue(TRUE) +true +SELECT BsonValue(FALSE); +BsonValue(FALSE) +false +SELECT BsonValue(); +BsonValue() +null +SELECT BsonValue('[11, 22, 33]' json_) FROM t1; +BsonValue('[11, 22, 33]' json_) +[11,22,33] +[11,22,33] +[11,22,33] +[11,22,33] +[11,22,33] +SELECT Bson_Make_Array(); +Bson_Make_Array() +[] +SELECT Bson_Make_Array(56, 3.1416, 'My name is "Foo"', NULL); +Bson_Make_Array(56, 3.1416, 'My name is "Foo"', NULL) +[56,3.1416,"My name is \"Foo\"",null] +SELECT Bson_Make_Array(Bson_Make_Array(56, 3.1416, 'foo'), TRUE); +Bson_Make_Array(Bson_Make_Array(56, 3.1416, 'foo'), TRUE) +[[56,3.1416,"foo"],true] +SELECT Bson_Array_Add(Bson_Make_Array(56, 3.1416, 'foo', NULL)) Array; +ERROR HY000: Can't initialize function 'bson_array_add'; This function must have at least 2 arguments +SELECT Bson_Array_Add(Bson_Make_Array(56, 3.1416, 'foo', NULL), 'One more') Array; +Array +[56,3.1416,"foo",null,"One more"] +SELECT Bson_Array_Add(BsonValue('one value'), 'One more'); +Bson_Array_Add(BsonValue('one value'), 'One more') +["one value","One more"] +SELECT Bson_Array_Add('one value', 'One more'); +Bson_Array_Add('one value', 'One more') +["one value","One more"] +SELECT Bson_Array_Add('one value' json_, 'One more'); +Bson_Array_Add('one value' json_, 'One more') +["one value","One more"] +SELECT Bson_Array_Add(5 json_, 'One more'); +Bson_Array_Add(5 json_, 'One more') +[5,"One more"] +SELECT Bson_Array_Add('[5,3,8,7,9]' json_, 4, 0); +Bson_Array_Add('[5,3,8,7,9]' json_, 4, 0) +[4,5,3,8,7,9] +SELECT Bson_Array_Add('[5,3,8,7,9]' json_, 4, 2) Array; +Array +[5,3,4,8,7,9] +SELECT Bson_Array_Add('[5,3,8,7,9]' json_, 4, 9); 
+Bson_Array_Add('[5,3,8,7,9]' json_, 4, 9) +[5,3,8,7,9,4] +SELECT Bson_Array_Add(Bson_Make_Array(1, 2, Bson_Make_Array(11, 22)), '[2]', 33, 1); +Bson_Array_Add(Bson_Make_Array(1, 2, Bson_Make_Array(11, 22)), '[2]', 33, 1) +[1,2,[11,22],"[2]"] +SELECT Bson_Array_Add(Bson_Make_Array(1, 2, Bson_Make_Array(11, 22)), 33, '[2]', 1); +Bson_Array_Add(Bson_Make_Array(1, 2, Bson_Make_Array(11, 22)), 33, '[2]', 1) +[1,2,[11,33,22]] +SELECT Bson_Array_Add(Bson_Make_Array(1, 2, Bson_Make_Array(11, 22)), 33, 1, '[2]'); +Bson_Array_Add(Bson_Make_Array(1, 2, Bson_Make_Array(11, 22)), 33, 1, '[2]') +[1,2,[11,33,22]] +SELECT Bson_Array_Add_Values(Bson_Make_Array(56, 3.1416, 'machin', NULL), 'One more', 'Two more') Array; +Array +[56,3.1416,"machin",null,"One more","Two more"] +SELECT Bson_Array_Add_Values(Bson_Make_Array(56, 3.1416, 'machin'), 'One more', 'Two more') Array FROM t1; +Array +[56,3.1416,"machin","One more","Two more"] +[56,3.1416,"machin","One more","Two more"] +[56,3.1416,"machin","One more","Two more"] +[56,3.1416,"machin","One more","Two more"] +[56,3.1416,"machin","One more","Two more"] +SELECT Bson_Array_Add_Values(Bson_Make_Array(56, 3.1416, 'machin'), n) Array FROM t1; +Array +[56,3.1416,"machin",1] +[56,3.1416,"machin",2] +[56,3.1416,"machin",3] +[56,3.1416,"machin",4] +[56,3.1416,"machin",5] +SELECT Bson_Array_Add_Values(Bson_Make_Array(n, 3.1416, 'machin'), n) Array FROM t1; +Array +[1,3.1416,"machin",1] +[2,3.1416,"machin",2] +[3,3.1416,"machin",3] +[4,3.1416,"machin",4] +[5,3.1416,"machin",5] +SELECT Bson_Array_Add_Values('[56]', 3.1416, 'machin') Array; +Array +[56,3.1416,"machin"] +SELECT Bson_Array_Delete(Bson_Make_Array(56, 3.1416, 'My name is "Foo"', NULL), 0); +Bson_Array_Delete(Bson_Make_Array(56, 3.1416, 'My name is "Foo"', NULL), 0) +[3.1416,"My name is \"Foo\"",null] +SELECT Bson_Array_Delete(Bson_Make_Object(56, 3.1416, 'My name is Foo', NULL), 2); +Bson_Array_Delete(Bson_Make_Object(56, 3.1416, 'My name is Foo', NULL), 2) +{"56":56,"3.1416":3.1416,"My name is Foo":"My name is Foo","NULL":null} +Warnings: +Warning 1105 First argument target is not an array +SELECT Bson_Array_Delete(Bson_Make_Array(56, 3.1416, 'My name is "Foo"', NULL), '2'); +Bson_Array_Delete(Bson_Make_Array(56, 3.1416, 'My name is "Foo"', NULL), '2') +[56,3.1416,"My name is \"Foo\"",null] +Warnings: +Warning 1105 Missing or null array index +SELECT Bson_Array_Delete(Bson_Make_Array(56, 3.1416, 'My name is "Foo"', NULL), '2', 2); +Bson_Array_Delete(Bson_Make_Array(56, 3.1416, 'My name is "Foo"', NULL), '2', 2) +[56,3.1416,"My name is \"Foo\"",null] +Warnings: +Warning 1105 First argument target is not an array +/* WARNING VOID */ +# +SELECT Bson_Make_Object(56, 3.1416, 'foo', NULL); +Bson_Make_Object(56, 3.1416, 'foo', NULL) +{"56":56,"3.1416":3.1416,"foo":"foo","NULL":null} +SELECT Bson_Make_Object(56 qty, 3.1416 price, 'foo' truc, NULL garanty); +Bson_Make_Object(56 qty, 3.1416 price, 'foo' truc, NULL garanty) +{"qty":56,"price":3.1416,"truc":"foo","garanty":null} +SELECT Bson_Make_Object(); +Bson_Make_Object() +{} +SELECT Bson_Make_Object(Bson_Make_Array(56, 3.1416, 'foo'), NULL); +Bson_Make_Object(Bson_Make_Array(56, 3.1416, 'foo'), NULL) +{"Make_Array(56, 3.1416, 'foo')":[56,3.1416,"foo"],"NULL":null} +SELECT Bson_Make_Array(Bson_Make_Object(56 "qty", 3.1416 "price", 'foo') ,NULL); +Bson_Make_Array(Bson_Make_Object(56 "qty", 3.1416 "price", 'foo') ,NULL) +[{"qty":56,"price":3.1416,"foo":"foo"},null] +SELECT Bson_Object_Key('qty', 56, 'price', 3.1416, 'truc', 'machin', 'garanty', NULL); 
+Bson_Object_Key('qty', 56, 'price', 3.1416, 'truc', 'machin', 'garanty', NULL) +{"qty":56,"price":3.1416,"truc":"machin","garanty":null} +SELECT Bson_Object_Key('qty', 56, 'price', 3.1416, 'truc', 'machin', 'garanty'); +ERROR HY000: Can't initialize function 'bson_object_key'; This function must have an even number of arguments +SELECT Bson_Object_Add(Bson_Make_Object(56 qty, 3.1416 price, 'machin' truc, NULL garanty), 'blue' color); +Bson_Object_Add(Bson_Make_Object(56 qty, 3.1416 price, 'machin' truc, NULL garanty), 'blue' color) +{"qty":56,"price":3.1416,"truc":"machin","garanty":null,"color":"blue"} +SELECT Bson_Object_Add(Bson_Make_Object(56 qty, 3.1416 price, 'machin' truc, NULL garanty), 45.99 price); +Bson_Object_Add(Bson_Make_Object(56 qty, 3.1416 price, 'machin' truc, NULL garanty), 45.99 price) +{"qty":56,"price":45.99,"truc":"machin","garanty":null} +SELECT Bson_Object_Add(Bson_File('notexist.json'), 'cheese' item, '[1]', 1); +Bson_Object_Add(Bson_File('notexist.json'), 'cheese' item, '[1]', 1) +NULL +Warnings: +Warning 1105 Error 2 opening notexist.json +Warning 1105 No sub-item at '[1]' +SELECT Bson_Object_Delete(Bson_Make_Object(56 qty, 3.1416 price, 'machin' truc, NULL garanty), 'truc'); +Bson_Object_Delete(Bson_Make_Object(56 qty, 3.1416 price, 'machin' truc, NULL garanty), 'truc') +{"qty":56,"price":3.1416,"garanty":null} +SELECT Bson_Object_Delete(Bson_Make_Object(56 qty, 3.1416 price, 'machin' truc, NULL garanty), 'chose'); +Bson_Object_Delete(Bson_Make_Object(56 qty, 3.1416 price, 'machin' truc, NULL garanty), 'chose') +{"qty":56,"price":3.1416,"truc":"machin","garanty":null} +SELECT Bson_Object_List(Bson_Make_Object(56 qty, 3.1416 price, 'machin' truc, NULL garanty)) "Key List"; +Key List +["qty","price","truc","garanty"] +SELECT Bson_Object_List('{"qty":56, "price":3.1416, "truc":"machin", "garanty":null}') "Key List"; +Key List +["qty","price","truc","garanty"] +SELECT Bson_Object_Values('{"One":1,"Two":2,"Three":3}') "Value List"; +Value List +[1,2,3] +# +# Test UDF's with column arguments +# +CREATE TABLE t2 +( +ISBN CHAR(15), +LANG CHAR(2), +SUBJECT CHAR(32), +AUTHOR CHAR(64), +TITLE CHAR(32), +TRANSLATION CHAR(32), +TRANSLATOR CHAR(80), +PUBLISHER CHAR(32), +DATEPUB int(4) +) ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='biblio.json'; +SELECT Bson_Make_Array(AUTHOR, TITLE, DATEPUB) FROM t2; +Bson_Make_Array(AUTHOR, TITLE, DATEPUB) +[" Jean-Christophe Bernadac, Franois Knab","Construire une application XML",1999] +["William J. Pardi","XML en Action",1999] +SELECT Bson_Make_Object(AUTHOR, TITLE, DATEPUB) FROM t2; +Bson_Make_Object(AUTHOR, TITLE, DATEPUB) +{"AUTHOR":" Jean-Christophe Bernadac, Franois Knab","TITLE":"Construire une application XML","DATEPUB":1999} +{"AUTHOR":"William J. 
Pardi","TITLE":"XML en Action","DATEPUB":1999} +SELECT Bson_Array_Grp(TITLE, DATEPUB) FROM t2; +ERROR HY000: Can't initialize function 'bson_array_grp'; This function can only accept 1 argument +SELECT Bson_Array_Grp(TITLE) FROM t2; +Bson_Array_Grp(TITLE) +["Construire une application XML","XML en Action"] +CREATE TABLE t3 ( +SERIALNO CHAR(5) NOT NULL, +NAME VARCHAR(12) NOT NULL FLAG=6, +SEX SMALLINT(1) NOT NULL, +TITLE VARCHAR(15) NOT NULL FLAG=20, +MANAGER CHAR(5) DEFAULT NULL, +DEPARTMENT CHAr(4) NOT NULL FLAG=41, +SECRETARY CHAR(5) DEFAULT NULL FLAG=46, +SALARY DOUBLE(8,2) NOT NULL FLAG=52 +) ENGINE=CONNECT TABLE_TYPE=FIX BLOCK_SIZE=8 FILE_NAME='employee.dat' ENDING=1; +SELECT Bson_Make_Object(SERIALNO, NAME, TITLE, SALARY) FROM t3 WHERE NAME = 'MERCHANT'; +Bson_Make_Object(SERIALNO, NAME, TITLE, SALARY) +{"SERIALNO":"78943","NAME":"MERCHANT","TITLE":"SALESMAN","SALARY":8700.0000000000000000} +SELECT DEPARTMENT, Bson_Array_Grp(NAME) FROM t3 GROUP BY DEPARTMENT; +DEPARTMENT Bson_Array_Grp(NAME) +0021 ["STRONG","SHORTSIGHT"] +0318 ["BANCROFT","PLUMHEAD","HONEY","TONGHO","WALTER","SHRINKY","WERTHER","MERCHANT","WHEELFOR"] +0319 ["BULLOZER","QUINN","BROWNY","KITTY","MONAPENNY","MARTIN","FUNNIGUY","BUGHAPPY","FODDERMAN","MESSIFUL"] +2452 ["BIGHEAD","ORELLY","BIGHORN","SMITH","CHERRY"] +Warnings: +Warning 1105 Result truncated to json_grp_size values +SELECT BsonSet_Grp_Size(30); +BsonSet_Grp_Size(30) +30 +SELECT Bson_Make_Object(title, Bson_Array_Grp(name) `json_names`) from t3 GROUP BY title; +Bson_Make_Object(title, Bson_Array_Grp(name) `json_names`) +{"title":"ADMINISTRATOR","names":["GOOSEPEN","FUNNIGUY","SHRINKY"]} +{"title":"DIRECTOR","names":["QUINN","WERTHER","STRONG"]} +{"title":"ENGINEER","names":["BROWNY","ORELLY","MARTIN","TONGHO","WALTER","SMITH"]} +{"title":"PROGRAMMER","names":["BUGHAPPY"]} +{"title":"SALESMAN","names":["WHEELFOR","MERCHANT","BULLOZER","BANCROFT","FODDERMAN"]} +{"title":"SCIENTIST","names":["BIGHEAD","BIGHORN"]} +{"title":"SECRETARY","names":["MESSIFUL","HONEY","SHORTSIGHT","CHERRY","MONAPENNY"]} +{"title":"TYPIST","names":["KITTY","PLUMHEAD"]} +SELECT Bson_Make_Array(DEPARTMENT, Bson_Array_Grp(NAME)) FROM t3 GROUP BY DEPARTMENT; +Bson_Make_Array(DEPARTMENT, Bson_Array_Grp(NAME)) +["0021",["STRONG","SHORTSIGHT"]] +["0318",["BANCROFT","PLUMHEAD","HONEY","TONGHO","WALTER","SHRINKY","WERTHER","MERCHANT","WHEELFOR"]] +["0319",["BULLOZER","QUINN","BROWNY","KITTY","MONAPENNY","MARTIN","FUNNIGUY","BUGHAPPY","FODDERMAN","MESSIFUL","GOOSEPEN"]] +["2452",["BIGHEAD","ORELLY","BIGHORN","SMITH","CHERRY"]] +SELECT Bson_Make_Object(DEPARTMENT, Bson_Array_Grp(NAME) json_NAMES) FROM t3 GROUP BY DEPARTMENT; +Bson_Make_Object(DEPARTMENT, Bson_Array_Grp(NAME) json_NAMES) +{"DEPARTMENT":"0021","NAMES":["STRONG","SHORTSIGHT"]} +{"DEPARTMENT":"0318","NAMES":["BANCROFT","PLUMHEAD","HONEY","TONGHO","WALTER","SHRINKY","WERTHER","MERCHANT","WHEELFOR"]} +{"DEPARTMENT":"0319","NAMES":["BULLOZER","QUINN","BROWNY","KITTY","MONAPENNY","MARTIN","FUNNIGUY","BUGHAPPY","FODDERMAN","MESSIFUL","GOOSEPEN"]} +{"DEPARTMENT":"2452","NAMES":["BIGHEAD","ORELLY","BIGHORN","SMITH","CHERRY"]} +SELECT Bson_Make_Object(DEPARTMENT, Bson_Array_Grp(Bson_Make_Object(SERIALNO, NAME, TITLE, SALARY)) json_EMPLOYES) FROM t3 GROUP BY DEPARTMENT; +Bson_Make_Object(DEPARTMENT, Bson_Array_Grp(Bson_Make_Object(SERIALNO, NAME, TITLE, SALARY)) json_EMPLOYES) 
+{"DEPARTMENT":"0021","EMPLOYES":[{"SERIALNO":"87777","NAME":"STRONG","TITLE":"DIRECTOR","SALARY":23000.0000000000000000},{"SERIALNO":"22222","NAME":"SHORTSIGHT","TITLE":"SECRETARY","SALARY":5500.0000000000000000}]} +{"DEPARTMENT":"0318","EMPLOYES":[{"SERIALNO":"74200","NAME":"BANCROFT","TITLE":"SALESMAN","SALARY":9600.0000000000000000},{"SERIALNO":"24888","NAME":"PLUMHEAD","TITLE":"TYPIST","SALARY":2800.0000000000000000},{"SERIALNO":"27845","NAME":"HONEY","TITLE":"SECRETARY","SALARY":4900.0000000000000000},{"SERIALNO":"73452","NAME":"TONGHO","TITLE":"ENGINEER","SALARY":6800.0000000000000000},{"SERIALNO":"74234","NAME":"WALTER","TITLE":"ENGINEER","SALARY":7400.0000000000000000},{"SERIALNO":"77777","NAME":"SHRINKY","TITLE":"ADMINISTRATOR","SALARY":7500.0000000000000000},{"SERIALNO":"70012","NAME":"WERTHER","TITLE":"DIRECTOR","SALARY":14500.0000000000000000},{"SERIALNO":"78943","NAME":"MERCHANT","TITLE":"SALESMAN","SALARY":8700.0000000000000000},{"SERIALNO":"73111","NAME":"WHEELFOR","TITLE":"SALESMAN","SALARY":10030.0000000000000000}]} +{"DEPARTMENT":"0319","EMPLOYES":[{"SERIALNO":"76543","NAME":"BULLOZER","TITLE":"SALESMAN","SALARY":14800.0000000000000000},{"SERIALNO":"40567","NAME":"QUINN","TITLE":"DIRECTOR","SALARY":14000.0000000000000000},{"SERIALNO":"00137","NAME":"BROWNY","TITLE":"ENGINEER","SALARY":10500.0000000000000000},{"SERIALNO":"12345","NAME":"KITTY","TITLE":"TYPIST","SALARY":3000.4499999999998181},{"SERIALNO":"33333","NAME":"MONAPENNY","TITLE":"SECRETARY","SALARY":3800.0000000000000000},{"SERIALNO":"00023","NAME":"MARTIN","TITLE":"ENGINEER","SALARY":10000.0000000000000000},{"SERIALNO":"07654","NAME":"FUNNIGUY","TITLE":"ADMINISTRATOR","SALARY":8500.0000000000000000},{"SERIALNO":"45678","NAME":"BUGHAPPY","TITLE":"PROGRAMMER","SALARY":8500.0000000000000000},{"SERIALNO":"56789","NAME":"FODDERMAN","TITLE":"SALESMAN","SALARY":7000.0000000000000000},{"SERIALNO":"55555","NAME":"MESSIFUL","TITLE":"SECRETARY","SALARY":5000.5000000000000000},{"SERIALNO":"98765","NAME":"GOOSEPEN","TITLE":"ADMINISTRATOR","SALARY":4700.0000000000000000}]} +{"DEPARTMENT":"2452","EMPLOYES":[{"SERIALNO":"34567","NAME":"BIGHEAD","TITLE":"SCIENTIST","SALARY":8000.0000000000000000},{"SERIALNO":"31416","NAME":"ORELLY","TITLE":"ENGINEER","SALARY":13400.0000000000000000},{"SERIALNO":"36666","NAME":"BIGHORN","TITLE":"SCIENTIST","SALARY":11000.0000000000000000},{"SERIALNO":"02345","NAME":"SMITH","TITLE":"ENGINEER","SALARY":9000.0000000000000000},{"SERIALNO":"11111","NAME":"CHERRY","TITLE":"SECRETARY","SALARY":4500.0000000000000000}]} +SELECT Bson_Make_Object(DEPARTMENT, TITLE, Bson_Array_Grp(Bson_Make_Object(SERIALNO, NAME, SALARY)) json_EMPLOYES) FROM t3 GROUP BY DEPARTMENT, TITLE; +Bson_Make_Object(DEPARTMENT, TITLE, Bson_Array_Grp(Bson_Make_Object(SERIALNO, NAME, SALARY)) json_EMPLOYES) +{"DEPARTMENT":"0021","TITLE":"DIRECTOR","EMPLOYES":[{"SERIALNO":"87777","NAME":"STRONG","SALARY":23000.0000000000000000}]} +{"DEPARTMENT":"0021","TITLE":"SECRETARY","EMPLOYES":[{"SERIALNO":"22222","NAME":"SHORTSIGHT","SALARY":5500.0000000000000000}]} +{"DEPARTMENT":"0318","TITLE":"ADMINISTRATOR","EMPLOYES":[{"SERIALNO":"77777","NAME":"SHRINKY","SALARY":7500.0000000000000000}]} +{"DEPARTMENT":"0318","TITLE":"DIRECTOR","EMPLOYES":[{"SERIALNO":"70012","NAME":"WERTHER","SALARY":14500.0000000000000000}]} +{"DEPARTMENT":"0318","TITLE":"ENGINEER","EMPLOYES":[{"SERIALNO":"73452","NAME":"TONGHO","SALARY":6800.0000000000000000},{"SERIALNO":"74234","NAME":"WALTER","SALARY":7400.0000000000000000}]} 
+{"DEPARTMENT":"0318","TITLE":"SALESMAN","EMPLOYES":[{"SERIALNO":"74200","NAME":"BANCROFT","SALARY":9600.0000000000000000},{"SERIALNO":"78943","NAME":"MERCHANT","SALARY":8700.0000000000000000},{"SERIALNO":"73111","NAME":"WHEELFOR","SALARY":10030.0000000000000000}]} +{"DEPARTMENT":"0318","TITLE":"SECRETARY","EMPLOYES":[{"SERIALNO":"27845","NAME":"HONEY","SALARY":4900.0000000000000000}]} +{"DEPARTMENT":"0318","TITLE":"TYPIST","EMPLOYES":[{"SERIALNO":"24888","NAME":"PLUMHEAD","SALARY":2800.0000000000000000}]} +{"DEPARTMENT":"0319","TITLE":"ADMINISTRATOR","EMPLOYES":[{"SERIALNO":"98765","NAME":"GOOSEPEN","SALARY":4700.0000000000000000},{"SERIALNO":"07654","NAME":"FUNNIGUY","SALARY":8500.0000000000000000}]} +{"DEPARTMENT":"0319","TITLE":"DIRECTOR","EMPLOYES":[{"SERIALNO":"40567","NAME":"QUINN","SALARY":14000.0000000000000000}]} +{"DEPARTMENT":"0319","TITLE":"ENGINEER","EMPLOYES":[{"SERIALNO":"00023","NAME":"MARTIN","SALARY":10000.0000000000000000},{"SERIALNO":"00137","NAME":"BROWNY","SALARY":10500.0000000000000000}]} +{"DEPARTMENT":"0319","TITLE":"PROGRAMMER","EMPLOYES":[{"SERIALNO":"45678","NAME":"BUGHAPPY","SALARY":8500.0000000000000000}]} +{"DEPARTMENT":"0319","TITLE":"SALESMAN","EMPLOYES":[{"SERIALNO":"76543","NAME":"BULLOZER","SALARY":14800.0000000000000000},{"SERIALNO":"56789","NAME":"FODDERMAN","SALARY":7000.0000000000000000}]} +{"DEPARTMENT":"0319","TITLE":"SECRETARY","EMPLOYES":[{"SERIALNO":"33333","NAME":"MONAPENNY","SALARY":3800.0000000000000000},{"SERIALNO":"55555","NAME":"MESSIFUL","SALARY":5000.5000000000000000}]} +{"DEPARTMENT":"0319","TITLE":"TYPIST","EMPLOYES":[{"SERIALNO":"12345","NAME":"KITTY","SALARY":3000.4499999999998181}]} +{"DEPARTMENT":"2452","TITLE":"ENGINEER","EMPLOYES":[{"SERIALNO":"31416","NAME":"ORELLY","SALARY":13400.0000000000000000},{"SERIALNO":"02345","NAME":"SMITH","SALARY":9000.0000000000000000}]} +{"DEPARTMENT":"2452","TITLE":"SCIENTIST","EMPLOYES":[{"SERIALNO":"34567","NAME":"BIGHEAD","SALARY":8000.0000000000000000},{"SERIALNO":"36666","NAME":"BIGHORN","SALARY":11000.0000000000000000}]} +{"DEPARTMENT":"2452","TITLE":"SECRETARY","EMPLOYES":[{"SERIALNO":"11111","NAME":"CHERRY","SALARY":4500.0000000000000000}]} +SELECT Bson_Object_Grp(SALARY) FROM t3; +ERROR HY000: Can't initialize function 'bson_object_grp'; This function requires 2 arguments (key, value) +SELECT Bson_Object_Grp(NAME, SALARY) FROM t3; +Bson_Object_Grp(NAME, SALARY) +{"":"MARTIN","ffffp@":"KITTY"} +SELECT Bson_Make_Object(DEPARTMENT, Bson_Object_Grp(NAME, SALARY) "Json_SALARIES") FROM t3 GROUP BY DEPARTMENT; +Bson_Make_Object(DEPARTMENT, Bson_Object_Grp(NAME, SALARY) "Json_SALARIES") +{"DEPARTMENT":"0021","SALARIES":{"":"SHORTSIGHT"}} +{"DEPARTMENT":"0318","SALARIES":{"":"WHEELFOR"}} +{"DEPARTMENT":"0319","SALARIES":{"":"GOOSEPEN","ffffp@":"KITTY"}} +{"DEPARTMENT":"2452","SALARIES":{"":"CHERRY"}} +SELECT Bson_Array_Grp(NAME) FROM t3; +Bson_Array_Grp(NAME) +["BANCROFT","SMITH","MERCHANT","FUNNIGUY","BUGHAPPY","BIGHEAD","SHRINKY","WALTER","FODDERMAN","TONGHO","SHORTSIGHT","MESSIFUL","HONEY","GOOSEPEN","CHERRY","MONAPENNY","KITTY","PLUMHEAD","STRONG","BULLOZER","WERTHER","QUINN","ORELLY","BIGHORN","BROWNY","WHEELFOR","MARTIN"] +SELECT Bson_Object_Key(name, title) FROM t3 WHERE DEPARTMENT = 318; +Bson_Object_Key(name, title) +{"BANCROFT":"SALESMAN"} +{"MERCHANT":"SALESMAN"} +{"SHRINKY":"ADMINISTRATOR"} +{"WALTER":"ENGINEER"} +{"TONGHO":"ENGINEER"} +{"HONEY":"SECRETARY"} +{"PLUMHEAD":"TYPIST"} +{"WERTHER":"DIRECTOR"} +{"WHEELFOR":"SALESMAN"} +SELECT Bson_Object_Grp(name, title) FROM t3 WHERE 
DEPARTMENT = 318; +Bson_Object_Grp(name, title) +{"SALESMAN":"WHEELFOR","ADMINISTRATOR":"SHRINKY","ENGINEER":"TONGHO","SECRETARY":"HONEY","TYPIST":"PLUMHEAD","DIRECTOR":"WERTHER"} +# +# Test value getting UDF's +# +SELECT BsonGet_String(Bson_Array_Grp(name),'[#]') FROM t3; +BsonGet_String(Bson_Array_Grp(name),'[#]') +27 +SELECT BsonGet_String(Bson_Array_Grp(name),'[","]') FROM t3; +BsonGet_String(Bson_Array_Grp(name),'[","]') +BANCROFT,SMITH,MERCHANT,FUNNIGUY,BUGHAPPY,BIGHEAD,SHRINKY,WALTER,FODDERMAN,TONGHO,SHORTSIGHT,MESSIFUL,HONEY,GOOSEPEN,CHERRY,MONAPENNY,KITTY,PLUMHEAD,STRONG,BULLOZER,WERTHER,QUINN,ORELLY,BIGHORN,BROWNY,WHEELFOR,MARTIN +SELECT BsonGet_String(Bson_Array_Grp(name),'[>]') FROM t3; +BsonGet_String(Bson_Array_Grp(name),'[>]') +WHEELFOR +SET @j1 = '[45,28,36,45,89]'; +SELECT BsonGet_String(@j1,'1'); +BsonGet_String(@j1,'1') +28 +SELECT BsonGet_String(@j1 json_,'3'); +BsonGet_String(@j1 json_,'3') +45 +SELECT BsonGet_String(Bson_Make_Array(45,28,36,45,89),'3'); +BsonGet_String(Bson_Make_Array(45,28,36,45,89),'3') +45 +SELECT BsonGet_String(Bson_Make_Array(45,28,36,45,89),'["+"]') "list",'=' as "egal",BsonGet_String(Bson_Make_Array(45,28,36,45,89),'[+]') "sum"; +list egal sum +45+28+36+45+89 = 243.00 +SELECT BsonGet_String(Bson_Make_Array(Bson_Make_Array(45,28),Bson_Make_Array(36,45,89)),'1.0'); +BsonGet_String(Bson_Make_Array(Bson_Make_Array(45,28),Bson_Make_Array(36,45,89)),'1.0') +36 +SELECT BsonGet_String(Bson_Make_Array(Bson_Make_Array(45,28),Bson_Make_Array(36,45,89)),'1.*'); +BsonGet_String(Bson_Make_Array(Bson_Make_Array(45,28),Bson_Make_Array(36,45,89)),'1.*') +[36,45,89] +SELECT BsonGet_String(Bson_Make_Object(56 qty,3.1416 price,'machin' truc, NULL garanty),'truc'); +BsonGet_String(Bson_Make_Object(56 qty,3.1416 price,'machin' truc, NULL garanty),'truc') +machin +SET @j2 = '{"qty":56,"price":3.141600,"truc":"machin","garanty":null}'; +SELECT BsonGet_String(@j2 json_,'truc'); +BsonGet_String(@j2 json_,'truc') +machin +SELECT BsonGet_String(@j2,'truc'); +BsonGet_String(@j2,'truc') +machin +SELECT BsonGet_String(@j2,'chose'); +BsonGet_String(@j2,'chose') +NULL +SELECT BsonGet_String(NULL json_, NULL); +BsonGet_String(NULL json_, NULL) +NULL +Warnings: +Warning 1105 +/* NULL WARNING */ +SELECT department, BsonGet_String(Bson_Make_Object(department, Bson_Array_Grp(salary) "Json_salaries"),'salaries.[+]') Sumsal FROM t3 GROUP BY department; +department Sumsal +0021 28500.00 +0318 72230.00 +0319 89800.95 +2452 45900.00 +SELECT BsonGet_Int(@j1, '4'); +BsonGet_Int(@j1, '4') +89 +SELECT BsonGet_Int(@j1, '[#]'); +BsonGet_Int(@j1, '[#]') +5 +SELECT BsonGet_Int(@j1, '[+]'); +BsonGet_Int(@j1, '[+]') +243 +SELECT BsonGet_Int(@j1 json_, '3'); +BsonGet_Int(@j1 json_, '3') +45 +SELECT BsonGet_Int(Bson_Make_Array(45,28,36,45,89), '3'); +BsonGet_Int(Bson_Make_Array(45,28,36,45,89), '3') +45 +SELECT BsonGet_Int(Bson_Make_Array(45,28,36,45,89), '["+"]'); +BsonGet_Int(Bson_Make_Array(45,28,36,45,89), '["+"]') +45 +SELECT BsonGet_Int(Bson_Make_Array(45,28,36,45,89), '[+]'); +BsonGet_Int(Bson_Make_Array(45,28,36,45,89), '[+]') +243 +SELECT BsonGet_Int(Bson_Make_Array(Bson_Make_Array(45,28), Bson_Make_Array(36,45,89)), '1.0'); +BsonGet_Int(Bson_Make_Array(Bson_Make_Array(45,28), Bson_Make_Array(36,45,89)), '1.0') +36 +SELECT BsonGet_Int(Bson_Make_Array(Bson_Make_Array(45,28), Bson_Make_Array(36,45,89)), '0.1'); +BsonGet_Int(Bson_Make_Array(Bson_Make_Array(45,28), Bson_Make_Array(36,45,89)), '0.1') +28 +SELECT BsonGet_Int(Bson_Make_Object(56 qty, 3.1416 price, 'machin' truc, NULL garanty), 
'qty'); +BsonGet_Int(Bson_Make_Object(56 qty, 3.1416 price, 'machin' truc, NULL garanty), 'qty') +56 +SELECT BsonGet_Int(@j2 json_, 'price'); +BsonGet_Int(@j2 json_, 'price') +3 +SELECT BsonGet_Int(@j2, 'qty'); +BsonGet_Int(@j2, 'qty') +56 +SELECT BsonGet_Int('{"qty":56,"price":3.141600,"truc":"machin","garanty":null}', 'chose'); +BsonGet_Int('{"qty":56,"price":3.141600,"truc":"machin","garanty":null}', 'chose') +NULL +SELECT BsonGet_Int(BsonGet_String(Bson_Make_Array(Bson_Make_Array(45,28),Bson_Make_Array(36,45,89)), '1.*'), '[+]') sum; +sum +170 +SELECT department, BsonGet_Int(Bson_Make_Object(department, Bson_Array_Grp(salary) "Json_salaries"), 'salaries.[+]') Sumsal FROM t3 GROUP BY department; +department Sumsal +0021 28500 +0318 72230 +0319 89800 +2452 45900 +SELECT BsonGet_Real(@j1, '2'); +BsonGet_Real(@j1, '2') +36.000000000000000 +SELECT BsonGet_Real(@j1 json_, '3', 2); +BsonGet_Real(@j1 json_, '3', 2) +45.00 +SELECT BsonGet_Real(Bson_Make_Array(45,28,36,45,89), '3'); +BsonGet_Real(Bson_Make_Array(45,28,36,45,89), '3') +45.000000000000000 +SELECT BsonGet_Real(Bson_Make_Array(45,28,36,45,89), '["+"]'); +BsonGet_Real(Bson_Make_Array(45,28,36,45,89), '["+"]') +45.000000000000000 +SELECT BsonGet_Real(Bson_Make_Array(45,28,36,45,89), '[+]'); +BsonGet_Real(Bson_Make_Array(45,28,36,45,89), '[+]') +243.000000000000000 +SELECT BsonGet_Real(Bson_Make_Array(45,28,36,45,89), '[!]'); +BsonGet_Real(Bson_Make_Array(45,28,36,45,89), '[!]') +48.600000000000000 +SELECT BsonGet_Real(Bson_Make_Array(Bson_Make_Array(45,28), Bson_Make_Array(36,45,89)), '1.0'); +BsonGet_Real(Bson_Make_Array(Bson_Make_Array(45,28), Bson_Make_Array(36,45,89)), '1.0') +36.000000000000000 +SELECT BsonGet_Real(Bson_Make_Object(56 qty, 3.1416 price, 'machin' truc, NULL garanty), 'price'); +BsonGet_Real(Bson_Make_Object(56 qty, 3.1416 price, 'machin' truc, NULL garanty), 'price') +3.141600000000000 +SELECT BsonGet_Real('{"qty":56,"price":3.141600,"truc":"machin","garanty":null}' json_, 'qty'); +BsonGet_Real('{"qty":56,"price":3.141600,"truc":"machin","garanty":null}' json_, 'qty') +56.000000000000000 +SELECT BsonGet_Real('{"qty":56,"price":3.141600,"truc":"machin","garanty":null}', 'price'); +BsonGet_Real('{"qty":56,"price":3.141600,"truc":"machin","garanty":null}', 'price') +3.141600000000000 +SELECT BsonGet_Real('{"qty":56,"price":3.141600,"truc":"machin","garanty":null}', 'price', 4); +BsonGet_Real('{"qty":56,"price":3.141600,"truc":"machin","garanty":null}', 'price', 4) +3.1416 +SELECT BsonGet_Real('{"qty":56,"price":3.141600,"truc":"machin","garanty":null}', 'chose'); +BsonGet_Real('{"qty":56,"price":3.141600,"truc":"machin","garanty":null}', 'chose') +NULL +SELECT department, BsonGet_Real(Bson_Make_Object(department, Bson_Array_Grp(salary) "Json_salaries"),'salaries.[+]') Sumsal FROM t3 GROUP BY department; +department Sumsal +0021 28500.000000000000000 +0318 72230.000000000000000 +0319 89800.950000000000000 +2452 45900.000000000000000 +# +# Documentation examples +# +SELECT +BsonGet_Int(Bson_Make_Array(45,28,36,45,89), '4') "Rank", +BsonGet_Int(Bson_Make_Array(45,28,36,45,89), '[#]') "Number", +BsonGet_String(Bson_Make_Array(45,28,36,45,89), '[","]') "Concat", +BsonGet_Int(Bson_Make_Array(45,28,36,45,89), '[+]') "Sum", +BsonGet_Real(Bson_Make_Array(45,28,36,45,89), '[!]', 2) "Avg"; +Rank Number Concat Sum Avg +89 5 45,28,36,45,89 243 48.60 +SELECT +BsonGet_String('{"qty":7,"price":29.50,"garanty":null}', 'price') "String", +BsonGet_Int('{"qty":7,"price":29.50,"garanty":null}', 'price') "Int", 
+BsonGet_Real('{"qty":7,"price":29.50,"garanty":null}', 'price') "Real"; +String Int Real +29.50 29 29.500000000000000 +SELECT BsonGet_Real('{"qty":7,"price":29.50,"garanty":null}', 'price', 3) "Real"; +Real +29.500 +# +# Testing Locate +# +SELECT BsonLocate(Bson_Make_Object(56 qty,3.1416 price,'machin' truc, NULL garanty),'machin'); +BsonLocate(Bson_Make_Object(56 qty,3.1416 price,'machin' truc, NULL garanty),'machin') +$.truc +SELECT BsonLocate(Bson_Make_Object(56 qty,3.1416 price,'machin' truc, NULL garanty),56); +BsonLocate(Bson_Make_Object(56 qty,3.1416 price,'machin' truc, NULL garanty),56) +$.qty +SELECT BsonLocate(Bson_Make_Object(56 qty,3.1416 price,'machin' truc, NULL garanty),3.1416); +BsonLocate(Bson_Make_Object(56 qty,3.1416 price,'machin' truc, NULL garanty),3.1416) +$.price +SELECT BsonLocate(Bson_Make_Object(56 qty,3.1416 price,'machin' truc, NULL garanty),'chose'); +BsonLocate(Bson_Make_Object(56 qty,3.1416 price,'machin' truc, NULL garanty),'chose') +NULL +SELECT BsonLocate('{"AUTHORS":[{"FN":"Jules", "LN":"Verne"}, {"FN":"Jack", "LN":"London"}]}' json_, 'Jack') Path; +Path +$.AUTHORS[1].FN +SELECT BsonLocate('{"AUTHORS":[{"FN":"Jules", "LN":"Verne"}, {"FN":"Jack", "LN":"London"}]}' json_, 'jack' ci) Path; +Path +$.AUTHORS[1].FN +SELECT BsonLocate('{"AUTHORS":[{"FN":"Jules", "LN":"Verne"}, {"FN":"Jack", "LN":"London"}]}' json_, '{"FN":"Jack", "LN":"London"}' json_) Path; +Path +$.AUTHORS[1] +SELECT BsonLocate('{"AUTHORS":[{"FN":"Jules", "LN":"Verne"}, {"FN":"Jack", "LN":"London"}]}' json_, '{"FN":"jack", "LN":"London"}' json_) Path; +Path +NULL +SELECT BsonLocate('[45,28,36,45,89]',36); +BsonLocate('[45,28,36,45,89]',36) +$[2] +SELECT BsonLocate('[45,28,36,45,89]' json_,28.0); +BsonLocate('[45,28,36,45,89]' json_,28.0) +NULL +SELECT Bson_Locate_All('[45,28,36,45,89]',10); +Bson_Locate_All('[45,28,36,45,89]',10) +[] +SELECT Bson_Locate_All('[45,28,36,45,89]',45); +Bson_Locate_All('[45,28,36,45,89]',45) +["$[0]","$[3]"] +SELECT Bson_Locate_All('[[45,28],36,45,89]',45); +Bson_Locate_All('[[45,28],36,45,89]',45) +["$[0][0]","$[2]"] +SELECT Bson_Locate_All('[[45,28,45],36,45,89]',45); +Bson_Locate_All('[[45,28,45],36,45,89]',45) +["$[0][0]","$[0][2]","$[2]"] +SELECT Bson_Locate_All('[[45,28,45],36,45,89]',BsonGet_Int('[3,45]','[1]')); +Bson_Locate_All('[[45,28,45],36,45,89]',BsonGet_Int('[3,45]','[1]')) +["$[0][0]","$[0][2]","$[2]"] +SELECT BsonLocate('[[45,28,45],36,45,89]',45,n) from t1; +BsonLocate('[[45,28,45],36,45,89]',45,n) +$[0][0] +$[0][2] +$[2] +NULL +NULL +SELECT BsonGet_String(Bson_Locate_All('[[45,28,45],36,45,89]',45),concat('[',n-1,']')) FROM t1; +BsonGet_String(Bson_Locate_All('[[45,28,45],36,45,89]',45),concat('[',n-1,']')) +$[0][0] +$[0][2] +$[2] +NULL +NULL +SELECT BsonGet_String(Bson_Locate_All('[[45,28,45],36,45,89]',45),concat('[',n-1,']')) AS `Path` FROM t1 GROUP BY n HAVING `Path` IS NOT NULL; +Path +$[0][0] +$[0][2] +$[2] +SELECT Bson_Locate_All('[45,28,[36,45,89]]',45); +Bson_Locate_All('[45,28,[36,45,89]]',45) +["$[0]","$[2][1]"] +SELECT Bson_Locate_All('[[45,28],[36,45.0,89]]',BsonValue(45.0)); +Bson_Locate_All('[[45,28],[36,45.0,89]]',BsonValue(45.0)) +[] +SELECT Bson_Locate_All('[[45,28],[36,45.0,89]]',45.0); +Bson_Locate_All('[[45,28],[36,45.0,89]]',45.0) +["$[1][1]"] +SELECT BsonLocate('[[45,28],[36,45,89]]','[36,45,89]' json_); +BsonLocate('[[45,28],[36,45,89]]','[36,45,89]' json_) +$[1] +SELECT BsonLocate('[[45,28],[36,45,89]]','[45,28]' json_); +BsonLocate('[[45,28],[36,45,89]]','[45,28]' json_) +$[0] +SELECT 
Bson_Locate_All('[[45,28],[[36,45],89]]','45') "All paths"; +All paths +[] +SELECT Bson_Locate_All('[[45,28],[[36,45],89]]','[36,45]' json_); +Bson_Locate_All('[[45,28],[[36,45],89]]','[36,45]' json_) +["$[1][0]"] +SELECT BsonGet_Int(Bson_Locate_All('[[45,28],[[36,45],89]]',45), '[#]') "Nb of occurs"; +Nb of occurs +2 +SELECT Bson_Locate_All('[[45,28],[[36,45],89]]',45,2); +Bson_Locate_All('[[45,28],[[36,45],89]]',45,2) +["$[0][0]"] +SELECT BsonGet_String(Bson_Locate_All('[45,28,36,45,89]',45),'0'); +BsonGet_String(Bson_Locate_All('[45,28,36,45,89]',45),'0') +$[0] +SELECT BsonLocate(Bson_File('test/biblio.json'), 'Knab'); +BsonLocate(Bson_File('test/biblio.json'), 'Knab') +$[0].AUTHOR[1].LASTNAME +SELECT Bson_Locate_All('test/biblio.json' jfile_, 'Knab'); +Bson_Locate_All('test/biblio.json' jfile_, 'Knab') +["$[0].AUTHOR[1].LASTNAME"] +# +# Testing json files +# +SELECT Bfile_Make('[{"_id":5,"type":"food","item":"beer","taste":"light","price":5.65,"ratings":[5,8,9]}, +{"_id":6,"type":"car","item":"roadster","mileage":56000,"ratings":[6,9]}, +{"_id":7,"type":"food","item":"meat","origin":"argentina","ratings":[2,4]}, +{"_id":8,"type":"furniture","item":"table","size":{"W":60,"L":80,"H":40},"ratings":[5,8,7]}]', 'test/fx.json', 0) AS NewFile; +NewFile +test/fx.json +SELECT Bfile_Make('test/fx.json', 1); +Bfile_Make('test/fx.json', 1) +test/fx.json +SELECT Bfile_Make('test/fx.json' jfile_); +Bfile_Make('test/fx.json' jfile_) +test/fx.json +SELECT Bfile_Make(Bbin_File('test/fx.json'), 0); +Bfile_Make(Bbin_File('test/fx.json'), 0) +test/fx.json +SELECT Bson_File('test/fx.json', 1); +Bson_File('test/fx.json', 1) +[{"_id":5,"type":"food","item":"beer","taste":"light","price":5.65,"ratings":[5,8,9]},{"_id":6,"type":"car","item":"roadster","mileage":56000,"ratings":[6,9]},{"_id":7,"type":"food","item":"meat","origin":"argentina","ratings":[2,4]},{"_id":8,"type":"furniture","item":"table","size":{"W":60,"L":80,"H":40},"ratings":[5,8,7]}] +Warnings: +Warning 1105 File pretty format doesn't match the specified pretty value +SELECT Bson_File('test/fx.json', 2); +Bson_File('test/fx.json', 2) +[{"_id":5,"type":"food","item":"beer","taste":"light","price":5.65,"ratings":[5,8,9]},{"_id":6,"type":"car","item":"roadster","mileage":56000,"ratings":[6,9]},{"_id":7,"type":"food","item":"meat","origin":"argentina","ratings":[2,4]},{"_id":8,"type":"furniture","item":"table","size":{"W":60,"L":80,"H":40},"ratings":[5,8,7]}] +Warnings: +Warning 1105 File pretty format doesn't match the specified pretty value +SELECT Bson_File('test/fx.json', 0); +Bson_File('test/fx.json', 0) +[{"_id":5,"type":"food","item":"beer","taste":"light","price":5.65,"ratings":[5,8,9]},{"_id":6,"type":"car","item":"roadster","mileage":56000,"ratings":[6,9]},{"_id":7,"type":"food","item":"meat","origin":"argentina","ratings":[2,4]},{"_id":8,"type":"furniture","item":"table","size":{"W":60,"L":80,"H":40},"ratings":[5,8,7]}] +SELECT Bson_File('test/fx.json', '0'); +Bson_File('test/fx.json', '0') +NULL +Warnings: +Warning 1105 +SELECT Bson_File('test/fx.json', '[?]'); +Bson_File('test/fx.json', '[?]') +NULL +Warnings: +Warning 1105 +SELECT BsonGet_String(Bson_File('test/fx.json'), '1.*'); +BsonGet_String(Bson_File('test/fx.json'), '1.*') +{"_id":6,"type":"car","item":"roadster","mileage":56000,"ratings":[6,9]} +SELECT BsonGet_String(Bson_File('test/fx.json'), '1'); +BsonGet_String(Bson_File('test/fx.json'), '1') +6 car roadster 56000 (6, 9) +SELECT BsonGet_Int(Bson_File('test/fx.json'), '1.mileage') AS Mileage; +Mileage +56000 +SELECT 
BsonGet_Real(Bson_File('test/fx.json'), '0.price', 2) AS Price; +Price +5.65 +SELECT Bson_Array_Add(Bson_File('test/fx.json', '2'), 6, 'ratings'); +Bson_Array_Add(Bson_File('test/fx.json', '2'), 6, 'ratings') +NULL +Warnings: +Warning 1105 +Warning 1105 No sub-item at 'ratings' +SELECT Bson_Array_Add(Bson_File('test/fx.json', '2'), 6, 1, 'ratings'); +Bson_Array_Add(Bson_File('test/fx.json', '2'), 6, 1, 'ratings') +NULL +Warnings: +Warning 1105 +Warning 1105 No sub-item at 'ratings' +SELECT Bson_Array_Add(Bson_File('test/fx.json', '2'), 6, 'ratings', 1); +Bson_Array_Add(Bson_File('test/fx.json', '2'), 6, 'ratings', 1) +NULL +Warnings: +Warning 1105 +Warning 1105 No sub-item at 'ratings' +SELECT Bson_Array_Add(Bson_File('test/fx.json', '2.ratings'), 6, 0); +Bson_Array_Add(Bson_File('test/fx.json', '2.ratings'), 6, 0) +[6,null] +Warnings: +Warning 1105 +SELECT Bson_Array_Delete(Bson_File('test/fx.json', '2'), 'ratings', 1); +Bson_Array_Delete(Bson_File('test/fx.json', '2'), 'ratings', 1) +NULL +Warnings: +Warning 1105 +Warning 1105 No sub-item at 'ratings' +SELECT Bson_Object_Add(Bson_File('test/fx.json', '2'), 'france' origin); +Bson_Object_Add(Bson_File('test/fx.json', '2'), 'france' origin) +NULL +Warnings: +Warning 1105 +Warning 1105 First argument target is not an object +SELECT Bson_Object_Add(Bson_File('test/fx.json', '2'), 70 H, 'size'); +Bson_Object_Add(Bson_File('test/fx.json', '2'), 70 H, 'size') +NULL +Warnings: +Warning 1105 +Warning 1105 No sub-item at 'size' +SELECT Bson_Object_Add(Bson_File('test/fx.json', '3'), 70 H, 'size'); +Bson_Object_Add(Bson_File('test/fx.json', '3'), 70 H, 'size') +NULL +Warnings: +Warning 1105 +Warning 1105 No sub-item at 'size' +SELECT Bson_Object_List(Bson_File('test/fx.json', '3.size')); +Bson_Object_List(Bson_File('test/fx.json', '3.size')) +NULL +Warnings: +Warning 1105 +Warning 1105 First argument is not an object +# +# Testing new functions +# +SELECT Bson_Item_Merge('["a","b","c"]','["d","e","f"]') as "Result"; +Result +["a","b","c","d","e","f"] +SELECT Bson_Item_Merge(Bson_Make_Array('a','b','c'), Bson_Make_Array('d','e','f')) as "Result"; +Result +["a","b","c","d","e","f"] +SELECT +Bson_Set_Item('[1,2,3,{"quatre":4}]', 'foo', '$[1]', 5, '$[3].cinq') as "Set", +Bson_Insert_Item('[1,2,3,{"quatre":4}]', 'foo', '$[1]', 5, '$[3].cinq') as "Insert", +Bson_Update_Item(Bson_Make_Array(1,2,3,Bson_Object_Key('quatre',4)),'foo','$[1]',5,'$[3].cinq') "Update"; +Set Insert Update +[1,"foo",3,{"quatre":4,"cinq":5}] [1,2,3,{"quatre":4,"cinq":5}] [1,"foo",3,{"quatre":4}] +SELECT bson_delete_item('[1,2,3,{"quatre":4,"Deux":2}]','1','[2].Deux'); +bson_delete_item('[1,2,3,{"quatre":4,"Deux":2}]','1','[2].Deux') +[1,3,{"quatre":4}] +SELECT bson_delete_item('[1,2,3,{"quatre":4,"Deux":2}]','["[1]","[3].Deux"]'); +bson_delete_item('[1,2,3,{"quatre":4,"Deux":2}]','["[1]","[3].Deux"]') +[1,3,{"quatre":4}] +SELECT bson_delete_item('[1,2,3,{"quatre":4,"Deux":2}]','$.[3].Deux'); +bson_delete_item('[1,2,3,{"quatre":4,"Deux":2}]','$.[3].Deux') +[1,2,3,{"quatre":4}] +SELECT Bson_Item_Merge(Bson_Get_Item("C:/Data/Json/bibdoc.json", '$[0].AUTHOR'), Bson_Make_Array(Bson_Object_Key('FIRSTNAME','Olivier','LASTNAME','Bertrand'))) "Result"; +Result +[{"FIRSTNAME":"Jean-Christophe","LASTNAME":"Bernadac"},{"FIRSTNAME":"Franois","LASTNAME":"Knab"},{"FIRSTNAME":"Olivier","LASTNAME":"Bertrand"}] +DROP TABLE t1; +DROP TABLE t2; +DROP TABLE t3; diff --git a/storage/connect/mysql-test/connect/t/bson_udf.inc b/storage/connect/mysql-test/connect/t/bson_udf.inc new file mode 100644 index 
00000000000..366f48e5861 --- /dev/null +++ b/storage/connect/mysql-test/connect/t/bson_udf.inc @@ -0,0 +1,70 @@ +--disable_query_log +# +# Check if server has support for loading plugins +# +if (`SELECT @@have_dynamic_loading != 'YES'`) { + --skip UDF requires dynamic loading +} +if (!$HA_CONNECT_SO) { + --skip Needs a dynamically built ha_connect.so +} + +--eval CREATE FUNCTION bson_test RETURNS STRING SONAME '$HA_CONNECT_SO'; +--eval CREATE FUNCTION bsonvalue RETURNS STRING SONAME '$HA_CONNECT_SO'; +--eval CREATE FUNCTION bson_make_array RETURNS STRING SONAME '$HA_CONNECT_SO'; +--eval CREATE FUNCTION bson_array_add_values RETURNS STRING SONAME '$HA_CONNECT_SO'; +--eval CREATE FUNCTION bson_array_add RETURNS STRING SONAME '$HA_CONNECT_SO'; +--eval CREATE FUNCTION bson_array_delete RETURNS STRING SONAME '$HA_CONNECT_SO'; +--eval CREATE FUNCTION bson_make_object RETURNS STRING SONAME '$HA_CONNECT_SO'; +--eval CREATE FUNCTION bson_object_nonull RETURNS STRING SONAME '$HA_CONNECT_SO'; +--eval CREATE FUNCTION bson_object_key RETURNS STRING SONAME '$HA_CONNECT_SO'; +--eval CREATE FUNCTION bson_object_add RETURNS STRING SONAME '$HA_CONNECT_SO'; +--eval CREATE FUNCTION bson_object_delete RETURNS STRING SONAME '$HA_CONNECT_SO'; +--eval CREATE FUNCTION bson_object_list RETURNS STRING SONAME '$HA_CONNECT_SO'; +--eval CREATE FUNCTION bson_object_values RETURNS STRING SONAME '$HA_CONNECT_SO'; +--eval CREATE FUNCTION bsonset_grp_size RETURNS INTEGER SONAME '$HA_CONNECT_SO'; +--eval CREATE FUNCTION bsonget_grp_size RETURNS INTEGER SONAME '$HA_CONNECT_SO'; +--eval CREATE AGGREGATE FUNCTION bson_array_grp RETURNS STRING SONAME '$HA_CONNECT_SO'; +--eval CREATE AGGREGATE FUNCTION bson_object_grp RETURNS STRING SONAME '$HA_CONNECT_SO'; +--eval CREATE FUNCTION bsonlocate RETURNS STRING SONAME '$HA_CONNECT_SO'; +--eval CREATE FUNCTION bson_locate_all RETURNS STRING SONAME '$HA_CONNECT_SO'; +--eval CREATE FUNCTION bson_contains RETURNS INTEGER SONAME '$HA_CONNECT_SO'; +--eval CREATE FUNCTION bsoncontains_path RETURNS INTEGER SONAME '$HA_CONNECT_SO'; +--eval CREATE FUNCTION bson_item_merge RETURNS STRING SONAME '$HA_CONNECT_SO'; +--eval CREATE FUNCTION bson_get_item RETURNS STRING SONAME '$HA_CONNECT_SO'; +--eval CREATE FUNCTION bson_delete_item RETURNS STRING SONAME '$HA_CONNECT_SO'; +--eval CREATE FUNCTION bsonget_string RETURNS STRING SONAME '$HA_CONNECT_SO'; +--eval CREATE FUNCTION bsonget_int RETURNS INTEGER SONAME '$HA_CONNECT_SO'; +--eval CREATE FUNCTION bsonget_real RETURNS REAL SONAME '$HA_CONNECT_SO'; +--eval CREATE FUNCTION bson_set_item RETURNS STRING SONAME '$HA_CONNECT_SO'; +--eval CREATE FUNCTION bson_insert_item RETURNS STRING SONAME '$HA_CONNECT_SO'; +--eval CREATE FUNCTION bson_update_item RETURNS STRING SONAME '$HA_CONNECT_SO'; +--eval CREATE FUNCTION bson_file RETURNS STRING SONAME '$HA_CONNECT_SO'; +--eval CREATE FUNCTION bson_serialize RETURNS STRING SONAME '$HA_CONNECT_SO'; +--eval CREATE FUNCTION bfile_make RETURNS STRING SONAME '$HA_CONNECT_SO'; +--eval CREATE FUNCTION bfile_convert RETURNS STRING SONAME '$HA_CONNECT_SO'; +--eval CREATE FUNCTION bfile_bjson RETURNS STRING SONAME '$HA_CONNECT_SO'; +--eval CREATE FUNCTION bbin_make_array RETURNS STRING SONAME '$HA_CONNECT_SO'; +--eval CREATE FUNCTION bbin_array_add RETURNS STRING SONAME '$HA_CONNECT_SO'; +--eval CREATE FUNCTION bbin_array_add_values RETURNS STRING SONAME '$HA_CONNECT_SO'; +--eval CREATE FUNCTION bbin_array_delete RETURNS STRING SONAME '$HA_CONNECT_SO'; +--eval CREATE AGGREGATE FUNCTION bbin_array_grp RETURNS STRING SONAME 
'$HA_CONNECT_SO'; +--eval CREATE AGGREGATE FUNCTION bbin_object_grp RETURNS STRING SONAME '$HA_CONNECT_SO'; +--eval CREATE FUNCTION bbin_make_object RETURNS STRING SONAME '$HA_CONNECT_SO'; +--eval CREATE FUNCTION bbin_object_nonull RETURNS STRING SONAME '$HA_CONNECT_SO'; +--eval CREATE FUNCTION bbin_object_key RETURNS STRING SONAME '$HA_CONNECT_SO'; +--eval CREATE FUNCTION bbin_object_add RETURNS STRING SONAME '$HA_CONNECT_SO'; +--eval CREATE FUNCTION bbin_object_delete RETURNS STRING SONAME '$HA_CONNECT_SO'; +--eval CREATE FUNCTION bbin_object_list RETURNS STRING SONAME '$HA_CONNECT_SO'; +--eval CREATE FUNCTION bbin_object_values RETURNS STRING SONAME '$HA_CONNECT_SO'; +--eval CREATE FUNCTION bbin_get_item RETURNS STRING SONAME '$HA_CONNECT_SO'; +--eval CREATE FUNCTION bbin_item_merge RETURNS STRING SONAME '$HA_CONNECT_SO'; +--eval CREATE FUNCTION bbin_set_item RETURNS STRING SONAME '$HA_CONNECT_SO'; +--eval CREATE FUNCTION bbin_insert_item RETURNS STRING SONAME '$HA_CONNECT_SO'; +--eval CREATE FUNCTION bbin_update_item RETURNS STRING SONAME '$HA_CONNECT_SO'; +--eval CREATE FUNCTION bbin_delete_item RETURNS STRING SONAME '$HA_CONNECT_SO'; +--eval CREATE FUNCTION bbin_locate_all RETURNS STRING SONAME '$HA_CONNECT_SO'; +--eval CREATE FUNCTION bbin_file RETURNS STRING SONAME '$HA_CONNECT_SO'; + +--enable_query_log + diff --git a/storage/connect/mysql-test/connect/t/bson_udf.test b/storage/connect/mysql-test/connect/t/bson_udf.test new file mode 100644 index 00000000000..cec2d5f62f6 --- /dev/null +++ b/storage/connect/mysql-test/connect/t/bson_udf.test @@ -0,0 +1,281 @@ +--source bson_udf.inc + +let $MYSQLD_DATADIR= `select @@datadir`; + +--copy_file $MTR_SUITE_DIR/std_data/biblio.json $MYSQLD_DATADIR/test/biblio.json +--copy_file $MTR_SUITE_DIR/std_data/employee.dat $MYSQLD_DATADIR/test/employee.dat + +CREATE TABLE t1 ENGINE=CONNECT TABLE_TYPE=VIR BLOCK_SIZE=5; + +--echo # +--echo # Test UDF's with constant arguments +--echo # +--error ER_CANT_INITIALIZE_UDF +SELECT BsonValue(56, 3.1416, 'foo', NULL); +SELECT BsonValue(3.1416); +SELECT BsonValue(-80); +SELECT BsonValue('foo'); +SELECT BsonValue(9223372036854775807); +SELECT BsonValue(NULL); +SELECT BsonValue(TRUE); +SELECT BsonValue(FALSE); +SELECT BsonValue(); +SELECT BsonValue('[11, 22, 33]' json_) FROM t1; +# +SELECT Bson_Make_Array(); +SELECT Bson_Make_Array(56, 3.1416, 'My name is "Foo"', NULL); +SELECT Bson_Make_Array(Bson_Make_Array(56, 3.1416, 'foo'), TRUE); +# +--error ER_CANT_INITIALIZE_UDF +SELECT Bson_Array_Add(Bson_Make_Array(56, 3.1416, 'foo', NULL)) Array; +SELECT Bson_Array_Add(Bson_Make_Array(56, 3.1416, 'foo', NULL), 'One more') Array; +#--error ER_CANT_INITIALIZE_UDF +SELECT Bson_Array_Add(BsonValue('one value'), 'One more'); +#--error ER_CANT_INITIALIZE_UDF +SELECT Bson_Array_Add('one value', 'One more'); +SELECT Bson_Array_Add('one value' json_, 'One more'); +#--error ER_CANT_INITIALIZE_UDF +SELECT Bson_Array_Add(5 json_, 'One more'); +SELECT Bson_Array_Add('[5,3,8,7,9]' json_, 4, 0); +SELECT Bson_Array_Add('[5,3,8,7,9]' json_, 4, 2) Array; +SELECT Bson_Array_Add('[5,3,8,7,9]' json_, 4, 9); +SELECT Bson_Array_Add(Bson_Make_Array(1, 2, Bson_Make_Array(11, 22)), '[2]', 33, 1); +SELECT Bson_Array_Add(Bson_Make_Array(1, 2, Bson_Make_Array(11, 22)), 33, '[2]', 1); +SELECT Bson_Array_Add(Bson_Make_Array(1, 2, Bson_Make_Array(11, 22)), 33, 1, '[2]'); +# +SELECT Bson_Array_Add_Values(Bson_Make_Array(56, 3.1416, 'machin', NULL), 'One more', 'Two more') Array; +SELECT Bson_Array_Add_Values(Bson_Make_Array(56, 3.1416, 'machin'), 
'One more', 'Two more') Array FROM t1; +SELECT Bson_Array_Add_Values(Bson_Make_Array(56, 3.1416, 'machin'), n) Array FROM t1; +SELECT Bson_Array_Add_Values(Bson_Make_Array(n, 3.1416, 'machin'), n) Array FROM t1; +SELECT Bson_Array_Add_Values('[56]', 3.1416, 'machin') Array; +# +SELECT Bson_Array_Delete(Bson_Make_Array(56, 3.1416, 'My name is "Foo"', NULL), 0); +SELECT Bson_Array_Delete(Bson_Make_Object(56, 3.1416, 'My name is Foo', NULL), 2); +SELECT Bson_Array_Delete(Bson_Make_Array(56, 3.1416, 'My name is "Foo"', NULL), '2'); +SELECT Bson_Array_Delete(Bson_Make_Array(56, 3.1416, 'My name is "Foo"', NULL), '2', 2); /* WARNING VOID */ +# +SELECT Bson_Make_Object(56, 3.1416, 'foo', NULL); +SELECT Bson_Make_Object(56 qty, 3.1416 price, 'foo' truc, NULL garanty); +SELECT Bson_Make_Object(); +SELECT Bson_Make_Object(Bson_Make_Array(56, 3.1416, 'foo'), NULL); +SELECT Bson_Make_Array(Bson_Make_Object(56 "qty", 3.1416 "price", 'foo') ,NULL); +SELECT Bson_Object_Key('qty', 56, 'price', 3.1416, 'truc', 'machin', 'garanty', NULL); +--error ER_CANT_INITIALIZE_UDF +SELECT Bson_Object_Key('qty', 56, 'price', 3.1416, 'truc', 'machin', 'garanty'); +# +SELECT Bson_Object_Add(Bson_Make_Object(56 qty, 3.1416 price, 'machin' truc, NULL garanty), 'blue' color); +SELECT Bson_Object_Add(Bson_Make_Object(56 qty, 3.1416 price, 'machin' truc, NULL garanty), 45.99 price); +SELECT Bson_Object_Add(Bson_File('notexist.json'), 'cheese' item, '[1]', 1); +# +SELECT Bson_Object_Delete(Bson_Make_Object(56 qty, 3.1416 price, 'machin' truc, NULL garanty), 'truc'); +SELECT Bson_Object_Delete(Bson_Make_Object(56 qty, 3.1416 price, 'machin' truc, NULL garanty), 'chose'); +# +SELECT Bson_Object_List(Bson_Make_Object(56 qty, 3.1416 price, 'machin' truc, NULL garanty)) "Key List"; +SELECT Bson_Object_List('{"qty":56, "price":3.1416, "truc":"machin", "garanty":null}') "Key List"; +SELECT Bson_Object_Values('{"One":1,"Two":2,"Three":3}') "Value List"; + +--echo # +--echo # Test UDF's with column arguments +--echo # +CREATE TABLE t2 +( + ISBN CHAR(15), + LANG CHAR(2), + SUBJECT CHAR(32), + AUTHOR CHAR(64), + TITLE CHAR(32), + TRANSLATION CHAR(32), + TRANSLATOR CHAR(80), + PUBLISHER CHAR(32), + DATEPUB int(4) +) ENGINE=CONNECT TABLE_TYPE=BSON FILE_NAME='biblio.json'; + +SELECT Bson_Make_Array(AUTHOR, TITLE, DATEPUB) FROM t2; +SELECT Bson_Make_Object(AUTHOR, TITLE, DATEPUB) FROM t2; +--error ER_CANT_INITIALIZE_UDF +SELECT Bson_Array_Grp(TITLE, DATEPUB) FROM t2; +SELECT Bson_Array_Grp(TITLE) FROM t2; + +CREATE TABLE t3 ( + SERIALNO CHAR(5) NOT NULL, + NAME VARCHAR(12) NOT NULL FLAG=6, + SEX SMALLINT(1) NOT NULL, + TITLE VARCHAR(15) NOT NULL FLAG=20, + MANAGER CHAR(5) DEFAULT NULL, + DEPARTMENT CHAr(4) NOT NULL FLAG=41, + SECRETARY CHAR(5) DEFAULT NULL FLAG=46, + SALARY DOUBLE(8,2) NOT NULL FLAG=52 +) ENGINE=CONNECT TABLE_TYPE=FIX BLOCK_SIZE=8 FILE_NAME='employee.dat' ENDING=1; + +SELECT Bson_Make_Object(SERIALNO, NAME, TITLE, SALARY) FROM t3 WHERE NAME = 'MERCHANT'; +SELECT DEPARTMENT, Bson_Array_Grp(NAME) FROM t3 GROUP BY DEPARTMENT; +#SET connect_json_grp_size=30; Deprecated +SELECT BsonSet_Grp_Size(30); +SELECT Bson_Make_Object(title, Bson_Array_Grp(name) `json_names`) from t3 GROUP BY title; +SELECT Bson_Make_Array(DEPARTMENT, Bson_Array_Grp(NAME)) FROM t3 GROUP BY DEPARTMENT; +SELECT Bson_Make_Object(DEPARTMENT, Bson_Array_Grp(NAME) json_NAMES) FROM t3 GROUP BY DEPARTMENT; +SELECT Bson_Make_Object(DEPARTMENT, Bson_Array_Grp(Bson_Make_Object(SERIALNO, NAME, TITLE, SALARY)) json_EMPLOYES) FROM t3 GROUP BY DEPARTMENT; +SELECT 
Bson_Make_Object(DEPARTMENT, TITLE, Bson_Array_Grp(Bson_Make_Object(SERIALNO, NAME, SALARY)) json_EMPLOYES) FROM t3 GROUP BY DEPARTMENT, TITLE; +--error ER_CANT_INITIALIZE_UDF +SELECT Bson_Object_Grp(SALARY) FROM t3; +SELECT Bson_Object_Grp(NAME, SALARY) FROM t3; +SELECT Bson_Make_Object(DEPARTMENT, Bson_Object_Grp(NAME, SALARY) "Json_SALARIES") FROM t3 GROUP BY DEPARTMENT; +SELECT Bson_Array_Grp(NAME) FROM t3; +# +SELECT Bson_Object_Key(name, title) FROM t3 WHERE DEPARTMENT = 318; +SELECT Bson_Object_Grp(name, title) FROM t3 WHERE DEPARTMENT = 318; + +--echo # +--echo # Test value getting UDF's +--echo # +SELECT BsonGet_String(Bson_Array_Grp(name),'[#]') FROM t3; +SELECT BsonGet_String(Bson_Array_Grp(name),'[","]') FROM t3; +SELECT BsonGet_String(Bson_Array_Grp(name),'[>]') FROM t3; +SET @j1 = '[45,28,36,45,89]'; +SELECT BsonGet_String(@j1,'1'); +SELECT BsonGet_String(@j1 json_,'3'); +SELECT BsonGet_String(Bson_Make_Array(45,28,36,45,89),'3'); +SELECT BsonGet_String(Bson_Make_Array(45,28,36,45,89),'["+"]') "list",'=' as "egal",BsonGet_String(Bson_Make_Array(45,28,36,45,89),'[+]') "sum"; +SELECT BsonGet_String(Bson_Make_Array(Bson_Make_Array(45,28),Bson_Make_Array(36,45,89)),'1.0'); +SELECT BsonGet_String(Bson_Make_Array(Bson_Make_Array(45,28),Bson_Make_Array(36,45,89)),'1.*'); +SELECT BsonGet_String(Bson_Make_Object(56 qty,3.1416 price,'machin' truc, NULL garanty),'truc'); +SET @j2 = '{"qty":56,"price":3.141600,"truc":"machin","garanty":null}'; +SELECT BsonGet_String(@j2 json_,'truc'); +SELECT BsonGet_String(@j2,'truc'); +SELECT BsonGet_String(@j2,'chose'); +SELECT BsonGet_String(NULL json_, NULL); /* NULL WARNING */ +SELECT department, BsonGet_String(Bson_Make_Object(department, Bson_Array_Grp(salary) "Json_salaries"),'salaries.[+]') Sumsal FROM t3 GROUP BY department; +# +SELECT BsonGet_Int(@j1, '4'); +SELECT BsonGet_Int(@j1, '[#]'); +SELECT BsonGet_Int(@j1, '[+]'); +SELECT BsonGet_Int(@j1 json_, '3'); +SELECT BsonGet_Int(Bson_Make_Array(45,28,36,45,89), '3'); +SELECT BsonGet_Int(Bson_Make_Array(45,28,36,45,89), '["+"]'); +SELECT BsonGet_Int(Bson_Make_Array(45,28,36,45,89), '[+]'); +SELECT BsonGet_Int(Bson_Make_Array(Bson_Make_Array(45,28), Bson_Make_Array(36,45,89)), '1.0'); +SELECT BsonGet_Int(Bson_Make_Array(Bson_Make_Array(45,28), Bson_Make_Array(36,45,89)), '0.1'); +SELECT BsonGet_Int(Bson_Make_Object(56 qty, 3.1416 price, 'machin' truc, NULL garanty), 'qty'); +SELECT BsonGet_Int(@j2 json_, 'price'); +SELECT BsonGet_Int(@j2, 'qty'); +SELECT BsonGet_Int('{"qty":56,"price":3.141600,"truc":"machin","garanty":null}', 'chose'); +SELECT BsonGet_Int(BsonGet_String(Bson_Make_Array(Bson_Make_Array(45,28),Bson_Make_Array(36,45,89)), '1.*'), '[+]') sum; +SELECT department, BsonGet_Int(Bson_Make_Object(department, Bson_Array_Grp(salary) "Json_salaries"), 'salaries.[+]') Sumsal FROM t3 GROUP BY department; +# +SELECT BsonGet_Real(@j1, '2'); +SELECT BsonGet_Real(@j1 json_, '3', 2); +SELECT BsonGet_Real(Bson_Make_Array(45,28,36,45,89), '3'); +SELECT BsonGet_Real(Bson_Make_Array(45,28,36,45,89), '["+"]'); +SELECT BsonGet_Real(Bson_Make_Array(45,28,36,45,89), '[+]'); +SELECT BsonGet_Real(Bson_Make_Array(45,28,36,45,89), '[!]'); +SELECT BsonGet_Real(Bson_Make_Array(Bson_Make_Array(45,28), Bson_Make_Array(36,45,89)), '1.0'); +SELECT BsonGet_Real(Bson_Make_Object(56 qty, 3.1416 price, 'machin' truc, NULL garanty), 'price'); +SELECT BsonGet_Real('{"qty":56,"price":3.141600,"truc":"machin","garanty":null}' json_, 'qty'); +SELECT BsonGet_Real('{"qty":56,"price":3.141600,"truc":"machin","garanty":null}', 
'price'); +SELECT BsonGet_Real('{"qty":56,"price":3.141600,"truc":"machin","garanty":null}', 'price', 4); +SELECT BsonGet_Real('{"qty":56,"price":3.141600,"truc":"machin","garanty":null}', 'chose'); +SELECT department, BsonGet_Real(Bson_Make_Object(department, Bson_Array_Grp(salary) "Json_salaries"),'salaries.[+]') Sumsal FROM t3 GROUP BY department; + +--echo # +--echo # Documentation examples +--echo # +SELECT + BsonGet_Int(Bson_Make_Array(45,28,36,45,89), '4') "Rank", + BsonGet_Int(Bson_Make_Array(45,28,36,45,89), '[#]') "Number", + BsonGet_String(Bson_Make_Array(45,28,36,45,89), '[","]') "Concat", + BsonGet_Int(Bson_Make_Array(45,28,36,45,89), '[+]') "Sum", + BsonGet_Real(Bson_Make_Array(45,28,36,45,89), '[!]', 2) "Avg"; +SELECT + BsonGet_String('{"qty":7,"price":29.50,"garanty":null}', 'price') "String", + BsonGet_Int('{"qty":7,"price":29.50,"garanty":null}', 'price') "Int", + BsonGet_Real('{"qty":7,"price":29.50,"garanty":null}', 'price') "Real"; +SELECT BsonGet_Real('{"qty":7,"price":29.50,"garanty":null}', 'price', 3) "Real"; + +--echo # +--echo # Testing Locate +--echo # +SELECT BsonLocate(Bson_Make_Object(56 qty,3.1416 price,'machin' truc, NULL garanty),'machin'); +SELECT BsonLocate(Bson_Make_Object(56 qty,3.1416 price,'machin' truc, NULL garanty),56); +SELECT BsonLocate(Bson_Make_Object(56 qty,3.1416 price,'machin' truc, NULL garanty),3.1416); +SELECT BsonLocate(Bson_Make_Object(56 qty,3.1416 price,'machin' truc, NULL garanty),'chose'); +SELECT BsonLocate('{"AUTHORS":[{"FN":"Jules", "LN":"Verne"}, {"FN":"Jack", "LN":"London"}]}' json_, 'Jack') Path; +SELECT BsonLocate('{"AUTHORS":[{"FN":"Jules", "LN":"Verne"}, {"FN":"Jack", "LN":"London"}]}' json_, 'jack' ci) Path; +SELECT BsonLocate('{"AUTHORS":[{"FN":"Jules", "LN":"Verne"}, {"FN":"Jack", "LN":"London"}]}' json_, '{"FN":"Jack", "LN":"London"}' json_) Path; +SELECT BsonLocate('{"AUTHORS":[{"FN":"Jules", "LN":"Verne"}, {"FN":"Jack", "LN":"London"}]}' json_, '{"FN":"jack", "LN":"London"}' json_) Path; +SELECT BsonLocate('[45,28,36,45,89]',36); +SELECT BsonLocate('[45,28,36,45,89]' json_,28.0); +SELECT Bson_Locate_All('[45,28,36,45,89]',10); +SELECT Bson_Locate_All('[45,28,36,45,89]',45); +SELECT Bson_Locate_All('[[45,28],36,45,89]',45); +SELECT Bson_Locate_All('[[45,28,45],36,45,89]',45); +SELECT Bson_Locate_All('[[45,28,45],36,45,89]',BsonGet_Int('[3,45]','[1]')); +SELECT BsonLocate('[[45,28,45],36,45,89]',45,n) from t1; +SELECT BsonGet_String(Bson_Locate_All('[[45,28,45],36,45,89]',45),concat('[',n-1,']')) FROM t1; +SELECT BsonGet_String(Bson_Locate_All('[[45,28,45],36,45,89]',45),concat('[',n-1,']')) AS `Path` FROM t1 GROUP BY n HAVING `Path` IS NOT NULL; +SELECT Bson_Locate_All('[45,28,[36,45,89]]',45); +SELECT Bson_Locate_All('[[45,28],[36,45.0,89]]',BsonValue(45.0)); +SELECT Bson_Locate_All('[[45,28],[36,45.0,89]]',45.0); +SELECT BsonLocate('[[45,28],[36,45,89]]','[36,45,89]' json_); +SELECT BsonLocate('[[45,28],[36,45,89]]','[45,28]' json_); +SELECT Bson_Locate_All('[[45,28],[[36,45],89]]','45') "All paths"; +SELECT Bson_Locate_All('[[45,28],[[36,45],89]]','[36,45]' json_); +SELECT BsonGet_Int(Bson_Locate_All('[[45,28],[[36,45],89]]',45), '[#]') "Nb of occurs"; +SELECT Bson_Locate_All('[[45,28],[[36,45],89]]',45,2); +SELECT BsonGet_String(Bson_Locate_All('[45,28,36,45,89]',45),'0'); +SELECT BsonLocate(Bson_File('test/biblio.json'), 'Knab'); +SELECT Bson_Locate_All('test/biblio.json' jfile_, 'Knab'); + +--echo # +--echo # Testing json files +--echo # +SELECT 
Bfile_Make('[{"_id":5,"type":"food","item":"beer","taste":"light","price":5.65,"ratings":[5,8,9]}, +{"_id":6,"type":"car","item":"roadster","mileage":56000,"ratings":[6,9]}, +{"_id":7,"type":"food","item":"meat","origin":"argentina","ratings":[2,4]}, +{"_id":8,"type":"furniture","item":"table","size":{"W":60,"L":80,"H":40},"ratings":[5,8,7]}]', 'test/fx.json', 0) AS NewFile; +SELECT Bfile_Make('test/fx.json', 1); +SELECT Bfile_Make('test/fx.json' jfile_); +SELECT Bfile_Make(Bbin_File('test/fx.json'), 0); +SELECT Bson_File('test/fx.json', 1); +SELECT Bson_File('test/fx.json', 2); +SELECT Bson_File('test/fx.json', 0); +SELECT Bson_File('test/fx.json', '0'); +SELECT Bson_File('test/fx.json', '[?]'); +SELECT BsonGet_String(Bson_File('test/fx.json'), '1.*'); +SELECT BsonGet_String(Bson_File('test/fx.json'), '1'); +SELECT BsonGet_Int(Bson_File('test/fx.json'), '1.mileage') AS Mileage; +SELECT BsonGet_Real(Bson_File('test/fx.json'), '0.price', 2) AS Price; +SELECT Bson_Array_Add(Bson_File('test/fx.json', '2'), 6, 'ratings'); +SELECT Bson_Array_Add(Bson_File('test/fx.json', '2'), 6, 1, 'ratings'); +SELECT Bson_Array_Add(Bson_File('test/fx.json', '2'), 6, 'ratings', 1); +SELECT Bson_Array_Add(Bson_File('test/fx.json', '2.ratings'), 6, 0); +SELECT Bson_Array_Delete(Bson_File('test/fx.json', '2'), 'ratings', 1); +SELECT Bson_Object_Add(Bson_File('test/fx.json', '2'), 'france' origin); +SELECT Bson_Object_Add(Bson_File('test/fx.json', '2'), 70 H, 'size'); +SELECT Bson_Object_Add(Bson_File('test/fx.json', '3'), 70 H, 'size'); +SELECT Bson_Object_List(Bson_File('test/fx.json', '3.size')); + +--echo # +--echo # Testing new functions +--echo # +SELECT Bson_Item_Merge('["a","b","c"]','["d","e","f"]') as "Result"; +SELECT Bson_Item_Merge(Bson_Make_Array('a','b','c'), Bson_Make_Array('d','e','f')) as "Result"; +SELECT +Bson_Set_Item('[1,2,3,{"quatre":4}]', 'foo', '$[1]', 5, '$[3].cinq') as "Set", +Bson_Insert_Item('[1,2,3,{"quatre":4}]', 'foo', '$[1]', 5, '$[3].cinq') as "Insert", +Bson_Update_Item(Bson_Make_Array(1,2,3,Bson_Object_Key('quatre',4)),'foo','$[1]',5,'$[3].cinq') "Update"; +SELECT bson_delete_item('[1,2,3,{"quatre":4,"Deux":2}]','1','[2].Deux'); +SELECT bson_delete_item('[1,2,3,{"quatre":4,"Deux":2}]','["[1]","[3].Deux"]'); +SELECT bson_delete_item('[1,2,3,{"quatre":4,"Deux":2}]','$.[3].Deux'); +SELECT Bson_Item_Merge(Bson_Get_Item("C:/Data/Json/bibdoc.json", '$[0].AUTHOR'), Bson_Make_Array(Bson_Object_Key('FIRSTNAME','Olivier','LASTNAME','Bertrand'))) "Result"; +DROP TABLE t1; +DROP TABLE t2; +DROP TABLE t3; + +# +# Clean up +# +--source bson_udf2.inc +--remove_file $MYSQLD_DATADIR/test/biblio.json +--remove_file $MYSQLD_DATADIR/test/employee.dat +--remove_file $MYSQLD_DATADIR/test/fx.json + diff --git a/storage/connect/mysql-test/connect/t/bson_udf2.inc b/storage/connect/mysql-test/connect/t/bson_udf2.inc new file mode 100644 index 00000000000..ceddf8b0632 --- /dev/null +++ b/storage/connect/mysql-test/connect/t/bson_udf2.inc @@ -0,0 +1,61 @@ +--disable_query_log + +DROP FUNCTION bson_test; +DROP FUNCTION bsonvalue; +DROP FUNCTION bson_make_array; +DROP FUNCTION bson_array_add_values; +DROP FUNCTION bson_array_add; +DROP FUNCTION bson_array_delete; +DROP FUNCTION bson_make_object; +DROP FUNCTION bson_object_nonull; +DROP FUNCTION bson_object_key; +DROP FUNCTION bson_object_add; +DROP FUNCTION bson_object_delete; +DROP FUNCTION bson_object_list; +DROP FUNCTION bson_object_values; +DROP FUNCTION bsonset_grp_size; +DROP FUNCTION bsonget_grp_size; +DROP FUNCTION bson_array_grp; +DROP FUNCTION 
bson_object_grp; +DROP FUNCTION bsonlocate; +DROP FUNCTION bson_locate_all; +DROP FUNCTION bson_contains; +DROP FUNCTION bsoncontains_path; +DROP FUNCTION bson_item_merge; +DROP FUNCTION bson_get_item; +DROP FUNCTION bson_delete_item; +DROP FUNCTION bsonget_string; +DROP FUNCTION bsonget_int; +DROP FUNCTION bsonget_real; +DROP FUNCTION bson_set_item; +DROP FUNCTION bson_insert_item; +DROP FUNCTION bson_update_item; +DROP FUNCTION bson_serialize; +DROP FUNCTION bson_file; +DROP FUNCTION bfile_make; +DROP FUNCTION bfile_convert; +DROP FUNCTION bfile_bjson; +DROP FUNCTION bbin_make_array; +DROP FUNCTION bbin_array_add; +DROP FUNCTION bbin_array_add_values; +DROP FUNCTION bbin_array_delete; +DROP FUNCTION bbin_array_grp; +DROP FUNCTION bbin_object_grp; +DROP FUNCTION bbin_make_object; +DROP FUNCTION bbin_object_nonull; +DROP FUNCTION bbin_object_key; +DROP FUNCTION bbin_object_add; +DROP FUNCTION bbin_object_delete; +DROP FUNCTION bbin_object_list; +DROP FUNCTION bbin_object_values; +DROP FUNCTION bbin_get_item; +DROP FUNCTION bbin_set_item; +DROP FUNCTION bbin_insert_item; +DROP FUNCTION bbin_update_item; +DROP FUNCTION bbin_item_merge; +DROP FUNCTION bbin_delete_item; +DROP FUNCTION bbin_locate_all; +DROP FUNCTION bbin_file; + +--enable_query_log + From c9b5e5286bb013bb0669d7e82194baec644d5986 Mon Sep 17 00:00:00 2001 From: Olivier Bertrand Date: Tue, 12 Jan 2021 22:28:44 +0100 Subject: [PATCH 065/150] Fix failed test --- storage/connect/mysql-test/connect/r/bson_udf.result | 3 --- storage/connect/mysql-test/connect/t/bson_udf.test | 1 - 2 files changed, 4 deletions(-) diff --git a/storage/connect/mysql-test/connect/r/bson_udf.result b/storage/connect/mysql-test/connect/r/bson_udf.result index ea3e0e28f68..9aa411448a4 100644 --- a/storage/connect/mysql-test/connect/r/bson_udf.result +++ b/storage/connect/mysql-test/connect/r/bson_udf.result @@ -700,9 +700,6 @@ bson_delete_item('[1,2,3,{"quatre":4,"Deux":2}]','["[1]","[3].Deux"]') SELECT bson_delete_item('[1,2,3,{"quatre":4,"Deux":2}]','$.[3].Deux'); bson_delete_item('[1,2,3,{"quatre":4,"Deux":2}]','$.[3].Deux') [1,2,3,{"quatre":4}] -SELECT Bson_Item_Merge(Bson_Get_Item("C:/Data/Json/bibdoc.json", '$[0].AUTHOR'), Bson_Make_Array(Bson_Object_Key('FIRSTNAME','Olivier','LASTNAME','Bertrand'))) "Result"; -Result -[{"FIRSTNAME":"Jean-Christophe","LASTNAME":"Bernadac"},{"FIRSTNAME":"Franois","LASTNAME":"Knab"},{"FIRSTNAME":"Olivier","LASTNAME":"Bertrand"}] DROP TABLE t1; DROP TABLE t2; DROP TABLE t3; diff --git a/storage/connect/mysql-test/connect/t/bson_udf.test b/storage/connect/mysql-test/connect/t/bson_udf.test index cec2d5f62f6..2323afcb54d 100644 --- a/storage/connect/mysql-test/connect/t/bson_udf.test +++ b/storage/connect/mysql-test/connect/t/bson_udf.test @@ -266,7 +266,6 @@ Bson_Update_Item(Bson_Make_Array(1,2,3,Bson_Object_Key('quatre',4)),'foo','$[1]' SELECT bson_delete_item('[1,2,3,{"quatre":4,"Deux":2}]','1','[2].Deux'); SELECT bson_delete_item('[1,2,3,{"quatre":4,"Deux":2}]','["[1]","[3].Deux"]'); SELECT bson_delete_item('[1,2,3,{"quatre":4,"Deux":2}]','$.[3].Deux'); -SELECT Bson_Item_Merge(Bson_Get_Item("C:/Data/Json/bibdoc.json", '$[0].AUTHOR'), Bson_Make_Array(Bson_Object_Key('FIRSTNAME','Olivier','LASTNAME','Bertrand'))) "Result"; DROP TABLE t1; DROP TABLE t2; DROP TABLE t3; From 1d627ce47ca241a0730bc6cb94d9ec41a302b814 Mon Sep 17 00:00:00 2001 From: Olivier Bertrand Date: Wed, 13 Jan 2021 00:23:08 +0100 Subject: [PATCH 066/150] Fix failed test --- storage/connect/mysql-test/connect/r/bson_udf.result | 3 +++ 
storage/connect/mysql-test/connect/t/bson_udf.test | 1 + 2 files changed, 4 insertions(+) diff --git a/storage/connect/mysql-test/connect/r/bson_udf.result b/storage/connect/mysql-test/connect/r/bson_udf.result index 9aa411448a4..a0b93f2e547 100644 --- a/storage/connect/mysql-test/connect/r/bson_udf.result +++ b/storage/connect/mysql-test/connect/r/bson_udf.result @@ -703,3 +703,6 @@ bson_delete_item('[1,2,3,{"quatre":4,"Deux":2}]','$.[3].Deux') DROP TABLE t1; DROP TABLE t2; DROP TABLE t3; +SELECT BsonSet_Grp_Size(10); +BsonSet_Grp_Size(10) +10 diff --git a/storage/connect/mysql-test/connect/t/bson_udf.test b/storage/connect/mysql-test/connect/t/bson_udf.test index 2323afcb54d..84a3db6d061 100644 --- a/storage/connect/mysql-test/connect/t/bson_udf.test +++ b/storage/connect/mysql-test/connect/t/bson_udf.test @@ -269,6 +269,7 @@ SELECT bson_delete_item('[1,2,3,{"quatre":4,"Deux":2}]','$.[3].Deux'); DROP TABLE t1; DROP TABLE t2; DROP TABLE t3; +SELECT BsonSet_Grp_Size(10); # # Clean up From 9e4a5a81fc062c082312afb230d2b8d4cf6b11cf Mon Sep 17 00:00:00 2001 From: Dmitry Shulga Date: Wed, 13 Jan 2021 16:16:13 +0700 Subject: [PATCH 067/150] MDEV-24208 SHOW RELAYLOG EVENTS command is not supported in the prepared statement protocol yet Added sending of metadata in response to preparing request for the commands SQLCOM_SHOW_BINLOG_EVENTS, SQLCOM_SHOW_RELAYLOG_EVENTS --- mysql-test/r/ps_show_log.result | 65 +++++++++++++++++ .../suite/rpl/t/rpl_row_create_table.test | 5 -- mysql-test/suite/rpl/t/rpl_row_flsh_tbls.test | 6 -- mysql-test/t/ps_show_log.test | 73 +++++++++++++++++++ sql/sql_prepare.cc | 11 +++ 5 files changed, 149 insertions(+), 11 deletions(-) create mode 100644 mysql-test/r/ps_show_log.result create mode 100644 mysql-test/t/ps_show_log.test diff --git a/mysql-test/r/ps_show_log.result b/mysql-test/r/ps_show_log.result new file mode 100644 index 00000000000..54eabaeded6 --- /dev/null +++ b/mysql-test/r/ps_show_log.result @@ -0,0 +1,65 @@ +# +# MDEV-24208 SHOW RELAYLOG EVENTS command is not supported in the prepared +# statement protocol yet +# +CREATE USER u1; +include/master-slave.inc +[connection master] +connection master; +CREATE TABLE t1(n INT); +DROP TABLE t1; +connection slave; +PREPARE stmt_1 FROM 'SHOW BINLOG EVENTS'; +EXECUTE stmt_1; +Log_name Pos Event_type Server_id End_log_pos Info +# # Format_desc # # # +# # Gtid_list # # [] +# # Binlog_checkpoint # # # +# # Gtid # # GTID 0-1-1 +# # Query # # use `test`; CREATE TABLE t1(n INT) +# # Gtid # # GTID 0-1-2 +# # Query # # use `test`; DROP TABLE IF EXISTS `t1` /* generated by server */ +# Execute the same prepared statement the second time to check that +# no internal structures used for handling the statement +# 'SHOW BINLOG EVENTS' were damaged. +EXECUTE stmt_1; +Log_name Pos Event_type Server_id End_log_pos Info +# # Format_desc # # # +# # Gtid_list # # [] +# # Binlog_checkpoint # # # +# # Gtid # # GTID 0-1-1 +# # Query # # use `test`; CREATE TABLE t1(n INT) +# # Gtid # # GTID 0-1-2 +# # Query # # use `test`; DROP TABLE IF EXISTS `t1` /* generated by server */ +DEALLOCATE PREPARE stmt_1; +connection slave; +PREPARE stmt_1 FROM 'SHOW RELAYLOG EVENTS'; +EXECUTE stmt_1; +Log_name Pos Event_type Server_id End_log_pos Info +slave-relay-bin.000001 # Format_desc # # # +slave-relay-bin.000001 # Rotate # # # +# Execute the same prepared statement the second time to check that +# no internal structures used for handling the statement +# 'SHOW RELAYLOG EVENTS' were damaged. 
+EXECUTE stmt_1; +Log_name Pos Event_type Server_id End_log_pos Info +slave-relay-bin.000001 # Format_desc # # # +slave-relay-bin.000001 # Rotate # # # +DEALLOCATE PREPARE stmt_1; +# Create the user u1 without the REPLICATION SLAVE privilege required +# for running the statements SHOW BINLOG EVENTS/SHOW RELAYLOG EVENTS +# and check that attempt to execute the statements SHOW BINLOG EVENTS/ +# SHOW RELAYLOG EVENTS as a prepred statements by a user without required +# privileges results in error. +connect con2,localhost,u1,,test; +PREPARE stmt_1 FROM 'SHOW BINLOG EVENTS'; +EXECUTE stmt_1; +ERROR 42000: Access denied; you need (at least one of) the REPLICATION SLAVE privilege(s) for this operation +PREPARE stmt_1 FROM 'SHOW RELAYLOG EVENTS'; +EXECUTE stmt_1; +ERROR 42000: Access denied; you need (at least one of) the REPLICATION SLAVE privilege(s) for this operation +DEALLOCATE PREPARE stmt_1; +include/rpl_end.inc +connection default; +DROP USER u1; +# End of 10.2 tests diff --git a/mysql-test/suite/rpl/t/rpl_row_create_table.test b/mysql-test/suite/rpl/t/rpl_row_create_table.test index 65f14295c19..cb76d6c4dcb 100644 --- a/mysql-test/suite/rpl/t/rpl_row_create_table.test +++ b/mysql-test/suite/rpl/t/rpl_row_create_table.test @@ -7,11 +7,6 @@ connection slave; --source include/have_innodb.inc connection master; -# Bug#18326: Do not lock table for writing during prepare of statement -# The use of the ps protocol causes extra table maps in the binlog, so -# we disable the ps-protocol for this statement. ---disable_ps_protocol - # Set the default storage engine to different values on master and # slave. We need to stop the slave for the server variable to take # effect, since the variable is only read on start-up. diff --git a/mysql-test/suite/rpl/t/rpl_row_flsh_tbls.test b/mysql-test/suite/rpl/t/rpl_row_flsh_tbls.test index 71cd4a5b9ae..4d786794f5f 100644 --- a/mysql-test/suite/rpl/t/rpl_row_flsh_tbls.test +++ b/mysql-test/suite/rpl/t/rpl_row_flsh_tbls.test @@ -4,10 +4,4 @@ let $rename_event_pos= `select @binlog_start_pos + 819`; -# Bug#18326: Do not lock table for writing during prepare of statement -# The use of the ps protocol causes extra table maps in the binlog, so -# we disable the ps-protocol for this statement. - ---disable_ps_protocol -- source extra/rpl_tests/rpl_flsh_tbls.test ---enable_ps_protocol diff --git a/mysql-test/t/ps_show_log.test b/mysql-test/t/ps_show_log.test new file mode 100644 index 00000000000..95000d2d7e0 --- /dev/null +++ b/mysql-test/t/ps_show_log.test @@ -0,0 +1,73 @@ +--echo # +--echo # MDEV-24208 SHOW RELAYLOG EVENTS command is not supported in the prepared +--echo # statement protocol yet +--echo # + +CREATE USER u1; + +--source include/have_binlog_format_statement.inc +--source include/master-slave.inc +--connection master +CREATE TABLE t1(n INT); + +DROP TABLE t1; + +--sync_slave_with_master +PREPARE stmt_1 FROM 'SHOW BINLOG EVENTS'; + +--replace_column 2 # 4 # 5 # +--replace_regex /Server ver:.*Binlog ver: .*/#/ /slave-bin.*/#/ +EXECUTE stmt_1; + +--echo # Execute the same prepared statement the second time to check that +--echo # no internal structures used for handling the statement +--echo # 'SHOW BINLOG EVENTS' were damaged. 
+ +--replace_column 2 # 4 # 5 # +--replace_regex /Server ver:.*Binlog ver: .*/#/ /slave-bin.*/#/ +EXECUTE stmt_1; + +DEALLOCATE PREPARE stmt_1; + +--connection slave +PREPARE stmt_1 FROM 'SHOW RELAYLOG EVENTS'; +--replace_column 2 # 4 # 5 # +--replace_regex /Server ver:.*Binlog ver: .*/#/ /slave-relay-bin.*;pos=.*/#/ +EXECUTE stmt_1; + +--echo # Execute the same prepared statement the second time to check that +--echo # no internal structures used for handling the statement +--echo # 'SHOW RELAYLOG EVENTS' were damaged. + +--replace_column 2 # 4 # 5 # +--replace_regex /Server ver:.*Binlog ver: .*/#/ /slave-relay-bin.*;pos=.*/#/ +EXECUTE stmt_1; + +DEALLOCATE PREPARE stmt_1; + +--echo # Create the user u1 without the REPLICATION SLAVE privilege required +--echo # for running the statements SHOW BINLOG EVENTS/SHOW RELAYLOG EVENTS +--echo # and check that attempt to execute the statements SHOW BINLOG EVENTS/ +--echo # SHOW RELAYLOG EVENTS as a prepred statements by a user without required +--echo # privileges results in error. + +--connect (con2,localhost,u1,,test) +PREPARE stmt_1 FROM 'SHOW BINLOG EVENTS'; + +--error ER_SPECIFIC_ACCESS_DENIED_ERROR +EXECUTE stmt_1; + +PREPARE stmt_1 FROM 'SHOW RELAYLOG EVENTS'; + +--error ER_SPECIFIC_ACCESS_DENIED_ERROR +EXECUTE stmt_1; + +DEALLOCATE PREPARE stmt_1; + +--source include/rpl_end.inc + +--connection default +# Clean up +DROP USER u1; + +--echo # End of 10.2 tests diff --git a/sql/sql_prepare.cc b/sql/sql_prepare.cc index dc2fb414de9..3dff2cb8106 100644 --- a/sql/sql_prepare.cc +++ b/sql/sql_prepare.cc @@ -121,6 +121,7 @@ When one supplies long data for a placeholder: static const uint PARAMETER_FLAG_UNSIGNED= 128U << 8; #endif #include "lock.h" // MYSQL_OPEN_FORCE_SHARED_MDL +#include "log_event.h" // class Log_event #include "sql_handler.h" #include "transaction.h" // trans_rollback_implicit #include "wsrep_mysqld.h" @@ -2521,6 +2522,16 @@ static bool check_prepared_statement(Prepared_statement *stmt) DBUG_RETURN(FALSE); } break; + case SQLCOM_SHOW_BINLOG_EVENTS: + case SQLCOM_SHOW_RELAYLOG_EVENTS: + { + List<Item> field_list; + Log_event::init_show_field_list(thd, &field_list); + + if ((res= send_stmt_metadata(thd, stmt, &field_list)) == 2) + DBUG_RETURN(FALSE); + } + break; #endif /* EMBEDDED_LIBRARY */ case SQLCOM_SHOW_CREATE_PROC: if ((res= mysql_test_show_create_routine(stmt, TYPE_ENUM_PROCEDURE)) == 2) From 25db70f912f9c2535d7b49b123e042815cb7b1a6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Wed, 13 Jan 2021 18:54:53 +0200 Subject: [PATCH 068/150] Fix innodb.innodb_mysql It is unclear for how long the result has been broken, because ./mtr --big-test is not run regularly. 
--- mysql-test/suite/innodb/r/innodb_mysql.result | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mysql-test/suite/innodb/r/innodb_mysql.result b/mysql-test/suite/innodb/r/innodb_mysql.result index 0272ecd82b5..877f20cf195 100644 --- a/mysql-test/suite/innodb/r/innodb_mysql.result +++ b/mysql-test/suite/innodb/r/innodb_mysql.result @@ -1166,7 +1166,7 @@ drop table t1,t2; create table t1(f1 varchar(800) binary not null, key(f1)) character set utf8 collate utf8_general_ci; Warnings: -Warning 1071 Specified key was too long; max key length is 767 bytes +Note 1071 Specified key was too long; max key length is 767 bytes insert into t1 values('aaa'); drop table t1; CREATE TABLE t1 (a INT PRIMARY KEY, b INT, c FLOAT, KEY b(b)) ENGINE = INNODB; From ea9cd97f855fddf91f011434e8289ce5eba52528 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Wed, 13 Jan 2021 18:55:56 +0200 Subject: [PATCH 069/150] MDEV-24536 innodb_idle_flush_pct has no effect The parameter innodb_idle_flush_pct that was introduced in MariaDB Server 10.1.2 by MDEV-6932 has no effect ever since the InnoDB changes from MySQL 5.7.9 were applied in commit 2e814d4702d71a04388386a9f591d14a35980bfe. Let us declare the parameter as deprecated and having no effect. --- .../r/innodb_idle_flush_pct_basic.result | 12 +++++++++ .../sys_vars/r/sysvars_innodb,32bit.rdiff | 2 +- .../suite/sys_vars/r/sysvars_innodb.result | 2 +- storage/innobase/handler/ha_innodb.cc | 27 ++++++++++++++----- storage/innobase/include/srv0srv.h | 4 +-- storage/innobase/srv/srv0srv.cc | 5 +--- 6 files changed, 37 insertions(+), 15 deletions(-) diff --git a/mysql-test/suite/sys_vars/r/innodb_idle_flush_pct_basic.result b/mysql-test/suite/sys_vars/r/innodb_idle_flush_pct_basic.result index a2c328f38fd..915343fcff2 100644 --- a/mysql-test/suite/sys_vars/r/innodb_idle_flush_pct_basic.result +++ b/mysql-test/suite/sys_vars/r/innodb_idle_flush_pct_basic.result @@ -24,6 +24,8 @@ select * from information_schema.session_variables where variable_name='innodb_i VARIABLE_NAME VARIABLE_VALUE INNODB_IDLE_FLUSH_PCT 100 set global innodb_idle_flush_pct=10; +Warnings: +Warning 131 innodb_idle_flush_pct is DEPRECATED and has no effect. select @@global.innodb_idle_flush_pct; @@global.innodb_idle_flush_pct 10 @@ -44,6 +46,7 @@ ERROR 42000: Incorrect argument type to variable 'innodb_idle_flush_pct' set global innodb_idle_flush_pct=-7; Warnings: Warning 1292 Truncated incorrect innodb_idle_flush_pct value: '-7' +Warning 131 innodb_idle_flush_pct is DEPRECATED and has no effect. select @@global.innodb_idle_flush_pct; @@global.innodb_idle_flush_pct 0 @@ -53,6 +56,7 @@ INNODB_IDLE_FLUSH_PCT 0 set global innodb_idle_flush_pct=106; Warnings: Warning 1292 Truncated incorrect innodb_idle_flush_pct value: '106' +Warning 131 innodb_idle_flush_pct is DEPRECATED and has no effect. select @@global.innodb_idle_flush_pct; @@global.innodb_idle_flush_pct 100 @@ -60,18 +64,26 @@ select * from information_schema.global_variables where variable_name='innodb_id VARIABLE_NAME VARIABLE_VALUE INNODB_IDLE_FLUSH_PCT 100 set global innodb_idle_flush_pct=0; +Warnings: +Warning 131 innodb_idle_flush_pct is DEPRECATED and has no effect. select @@global.innodb_idle_flush_pct; @@global.innodb_idle_flush_pct 0 set global innodb_idle_flush_pct=100; +Warnings: +Warning 131 innodb_idle_flush_pct is DEPRECATED and has no effect. 
select @@global.innodb_idle_flush_pct; @@global.innodb_idle_flush_pct 100 set global innodb_idle_flush_pct=DEFAULT; +Warnings: +Warning 131 innodb_idle_flush_pct is DEPRECATED and has no effect. select @@global.innodb_idle_flush_pct; @@global.innodb_idle_flush_pct 100 SET @@global.innodb_idle_flush_pct = @start_global_value; +Warnings: +Warning 131 innodb_idle_flush_pct is DEPRECATED and has no effect. SELECT @@global.innodb_idle_flush_pct; @@global.innodb_idle_flush_pct 100 diff --git a/mysql-test/suite/sys_vars/r/sysvars_innodb,32bit.rdiff b/mysql-test/suite/sys_vars/r/sysvars_innodb,32bit.rdiff index 0e4bb7cf7d6..0a1954f6cd5 100644 --- a/mysql-test/suite/sys_vars/r/sysvars_innodb,32bit.rdiff +++ b/mysql-test/suite/sys_vars/r/sysvars_innodb,32bit.rdiff @@ -331,7 +331,7 @@ VARIABLE_SCOPE GLOBAL -VARIABLE_TYPE BIGINT UNSIGNED +VARIABLE_TYPE INT UNSIGNED - VARIABLE_COMMENT Up to what percentage of dirty pages should be flushed when innodb finds it has spare resources to do so. + VARIABLE_COMMENT DEPRECATED. This setting has no effect. NUMERIC_MIN_VALUE 0 NUMERIC_MAX_VALUE 100 @@ -1306,22 +1306,22 @@ diff --git a/mysql-test/suite/sys_vars/r/sysvars_innodb.result b/mysql-test/suite/sys_vars/r/sysvars_innodb.result index 89138c2fedd..e67a3d28fb6 100644 --- a/mysql-test/suite/sys_vars/r/sysvars_innodb.result +++ b/mysql-test/suite/sys_vars/r/sysvars_innodb.result @@ -1283,7 +1283,7 @@ SESSION_VALUE NULL DEFAULT_VALUE 100 VARIABLE_SCOPE GLOBAL VARIABLE_TYPE BIGINT UNSIGNED -VARIABLE_COMMENT Up to what percentage of dirty pages should be flushed when innodb finds it has spare resources to do so. +VARIABLE_COMMENT DEPRECATED. This setting has no effect. NUMERIC_MIN_VALUE 0 NUMERIC_MAX_VALUE 100 NUMERIC_BLOCK_SIZE 0 diff --git a/storage/innobase/handler/ha_innodb.cc b/storage/innobase/handler/ha_innodb.cc index fb3d6637dec..0dea402b32b 100644 --- a/storage/innobase/handler/ha_innodb.cc +++ b/storage/innobase/handler/ha_innodb.cc @@ -4,7 +4,7 @@ Copyright (c) 2000, 2020, Oracle and/or its affiliates. All Rights Reserved. Copyright (c) 2008, 2009 Google Inc. Copyright (c) 2009, Percona Inc. Copyright (c) 2012, Facebook Inc. -Copyright (c) 2013, 2020, MariaDB Corporation. +Copyright (c) 2013, 2021, MariaDB Corporation. Portions of this file contain modifications contributed and copyrighted by Google, Inc. Those modifications are gratefully acknowledged and are described @@ -3611,7 +3611,12 @@ static const char* deprecated_mtflush_threads " and the parameter will be removed in MariaDB 10.3." " Use innodb-page-cleaners instead. "; +/** Deprecation message about innodb_idle_flush_pct */ +static const char* deprecated_idle_flush_pct + = "innodb_idle_flush_pct is DEPRECATED and has no effect."; + static my_bool innodb_instrument_semaphores; +static ulong innodb_idle_flush_pct; /** If applicable, emit a message that log checksums cannot be disabled. 
@param[in,out] thd client session, or NULL if at startup @@ -4232,6 +4237,10 @@ innobase_change_buffering_inited_ok: " It will be removed in MariaDB 10.3."; } + if (innodb_idle_flush_pct != 100) { + ib::warn() << deprecated_idle_flush_pct; + } + srv_use_atomic_writes = innobase_use_atomic_writes && my_may_have_atomic_write; if (srv_use_atomic_writes && !srv_file_per_table) @@ -19845,6 +19854,14 @@ innodb_instrument_semaphores_update( HA_ERR_WRONG_COMMAND, deprecated_instrument_semaphores); } +static void innodb_idle_flush_pct_update(THD *thd, st_mysql_sys_var *var, + void*, const void *save) +{ + innodb_idle_flush_pct = *static_cast(save); + push_warning(thd, Sql_condition::WARN_LEVEL_WARN, + HA_ERR_WRONG_COMMAND, deprecated_idle_flush_pct); +} + /* plugin options */ static MYSQL_SYSVAR_ENUM(checksum_algorithm, srv_checksum_algorithm, @@ -19928,12 +19945,10 @@ static MYSQL_SYSVAR_ULONG(io_capacity_max, srv_max_io_capacity, SRV_MAX_IO_CAPACITY_DUMMY_DEFAULT, 100, SRV_MAX_IO_CAPACITY_LIMIT, 0); -static MYSQL_SYSVAR_ULONG(idle_flush_pct, - srv_idle_flush_pct, +static MYSQL_SYSVAR_ULONG(idle_flush_pct, innodb_idle_flush_pct, PLUGIN_VAR_RQCMDARG, - "Up to what percentage of dirty pages should be flushed when innodb " - "finds it has spare resources to do so.", - NULL, NULL, 100, 0, 100, 0); + "DEPRECATED. This setting has no effect.", + NULL, innodb_idle_flush_pct_update, 100, 0, 100, 0); #ifdef UNIV_DEBUG static MYSQL_SYSVAR_BOOL(background_drop_list_empty, diff --git a/storage/innobase/include/srv0srv.h b/storage/innobase/include/srv0srv.h index 5214953f308..684d8bbaf90 100644 --- a/storage/innobase/include/srv0srv.h +++ b/storage/innobase/include/srv0srv.h @@ -3,7 +3,7 @@ Copyright (c) 1995, 2017, Oracle and/or its affiliates. All rights reserved. Copyright (c) 2008, 2009, Google Inc. Copyright (c) 2009, Percona Inc. -Copyright (c) 2013, 2018, MariaDB Corporation. +Copyright (c) 2013, 2021, MariaDB Corporation. Portions of this file contain modifications contributed and copyrighted by Google, Inc. Those modifications are gratefully acknowledged and are described @@ -435,8 +435,6 @@ extern double srv_defragment_fill_factor; extern uint srv_defragment_frequency; extern ulonglong srv_defragment_interval; -extern ulong srv_idle_flush_pct; - extern uint srv_change_buffer_max_size; /* Number of IO operations per second the server can do */ diff --git a/storage/innobase/srv/srv0srv.cc b/storage/innobase/srv/srv0srv.cc index f1216dcd51e..4974b50d6c6 100644 --- a/storage/innobase/srv/srv0srv.cc +++ b/storage/innobase/srv/srv0srv.cc @@ -3,7 +3,7 @@ Copyright (c) 1995, 2017, Oracle and/or its affiliates. All Rights Reserved. Copyright (c) 2008, 2009 Google Inc. Copyright (c) 2009, Percona Inc. -Copyright (c) 2013, 2020, MariaDB Corporation. +Copyright (c) 2013, 2021, MariaDB Corporation. Portions of this file contain modifications contributed and copyrighted by Google, Inc. 
Those modifications are gratefully acknowledged and are described @@ -272,9 +272,6 @@ ulong srv_buf_pool_dump_pct; /** Lock table size in bytes */ ulint srv_lock_table_size = ULINT_MAX; -/** innodb_idle_flush_pct */ -ulong srv_idle_flush_pct; - /** copy of innodb_read_io_threads */ ulint srv_n_read_io_threads; /** copy of innodb_write_io_threads */ From c89f37983ec82e5c6140f098e5672fde7fbf1002 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Thu, 14 Jan 2021 08:57:24 +0200 Subject: [PATCH 070/150] MDEV-21478 fixup: Avoid a memory leak --- storage/innobase/handler/handler0alter.cc | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/storage/innobase/handler/handler0alter.cc b/storage/innobase/handler/handler0alter.cc index 7655b93e736..4b077c44f17 100644 --- a/storage/innobase/handler/handler0alter.cc +++ b/storage/innobase/handler/handler0alter.cc @@ -1,7 +1,7 @@ /***************************************************************************** Copyright (c) 2005, 2019, Oracle and/or its affiliates. All Rights Reserved. -Copyright (c) 2013, 2020, MariaDB Corporation. +Copyright (c) 2013, 2021, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -4657,6 +4657,7 @@ prepare_inplace_alter_table_dict( } if (dict_col_name_is_reserved(field->field_name)) { +wrong_column_name: dict_mem_table_free(ctx->new_table); my_error(ER_WRONG_COLUMN_NAME, MYF(0), field->field_name); @@ -4674,9 +4675,7 @@ prepare_inplace_alter_table_dict( || col_len != sizeof(doc_id_t) || strcmp(field->field_name, FTS_DOC_ID_COL_NAME)) { - my_error(ER_WRONG_COLUMN_NAME, MYF(0), - field->field_name); - goto new_clustered_failed; + goto wrong_column_name; } } From db9b54f16365465deda047a3d5da9696606162a0 Mon Sep 17 00:00:00 2001 From: Alice Sherepa Date: Thu, 14 Jan 2021 18:06:41 +0100 Subject: [PATCH 071/150] MDEV-12908 binlog_encryption.binlog_xa_recover, binlog.binlog_xa_recover failed in bb with extra checkpoint --- mysql-test/extra/binlog_tests/binlog_xa_recover.inc | 3 +-- mysql-test/suite/binlog/r/binlog_xa_recover.result | 2 +- mysql-test/suite/binlog_encryption/binlog_xa_recover.result | 2 +- 3 files changed, 3 insertions(+), 4 deletions(-) diff --git a/mysql-test/extra/binlog_tests/binlog_xa_recover.inc b/mysql-test/extra/binlog_tests/binlog_xa_recover.inc index de2703377cc..9e0906c90f0 100644 --- a/mysql-test/extra/binlog_tests/binlog_xa_recover.inc +++ b/mysql-test/extra/binlog_tests/binlog_xa_recover.inc @@ -180,12 +180,11 @@ connection default; # commit checkpoint, otherwise we get nondeterministic results. 
SET @old_dbug= @@global.DEBUG_DBUG; SET GLOBAL debug_dbug="+d,binlog_background_checkpoint_processed"; - +SET DEBUG_SYNC= "now WAIT_FOR binlog_background_checkpoint_processed"; SET DEBUG_SYNC= "now SIGNAL con12_cont"; connection con12; reap; connection default; -SET DEBUG_SYNC= "now WAIT_FOR binlog_background_checkpoint_processed"; SET GLOBAL debug_dbug= @old_dbug; SET DEBUG_SYNC= "now SIGNAL con11_cont"; diff --git a/mysql-test/suite/binlog/r/binlog_xa_recover.result b/mysql-test/suite/binlog/r/binlog_xa_recover.result index 25aa1389b71..cb2cad957f0 100644 --- a/mysql-test/suite/binlog/r/binlog_xa_recover.result +++ b/mysql-test/suite/binlog/r/binlog_xa_recover.result @@ -148,10 +148,10 @@ connection con10; connection default; SET @old_dbug= @@global.DEBUG_DBUG; SET GLOBAL debug_dbug="+d,binlog_background_checkpoint_processed"; +SET DEBUG_SYNC= "now WAIT_FOR binlog_background_checkpoint_processed"; SET DEBUG_SYNC= "now SIGNAL con12_cont"; connection con12; connection default; -SET DEBUG_SYNC= "now WAIT_FOR binlog_background_checkpoint_processed"; SET GLOBAL debug_dbug= @old_dbug; SET DEBUG_SYNC= "now SIGNAL con11_cont"; connection con11; diff --git a/mysql-test/suite/binlog_encryption/binlog_xa_recover.result b/mysql-test/suite/binlog_encryption/binlog_xa_recover.result index af36fe277a1..6e33595eb9e 100644 --- a/mysql-test/suite/binlog_encryption/binlog_xa_recover.result +++ b/mysql-test/suite/binlog_encryption/binlog_xa_recover.result @@ -153,10 +153,10 @@ connection con10; connection default; SET @old_dbug= @@global.DEBUG_DBUG; SET GLOBAL debug_dbug="+d,binlog_background_checkpoint_processed"; +SET DEBUG_SYNC= "now WAIT_FOR binlog_background_checkpoint_processed"; SET DEBUG_SYNC= "now SIGNAL con12_cont"; connection con12; connection default; -SET DEBUG_SYNC= "now WAIT_FOR binlog_background_checkpoint_processed"; SET GLOBAL debug_dbug= @old_dbug; SET DEBUG_SYNC= "now SIGNAL con11_cont"; connection con11; From 9e3aa83f01176373d86166257192a175256a16aa Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jan=20Lindstr=C3=B6m?= Date: Tue, 12 Jan 2021 17:00:00 +0200 Subject: [PATCH 072/150] MDEV-24443 : galera.lp1376747-4 MTR fails: Result length mismatch Use debug_sync to force FTWRL to pause in correct state. --- mysql-test/suite/galera/r/lp1376747-4.result | 13 +++++---- mysql-test/suite/galera/t/lp1376747-4.test | 28 +++++++++++++------- 2 files changed, 24 insertions(+), 17 deletions(-) diff --git a/mysql-test/suite/galera/r/lp1376747-4.result b/mysql-test/suite/galera/r/lp1376747-4.result index f1d32aa8f69..3370e1d3d8e 100644 --- a/mysql-test/suite/galera/r/lp1376747-4.result +++ b/mysql-test/suite/galera/r/lp1376747-4.result @@ -3,26 +3,23 @@ CREATE TABLE t1 (id INT PRIMARY KEY) ENGINE=InnoDB; INSERT INTO t1 VALUES (1); connection node_2; SET session wsrep_sync_wait=0; -SET session wsrep_causal_reads=OFF; -Warnings: -Warning 1287 '@@wsrep_causal_reads' is deprecated and will be removed in a future release. Please use '@@wsrep_sync_wait=1' instead -FLUSH TABLE WITH READ LOCK; +FLUSH TABLES WITH READ LOCK; connection node_1; ALTER TABLE t1 ADD COLUMN f2 INTEGER; INSERT INTO t1 VALUES (2,3); connection node_2a; SET session wsrep_sync_wait=0; -SET session wsrep_causal_reads=OFF; -Warnings: -Warning 1287 '@@wsrep_causal_reads' is deprecated and will be removed in a future release. 
Please use '@@wsrep_sync_wait=1' instead SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `id` int(11) NOT NULL, PRIMARY KEY (`id`) ) ENGINE=InnoDB DEFAULT CHARSET=latin1 +SET debug_sync='flush_tables_with_read_lock_after_acquire_locks SIGNAL parked2 WAIT_FOR go2'; FLUSH TABLES t1 WITH READ LOCK;; connection node_2; +SET debug_sync='now WAIT_FOR parked2'; +SET debug_sync='now SIGNAL go2'; UNLOCK TABLES; SHOW CREATE TABLE t1; Table Create Table @@ -30,8 +27,10 @@ t1 CREATE TABLE `t1` ( `id` int(11) NOT NULL, PRIMARY KEY (`id`) ) ENGINE=InnoDB DEFAULT CHARSET=latin1 +set debug_sync= 'RESET'; connection node_2a; UNLOCK TABLES; +SET SESSION wsrep_sync_wait = DEFAULT; SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( diff --git a/mysql-test/suite/galera/t/lp1376747-4.test b/mysql-test/suite/galera/t/lp1376747-4.test index 6ae89fe9df4..d19ff422ab0 100644 --- a/mysql-test/suite/galera/t/lp1376747-4.test +++ b/mysql-test/suite/galera/t/lp1376747-4.test @@ -5,7 +5,8 @@ # after provider is unpaused # --source include/galera_cluster.inc ---source include/have_innodb.inc +--source include/have_debug.inc +--source include/have_debug_sync.inc --let $galera_connection_name = node_2a --let $galera_server_number = 2 @@ -17,8 +18,7 @@ INSERT INTO t1 VALUES (1); --connection node_2 SET session wsrep_sync_wait=0; -SET session wsrep_causal_reads=OFF; -FLUSH TABLE WITH READ LOCK; +FLUSH TABLES WITH READ LOCK; --connection node_1 ALTER TABLE t1 ADD COLUMN f2 INTEGER; @@ -26,25 +26,33 @@ INSERT INTO t1 VALUES (2,3); --connection node_2a SET session wsrep_sync_wait=0; -SET session wsrep_causal_reads=OFF; - SHOW CREATE TABLE t1; +SET debug_sync='flush_tables_with_read_lock_after_acquire_locks SIGNAL parked2 WAIT_FOR go2'; --send FLUSH TABLES t1 WITH READ LOCK; --connection node_2 ---let $wait_condition = SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE USER = 'system user' AND STATE LIKE 'committed%'; ---source include/wait_condition.inc +SET debug_sync='now WAIT_FOR parked2'; +# let the flush table wait in pause state before we unlock +# table otherwise there is window where-in flush table is +# yet to wait in pause and unlock allows alter table to proceed. +# this is because send is asynchronous. +--let $wait_condition = SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.PROCESSLIST WHERE Info LIKE 'FLUSH TABLES t1 WITH READ LOCK'; +--let $wait_condition_on_error_output = SHOW PROCESSLIST +--source include/wait_condition_with_debug.inc + +SET debug_sync='now SIGNAL go2'; +# this will release existing lock but will not resume +# the cluster as there is new FTRL that is still pausing it. UNLOCK TABLES; SHOW CREATE TABLE t1; +set debug_sync= 'RESET'; --connection node_2a --reap UNLOCK TABLES; ---let $wait_condition = SELECT COUNT(*) = 2 FROM t1; ---source include/wait_condition.inc - +SET SESSION wsrep_sync_wait = DEFAULT; SHOW CREATE TABLE t1; SELECT * from t1; From cf6114ebea0224f265d1ac25592fce9b984b0e6e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jan=20Lindstr=C3=B6m?= Date: Wed, 13 Jan 2021 08:32:10 +0200 Subject: [PATCH 073/150] MDEV-24432 : galera.galera_fk_cascade_delete_debug MTR failed: query 'reap' failed: 1205: Lock wait timeout exceeded Add wait_conditions to verify correct database state before next operation. 
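As an illustration (not part of the patch; the table name and row count are placeholders), the synchronization pattern added throughout these tests waits for the applier to reach a known state before the next statement is sent:

--connection node_2
# wait until replication has applied the expected rows, so the next
# statement cannot race with the applier thread
--let $wait_condition = SELECT COUNT(*) = 2 FROM child
--source include/wait_condition.inc
DELETE FROM parent;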
--- .../galera/r/galera_fk_cascade_delete.result | 26 +++++++++----- .../r/galera_fk_cascade_delete_debug.result | 21 +++++------- .../galera/t/galera_fk_cascade_delete.test | 19 ++++++++--- .../t/galera_fk_cascade_delete_debug.test | 34 ++++++++++++++----- 4 files changed, 67 insertions(+), 33 deletions(-) diff --git a/mysql-test/suite/galera/r/galera_fk_cascade_delete.result b/mysql-test/suite/galera/r/galera_fk_cascade_delete.result index a6c6504dc39..ba5feadb1ff 100644 --- a/mysql-test/suite/galera/r/galera_fk_cascade_delete.result +++ b/mysql-test/suite/galera/r/galera_fk_cascade_delete.result @@ -2,7 +2,6 @@ # test phase with cascading foreign key through 3 tables # connection node_1; -set wsrep_sync_wait=0; CREATE TABLE grandparent ( id INT NOT NULL PRIMARY KEY ) ENGINE=InnoDB; @@ -24,15 +23,26 @@ INSERT INTO grandparent VALUES (1),(2); INSERT INTO parent VALUES (1,1), (2,2); INSERT INTO child VALUES (1,1), (2,2); connection node_2; -set wsrep_sync_wait=0; DELETE FROM grandparent WHERE id = 1; +SELECT * FROM grandparent; +id +2 +SELECT * FROM parent; +id grandparent_id +2 2 +SELECT * FROM child; +id parent_id +2 2 connection node_1; -SELECT COUNT(*), COUNT(*) = 0 FROM parent WHERE grandparent_id = 1; -COUNT(*) COUNT(*) = 0 -0 1 -SELECT COUNT(*), COUNT(*) = 0 FROM child WHERE parent_id = 1; -COUNT(*) COUNT(*) = 0 -0 1 +SELECT * FROM grandparent; +id +2 +SELECT * FROM parent; +id grandparent_id +2 2 +SELECT * FROM child; +id parent_id +2 2 DROP TABLE child; DROP TABLE parent; DROP TABLE grandparent; diff --git a/mysql-test/suite/galera/r/galera_fk_cascade_delete_debug.result b/mysql-test/suite/galera/r/galera_fk_cascade_delete_debug.result index 89613b2856a..e2879c30a98 100644 --- a/mysql-test/suite/galera/r/galera_fk_cascade_delete_debug.result +++ b/mysql-test/suite/galera/r/galera_fk_cascade_delete_debug.result @@ -18,12 +18,10 @@ INSERT INTO child VALUES (1,'row one'), (2,'row two'); connection node_2; DELETE FROM parent; connection node_1; -SELECT COUNT(*), COUNT(*) = 0 FROM parent; -COUNT(*) COUNT(*) = 0 -0 1 -SELECT COUNT(*), COUNT(*) = 0 FROM child; -COUNT(*) COUNT(*) = 0 -0 1 +SELECT * FROM parent; +id +SELECT * FROM child; +id parent_id DROP TABLE child; DROP TABLE parent; # @@ -44,6 +42,7 @@ ON DELETE CASCADE ) ENGINE=InnoDB; INSERT INTO parent VALUES (1); INSERT INTO child VALUES (1,0,1); +connection node_2; connect node_1a, 127.0.0.1, root, , test, $NODE_MYPORT_1; SET GLOBAL debug_dbug = "d,sync.wsrep_apply_cb"; connection node_2; @@ -57,11 +56,9 @@ SET DEBUG_SYNC = "now SIGNAL signal.wsrep_apply_cb"; SET GLOBAL debug_dbug = ""; SET DEBUG_SYNC = "RESET"; connection node_1; -SELECT COUNT(*), COUNT(*) = 0 FROM parent; -COUNT(*) COUNT(*) = 0 -0 1 -SELECT COUNT(*), COUNT(*) = 0 FROM child; -COUNT(*) COUNT(*) = 0 -0 1 +SELECT * FROM parent; +id +SELECT * FROM child; +id j parent_id DROP TABLE child; DROP TABLE parent; diff --git a/mysql-test/suite/galera/t/galera_fk_cascade_delete.test b/mysql-test/suite/galera/t/galera_fk_cascade_delete.test index a3e0dbcf36f..49b54f0f7f0 100644 --- a/mysql-test/suite/galera/t/galera_fk_cascade_delete.test +++ b/mysql-test/suite/galera/t/galera_fk_cascade_delete.test @@ -9,7 +9,6 @@ --echo # --connection node_1 -set wsrep_sync_wait=0; CREATE TABLE grandparent ( id INT NOT NULL PRIMARY KEY @@ -36,8 +35,12 @@ INSERT INTO parent VALUES (1,1), (2,2); INSERT INTO child VALUES (1,1), (2,2); --connection node_2 -set wsrep_sync_wait=0; - +--let $wait_condition = SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_NAME = 'grandparent' 
+--source include/wait_condition.inc +--let $wait_condition = SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_NAME = 'parent' +--source include/wait_condition.inc +--let $wait_condition = SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_NAME = 'child' +--source include/wait_condition.inc --let $wait_condition = SELECT COUNT(*) = 2 FROM child; --source include/wait_condition.inc --let $wait_condition = SELECT COUNT(*) = 2 FROM parent; @@ -46,6 +49,10 @@ set wsrep_sync_wait=0; --source include/wait_condition.inc DELETE FROM grandparent WHERE id = 1; +SELECT * FROM grandparent; +SELECT * FROM parent; +SELECT * FROM child; + --connection node_1 --let $wait_condition = SELECT COUNT(*) = 1 FROM child; --source include/wait_condition.inc @@ -53,8 +60,10 @@ DELETE FROM grandparent WHERE id = 1; --source include/wait_condition.inc --let $wait_condition = SELECT COUNT(*) = 1 FROM grandparent; --source include/wait_condition.inc -SELECT COUNT(*), COUNT(*) = 0 FROM parent WHERE grandparent_id = 1; -SELECT COUNT(*), COUNT(*) = 0 FROM child WHERE parent_id = 1; + +SELECT * FROM grandparent; +SELECT * FROM parent; +SELECT * FROM child; DROP TABLE child; DROP TABLE parent; diff --git a/mysql-test/suite/galera/t/galera_fk_cascade_delete_debug.test b/mysql-test/suite/galera/t/galera_fk_cascade_delete_debug.test index f38c028b7d6..d902783ed64 100644 --- a/mysql-test/suite/galera/t/galera_fk_cascade_delete_debug.test +++ b/mysql-test/suite/galera/t/galera_fk_cascade_delete_debug.test @@ -25,16 +25,25 @@ INSERT INTO parent VALUES ('row one'), ('row two'); INSERT INTO child VALUES (1,'row one'), (2,'row two'); --connection node_2 +--let $wait_condition = SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_NAME = 'parent' +--source include/wait_condition.inc +--let $wait_condition = SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_NAME = 'child' +--source include/wait_condition.inc +--let $wait_condition = SELECT COUNT(*) = 2 FROM parent; +--source include/wait_condition.inc --let $wait_condition = SELECT COUNT(*) = 2 FROM child; --source include/wait_condition.inc + DELETE FROM parent; --connection node_1 +--let $wait_condition = SELECT COUNT(*) = 0 FROM parent; +--source include/wait_condition.inc --let $wait_condition = SELECT COUNT(*) = 0 FROM child; --source include/wait_condition.inc -SELECT COUNT(*), COUNT(*) = 0 FROM parent; -SELECT COUNT(*), COUNT(*) = 0 FROM child; +SELECT * FROM parent; +SELECT * FROM child; DROP TABLE child; DROP TABLE parent; @@ -61,20 +70,27 @@ CREATE TABLE child ( INSERT INTO parent VALUES (1); INSERT INTO child VALUES (1,0,1); +--connection node_2 +--let $wait_condition = SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_NAME = 'parent' +--source include/wait_condition.inc +--let $wait_condition = SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_NAME = 'child' +--source include/wait_condition.inc +--let $wait_condition = SELECT COUNT(*) = 1 FROM parent; +--source include/wait_condition.inc +--let $wait_condition = SELECT COUNT(*) = 1 FROM child; +--source include/wait_condition.inc + # block applier before applying --connect node_1a, 127.0.0.1, root, , test, $NODE_MYPORT_1 SET GLOBAL debug_dbug = "d,sync.wsrep_apply_cb"; --connection node_2 ---let $wait_condition = SELECT COUNT(*) = 1 FROM child; ---source include/wait_condition.inc DELETE FROM parent; --connection node_1a # wait until applier has reached the sync point SET SESSION DEBUG_SYNC = "now WAIT_FOR sync.wsrep_apply_cb_reached"; - 
--connection node_1 # issue conflicting write to child table, it should fail in certification --error ER_LOCK_DEADLOCK @@ -88,11 +104,13 @@ SET DEBUG_SYNC = "RESET"; --connection node_1 --reap - +--let $wait_condition = SELECT COUNT(*) = 0 FROM parent; +--source include/wait_condition.inc --let $wait_condition = SELECT COUNT(*) = 0 FROM child; --source include/wait_condition.inc -SELECT COUNT(*), COUNT(*) = 0 FROM parent; -SELECT COUNT(*), COUNT(*) = 0 FROM child; + +SELECT * FROM parent; +SELECT * FROM child; DROP TABLE child; DROP TABLE parent; From beaea31ab12ab56ea8a6eb5e99cf82648675ea78 Mon Sep 17 00:00:00 2001 From: sjaakola Date: Wed, 9 Dec 2020 21:53:18 +0200 Subject: [PATCH 074/150] MDEV-23851 BF-BF Conflict issue because of UK GAP locks MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Some DML operations on tables having unique secondary keys cause scanning in the secondary index, for instance to find potential unique key violations in the secondary index. This scanning may involve GAP locking in the index. As this locking happens also when applying replication events in high priority applier threads, there is a probability for lock conflicts between two wsrep high priority threads. This PR avoids lock conflicts of high priority wsrep threads, which do secondary index scanning e.g. for duplicate key detection. The actual fix is the patch in sql_class.cc:thd_need_ordering_with(), where we allow relaxed GAP locking protocol between wsrep high priority threads. wsrep high priority threads (replication appliers, replayers and TOI processors) are ordered by the replication provider, and they will not need serializability support gained by secondary index GAP locks. The PR also contains an mtr test, which exercises a scenario where two replication applier threads have a false positive conflict in the GAP of a unique secondary index. The conflicting local committing transaction has to replay, and the test verifies also that the replaying phase will not conflict with the latter replication applier. The commit also contains a new test scenario for galera.galera_UK_conflict.test, where the replayer starts applying after a slave applier thread, with later seqno, has advanced to commit phase. The applier and replayer have a false positive GAP lock conflict on the secondary unique index, and the replayer should ignore this. This test scenario caused a crash with an earlier version of this PR, and to fix this, the secondary index uniqueness checking has been relaxed even further. Now the InnoDB trx_t structure has a new member: bool wsrep_UK_scan, which is set to true when a high priority thread is performing unique secondary index scanning. The member trx_t::wsrep_UK_scan is defined inside the WITH_WSREP directive, to make it possible to prepare a MariaDB build where this additional trx_t member is not present and is not used in the code base. trx->wsrep_UK_scan is set to true only for the duration of the function call for: lock_rec_lock() trx->wsrep_UK_scan is used only in the lock_rec_has_to_wait() function to relax the need to wait if wsrep_UK_scan is set and the conflicting transaction is also high priority.
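A rough sketch of the scenario described above (not part of the patch; a two-node Galera cluster is assumed and the table definition is illustrative). The false positive arises when two write sets insert into the same gap of a unique secondary index:

CREATE TABLE t1 (f1 INT PRIMARY KEY, f2 INT, UNIQUE KEY (f2)) ENGINE=InnoDB;
INSERT INTO t1 VALUES (1, 1), (10, 10);
# issued on the other node, so both arrive here in high priority applier threads:
# INSERT INTO t1 VALUES (5, 5);
# INSERT INTO t1 VALUES (4, 4);
# each duplicate-key check scans the gap between 1 and 10 in the unique index
# on f2; with this fix the resulting gap-lock conflict between two brute force
# threads is ignored instead of forcing an abort and replay.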
Reviewed-by: Jan Lindström --- .../suite/galera/r/galera_UK_conflict.result | 89 +++++++++++ .../suite/galera/t/galera_UK_conflict.test | 148 ++++++++++++++++++ sql/mdl.cc | 8 +- sql/sql_class.cc | 12 +- sql/wsrep_thd.cc | 14 ++ storage/innobase/btr/btr0cur.cc | 29 +++- storage/innobase/include/trx0trx.h | 5 +- storage/innobase/lock/lock0lock.cc | 44 +++++- storage/innobase/trx/trx0trx.cc | 10 +- 9 files changed, 349 insertions(+), 10 deletions(-) create mode 100644 mysql-test/suite/galera/r/galera_UK_conflict.result create mode 100644 mysql-test/suite/galera/t/galera_UK_conflict.test diff --git a/mysql-test/suite/galera/r/galera_UK_conflict.result b/mysql-test/suite/galera/r/galera_UK_conflict.result new file mode 100644 index 00000000000..76649f1b268 --- /dev/null +++ b/mysql-test/suite/galera/r/galera_UK_conflict.result @@ -0,0 +1,89 @@ +CREATE TABLE t1 (f1 INTEGER PRIMARY KEY, f2 int, f3 int, unique key keyj (f2)); +INSERT INTO t1 VALUES (1, 1, 0); +INSERT INTO t1 VALUES (3, 3, 0); +INSERT INTO t1 VALUES (10, 10, 0); +SET GLOBAL wsrep_slave_threads = 3; +SET GLOBAL DEBUG_DBUG = "d,sync.wsrep_apply_cb"; +connection node_1; +SET SESSION wsrep_sync_wait=0; +START TRANSACTION; +DELETE FROM t1 WHERE f2 = 3; +INSERT INTO t1 VALUES (3, 3, 1); +connect node_1a, 127.0.0.1, root, , test, $NODE_MYPORT_1; +connection node_1a; +SET SESSION wsrep_sync_wait=0; +connection node_2; +INSERT INTO t1 VALUES (5, 5, 2); +connection node_1a; +SET SESSION DEBUG_SYNC = "now WAIT_FOR sync.wsrep_apply_cb_reached"; +SET GLOBAL wsrep_provider_options = 'dbug=d,apply_monitor_slave_enter_sync'; +connection node_2; +INSERT INTO t1 VALUES (4, 4, 2); +connection node_1a; +SET SESSION wsrep_on = 0; +SET SESSION wsrep_on = 1; +SET GLOBAL wsrep_provider_options = 'dbug='; +SET GLOBAL wsrep_provider_options = 'dbug=d,commit_monitor_enter_sync'; +connection node_1; +COMMIT; +connection node_1a; +SET SESSION wsrep_on = 0; +SET SESSION wsrep_on = 1; +SET GLOBAL wsrep_provider_options = 'dbug='; +SET GLOBAL wsrep_provider_options = 'signal=commit_monitor_enter_sync'; +SET GLOBAL wsrep_provider_options = 'dbug='; +SET GLOBAL DEBUG_DBUG = ""; +SET DEBUG_SYNC = "now SIGNAL signal.wsrep_apply_cb"; +SET GLOBAL debug_dbug = NULL; +SET debug_sync='RESET'; +SET GLOBAL DEBUG_DBUG = "d,sync.wsrep_apply_cb"; +SET GLOBAL wsrep_provider_options = 'signal=apply_monitor_slave_enter_sync'; +SET SESSION DEBUG_SYNC = "now WAIT_FOR sync.wsrep_apply_cb_reached"; +SET GLOBAL wsrep_provider_options = 'dbug=d,commit_monitor_enter_sync'; +SET GLOBAL wsrep_provider_options = 'dbug='; +SET GLOBAL DEBUG_DBUG = ""; +SET DEBUG_SYNC = "now SIGNAL signal.wsrep_apply_cb"; +SET GLOBAL debug_dbug = NULL; +SET debug_sync='RESET'; +SET GLOBAL wsrep_provider_options = 'signal=commit_monitor_enter_sync'; +SET GLOBAL wsrep_provider_options = 'dbug='; +connection node_1; +SELECT * FROM t1; +f1 f2 f3 +1 1 0 +3 3 1 +4 4 2 +5 5 2 +10 10 0 +wsrep_local_replays +1 +SET GLOBAL wsrep_slave_threads = DEFAULT; +connection node_2; +SELECT * FROM t1; +f1 f2 f3 +1 1 0 +3 3 1 +4 4 2 +5 5 2 +10 10 0 +INSERT INTO t1 VALUES (7,7,7); +INSERT INTO t1 VALUES (8,8,8); +SELECT * FROM t1; +f1 f2 f3 +1 1 0 +3 3 1 +4 4 2 +5 5 2 +7 7 7 +8 8 8 +10 10 0 +connection node_1; +SELECT * FROM t1; +f1 f2 f3 +1 1 0 +3 3 1 +4 4 2 +5 5 2 +7 7 7 +10 10 0 +DROP TABLE t1; diff --git a/mysql-test/suite/galera/t/galera_UK_conflict.test b/mysql-test/suite/galera/t/galera_UK_conflict.test new file mode 100644 index 00000000000..57bafbf8ae0 --- /dev/null +++ b/mysql-test/suite/galera/t/galera_UK_conflict.test 
@@ -0,0 +1,148 @@ +# +# This test tests the operation of transaction replay with a scenario +# where two subsequent write sets in applying conflict with local transaction +# in commit phase. The conflict is "false positive" confict on GAP lock in +# secondary unique index. +# The first applier will cause BF abort for the local committer, which +# starts replaying because of positive certification. +# In buggy version, scenatio continues so that ehile the local transaction +# is replaying, the latter applier experiences similar UK GAP lock conflict +# and forces the replayer to abort second time. +# In fixed version, this latter BF abort should not happen. +# + +--source include/galera_cluster.inc +--source include/have_innodb.inc +--source include/have_debug_sync.inc +--source include/galera_have_debug_sync.inc + +--let $wsrep_local_replays_old = `SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_local_replays'` + +CREATE TABLE t1 (f1 INTEGER PRIMARY KEY, f2 int, f3 int, unique key keyj (f2)); +INSERT INTO t1 VALUES (1, 1, 0); +INSERT INTO t1 VALUES (3, 3, 0); +INSERT INTO t1 VALUES (10, 10, 0); + +# we will need 2 appliers threads for applyin two write sets in parallel in node1 +# and 1 applier thread for handling replaying +SET GLOBAL wsrep_slave_threads = 3; +SET GLOBAL DEBUG_DBUG = "d,sync.wsrep_apply_cb"; + +--connection node_1 +# starting a transaction, which deletes and inserts the middle row in test table +# this will be victim of false positive conflict with appliers +SET SESSION wsrep_sync_wait=0; +START TRANSACTION; + +DELETE FROM t1 WHERE f2 = 3; +INSERT INTO t1 VALUES (3, 3, 1); + +# Control connection to manage sync points for appliers +--connect node_1a, 127.0.0.1, root, , test, $NODE_MYPORT_1 +--connection node_1a +SET SESSION wsrep_sync_wait=0; + +# send from node 2 first INSERT transaction, which will conflict on GAP lock in node 1 +--connection node_2 +INSERT INTO t1 VALUES (5, 5, 2); + +--connection node_1a +# wait to see the INSERT in apply_cb sync point +SET SESSION DEBUG_SYNC = "now WAIT_FOR sync.wsrep_apply_cb_reached"; + +# first applier seen in wait point, set sync point for the second INSERT +--let $galera_sync_point = apply_monitor_slave_enter_sync +--source include/galera_set_sync_point.inc + +--connection node_2 +# send second insert into same GAP in test table +INSERT INTO t1 VALUES (4, 4, 2); + +--connection node_1a +# wait for the second insert to arrive in his sync point +--let $galera_sync_point = apply_monitor_slave_enter_sync +--source include/galera_wait_sync_point.inc +--source include/galera_clear_sync_point.inc + +# both appliers are now waiting in separate sync points + +# Block the local commit, send the COMMIT and wait until it gets blocked +--let $galera_sync_point = commit_monitor_enter_sync +--source include/galera_set_sync_point.inc + +--connection node_1 +--send COMMIT + +--connection node_1a +# wait for the local commit to enter in commit monitor wait state +--let $galera_sync_point = apply_monitor_slave_enter_sync commit_monitor_enter_sync +--source include/galera_wait_sync_point.inc +--source include/galera_clear_sync_point.inc + +# release the local transaction to continue with commit +--let $galera_sync_point = commit_monitor_enter_sync +--source include/galera_signal_sync_point.inc +--source include/galera_clear_sync_point.inc + +# and now release the first applier, it should force local trx to abort +SET GLOBAL DEBUG_DBUG = ""; +SET DEBUG_SYNC = "now SIGNAL signal.wsrep_apply_cb"; +SET GLOBAL 
debug_dbug = NULL; +SET debug_sync='RESET'; + +# set another sync point for second applier +SET GLOBAL DEBUG_DBUG = "d,sync.wsrep_apply_cb"; + +# letting the second appier to move forward +--let $galera_sync_point = apply_monitor_slave_enter_sync +--source include/galera_signal_sync_point.inc + +# waiting until second applier is in wait +SET SESSION DEBUG_SYNC = "now WAIT_FOR sync.wsrep_apply_cb_reached"; + +# stopping second applier before commit +--let $galera_sync_point = commit_monitor_enter_sync +--source include/galera_set_sync_point.inc +--source include/galera_clear_sync_point.inc + +# releasing the second insert, with buggy version it will conflict with +# replayer +SET GLOBAL DEBUG_DBUG = ""; +SET DEBUG_SYNC = "now SIGNAL signal.wsrep_apply_cb"; +SET GLOBAL debug_dbug = NULL; +SET debug_sync='RESET'; + +# with fixed version, second applier has reached commit monitor, and we can +# release it to complete +--let $galera_sync_point = commit_monitor_enter_sync +--source include/galera_signal_sync_point.inc +--source include/galera_clear_sync_point.inc + +# local commit should succeed +--connection node_1 +--reap + +SELECT * FROM t1; + +# wsrep_local_replays has increased by 1 +--let $wsrep_local_replays_new = `SELECT VARIABLE_VALUE FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_local_replays'` +--disable_query_log +--eval SELECT $wsrep_local_replays_new - $wsrep_local_replays_old = 1 AS wsrep_local_replays; +--enable_query_log + +# returning original slave thread count +SET GLOBAL wsrep_slave_threads = DEFAULT; + +--connection node_2 +SELECT * FROM t1; + +# replicate some transactions, so that wsrep slave thread count can reach +# original state in node 1 +INSERT INTO t1 VALUES (7,7,7); +INSERT INTO t1 VALUES (8,8,8); +SELECT * FROM t1; + +--connection node_1 +SELECT * FROM t1; + +DROP TABLE t1; diff --git a/sql/mdl.cc b/sql/mdl.cc index f2b205a86f2..8cb21771991 100644 --- a/sql/mdl.cc +++ b/sql/mdl.cc @@ -1082,7 +1082,7 @@ MDL_wait::timed_wait(MDL_context_owner *owner, struct timespec *abs_timeout, DBUG_ASSERT(!debug_sync_set_action((owner->get_thd()), STRING_WITH_LEN(act))); };); - if (wsrep_thd_is_BF(owner->get_thd(), false)) + if (WSREP_ON && wsrep_thd_is_BF(owner->get_thd(), false)) { wait_result= mysql_cond_wait(&m_COND_wait_status, &m_LOCK_wait_status); } @@ -1155,7 +1155,7 @@ void MDL_lock::Ticket_list::add_ticket(MDL_ticket *ticket) */ DBUG_ASSERT(ticket->get_lock()); #ifdef WITH_WSREP - if ((this == &(ticket->get_lock()->m_waiting)) && + if (WSREP_ON && (this == &(ticket->get_lock()->m_waiting)) && wsrep_thd_is_BF(ticket->get_ctx()->get_thd(), false)) { Ticket_iterator itw(ticket->get_lock()->m_waiting); @@ -1581,7 +1581,7 @@ MDL_lock::can_grant_lock(enum_mdl_type type_arg, ticket->is_incompatible_when_granted(type_arg)) { #ifdef WITH_WSREP - if (wsrep_thd_is_BF(requestor_ctx->get_thd(),false) && + if (WSREP_ON && wsrep_thd_is_BF(requestor_ctx->get_thd(),false) && key.mdl_namespace() == MDL_key::GLOBAL) { WSREP_DEBUG("global lock granted for BF: %lu %s", @@ -1615,7 +1615,7 @@ MDL_lock::can_grant_lock(enum_mdl_type type_arg, } else { - if (wsrep_thd_is_BF(requestor_ctx->get_thd(), false) && + if (WSREP_ON && wsrep_thd_is_BF(requestor_ctx->get_thd(), false) && key.mdl_namespace() == MDL_key::GLOBAL) { WSREP_DEBUG("global lock granted for BF (waiting queue): %lu %s", diff --git a/sql/sql_class.cc b/sql/sql_class.cc index c3274ae9b82..92736eacee2 100644 --- a/sql/sql_class.cc +++ b/sql/sql_class.cc @@ -1,6 +1,6 @@ /* Copyright (c) 2000, 2015, Oracle and/or 
its affiliates. - Copyright (c) 2008, 2020, MariaDB Corporation. + Copyright (c) 2008, 2021, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by @@ -4730,6 +4730,16 @@ thd_need_ordering_with(const MYSQL_THD thd, const MYSQL_THD other_thd) DBUG_EXECUTE_IF("disable_thd_need_ordering_with", return 1;); if (!thd || !other_thd) return 1; +#ifdef WITH_WSREP + /* wsrep applier, replayer and TOI processing threads are ordered + by replication provider, relaxed GAP locking protocol can be used + between high priority wsrep threads + */ + if (WSREP_ON && + wsrep_thd_is_BF(const_cast(thd), false) && + wsrep_thd_is_BF(const_cast(other_thd), true)) + return 0; +#endif /* WITH_WSREP */ rgi= thd->rgi_slave; other_rgi= other_thd->rgi_slave; if (!rgi || !other_rgi) diff --git a/sql/wsrep_thd.cc b/sql/wsrep_thd.cc index 1c43aeaaead..d8ca70d1cbe 100644 --- a/sql/wsrep_thd.cc +++ b/sql/wsrep_thd.cc @@ -22,6 +22,7 @@ //#include "global_threads.h" // LOCK_thread_count, etc. #include "sql_base.h" // close_thread_tables() #include "mysqld.h" // start_wsrep_THD(); +#include "debug_sync.h" #include "slave.h" // opt_log_slave_updates #include "rpl_filter.h" @@ -371,6 +372,19 @@ void wsrep_replay_transaction(THD *thd) thd->variables.option_bits|= OPTION_BEGIN; thd->server_status|= SERVER_STATUS_IN_TRANS; + /* Allow tests to block the replayer thread using the DBUG facilities */ +#ifdef ENABLED_DEBUG_SYNC + DBUG_EXECUTE_IF("sync.wsrep_replay_cb", + { + const char act[]= + "now " + "SIGNAL sync.wsrep_replay_cb_reached " + "WAIT_FOR signal.wsrep_replay_cb"; + DBUG_ASSERT(!debug_sync_set_action(thd, + STRING_WITH_LEN(act))); + };); +#endif /* ENABLED_DEBUG_SYNC */ + int rcode = wsrep->replay_trx(wsrep, &thd->wsrep_ws_handle, (void *)thd); diff --git a/storage/innobase/btr/btr0cur.cc b/storage/innobase/btr/btr0cur.cc index 7fd34c5d652..3d03c55bf15 100644 --- a/storage/innobase/btr/btr0cur.cc +++ b/storage/innobase/btr/btr0cur.cc @@ -3,7 +3,7 @@ Copyright (c) 1994, 2019, Oracle and/or its affiliates. All Rights Reserved. Copyright (c) 2008, Google Inc. Copyright (c) 2012, Facebook Inc. -Copyright (c) 2015, 2020, MariaDB Corporation. +Copyright (c) 2015, 2021, MariaDB Corporation. Portions of this file contain modifications contributed and copyrighted by Google, Inc. Those modifications are gratefully acknowledged and are described @@ -67,6 +67,9 @@ Created 10/16/1994 Heikki Tuuri #include "srv0start.h" #include "mysql_com.h" #include "dict0stats.h" +#ifdef WITH_WSREP +#include "mysql/service_wsrep.h" +#endif /* WITH_WSREP */ /** Buffered B-tree operation types, introduced as part of delete buffering. */ enum btr_op_t { @@ -2941,7 +2944,8 @@ btr_cur_ins_lock_and_undo( /* Check if there is predicate or GAP lock preventing the insertion */ if (!(flags & BTR_NO_LOCKING_FLAG)) { - if (dict_index_is_spatial(index)) { + const unsigned type = index->type; + if (UNIV_UNLIKELY(type & DICT_SPATIAL)) { lock_prdt_t prdt; rtr_mbr_t mbr; @@ -2958,9 +2962,30 @@ btr_cur_ins_lock_and_undo( index, thr, mtr, &prdt); *inherit = false; } else { +#ifdef WITH_WSREP + trx_t* trx= thr_get_trx(thr); + /* If transaction scanning an unique secondary + key is wsrep high priority thread (brute + force) this scanning may involve GAP-locking + in the index. As this locking happens also + when applying replication events in high + priority applier threads, there is a + probability for lock conflicts between two + wsrep high priority threads. 
To avoid this + GAP-locking we mark that this transaction + is using unique key scan here. */ + if ((type & (DICT_CLUSTERED | DICT_UNIQUE)) == DICT_UNIQUE + && trx->is_wsrep() + && wsrep_thd_is_BF(trx->mysql_thd, false)) { + trx->wsrep_UK_scan= true; + } +#endif /* WITH_WSREP */ err = lock_rec_insert_check_and_lock( flags, rec, btr_cur_get_block(cursor), index, thr, mtr, inherit); +#ifdef WITH_WSREP + trx->wsrep_UK_scan= false; +#endif /* WITH_WSREP */ } } diff --git a/storage/innobase/include/trx0trx.h b/storage/innobase/include/trx0trx.h index 048a050a28d..5354c77db25 100644 --- a/storage/innobase/include/trx0trx.h +++ b/storage/innobase/include/trx0trx.h @@ -1,7 +1,7 @@ /***************************************************************************** Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved. -Copyright (c) 2015, 2020, MariaDB Corporation. +Copyright (c) 2015, 2021, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -875,6 +875,9 @@ public: /** whether wsrep_on(mysql_thd) held at the start of transaction */ bool wsrep; bool is_wsrep() const { return UNIV_UNLIKELY(wsrep); } + /** true, if BF thread is performing unique secondary index scanning */ + bool wsrep_UK_scan; + bool is_wsrep_UK_scan() const { return UNIV_UNLIKELY(wsrep_UK_scan); } #else /* WITH_WSREP */ bool is_wsrep() const { return false; } #endif /* WITH_WSREP */ diff --git a/storage/innobase/lock/lock0lock.cc b/storage/innobase/lock/lock0lock.cc index c95506abc39..edd29066c97 100644 --- a/storage/innobase/lock/lock0lock.cc +++ b/storage/innobase/lock/lock0lock.cc @@ -1,7 +1,7 @@ /***************************************************************************** Copyright (c) 1996, 2017, Oracle and/or its affiliates. All Rights Reserved. -Copyright (c) 2014, 2020, MariaDB Corporation. +Copyright (c) 2014, 2021, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -816,6 +816,17 @@ lock_rec_has_to_wait( } #ifdef WITH_WSREP + /* New lock request from a transaction is using unique key + scan and this transaction is a wsrep high priority transaction + (brute force). If conflicting transaction is also wsrep high + priority transaction we should avoid lock conflict because + ordering of these transactions is already decided and + conflicting transaction will be later replayed. */ + if (trx->is_wsrep_UK_scan() + && wsrep_thd_is_BF(lock2->trx->mysql_thd, true)) { + return (FALSE); + } + /* There should not be two conflicting locks that are brute force. If there is it is a bug. */ wsrep_assert_no_bf_bf_wait(NULL, lock2, trx); @@ -5928,6 +5939,19 @@ lock_sec_rec_modify_check_and_lock( heap_no = page_rec_get_heap_no(rec); +#ifdef WITH_WSREP + trx_t *trx= thr_get_trx(thr); + /* If transaction scanning an unique secondary key is wsrep + high priority thread (brute force) this scanning may involve + GAP-locking in the index. As this locking happens also when + applying replication events in high priority applier threads, + there is a probability for lock conflicts between two wsrep + high priority threads. To avoid this GAP-locking we mark that + this transaction is using unique key scan here. 
*/ + if (trx->is_wsrep() && wsrep_thd_is_BF(trx->mysql_thd, false)) + trx->wsrep_UK_scan= true; +#endif /* WITH_WSREP */ + /* Another transaction cannot have an implicit lock on the record, because when we come here, we already have modified the clustered index record, and this would not have been possible if another active @@ -5943,6 +5967,9 @@ lock_sec_rec_modify_check_and_lock( MONITOR_INC(MONITOR_NUM_RECLOCK_REQ); lock_mutex_exit(); +#ifdef WITH_WSREP + trx->wsrep_UK_scan= false; +#endif /* WITH_WSREP */ #ifdef UNIV_DEBUG { @@ -6032,6 +6059,18 @@ lock_sec_rec_read_check_and_lock( lock_rec_convert_impl_to_expl(block, rec, index, offsets); } +#ifdef WITH_WSREP + trx_t *trx= thr_get_trx(thr); + /* If transaction scanning an unique secondary key is wsrep + high priority thread (brute force) this scanning may involve + GAP-locking in the index. As this locking happens also when + applying replication events in high priority applier threads, + there is a probability for lock conflicts between two wsrep + high priority threads. To avoid this GAP-locking we mark that + this transaction is using unique key scan here. */ + if (trx->is_wsrep() && wsrep_thd_is_BF(trx->mysql_thd, false)) + trx->wsrep_UK_scan= true; +#endif /* WITH_WSREP */ lock_mutex_enter(); ut_ad(mode != LOCK_X @@ -6045,6 +6084,9 @@ lock_sec_rec_read_check_and_lock( MONITOR_INC(MONITOR_NUM_RECLOCK_REQ); lock_mutex_exit(); +#ifdef WITH_WSREP + trx->wsrep_UK_scan= false; +#endif /* WITH_WSREP */ ut_ad(lock_rec_queue_validate(FALSE, block, rec, index, offsets)); diff --git a/storage/innobase/trx/trx0trx.cc b/storage/innobase/trx/trx0trx.cc index 60e534e0f43..42bd67cb24b 100644 --- a/storage/innobase/trx/trx0trx.cc +++ b/storage/innobase/trx/trx0trx.cc @@ -1,7 +1,7 @@ /***************************************************************************** Copyright (c) 1996, 2016, Oracle and/or its affiliates. All Rights Reserved. -Copyright (c) 2015, 2020, MariaDB Corporation. +Copyright (c) 2015, 2021, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -154,6 +154,11 @@ trx_init( trx->lock.rec_cached = 0; trx->lock.table_cached = 0; +#ifdef WITH_WSREP + ut_ad(!trx->wsrep); + ut_ad(!trx->wsrep_event); + ut_ad(!trx->wsrep_UK_scan); +#endif /* WITH_WSREP */ ut_ad(trx->get_flush_observer() == NULL); } @@ -355,6 +360,7 @@ trx_t *trx_allocate_for_background() #ifdef WITH_WSREP trx->wsrep_event = NULL; + ut_ad(!trx->wsrep_UK_scan); #endif /* WITH_WSREP */ return(trx); @@ -466,6 +472,8 @@ inline void trx_t::free() MEM_NOACCESS(&flush_observer, sizeof flush_observer); #ifdef WITH_WSREP MEM_NOACCESS(&wsrep_event, sizeof wsrep_event); + ut_ad(!wsrep_UK_scan); + MEM_NOACCESS(&wsrep_UK_scan, sizeof wsrep_UK_scan); #endif /* WITH_WSREP */ MEM_NOACCESS(&magic_n, sizeof magic_n); trx_pools->mem_free(this); From 94890a749a217bb831a5c260703d27b0a3e290d4 Mon Sep 17 00:00:00 2001 From: Rucha Deodhar Date: Fri, 18 Dec 2020 00:28:38 +0530 Subject: [PATCH 075/150] MDEV-24179: Assertion `m_status == DA_ERROR || m_status == DA_OK || m_status == DA_OK_BULK' failed in Diagnostics_area::message() Analysis: Assertion failure happens because we reach the maximum limit to examine rows. Fix: Return the error state. 
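A minimal way to exercise the fixed code path, essentially the regression test added below shown here for readability (the view definition is illustrative):

CREATE VIEW v AS SELECT table_schema AS object_schema, table_name AS object_name
FROM information_schema.tables ORDER BY object_schema;
# the information_schema scan hits the examined-rows limit inside
# get_all_tables(); with the fix the statement fails cleanly with
# ER_FILSORT_ABORT instead of tripping the Diagnostics_area assertion
SELECT * FROM v LIMIT ROWS EXAMINED 9;
DROP VIEW v;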
--- mysql-test/r/information_schema.result | 12 ++++++++++++ mysql-test/t/information_schema.test | 17 +++++++++++++++++ sql/sql_show.cc | 6 ++++++ 3 files changed, 35 insertions(+) diff --git a/mysql-test/r/information_schema.result b/mysql-test/r/information_schema.result index db416f983e3..1594ebf75e0 100644 --- a/mysql-test/r/information_schema.result +++ b/mysql-test/r/information_schema.result @@ -2196,5 +2196,17 @@ TABLE_CATALOG TABLE_SCHEMA TABLE_NAME COLUMN_NAME ORDINAL_POSITION COLUMN_DEFAUL Warnings: Warning 1931 Query execution was interrupted. The query examined at least ### rows, which exceeds LIMIT ROWS EXAMINED (10). The query result may be incomplete # +# MDEV-24179: AAssertion `m_status == DA_ERROR || m_status == DA_OK || +# m_status == DA_OK_BULK' failed in Diagnostics_area::message() +# +call mtr.add_suppression("Sort aborted.*"); +DROP DATABASE test; +CREATE DATABASE test; +USE test; +CREATE VIEW v AS SELECT table_schema AS object_schema, table_name AS object_name, table_type AS object_type FROM information_schema.tables ORDER BY object_schema; +SELECT * FROM v LIMIT ROWS EXAMINED 9; +ERROR HY000: Sort aborted: +DROP VIEW v; +# # End of 10.2 Test # diff --git a/mysql-test/t/information_schema.test b/mysql-test/t/information_schema.test index cd30c2103a0..9ff94d2deb7 100644 --- a/mysql-test/t/information_schema.test +++ b/mysql-test/t/information_schema.test @@ -1917,6 +1917,23 @@ SELECT SCHEMA_NAME from information_schema.schemata where schema_name=REPEAT('a' replace_regex /at least \d+ rows/at least ### rows/; SELECT * FROM INFORMATION_SCHEMA.`COLUMNS` LIMIT ROWS EXAMINED 10; +--echo # +--echo # MDEV-24179: AAssertion `m_status == DA_ERROR || m_status == DA_OK || +--echo # m_status == DA_OK_BULK' failed in Diagnostics_area::message() +--echo # + +call mtr.add_suppression("Sort aborted.*"); + +DROP DATABASE test; +CREATE DATABASE test; +USE test; +CREATE VIEW v AS SELECT table_schema AS object_schema, table_name AS object_name, table_type AS object_type FROM information_schema.tables ORDER BY object_schema; + +--error ER_FILSORT_ABORT +SELECT * FROM v LIMIT ROWS EXAMINED 9; + +DROP VIEW v; + --echo # --echo # End of 10.2 Test --echo # diff --git a/sql/sql_show.cc b/sql/sql_show.cc index 4294157cce3..3e9916816b9 100644 --- a/sql/sql_show.cc +++ b/sql/sql_show.cc @@ -5058,6 +5058,12 @@ int get_all_tables(THD *thd, TABLE_LIST *tables, COND *cond) continue; } + if (thd->killed == ABORT_QUERY) + { + error= 0; + goto err; + } + DEBUG_SYNC(thd, "before_open_in_get_all_tables"); if (fill_schema_table_by_open(thd, FALSE, table, schema_table, From 479b4214fadcb0b8dedcaf780b180160d60b3986 Mon Sep 17 00:00:00 2001 From: Thirunarayanan Balathandayuthapani Date: Mon, 11 Jan 2021 15:23:09 +0530 Subject: [PATCH 076/150] MDEV-24547 Update fails when online alter does rollback due to MDL time out MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When online alter rollbacks due to MDL time out, it doesn't mark the index online status as ONLINE_INDEX_ABORTED. Concurrent update fails to update the secondary index while building the entry. InnoDB should check the online status of the secondary index before building the secondary index entry. 
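Conceptually, the user-visible sequence is sketched below; the regression test added in this patch relies on DEBUG_SYNC points to make the race deterministic, and the table and column names follow that test.

# connection 1: keep an uncommitted row so the ALTER cannot complete its MDL upgrade
BEGIN;
INSERT INTO t1 VALUES ('e', 'e', 5, 5);
# connection 2: the online ALTER times out waiting for the MDL upgrade and
# rolls back; the rollback must mark the new indexes ONLINE_INDEX_ABORTED
SET SESSION lock_wait_timeout = 2;
ALTER TABLE t1 ADD UNIQUE INDEX(f1(3), f4), ALGORITHM=INPLACE;  # ER_LOCK_WAIT_TIMEOUT
# connection 1: before the fix this UPDATE tried to build an entry for the
# rolled-back index and failed; now it checks the index online status first
UPDATE t1 SET f4 = 10 ORDER BY f1 DESC LIMIT 2;
COMMIT;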
Reviewed-by: Marko Mäkelä --- .../suite/innodb/r/alter_mdl_timeout.result | 23 +++++++++++++ .../suite/innodb/t/alter_mdl_timeout.opt | 1 + .../suite/innodb/t/alter_mdl_timeout.test | 32 ++++++++++++++++++ storage/innobase/row/row0upd.cc | 33 +++++++++++++++++-- 4 files changed, 87 insertions(+), 2 deletions(-) create mode 100644 mysql-test/suite/innodb/r/alter_mdl_timeout.result create mode 100644 mysql-test/suite/innodb/t/alter_mdl_timeout.opt create mode 100644 mysql-test/suite/innodb/t/alter_mdl_timeout.test diff --git a/mysql-test/suite/innodb/r/alter_mdl_timeout.result b/mysql-test/suite/innodb/r/alter_mdl_timeout.result new file mode 100644 index 00000000000..7af1362c69e --- /dev/null +++ b/mysql-test/suite/innodb/r/alter_mdl_timeout.result @@ -0,0 +1,23 @@ +create table t1(f1 char(10), f2 char(10) not null, f3 int not null, +f4 int not null, primary key(f3))engine=innodb; +insert into t1 values('a','a', 1, 1), ('b','b', 2, 2), ('c', 'c', 3, 3), ('d', 'd', 4, 4); +SET DEBUG_SYNC="row_merge_after_scan SIGNAL con1_start WAIT_FOR con1_insert"; +SET DEBUG_SYNC="innodb_commit_inplace_alter_table_wait SIGNAL con1_wait WAIT_FOR con1_update"; +ALTER TABLE t1 ADD UNIQUE INDEX(f1(3), f4), ADD UNIQUE INDEX(f2), ALGORITHM=INPLACE; +connect con1,localhost,root,,,; +SET DEBUG_SYNC="now WAIT_FOR con1_start"; +begin; +INSERT INTO t1 VALUES('e','e',5, 5); +SET DEBUG_SYNC="now SIGNAL con1_insert"; +SET DEBUG_SYNC="now WAIT_FOR con1_wait"; +SET DEBUG_SYNC="before_row_upd_sec_new_index_entry SIGNAL con1_update WAIT_FOR alter_rollback"; +UPDATE t1 set f4 = 10 order by f1 desc limit 2; +connection default; +ERROR HY000: Lock wait timeout exceeded; try restarting transaction +SET DEBUG_SYNC="now SIGNAL alter_rollback"; +connection con1; +commit; +connection default; +disconnect con1; +DROP TABLE t1; +SET DEBUG_SYNC="RESET"; diff --git a/mysql-test/suite/innodb/t/alter_mdl_timeout.opt b/mysql-test/suite/innodb/t/alter_mdl_timeout.opt new file mode 100644 index 00000000000..9e0e38bd64a --- /dev/null +++ b/mysql-test/suite/innodb/t/alter_mdl_timeout.opt @@ -0,0 +1 @@ +--lock_wait_timeout=2 diff --git a/mysql-test/suite/innodb/t/alter_mdl_timeout.test b/mysql-test/suite/innodb/t/alter_mdl_timeout.test new file mode 100644 index 00000000000..15e7f524fd0 --- /dev/null +++ b/mysql-test/suite/innodb/t/alter_mdl_timeout.test @@ -0,0 +1,32 @@ +--source include/have_innodb.inc +--source include/have_debug.inc + +create table t1(f1 char(10), f2 char(10) not null, f3 int not null, + f4 int not null, primary key(f3))engine=innodb; +insert into t1 values('a','a', 1, 1), ('b','b', 2, 2), ('c', 'c', 3, 3), ('d', 'd', 4, 4); +SET DEBUG_SYNC="row_merge_after_scan SIGNAL con1_start WAIT_FOR con1_insert"; +SET DEBUG_SYNC="innodb_commit_inplace_alter_table_wait SIGNAL con1_wait WAIT_FOR con1_update"; +send ALTER TABLE t1 ADD UNIQUE INDEX(f1(3), f4), ADD UNIQUE INDEX(f2), ALGORITHM=INPLACE; + +connect(con1,localhost,root,,,); +SET DEBUG_SYNC="now WAIT_FOR con1_start"; +begin; +INSERT INTO t1 VALUES('e','e',5, 5); +SET DEBUG_SYNC="now SIGNAL con1_insert"; +SET DEBUG_SYNC="now WAIT_FOR con1_wait"; +SET DEBUG_SYNC="before_row_upd_sec_new_index_entry SIGNAL con1_update WAIT_FOR alter_rollback"; +SEND UPDATE t1 set f4 = 10 order by f1 desc limit 2; + +connection default; +--error ER_LOCK_WAIT_TIMEOUT +reap; +SET DEBUG_SYNC="now SIGNAL alter_rollback"; + +connection con1; +reap; +commit; + +connection default; +disconnect con1; +DROP TABLE t1; +SET DEBUG_SYNC="RESET"; diff --git a/storage/innobase/row/row0upd.cc 
b/storage/innobase/row/row0upd.cc index 10156c7d3ab..51e8246e80a 100644 --- a/storage/innobase/row/row0upd.cc +++ b/storage/innobase/row/row0upd.cc @@ -1,7 +1,7 @@ /***************************************************************************** Copyright (c) 1996, 2017, Oracle and/or its affiliates. All Rights Reserved. -Copyright (c) 2015, 2020, MariaDB Corporation. +Copyright (c) 2015, 2021, MariaDB Corporation. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software @@ -2320,7 +2320,9 @@ row_upd_sec_index_entry( break; } - if (!index->is_committed()) { + bool uncommitted = !index->is_committed(); + + if (uncommitted) { /* The index->online_status may change if the index is or was being created online, but not committed yet. It is protected by index->lock. */ @@ -2517,11 +2519,38 @@ row_upd_sec_index_entry( mem_heap_empty(heap); + DEBUG_SYNC_C_IF_THD(trx->mysql_thd, + "before_row_upd_sec_new_index_entry"); + + uncommitted = !index->is_committed(); + if (uncommitted) { + mtr.start(); + /* The index->online_status may change if the index is + being rollbacked. It is protected by index->lock. */ + + mtr_s_lock(dict_index_get_lock(index), &mtr); + + switch (dict_index_get_online_status(index)) { + case ONLINE_INDEX_COMPLETE: + case ONLINE_INDEX_CREATION: + break; + case ONLINE_INDEX_ABORTED: + case ONLINE_INDEX_ABORTED_DROPPED: + mtr_commit(&mtr); + goto func_exit; + } + + } + /* Build a new index entry */ entry = row_build_index_entry(node->upd_row, node->upd_ext, index, heap); ut_a(entry); + if (uncommitted) { + mtr_commit(&mtr); + } + /* Insert new index entry */ err = row_ins_sec_index_entry(index, entry, thr); From 82d39d4374c358c2378af05e42a2e679ffe91306 Mon Sep 17 00:00:00 2001 From: Thirunarayanan Balathandayuthapani Date: Mon, 18 Jan 2021 15:18:33 +0530 Subject: [PATCH 077/150] MDEV-24491 db_name mismatch happens during virtual column computation MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Database name mismatch happens while opening the table for virtual column computation. Because table_name_parse() returns the length of database and table name before converting the filename to table name. This issue is caused by commit 8b0d4cff0760b0a35285c315d82c49631c108baf (MDEV-15855). Fix should be that table_name_parse() should return the length of database and table name after converting the filename to table name. 
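A trimmed-down sketch of the affected case (the schema name comes from the test added below; the table definitions are simplified, and the on-disk name in the comment assumes the usual filename encoding of '-'):

# `a-b` is stored on disk as a@002db, so the length of the converted name
# differs from the length of the raw filename that was returned before the fix
CREATE DATABASE `a-b`;
USE `a-b`;
CREATE TABLE emails (id INT PRIMARY KEY) ENGINE=InnoDB;
CREATE TABLE email_stats (
  id INT PRIMARY KEY,
  email_id INT,
  generated_email_id INT AS (email_id),
  KEY (generated_email_id),
  FOREIGN KEY (email_id) REFERENCES emails (id) ON DELETE SET NULL
) ENGINE=InnoDB;
INSERT INTO emails VALUES (1);
INSERT INTO email_stats VALUES (1, 1, DEFAULT);
# the ON DELETE SET NULL action recomputes the indexed virtual column, and that
# computation re-opens the table by its on-disk name, hitting the mismatch
DELETE FROM emails;
DROP DATABASE `a-b`;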
Reviewed-by: Marko Mäkelä --- mysql-test/suite/gcol/r/innodb_virtual_fk.result | 5 +++++ mysql-test/suite/gcol/t/innodb_virtual_fk.test | 6 +++++- storage/innobase/handler/ha_innodb.cc | 4 ++-- 3 files changed, 12 insertions(+), 3 deletions(-) diff --git a/mysql-test/suite/gcol/r/innodb_virtual_fk.result b/mysql-test/suite/gcol/r/innodb_virtual_fk.result index 68601823e31..252274f3e0a 100644 --- a/mysql-test/suite/gcol/r/innodb_virtual_fk.result +++ b/mysql-test/suite/gcol/r/innodb_virtual_fk.result @@ -793,6 +793,9 @@ DROP TABLE t1; # # MDEV-24041 Generated column DELETE with FOREIGN KEY crash InnoDB # +SET FOREIGN_KEY_CHECKS=1; +CREATE DATABASE `a-b`; +USE `a-b`; CREATE TABLE emails ( id int, PRIMARY KEY (id) @@ -802,6 +805,7 @@ id int, email_id int, date_sent char(4), generated_email_id int as (email_id), +#generated_sent_date DATE GENERATED ALWAYS AS (date_sent), PRIMARY KEY (id), KEY mautic_generated_sent_date_email_id (generated_email_id), FOREIGN KEY (email_id) REFERENCES emails (id) ON DELETE SET NULL @@ -818,3 +822,4 @@ DELETE FROM emails; DROP TABLE email_stats; DROP TABLE emails_metadata; DROP TABLE emails; +DROP DATABASE `a-b`; diff --git a/mysql-test/suite/gcol/t/innodb_virtual_fk.test b/mysql-test/suite/gcol/t/innodb_virtual_fk.test index da20612f0a1..24b6a4631e6 100644 --- a/mysql-test/suite/gcol/t/innodb_virtual_fk.test +++ b/mysql-test/suite/gcol/t/innodb_virtual_fk.test @@ -653,7 +653,9 @@ DROP TABLE t1; --echo # --echo # MDEV-24041 Generated column DELETE with FOREIGN KEY crash InnoDB --echo # - +SET FOREIGN_KEY_CHECKS=1; +CREATE DATABASE `a-b`; +USE `a-b`; CREATE TABLE emails ( id int, PRIMARY KEY (id) @@ -664,6 +666,7 @@ CREATE TABLE email_stats ( email_id int, date_sent char(4), generated_email_id int as (email_id), + #generated_sent_date DATE GENERATED ALWAYS AS (date_sent), PRIMARY KEY (id), KEY mautic_generated_sent_date_email_id (generated_email_id), FOREIGN KEY (email_id) REFERENCES emails (id) ON DELETE SET NULL @@ -686,3 +689,4 @@ DELETE FROM emails; DROP TABLE email_stats; DROP TABLE emails_metadata; DROP TABLE emails; +DROP DATABASE `a-b`; diff --git a/storage/innobase/handler/ha_innodb.cc b/storage/innobase/handler/ha_innodb.cc index 0dea402b32b..70e4ba79bb7 100644 --- a/storage/innobase/handler/ha_innodb.cc +++ b/storage/innobase/handler/ha_innodb.cc @@ -21543,7 +21543,7 @@ static bool table_name_parse( memcpy(tbl_buf, tbl_name.m_name + dbnamelen + 1, tblnamelen); tbl_buf[tblnamelen] = 0; - filename_to_tablename(db_buf, dbname, MAX_DATABASE_NAME_LEN + 1, true); + dbnamelen = filename_to_tablename(db_buf, dbname, MAX_DATABASE_NAME_LEN + 1, true); if (tblnamelen > TEMP_FILE_PREFIX_LENGTH && !strncmp(tbl_buf, TEMP_FILE_PREFIX, TEMP_FILE_PREFIX_LENGTH)) { @@ -21555,7 +21555,7 @@ static bool table_name_parse( tblnamelen = is_part - tbl_buf; } - filename_to_tablename(tbl_buf, tblname, MAX_TABLE_NAME_LEN + 1, true); + tblnamelen = filename_to_tablename(tbl_buf, tblname, MAX_TABLE_NAME_LEN + 1, true); return true; } From 775aa6f08a33042c11039450006cd5a65554aa80 Mon Sep 17 00:00:00 2001 From: Aleksey Midenkov Date: Tue, 19 Jan 2021 14:25:51 +0300 Subject: [PATCH 078/150] MDEV-24403 Segfault on CREATE TABLE with explicit FTS_DOC_ID_INDEX by multiple fields Ignore table->fts freed previously by create_table_info_t::create_table(). 
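For contrast with the definition rejected in the test below (a brief sketch, not part of the patch): a user-supplied FTS_DOC_ID_INDEX is only accepted as a unique index on the FTS_DOC_ID column alone.

create table t1 (
f1 int, f2 text,
FTS_DOC_ID bigint unsigned not null,
unique key FTS_DOC_ID_INDEX(FTS_DOC_ID),  # single column: accepted
fulltext (f2))
engine=innodb;
drop table t1;
# adding a second column to FTS_DOC_ID_INDEX, as in the test case below, is
# rejected with ER_WRONG_NAME_FOR_INDEX; before this fix the cleanup path then
# dereferenced the previously freed table->fts and crashed.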
--- mysql-test/suite/innodb_fts/r/create.result | 10 ++++++++++ mysql-test/suite/innodb_fts/t/create.test | 11 +++++++++++ storage/innobase/fts/fts0fts.cc | 2 +- 3 files changed, 22 insertions(+), 1 deletion(-) diff --git a/mysql-test/suite/innodb_fts/r/create.result b/mysql-test/suite/innodb_fts/r/create.result index c3a14fa0281..963b31fdbd7 100644 --- a/mysql-test/suite/innodb_fts/r/create.result +++ b/mysql-test/suite/innodb_fts/r/create.result @@ -182,3 +182,13 @@ Table Op Msg_type Msg_text test.t1 optimize status OK DROP TABLE t1; SET GLOBAL innodb_optimize_fulltext_only= @optimize_fulltext.save; +# +# MDEV-24403 Segfault on CREATE TABLE with explicit FTS_DOC_ID_INDEX by multiple fields +# +create table t1 ( +f1 int, f2 text, +FTS_DOC_ID bigint unsigned not null, +unique key FTS_DOC_ID_INDEX(FTS_DOC_ID, f1), +fulltext (f2)) +engine=innodb; +ERROR 42000: Incorrect index name 'FTS_DOC_ID_INDEX' diff --git a/mysql-test/suite/innodb_fts/t/create.test b/mysql-test/suite/innodb_fts/t/create.test index 4e522994fcc..38c93de4982 100644 --- a/mysql-test/suite/innodb_fts/t/create.test +++ b/mysql-test/suite/innodb_fts/t/create.test @@ -106,3 +106,14 @@ SET GLOBAL innodb_optimize_fulltext_only= 1; OPTIMIZE TABLE t1; DROP TABLE t1; SET GLOBAL innodb_optimize_fulltext_only= @optimize_fulltext.save; + +--echo # +--echo # MDEV-24403 Segfault on CREATE TABLE with explicit FTS_DOC_ID_INDEX by multiple fields +--echo # +--error ER_WRONG_NAME_FOR_INDEX +create table t1 ( + f1 int, f2 text, + FTS_DOC_ID bigint unsigned not null, + unique key FTS_DOC_ID_INDEX(FTS_DOC_ID, f1), + fulltext (f2)) +engine=innodb; diff --git a/storage/innobase/fts/fts0fts.cc b/storage/innobase/fts/fts0fts.cc index fb06d300803..1bdfbddca04 100644 --- a/storage/innobase/fts/fts0fts.cc +++ b/storage/innobase/fts/fts0fts.cc @@ -1709,7 +1709,7 @@ fts_drop_tables( error = fts_drop_common_tables(trx, &fts_table); - if (error == DB_SUCCESS) { + if (error == DB_SUCCESS && table->fts) { error = fts_drop_all_index_tables(trx, table->fts); } From 959dfac4d0e715725d05448a77c08d870d5aa247 Mon Sep 17 00:00:00 2001 From: Alexey Botchkov Date: Tue, 19 Jan 2021 15:29:03 +0400 Subject: [PATCH 079/150] MDEV-19723 Assertion `je->state == JST_KEY' failed while SELECT ST_GEOMFROMGEOJSON() and Assertion `!mysql_bin_log.is_open() || thd.is_current_stmt_binlog_format_row()' The invalid GeoJSON case wasn't handled here. --- mysql-test/r/gis-json.result | 3 +++ mysql-test/t/gis-json.test | 2 ++ sql/spatial.cc | 1 + 3 files changed, 6 insertions(+) diff --git a/mysql-test/r/gis-json.result b/mysql-test/r/gis-json.result index 1d6e2193fc9..d507a9994ff 100644 --- a/mysql-test/r/gis-json.result +++ b/mysql-test/r/gis-json.result @@ -104,6 +104,9 @@ a NULL Warnings: Warning 4076 Incorrect GeoJSON format - empty 'coordinates' array. 
+SELECT ST_GEOMFROMGEOJSON("{ \"type\": \"Feature\", \"geometry\": [10, 20] }"); +ST_GEOMFROMGEOJSON("{ \"type\": \"Feature\", \"geometry\": [10, 20] }") +NULL # # End of 10.2 tests # diff --git a/mysql-test/t/gis-json.test b/mysql-test/t/gis-json.test index b91ef235fd0..a97e9411e5c 100644 --- a/mysql-test/t/gis-json.test +++ b/mysql-test/t/gis-json.test @@ -44,6 +44,8 @@ SELECT st_astext(st_geomfromgeojson('{"type": "MultiLineString","coordinates": [ SELECT st_astext(st_geomfromgeojson('{"type": "Polygon","coordinates": []}')) as a; SELECT st_astext(st_geomfromgeojson('{"type": "MultiPolygon","coordinates": []}')) as a; +SELECT ST_GEOMFROMGEOJSON("{ \"type\": \"Feature\", \"geometry\": [10, 20] }"); + --echo # --echo # End of 10.2 tests --echo # diff --git a/sql/spatial.cc b/sql/spatial.cc index 83905fc9f3d..840f8bd809c 100644 --- a/sql/spatial.cc +++ b/sql/spatial.cc @@ -605,6 +605,7 @@ Geometry *Geometry::create_from_json(Geometry_buffer *buffer, if (feature_type_found) goto handle_geometry_key; } + goto err_return; } else { From 48ac7e1a42ac932582d0650ac54ec8900ae801bd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Tue, 19 Jan 2021 11:53:22 +0200 Subject: [PATCH 080/150] MDEV-24609: innodb_io_capacity can exceed innodb_io_capacity_max innodb_io_capacity_update(): When the requested innodb_io_capacity exceeds innodb_io_capacity_max and is more than half the maximum, do not double it for computing innodb_io_capacity_max. This integer arithmetics overflow was introduced in commit 0f32299437a036ddaad04fe14a6ff63d15e3a72b (MDEV-7035). No test case is added, because sizeof(ulong) varies between platforms. --- storage/innobase/handler/ha_innodb.cc | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/storage/innobase/handler/ha_innodb.cc b/storage/innobase/handler/ha_innodb.cc index 70e4ba79bb7..c8be848fabe 100644 --- a/storage/innobase/handler/ha_innodb.cc +++ b/storage/innobase/handler/ha_innodb.cc @@ -17532,7 +17532,8 @@ innodb_io_capacity_update( " higher than innodb_io_capacity_max %lu", in_val, srv_max_io_capacity); - srv_max_io_capacity = in_val * 2; + srv_max_io_capacity = (in_val & ~(~0UL >> 1)) + ? in_val : in_val * 2; push_warning_printf(thd, Sql_condition::WARN_LEVEL_WARN, ER_WRONG_ARGUMENTS, From 3caccc7bcd7d28730d5d741480665c0b52135963 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Tue, 19 Jan 2021 14:36:36 +0200 Subject: [PATCH 081/150] Update InnoDB version number to 5.7.33 There are only two InnoDB changes between mysql-5.7.32 and mysql-5.7.33: mysql/mysql-server@95dc4f5f08e479262234498490de2d64cfe9ebc9 duplicates commit 8e8e65ed1c747a9b53b07ba5147103210b517c32 (MDEV-10829). mysql/mysql-server@26e849762f1b2f79434bd3e6e611beccec39efc9 could be an attempt to fix something that was fixed in commit dc58987eb7112bd60122114abd56e6f9438f457f (MDEV-22765). --- storage/innobase/include/univ.i | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/storage/innobase/include/univ.i b/storage/innobase/include/univ.i index 7819b0ae92b..fcdc8a5ae9f 100644 --- a/storage/innobase/include/univ.i +++ b/storage/innobase/include/univ.i @@ -1,7 +1,7 @@ /***************************************************************************** Copyright (c) 1994, 2016, Oracle and/or its affiliates. All Rights Reserved. -Copyright (c) 2013, 2020, MariaDB Corporation. +Copyright (c) 2013, 2021, MariaDB Corporation. Copyright (c) 2008, Google Inc. 
Portions of this file contain modifications contributed and copyrighted by @@ -41,7 +41,7 @@ Created 1/20/1994 Heikki Tuuri #define INNODB_VERSION_MAJOR 5 #define INNODB_VERSION_MINOR 7 -#define INNODB_VERSION_BUGFIX 32 +#define INNODB_VERSION_BUGFIX 33 /* The following is the InnoDB version as shown in SELECT plugin_version FROM information_schema.plugins; From b22285e4821b49546de9b88990bbc9c453dc14b2 Mon Sep 17 00:00:00 2001 From: Igor Babaev Date: Tue, 19 Jan 2021 08:02:37 -0800 Subject: [PATCH 082/150] MDEV-16940 Server crashes in unsafe_key_update upon attempt to update view through 2nd execution of SP This bug caused a server crash on the second call of any stored procedure that contained an UPDATE statement over a multi-table view reporting an error message at the prepare stage. On the first call of the stored procedure after reporting an error at the preparation stage of the UPDATE statement finished without calling the function SELECT_LEX::save_prep_leaf_tables() for the SELECT used as the definition of the view. This left the SELECT_LEX structure used by the UPDATE statement in an inconsistent state for second call of the stored procedure. Approved by Oleksandr Byelkin --- mysql-test/r/view.result | 16 ++++++++++++++++ mysql-test/t/view.test | 22 ++++++++++++++++++++++ sql/sql_lex.cc | 3 +++ sql/sql_update.cc | 3 +++ 4 files changed, 44 insertions(+) diff --git a/mysql-test/r/view.result b/mysql-test/r/view.result index 0da11c7c355..e48a99f6aff 100644 --- a/mysql-test/r/view.result +++ b/mysql-test/r/view.result @@ -6802,5 +6802,21 @@ drop database db1; create database test; use test; # +# MDEV-16940: update of multi-table view returning error used in SP +# +CREATE TABLE t1 (a INT) ENGINE=MyISAM; +INSERT INTO t1 VALUES (1), (2); +CREATE TABLE t2 (b INT) ENGINE=MyISAM; +INSERT INTO t2 VALUES (2), (3); +CREATE VIEW v1 AS SELECT a, b FROM t1,t2; +CREATE PROCEDURE sp1() UPDATE v1 SET a = 8, b = 9; +CALL sp1; +ERROR HY000: Can not modify more than one base table through a join view 'test.v1' +CALL sp1; +ERROR HY000: Can not modify more than one base table through a join view 'test.v1' +DROP PROCEDURE sp1; +DROP VIEW v1; +DROP TABLE t1, t2; +# # End of 10.2 tests # diff --git a/mysql-test/t/view.test b/mysql-test/t/view.test index 2486887600b..b5ce6a1cabf 100644 --- a/mysql-test/t/view.test +++ b/mysql-test/t/view.test @@ -6521,6 +6521,28 @@ drop database db1; create database test; use test; +--echo # +--echo # MDEV-16940: update of multi-table view returning error used in SP +--echo # + +CREATE TABLE t1 (a INT) ENGINE=MyISAM; +INSERT INTO t1 VALUES (1), (2); +CREATE TABLE t2 (b INT) ENGINE=MyISAM; +INSERT INTO t2 VALUES (2), (3); + +CREATE VIEW v1 AS SELECT a, b FROM t1,t2; + +CREATE PROCEDURE sp1() UPDATE v1 SET a = 8, b = 9; + +--error ER_VIEW_MULTIUPDATE +CALL sp1; +--error ER_VIEW_MULTIUPDATE +CALL sp1; + +DROP PROCEDURE sp1; +DROP VIEW v1; +DROP TABLE t1, t2; + --echo # --echo # End of 10.2 tests --echo # diff --git a/sql/sql_lex.cc b/sql/sql_lex.cc index 77e6b2b6571..5059e4f656e 100644 --- a/sql/sql_lex.cc +++ b/sql/sql_lex.cc @@ -4585,6 +4585,9 @@ bool LEX::save_prep_leaf_tables() bool st_select_lex::save_prep_leaf_tables(THD *thd) { + if (prep_leaf_list_state == SAVED) + return FALSE; + List_iterator_fast li(leaf_tables); TABLE_LIST *table; diff --git a/sql/sql_update.cc b/sql/sql_update.cc index 7454d16d55d..01743a6751e 100644 --- a/sql/sql_update.cc +++ b/sql/sql_update.cc @@ -1398,6 +1398,9 @@ bool Multiupdate_prelocking_strategy::handle_end(THD *thd) if 
(select_lex->handle_derived(thd->lex, DT_MERGE)) DBUG_RETURN(1); + if (thd->lex->save_prep_leaf_tables()) + DBUG_RETURN(1); + List *fields= &lex->select_lex.item_list; if (setup_fields_with_no_wrap(thd, Ref_ptr_array(), *fields, MARK_COLUMNS_WRITE, 0, 0)) From 9a07f30ba296c1431cc51fe6123bc31dd6aad05d Mon Sep 17 00:00:00 2001 From: Olivier Bertrand Date: Wed, 20 Jan 2021 19:19:54 +0100 Subject: [PATCH 083/150] Fix some Json and Bson bugs --- storage/connect/tabbson.cpp | 16 ++++++++++++++-- storage/connect/tabbson.h | 1 + storage/connect/tabjson.cpp | 12 ++++++++++-- storage/connect/tabjson.h | 6 ++++-- 4 files changed, 29 insertions(+), 6 deletions(-) diff --git a/storage/connect/tabbson.cpp b/storage/connect/tabbson.cpp index 309eef2e292..ba4380c5f89 100644 --- a/storage/connect/tabbson.cpp +++ b/storage/connect/tabbson.cpp @@ -784,6 +784,12 @@ PVAL BCUTIL::MakeBson(PGLOBAL g, PBVAL jsp) { if (Cp->Value->IsTypeNum()) { strcpy(g->Message, "Cannot make Json for a numeric column"); + + if (!Cp->Warned) { + PushWarning(g, Tp); + Cp->Warned = true; + } // endif Warned + Cp->Value->Reset(); #if 0 } else if (Value->GetType() == TYPE_BIN) { @@ -1635,6 +1641,7 @@ BSONCOL::BSONCOL(PGLOBAL g, PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i) Xnod = -1; Xpd = false; Parsed = false; + Warned = false; } // end of BSONCOL constructor /***********************************************************************/ @@ -1653,6 +1660,7 @@ BSONCOL::BSONCOL(BSONCOL* col1, PTDB tdbp) : DOSCOL(col1, tdbp) Xnod = col1->Xnod; Xpd = col1->Xpd; Parsed = col1->Parsed; + Warned = col1->Warned; } // end of BSONCOL copy constructor /***********************************************************************/ @@ -1986,8 +1994,10 @@ void BSONCOL::ReadColumn(PGLOBAL g) if (!Tbp->SameRow || Xnod >= Tbp->SameRow) Value->SetValue_pval(Cp->GetColumnValue(g, Tbp->Row, 0)); +#if defined(DEVELOPMENT) if (Xpd && Value->IsNull() && !((PBDEF)Tbp->To_Def)->Accept) - throw("Null expandable JSON value"); + htrc("Null expandable JSON value for column %s\n", Name); +#endif // DEVELOPMENT // Set null when applicable if (!Nullable) @@ -2274,8 +2284,10 @@ int TDBBSON::MakeDocument(PGLOBAL g) Docp = Bp->NewVal(TYPE_JAR); Bp->AddArrayValue(Docp, jsp); Bp->SetArrayValue(arp, Docp, i); - } else + } else { Top = Docp = Bp->NewVal(TYPE_JAR); + Bp->AddArrayValue(Docp, jsp); + } // endif's } // endif jsp diff --git a/storage/connect/tabbson.h b/storage/connect/tabbson.h index a53f33bd737..bb3f32bd945 100644 --- a/storage/connect/tabbson.h +++ b/storage/connect/tabbson.h @@ -265,6 +265,7 @@ protected: char Sep; // The Jpath separator bool Xpd; // True for expandable column bool Parsed; // True when parsed + bool Warned; // True when warning issued }; // end of class BSONCOL /* -------------------------- TDBBSON class -------------------------- */ diff --git a/storage/connect/tabjson.cpp b/storage/connect/tabjson.cpp index 0ef281f2aae..19f721f692b 100644 --- a/storage/connect/tabjson.cpp +++ b/storage/connect/tabjson.cpp @@ -1255,6 +1255,7 @@ JSONCOL::JSONCOL(PGLOBAL g, PCOLDEF cdp, PTDB tdbp, PCOL cprec, int i) Xnod = -1; Xpd = false; Parsed = false; + Warned = false; } // end of JSONCOL constructor /***********************************************************************/ @@ -1273,6 +1274,7 @@ JSONCOL::JSONCOL(JSONCOL *col1, PTDB tdbp) : DOSCOL(col1, tdbp) Xnod = col1->Xnod; Xpd = col1->Xpd; Parsed = col1->Parsed; + Warned = col1->Warned; } // end of JSONCOL copy constructor /***********************************************************************/ @@ -1606,6 
+1608,12 @@ PVAL JSONCOL::MakeJson(PGLOBAL g, PJSON jsp) { if (Value->IsTypeNum()) { strcpy(g->Message, "Cannot make Json for a numeric column"); + + if (!Warned) { + PushWarning(g, Tjp); + Warned = true; + } // endif Warned + Value->Reset(); #if 0 } else if (Value->GetType() == TYPE_BIN) { @@ -1703,8 +1711,8 @@ void JSONCOL::ReadColumn(PGLOBAL g) if (!Tjp->SameRow || Xnod >= Tjp->SameRow) Value->SetValue_pval(GetColumnValue(g, Tjp->Row, 0)); - if (Xpd && Value->IsNull() && !((PJDEF)Tjp->To_Def)->Accept) - throw("Null expandable JSON value"); +// if (Xpd && Value->IsNull() && !((PJDEF)Tjp->To_Def)->Accept) +// throw("Null expandable JSON value"); // Set null when applicable if (!Nullable) diff --git a/storage/connect/tabjson.h b/storage/connect/tabjson.h index 9b4f508880e..b47dc9b0665 100644 --- a/storage/connect/tabjson.h +++ b/storage/connect/tabjson.h @@ -192,7 +192,8 @@ protected: char Sep; // The Jpath separator bool Strict; // Strict syntax checking bool Comma; // Row has final comma - }; // end of class TDBJSN + bool Xpdable; // False: expandable columns are NULL +}; // end of class TDBJSN /* -------------------------- JSONCOL class -------------------------- */ @@ -247,7 +248,8 @@ public: char Sep; // The Jpath separator bool Xpd; // True for expandable column bool Parsed; // True when parsed - }; // end of class JSONCOL + bool Warned; // True when warning issued +}; // end of class JSONCOL /* -------------------------- TDBJSON class -------------------------- */ From f2fea295b4567cb919437d047b55e32ffdf26840 Mon Sep 17 00:00:00 2001 From: Daniel Black Date: Thu, 21 Jan 2021 16:46:59 +1100 Subject: [PATCH 084/150] ucs2: cppcheck - add va_end --- strings/ctype-ucs2.c | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/strings/ctype-ucs2.c b/strings/ctype-ucs2.c index c5182911c4a..29d56919633 100644 --- a/strings/ctype-ucs2.c +++ b/strings/ctype-ucs2.c @@ -1160,9 +1160,12 @@ static size_t my_snprintf_mb2(CHARSET_INFO *cs __attribute__((unused)), char* to, size_t n, const char* fmt, ...) { + size_t ret; va_list args; va_start(args,fmt); - return my_vsnprintf_mb2(to, n, fmt, args); + ret= my_vsnprintf_mb2(to, n, fmt, args); + va_end(args); + return ret; } @@ -2424,9 +2427,12 @@ static size_t my_snprintf_utf32(CHARSET_INFO *cs __attribute__((unused)), char* to, size_t n, const char* fmt, ...) { + size_t ret; va_list args; va_start(args,fmt); - return my_vsnprintf_utf32(to, n, fmt, args); + ret= my_vsnprintf_utf32(to, n, fmt, args); + va_end(args); + return ret; } From 53acd1c1d88be82190c56af3e4cc11fb2770a169 Mon Sep 17 00:00:00 2001 From: Daniel Black Date: Thu, 21 Jan 2021 16:20:57 +1100 Subject: [PATCH 085/150] maria: ma_recovery cppcheck va_start called twice Per cppcheck, va_start is called twice which it is. Remove the second instance. 
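[Editor's note] The two cppcheck fixes above and below come down to the same varargs rule: every va_start() must be balanced by exactly one va_end() on every return path, and a live va_list must not be re-initialized by a second va_start(). A minimal sketch of the corrected wrapper shape, with generic names rather than the actual charset/recovery functions:

    #include <cstdarg>
    #include <cstdio>

    /* Generic stand-in for a my_vsnprintf_*-style callee. */
    static size_t wrapped_vsnprintf(char *to, size_t n, const char *fmt, va_list args)
    {
      int len = std::vsnprintf(to, n, fmt, args);
      return len < 0 ? 0 : static_cast<size_t>(len);
    }

    /* Generic stand-in for a my_snprintf_*-style wrapper. */
    static size_t wrapped_snprintf(char *to, size_t n, const char *fmt, ...)
    {
      size_t ret;
      va_list args;
      va_start(args, fmt);                         /* initialize once ...            */
      ret = wrapped_vsnprintf(to, n, fmt, args);   /* ... hand off to the v-variant  */
      va_end(args);                                /* ... and always release it      */
      return ret;                                  /* "return callee(args);" would
                                                      have skipped the va_end()      */
    }

    int main()
    {
      char buf[32];
      wrapped_snprintf(buf, sizeof buf, "%s:%d", "offset", 42);
      std::puts(buf);
      return 0;
    }

Returning the callee's result directly is what previously skipped va_end() in ctype-ucs2.c; capturing it in a local first, as the patch does, is the simplest way to keep the cleanup on the return path.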
--- storage/maria/ma_recovery_util.c | 1 - 1 file changed, 1 deletion(-) diff --git a/storage/maria/ma_recovery_util.c b/storage/maria/ma_recovery_util.c index 3b617f625f0..9443ba90f6c 100644 --- a/storage/maria/ma_recovery_util.c +++ b/storage/maria/ma_recovery_util.c @@ -95,7 +95,6 @@ void eprint(FILE *trace_file __attribute__ ((unused)), fputc('\n', trace_file); if (trace_file != stderr) { - va_start(args, format); my_printv_error(HA_ERR_INITIALIZATION, format, MYF(0), args); } va_end(args); From eb75e8705d9a444e10057967eaebf947b1115ff8 Mon Sep 17 00:00:00 2001 From: Sujatha Date: Thu, 7 Jan 2021 17:34:57 +0530 Subject: [PATCH 086/150] MDEV-8134: The relay-log is not flushed after the slave-relay-log.999999 showed Problem: ======== Auto purge of relaylogs stops when relay-log-file is 'slave-relay-log.999999' and slave_parallel_threads is enabled. Analysis: ========= The problem is that in Relay_log_info::inc_group_relay_log_pos() function, when two log names are compared via strcmp() function, it gives correct result, when log name sequence numbers are of same digits(6 digits), But when the number goes to 7 digits, a 999999 compares greater than 1000000, which is wrong, hence the bug. Fix: ==== Extract the numeric extension part of the file name, convert it into unsigned long and compare. Thanks to David Zhao for the contribution. --- .../rpl/r/rpl_relay_max_extension.result | 37 ++++++ .../suite/rpl/t/rpl_relay_max_extension.test | 109 ++++++++++++++++++ sql/rpl_parallel.cc | 5 +- sql/rpl_rli.cc | 4 +- sql/sql_repl.cc | 17 +++ sql/sql_repl.h | 1 + 6 files changed, 169 insertions(+), 4 deletions(-) create mode 100644 mysql-test/suite/rpl/r/rpl_relay_max_extension.result create mode 100644 mysql-test/suite/rpl/t/rpl_relay_max_extension.test diff --git a/mysql-test/suite/rpl/r/rpl_relay_max_extension.result b/mysql-test/suite/rpl/r/rpl_relay_max_extension.result new file mode 100644 index 00000000000..4444398203e --- /dev/null +++ b/mysql-test/suite/rpl/r/rpl_relay_max_extension.result @@ -0,0 +1,37 @@ +include/rpl_init.inc [topology=1->2] +connection server_2; +include/stop_slave.inc +RESET SLAVE; +include/start_slave.inc +include/stop_slave.inc +# +# Stop slave server +# +# +# Simulate file number get close to 999997 +# by renaming relay logs and modifying index/info files +# +# Restart slave server +# +SET @save_slave_parallel_threads= @@GLOBAL.slave_parallel_threads; +SET @save_max_relay_log_size= @@GLOBAL.max_relay_log_size; +SET GLOBAL slave_parallel_threads=1; +SET GLOBAL max_relay_log_size=100 * 1024; +include/start_slave.inc +connection server_1; +create table t1 (i int, c varchar(1024)); +# +# Insert some data to generate enough amount of binary logs +# +connection server_2; +# +# Assert that 'slave-relay-bin.999999' is purged. +# +NOT FOUND /slave-relay-bin.999999/ in slave-relay-bin.index +include/stop_slave.inc +SET GLOBAL slave_parallel_threads= @save_slave_parallel_threads; +SET GLOBAL max_relay_log_size= @save_max_relay_log_size; +include/start_slave.inc +connection server_1; +DROP TABLE t1; +include/rpl_end.inc diff --git a/mysql-test/suite/rpl/t/rpl_relay_max_extension.test b/mysql-test/suite/rpl/t/rpl_relay_max_extension.test new file mode 100644 index 00000000000..e1e087f2e0e --- /dev/null +++ b/mysql-test/suite/rpl/t/rpl_relay_max_extension.test @@ -0,0 +1,109 @@ +# ==== Purpose ==== +# +# Test verifies that auto purging mechanism of relay logs works fine when the +# file extension grows beyond 999999. 
+# +# ==== Implementation ==== +# +# Steps: +# 0 - In master-slave setup clear all the relay logs on the slave server. +# 1 - Start the slave so that new relay logs starting from +# 'slave-relay-bin.000001' are created. +# 2 - Get the active relay-log file name by using SHOW SLAVE STATUS. +# Shutdown the slave server. +# 3 - Rename active relay log to '999997' in both 'relay-log.info' and +# 'slave-relay-bin.index' files. +# 4 - Restart the slave server by configuring 'slave_parallel_threads=1' +# and 'max_relay_log_size=100K'. +# 5 - Generate load on master such that few relay logs are generated on +# slave. The relay log sequence number will change to 7 digits. +# 6 - Sync slave with master to ensure that relay logs are applied on +# slave. They should have been automatically purged. +# 7 - Assert that there is no 'slave-relay-bin.999999' file in +# 'relay-log.info'. +# +# ==== References ==== +# +# MDEV-8134: The relay-log is not flushed after the slave-relay-log.999999 +# showed +# + +--source include/have_innodb.inc +--source include/have_binlog_format_row.inc +--let $rpl_topology=1->2 +--source include/rpl_init.inc + +--connection server_2 +--source include/stop_slave.inc +RESET SLAVE; +--source include/start_slave.inc +--source include/stop_slave.inc +--let $relay_log=query_get_value(SHOW SLAVE STATUS, Relay_Log_File, 1) + +--echo # +--echo # Stop slave server +--echo # + +--let $datadir = `select @@datadir` +--exec echo "wait" > $MYSQLTEST_VARDIR/tmp/mysqld.2.expect +--shutdown_server 10 +--source include/wait_until_disconnected.inc + +--exec sed -i "s/$relay_log/slave-relay-bin.999997/g" $datadir/relay-log.info +--exec sed -i "s/$relay_log/slave-relay-bin.999997/g" $datadir/slave-relay-bin.index + +--echo # +--echo # Simulate file number get close to 999997 +--echo # by renaming relay logs and modifying index/info files + +--move_file $datadir/$relay_log $datadir/slave-relay-bin.999997 + +--echo # +--echo # Restart slave server +--echo # + +--exec echo "restart" > $MYSQLTEST_VARDIR/tmp/mysqld.2.expect +--enable_reconnect +--source include/wait_until_connected_again.inc +SET @save_slave_parallel_threads= @@GLOBAL.slave_parallel_threads; +SET @save_max_relay_log_size= @@GLOBAL.max_relay_log_size; + +SET GLOBAL slave_parallel_threads=1; +SET GLOBAL max_relay_log_size=100 * 1024; +--source include/start_slave.inc + +--connection server_1 +create table t1 (i int, c varchar(1024)); +--echo # +--echo # Insert some data to generate enough amount of binary logs +--echo # +--let $count = 1000 +--disable_query_log +while ($count) +{ + eval insert into t1 values (1001 - $count, repeat('a',1000)); + dec $count; +} +--enable_query_log +--save_master_pos + +--connection server_2 +--sync_with_master + +--let $relay_log=query_get_value(SHOW SLAVE STATUS, Relay_Log_File, 1) + +--echo # +--echo # Assert that 'slave-relay-bin.999999' is purged. 
+--echo # +let SEARCH_FILE=$datadir/slave-relay-bin.index; +let SEARCH_PATTERN=slave-relay-bin.999999; +source include/search_pattern_in_file.inc; + +--source include/stop_slave.inc +SET GLOBAL slave_parallel_threads= @save_slave_parallel_threads; +SET GLOBAL max_relay_log_size= @save_max_relay_log_size; +--source include/start_slave.inc + +--connection server_1 +DROP TABLE t1; +--source include/rpl_end.inc diff --git a/sql/rpl_parallel.cc b/sql/rpl_parallel.cc index 4cf87ba73b7..869640fd46f 100644 --- a/sql/rpl_parallel.cc +++ b/sql/rpl_parallel.cc @@ -4,6 +4,7 @@ #include "rpl_mi.h" #include "sql_parse.h" #include "debug_sync.h" +#include "sql_repl.h" /* Code for optional parallel execution of replicated events on the slave. @@ -82,7 +83,7 @@ handle_queued_pos_update(THD *thd, rpl_parallel_thread::queued_event *qev) return; mysql_mutex_lock(&rli->data_lock); - cmp= strcmp(rli->group_relay_log_name, qev->event_relay_log_name); + cmp= compare_log_name(rli->group_relay_log_name, qev->event_relay_log_name); if (cmp < 0) { rli->group_relay_log_pos= qev->future_event_relay_log_pos; @@ -91,7 +92,7 @@ handle_queued_pos_update(THD *thd, rpl_parallel_thread::queued_event *qev) rli->group_relay_log_pos < qev->future_event_relay_log_pos) rli->group_relay_log_pos= qev->future_event_relay_log_pos; - cmp= strcmp(rli->group_master_log_name, qev->future_event_master_log_name); + cmp= compare_log_name(rli->group_master_log_name, qev->future_event_master_log_name); if (cmp < 0) { strcpy(rli->group_master_log_name, qev->future_event_master_log_name); diff --git a/sql/rpl_rli.cc b/sql/rpl_rli.cc index 40ab375571a..5273b33c728 100644 --- a/sql/rpl_rli.cc +++ b/sql/rpl_rli.cc @@ -989,7 +989,7 @@ void Relay_log_info::inc_group_relay_log_pos(ulonglong log_pos, if (rgi->is_parallel_exec) { /* In case of parallel replication, do not update the position backwards. */ - int cmp= strcmp(group_relay_log_name, rgi->event_relay_log_name); + int cmp= compare_log_name(group_relay_log_name, rgi->event_relay_log_name); if (cmp < 0) { group_relay_log_pos= rgi->future_event_relay_log_pos; @@ -1001,7 +1001,7 @@ void Relay_log_info::inc_group_relay_log_pos(ulonglong log_pos, In the parallel case we need to update the master_log_name here, rather than in Rotate_log_event::do_update_pos(). */ - cmp= strcmp(group_master_log_name, rgi->future_event_master_log_name); + cmp= compare_log_name(group_master_log_name, rgi->future_event_master_log_name); if (cmp <= 0) { if (cmp < 0) diff --git a/sql/sql_repl.cc b/sql/sql_repl.cc index 4af8ebc2dd8..59a3f686e45 100644 --- a/sql/sql_repl.cc +++ b/sql/sql_repl.cc @@ -4541,5 +4541,22 @@ rpl_gtid_pos_update(THD *thd, char *str, size_t len) return false; } +int compare_log_name(const char *log_1, const char *log_2) { + int res= 1; + const char *ext1_str= strrchr(log_1, '.'); + const char *ext2_str= strrchr(log_2, '.'); + char file_name_1[255], file_name_2[255]; + strmake(file_name_1, log_1, (ext1_str - log_1)); + strmake(file_name_2, log_2, (ext2_str - log_2)); + char *endptr = NULL; + res= strcmp(file_name_1, file_name_2); + if (!res) + { + ulong ext1= strtoul(++ext1_str, &endptr, 10); + ulong ext2= strtoul(++ext2_str, &endptr, 10); + res= (ext1 > ext2 ? 1 : ((ext1 == ext2) ? 
0 : -1)); + } + return res; +} #endif /* HAVE_REPLICATION */ diff --git a/sql/sql_repl.h b/sql/sql_repl.h index 8ddfa9239f6..9129aaeed5e 100644 --- a/sql/sql_repl.h +++ b/sql/sql_repl.h @@ -56,6 +56,7 @@ bool show_binlogs(THD* thd); extern int init_master_info(Master_info* mi); void kill_zombie_dump_threads(uint32 slave_server_id); int check_binlog_magic(IO_CACHE* log, const char** errmsg); +int compare_log_name(const char *log_1, const char *log_2); struct LOAD_FILE_IO_CACHE : public IO_CACHE { From 29d9897fe2f46bf72356671ee3ad094abfe032c3 Mon Sep 17 00:00:00 2001 From: Daniel Black Date: Wed, 22 Jun 2016 15:52:07 +0200 Subject: [PATCH 087/150] MDEV-10272: add master host/port info to slave thread exit messages Sample log error message generated: 2021-01-21 2:33:24 139912137520896 [Note] Slave SQL thread exiting, replication stopped in log 'master-bin.000001' at position 369 33:24 139912137520896 [Note] master was 127.0.0.1:16400 2021-01-21 2:33:24 139912137828096 [Note] Slave I/O thread exiting, read up to log 'master-bin.000001', position 369 2021-01-21 2:33:24 139912137828096 [Note] master was 127.0.0.1:16400 Based on work by Hartmut Holzgraefe. Reviewer: knielsen@knielsen-hq.org, Andrei, Sachin --- sql/slave.cc | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/sql/slave.cc b/sql/slave.cc index a2c35e5f116..5685769bbfb 100644 --- a/sql/slave.cc +++ b/sql/slave.cc @@ -4646,8 +4646,11 @@ log space"); err: // print the current replication position if (mi->using_gtid == Master_info::USE_GTID_NO) + { sql_print_information("Slave I/O thread exiting, read up to log '%s', " "position %llu", IO_RPL_LOG_NAME, mi->master_log_pos); + sql_print_information("master was %s:%d", mi->host, mi->port); + } else { StringBuffer<100> tmp; @@ -4656,6 +4659,7 @@ err: "position %llu; GTID position %s", IO_RPL_LOG_NAME, mi->master_log_pos, tmp.c_ptr_safe()); + sql_print_information("master was %s:%d", mi->host, mi->port); } RUN_HOOK(binlog_relay_io, thread_stop, (thd, mi)); thd->reset_query(); @@ -5244,6 +5248,7 @@ pthread_handler_t handle_slave_sql(void *arg) sql_print_information("Slave SQL thread exiting, replication stopped in " "log '%s' at position %llu%s", RPL_LOG_NAME, rli->group_master_log_pos, tmp.c_ptr_safe()); + sql_print_information("master was %s:%d", mi->host, mi->port); } err_before_start: From 63db583158fd657fff929ef39f8918bf9a4bca3b Mon Sep 17 00:00:00 2001 From: Daniel Black Date: Fri, 22 Jan 2021 16:31:18 +1100 Subject: [PATCH 088/150] man/mysqldump.1: typos INSERT INFO -> INTO --- man/mysqldump.1 | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/man/mysqldump.1 b/man/mysqldump.1 index ee2048d0008..097cd1ede20 100644 --- a/man/mysqldump.1 +++ b/man/mysqldump.1 @@ -2261,7 +2261,7 @@ servers \- remote (federated) servers as \fBCREATE SERVER\fR\&. .sp -1 .IP \(bu 2.3 .\} -stats \- statistics tables, InnoDB and Engine Independent Table Statistics (EITS), are dumped as \fBINSERT\fR/\fBREPLACE INFO\fR statements without (re)creating tables\&. +stats \- statistics tables, InnoDB and Engine Independent Table Statistics (EITS), are dumped as \fBINSERT\fR/\fBREPLACE INTO\fR statements without (re)creating tables\&. .RE .RS 4 .ie n \{\ @@ -2281,7 +2281,7 @@ With \fB\-\-system=user\fR (or \fBall\fR), and \fB\-\-replace\fR, SQL is generat .sp The \fB\-\-insert\-into\fR option will cause \fBCREATE IF NOT EXIST\fR forms of SQL to generated if available. .sp -For stats, and timezones, \fB\-\-replace\fR and \fB\-\-insert\-info\fR have the usual effects. 
+For stats, and timezones, \fB\-\-replace\fR and \fB\-\-insert\-into\fR have the usual effects. .sp Enabling specific options here will cause the relevant tables in the mysql database to be ignored when dumping the mysql database or \fB\-\-all\-databases\fR\&. .sp From 103200b380798e01419c7a27802f093df990f222 Mon Sep 17 00:00:00 2001 From: Daniel Black Date: Fri, 22 Jan 2021 16:41:40 +1100 Subject: [PATCH 089/150] MDEV-24557: Logical dump of MySQL users via MariaDB's mariadb-dump generates invalid commands Clarified working contributed by Vicentiu --- man/mysqldump.1 | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/man/mysqldump.1 b/man/mysqldump.1 index 097cd1ede20..a7defd8bf47 100644 --- a/man/mysqldump.1 +++ b/man/mysqldump.1 @@ -2285,8 +2285,7 @@ For stats, and timezones, \fB\-\-replace\fR and \fB\-\-insert\-into\fR have the .sp Enabling specific options here will cause the relevant tables in the mysql database to be ignored when dumping the mysql database or \fB\-\-all\-databases\fR\&. .sp -Experimentally this option is designed to be able to dump system information from MySQL-5\&.7 and 8\&.0 servers\&. SQL generated is also -experimentally compatible with MySQL-5\&.7/8\&.0\&. Mappings of implemenation specific grants/plugins isn't always one-to-one however\&. +To help in migrating from MySQL to MariaDB, this option is designed to be able to dump system information from MySQL-5\&.7 and 8\&.0 servers\&. SQL generated is also experimentally compatible with MySQL-5\&.7/8\&.0. Mappings of implementation specific grants/plugins isn't always one-to-one however between MariaDB and MySQL and will require manual changes\&. .sp .RE .RS 4 From 7d528881496e761a6188edf17bf125163a765c1e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jan=20Lindstr=C3=B6m?= Date: Fri, 22 Jan 2021 08:34:09 +0200 Subject: [PATCH 090/150] MDEV-23659 : Update Galera disabled.def file Update * galera/disabled.def * galera_3_nodes/disabled.def --- mysql-test/suite/galera/disabled.def | 4 ++++ mysql-test/suite/galera_3nodes/disabled.def | 10 +++++++++- 2 files changed, 13 insertions(+), 1 deletion(-) diff --git a/mysql-test/suite/galera/disabled.def b/mysql-test/suite/galera/disabled.def index f53fbd2b08c..7fe03a94220 100644 --- a/mysql-test/suite/galera/disabled.def +++ b/mysql-test/suite/galera/disabled.def @@ -15,9 +15,12 @@ MW-328A : MDEV-22666 galera.MW-328A MTR failed: "Semaphore wait has lasted > 600 MW-328B : MDEV-22666 galera.MW-328A MTR failed: "Semaphore wait has lasted > 600 seconds" and do not release port 16002 MW-329 : MDEV-19962 Galera test failure on MW-329 galera_as_slave_replication_bundle : MDEV-15785 OPTION_GTID_BEGIN is set in Gtid_log_event::do_apply_event() +galera_bf_lock_wait : MDEV-24649 galera.galera_bf_lock_wait MTR failed with sigabrt: Assertion `!is_owned()' failed in sync0policy.ic on MutexDebug with Mutex = TTASEventMutex galera_binlog_stmt_autoinc : MDEV-19959 Galera test failure on galera_binlog_stmt_autoinc galera_gcache_recover_manytrx : MDEV-18834 Galera test failure +galera_mdl_race : MDEV-21524: galera.galera_mdl_race MTR failed: query 'reap' succeeded - should have failed with errno 1213 galera_parallel_simple : MDEV-20318 galera.galera_parallel_simple fails +galera_partition : MDEV-21806: galera.galera_partition MTR failed: failed to recover from DONOR state galera_shutdown_nonprim : MDEV-21493 galera.galera_shutdown_nonprim galera_ssl_upgrade : MDEV-19950 Galera test failure on galera_ssl_upgrade galera_sst_mariabackup_encrypt_with_key : MDEV-21484 
galera_sst_mariabackup_encrypt_with_key @@ -26,3 +29,4 @@ galera_wan : MDEV-17259 Test failure on galera.galera_wan partition : MDEV-19958 Galera test failure on galera.partition query_cache: MDEV-15805 Test failure on galera.query_cache sql_log_bin : MDEV-21491 galera.sql_log_bin +versioning_trx_id: MDEV-18590: galera.versioning_trx_id: Test failure: mysqltest: Result content mismatch diff --git a/mysql-test/suite/galera_3nodes/disabled.def b/mysql-test/suite/galera_3nodes/disabled.def index 0e2706f2dc3..884c2a8c82a 100644 --- a/mysql-test/suite/galera_3nodes/disabled.def +++ b/mysql-test/suite/galera_3nodes/disabled.def @@ -10,7 +10,15 @@ # ############################################################################## +GAL-501 : MDEV-24645 galera_3nodes.GAL-501 MTR failed: failed to open gcomm backend connection: 110 galera_gtid_2_cluster : MDEV-23775 Galera test failure on galera_3nodes.galera_gtid_2_cluster +galera_ipv6_mariabackup : MDEV-24440: galera_3nodes.galera_ipv6_mariabackup MTR fails sporadically: Failed to read from: wsrep_sst_mariabackup --role 'donor' --address '[::1]:16028/xtrabackup_sst//1' +galera_ipv6_mariabackup_section : MDEV-22195: galera_3nodes.galera_ipv6_mariabackup_section MTR failed: assert_grep.inc failed +galera_ipv6_mysqldump : MDEV-24036: galera_3nodes.galera_ipv6_mysqldump: rare random crashes during shutdown +galera_ipv6_rsync_section : MDEV-23580: galera_3nodes.galera_ipv6_rsync_section MTR failed: WSREP_SST: [ERROR] rsync daemon port '16008' has been taken galera_ist_gcache_rollover : MDEV-23578 WSREP: exception caused by message: {v=0,t=1,ut=255,o=4,s=0,sr=0,as=1,f=6,src=50524cfe,srcvid=view_id(REG,50524cfe,4),insvid=view_id(UNKNOWN,00000000,0),ru=00000000,r=[-1,-1],fs=75,nl=(} -galera_slave_options_do :MDEV-8798 +galera_load_data_ist : MDEV-24639 galera_3nodes.galera_load_data_ist MTR failed with SIGABRT: query 'reap' failed: 2013: Lost connection to MySQL server during query +galera_load_data_ist : MDEV-24639 galera_3nodes.galera_load_data_ist MTR failed with SIGABRT: query 'reap' failed: 2013: Lost connection to MySQL server during query +galera_pc_bootstrap : MDEV-24650 galera_pc_bootstrap MTR failed: Could not execute 'check-testcase' before testcase +galera_slave_options_do : MDEV-8798 galera_slave_options_ignore : MDEV-8798 From 0d9c9f49bd62a86055aebc00a68f0e4645014637 Mon Sep 17 00:00:00 2001 From: Alice Sherepa Date: Thu, 21 Jan 2021 18:40:03 +0100 Subject: [PATCH 091/150] reenable rpl_spec_variables.test --- .../rpl_tests/rpl_binlog_max_cache_size.test | 4 +- mysql-test/suite/rpl/disabled.def | 3 +- .../suite/rpl/r/rpl_spec_variables.result | 60 ++++++++++++++++--- .../suite/rpl/t/rpl_spec_variables-slave.opt | 1 - .../suite/rpl/t/rpl_spec_variables.test | 8 ++- 5 files changed, 61 insertions(+), 15 deletions(-) delete mode 100644 mysql-test/suite/rpl/t/rpl_spec_variables-slave.opt diff --git a/mysql-test/extra/rpl_tests/rpl_binlog_max_cache_size.test b/mysql-test/extra/rpl_tests/rpl_binlog_max_cache_size.test index 0f46b00f683..4c93ad86209 100644 --- a/mysql-test/extra/rpl_tests/rpl_binlog_max_cache_size.test +++ b/mysql-test/extra/rpl_tests/rpl_binlog_max_cache_size.test @@ -49,14 +49,14 @@ connection master; --echo *** Single statement on transactional table *** --disable_query_log ---error ER_TRANS_CACHE_FULL, ER_STMT_CACHE_FULL, ER_ERROR_ON_WRITE +--error ER_TRANS_CACHE_FULL, ER_STMT_CACHE_FULL, ER_ERROR_ON_WRITE, 1534 eval INSERT INTO t1 (a, data) VALUES (1, CONCAT($data, $data, $data, $data, $data)); --enable_query_log --echo *** Single 
statement on non-transactional table *** --disable_query_log ---error ER_TRANS_CACHE_FULL, ER_STMT_CACHE_FULL, ER_ERROR_ON_WRITE +--error ER_TRANS_CACHE_FULL, ER_STMT_CACHE_FULL, ER_ERROR_ON_WRITE, 1534 eval INSERT INTO t2 (a, data) VALUES (2, CONCAT($data, $data, $data, $data, $data, $data)); --enable_query_log diff --git a/mysql-test/suite/rpl/disabled.def b/mysql-test/suite/rpl/disabled.def index 2bf2a69bd62..4508c8e4ca6 100644 --- a/mysql-test/suite/rpl/disabled.def +++ b/mysql-test/suite/rpl/disabled.def @@ -10,8 +10,7 @@ # ############################################################################## -rpl_spec_variables : BUG#11755836 2009-10-27 jasonh rpl_spec_variables fails on PB2 hpux -#rpl_get_master_version_and_clock : Bug#11766137 Jan 05 2011 joro Valgrind warnings rpl_get_master_version_and_clock +#rpl_get_master_version_and_clock : Bug#11766137 Jan 05 2011 joro Valgrind warnings rpl_partition_archive : MDEV-5077 2013-09-27 svoj Cannot exchange partition with archive table rpl_row_binlog_max_cache_size : MDEV-11092 rpl_row_index_choice : MDEV-11666 diff --git a/mysql-test/suite/rpl/r/rpl_spec_variables.result b/mysql-test/suite/rpl/r/rpl_spec_variables.result index 96f63a50ea9..8b4c398f308 100644 --- a/mysql-test/suite/rpl/r/rpl_spec_variables.result +++ b/mysql-test/suite/rpl/r/rpl_spec_variables.result @@ -2,14 +2,17 @@ include/master-slave.inc [connection master] * auto_increment_increment, auto_increment_offset * +connection master; SET @@global.auto_increment_increment=2; SET @@session.auto_increment_increment=2; SET @@global.auto_increment_offset=10; SET @@session.auto_increment_offset=10; +connection slave; SET @@global.auto_increment_increment=3; SET @@session.auto_increment_increment=3; SET @@global.auto_increment_offset=20; SET @@session.auto_increment_offset=20; +connection master; CREATE TABLE t1 (a INT NOT NULL AUTO_INCREMENT PRIMARY KEY, b VARCHAR(10)) ENGINE=MyISAM; INSERT INTO t1 (b) VALUES ('master'); INSERT INTO t1 (b) VALUES ('master'); @@ -17,6 +20,7 @@ SELECT * FROM t1 ORDER BY a; a b 2 master 4 master +connection slave; CREATE TABLE t2 (a INT NOT NULL AUTO_INCREMENT PRIMARY KEY, b VARCHAR(10)) ENGINE=MyISAM; INSERT INTO t1 (b) VALUES ('slave'); INSERT INTO t1 (b) VALUES ('slave'); @@ -32,102 +36,123 @@ SELECT * FROM t2 ORDER BY a; a b 1 slave 4 slave +connection master; DROP TABLE IF EXISTS t1,t2; SET @@global.auto_increment_increment=1; SET @@session.auto_increment_increment=1; SET @@global.auto_increment_offset=1; SET @@session.auto_increment_offset=1; +connection slave; SET @@global.auto_increment_increment=1; SET @@session.auto_increment_increment=1; SET @@global.auto_increment_offset=1; SET @@session.auto_increment_offset=1; +connection slave; SET auto_increment_increment=1; SET auto_increment_offset=1; * character_set_database, collation_server * +connection master; SET @restore_master_character_set_database=@@global.character_set_database; SET @restore_master_collation_server=@@global.collation_server; SET @@global.character_set_database=latin1; SET @@session.character_set_database=latin1; SET @@global.collation_server=latin1_german1_ci; SET @@session.collation_server=latin1_german1_ci; +connection slave; SET @restore_slave_character_set_database=@@global.character_set_database; SET @restore_slave_collation_server=@@global.collation_server; SET @@global.character_set_database=utf8; SET @@session.character_set_database=utf8; SET @@global.collation_server=utf8_bin; SET @@session.collation_server=utf8_bin; +connection master; CREATE TABLE t1 (a INT 
NOT NULL PRIMARY KEY, b VARCHAR(10)) ENGINE=MyISAM; SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) NOT NULL, - `b` varchar(10) COLLATE latin1_german1_ci DEFAULT NULL, + `b` varchar(10) DEFAULT NULL, PRIMARY KEY (`a`) -) ENGINE=MyISAM DEFAULT CHARSET=latin1 COLLATE=latin1_german1_ci +) ENGINE=MyISAM DEFAULT CHARSET=latin1 +connection slave; CREATE TABLE t2 (a INT NOT NULL PRIMARY KEY, b VARCHAR(10)) ENGINE=MyISAM; SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( `a` int(11) NOT NULL, - `b` varchar(10) COLLATE latin1_german1_ci DEFAULT NULL, + `b` varchar(10) DEFAULT NULL, PRIMARY KEY (`a`) -) ENGINE=MyISAM DEFAULT CHARSET=latin1 COLLATE=latin1_german1_ci +) ENGINE=MyISAM DEFAULT CHARSET=latin1 SHOW CREATE TABLE t2; Table Create Table t2 CREATE TABLE `t2` ( `a` int(11) NOT NULL, - `b` varchar(10) COLLATE utf8_bin DEFAULT NULL, + `b` varchar(10) DEFAULT NULL, PRIMARY KEY (`a`) -) ENGINE=MyISAM DEFAULT CHARSET=utf8 COLLATE=utf8_bin +) ENGINE=MyISAM DEFAULT CHARSET=latin1 SET @@global.collation_server=latin1_swedish_ci; SET @@session.collation_server=latin1_swedish_ci; +connection master; SET @@global.collation_server=latin1_swedish_ci; SET @@session.collation_server=latin1_swedish_ci; DROP TABLE IF EXISTS t1,t2; * default_week_format * +connection master; SET @@global.default_week_format=0; SET @@session.default_week_format=0; +connection slave; SET @@global.default_week_format=1; SET @@session.default_week_format=1; +connection master; CREATE TABLE t1 (a INT NOT NULL PRIMARY KEY, b VARCHAR(10), c INT) ENGINE=MyISAM; INSERT INTO t1 VALUES (1, 'master ', WEEK('2008-01-07')); SELECT * FROM t1 ORDER BY a; a b c 1 master 1 +connection slave; INSERT INTO t1 VALUES (2, 'slave ', WEEK('2008-01-07')); SELECT * FROM t1 ORDER BY a; a b c 1 master 1 2 slave 2 +connection master; DROP TABLE t1; +connection slave; SET @@global.default_week_format=0; SET @@session.default_week_format=0; * local_infile * +connection slave; SET @@global.local_infile=0; +connection master; CREATE TABLE t1 (a INT NOT NULL AUTO_INCREMENT PRIMARY KEY, b VARCHAR(20), c CHAR(254)) ENGINE=MyISAM; LOAD DATA LOCAL INFILE 'FILE' INTO TABLE t1 (b); SELECT COUNT(*) FROM t1; COUNT(*) 70 +connection slave; LOAD DATA LOCAL INFILE 'FILE2' INTO TABLE t1 (b); -ERROR 42000: The used command is not allowed with this MySQL version +ERROR 42000: The used command is not allowed with this MariaDB version SELECT COUNT(*) FROM t1; COUNT(*) 70 SET @@global.local_infile=1; +connection master; DROP TABLE t1; * max_heap_table_size * +connection slave; SET @restore_slave_max_heap_table_size=@@global.max_heap_table_size; SET @@global.max_heap_table_size=16384; SET @@session.max_heap_table_size=16384; +connection master; CREATE TABLE t1 (a INT NOT NULL AUTO_INCREMENT PRIMARY KEY, b VARCHAR(10), c CHAR(254)) ENGINE=MEMORY; SELECT COUNT(*)=2000 FROM t1; COUNT(*)=2000 1 +connection slave; SELECT COUNT(*)=2000 FROM t1 WHERE b='master' GROUP BY b ORDER BY b; COUNT(*)=2000 1 @@ -137,18 +162,24 @@ COUNT(*)<2000 AND COUNT(*)>0 SELECT COUNT(*)<2000 AND COUNT(*)>0 FROM t2 WHERE b='slave' GROUP BY b ORDER BY b; COUNT(*)<2000 AND COUNT(*)>0 1 +connection master; DROP TABLE IF EXISTS t1,t2; * storage_engine * +connection master; SET @restore_master_storage_engine=@@global.storage_engine; SET @@global.storage_engine=InnoDB; SET @@session.storage_engine=InnoDB; +connection slave; SET @restore_slave_storage_engine=@@global.storage_engine; SET @@global.storage_engine=Memory; SET @@session.storage_engine=Memory; +connection master; 
CREATE TABLE t1 (a INT NOT NULL PRIMARY KEY, b VARCHAR(10)); CREATE TABLE t2 (a INT NOT NULL PRIMARY KEY, b VARCHAR(10)) ENGINE=InnoDB; +connection slave; CREATE TABLE t3 (a INT NOT NULL PRIMARY KEY, b VARCHAR(10)); +connection master; SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( @@ -163,6 +194,7 @@ t2 CREATE TABLE `t2` ( `b` varchar(10) DEFAULT NULL, PRIMARY KEY (`a`) ) ENGINE=InnoDB DEFAULT CHARSET=latin1 +connection slave; SHOW CREATE TABLE t1; Table Create Table t1 CREATE TABLE `t1` ( @@ -186,37 +218,49 @@ t3 CREATE TABLE `t3` ( ) ENGINE=MEMORY DEFAULT CHARSET=latin1 SET @@global.storage_engine=InnoDB; SET @@session.storage_engine=InnoDB; +connection master; DROP TABLE IF EXISTS t1,t2,t3; * sql_mode * +connection master; +SET @old_sql_mode_master= @@global.sql_mode; SET @@global.sql_mode=ANSI; SET @@session.sql_mode=ANSI; +connection slave; +SET @old_sql_mode_slave= @@global.sql_mode; SET @@global.sql_mode=TRADITIONAL; SET @@session.sql_mode=TRADITIONAL; +connection master; CREATE TABLE t1 (a INT NOT NULL PRIMARY KEY, b VARCHAR(10), c DATE); INSERT INTO t1 VALUES (1, 'master', '0000-00-00'); SELECT * FROM t1 ORDER BY a; a b c 1 master 0000-00-00 +connection slave; INSERT INTO t1 VALUES (1, 'slave', '0000-00-00'); -ERROR 22007: Incorrect date value: '0000-00-00' for column 'c' at row 1 +ERROR 22007: Incorrect date value: '0000-00-00' for column `test`.`t1`.`c` at row 1 SELECT * FROM t1 ORDER BY a; a b c 1 master 0000-00-00 SET @@global.sql_mode=''; SET @@session.sql_mode=''; +connection master; SET @@global.sql_mode=''; SET @@session.sql_mode=''; DROP TABLE t1; *** clean up *** +connection master; SET @@global.character_set_database=@restore_master_character_set_database; SET @@global.collation_server=@restore_master_collation_server; SET @@global.storage_engine=@restore_master_storage_engine; +SET @@global.sql_mode=@old_sql_mode_master; +connection slave; SET @@global.character_set_database=@restore_slave_character_set_database; SET @@global.collation_server=@restore_slave_collation_server; SET @@global.max_heap_table_size=@restore_slave_max_heap_table_size; SET @@global.storage_engine=@restore_slave_storage_engine; +SET @@global.sql_mode=@old_sql_mode_slave; call mtr.add_suppression("The table 't[12]' is full"); include/rpl_end.inc diff --git a/mysql-test/suite/rpl/t/rpl_spec_variables-slave.opt b/mysql-test/suite/rpl/t/rpl_spec_variables-slave.opt deleted file mode 100644 index 627becdbfb5..00000000000 --- a/mysql-test/suite/rpl/t/rpl_spec_variables-slave.opt +++ /dev/null @@ -1 +0,0 @@ ---innodb diff --git a/mysql-test/suite/rpl/t/rpl_spec_variables.test b/mysql-test/suite/rpl/t/rpl_spec_variables.test index 2cb580fce83..e2d5175036c 100644 --- a/mysql-test/suite/rpl/t/rpl_spec_variables.test +++ b/mysql-test/suite/rpl/t/rpl_spec_variables.test @@ -260,10 +260,12 @@ DROP TABLE IF EXISTS t1,t2,t3; --echo * sql_mode * --connection master +SET @old_sql_mode_master= @@global.sql_mode; SET @@global.sql_mode=ANSI; SET @@session.sql_mode=ANSI; --connection slave +SET @old_sql_mode_slave= @@global.sql_mode; SET @@global.sql_mode=TRADITIONAL; SET @@session.sql_mode=TRADITIONAL; @@ -292,14 +294,16 @@ DROP TABLE t1; SET @@global.character_set_database=@restore_master_character_set_database; SET @@global.collation_server=@restore_master_collation_server; SET @@global.storage_engine=@restore_master_storage_engine; +SET @@global.sql_mode=@old_sql_mode_master; --sync_slave_with_master SET @@global.character_set_database=@restore_slave_character_set_database; SET 
@@global.collation_server=@restore_slave_collation_server; SET @@global.max_heap_table_size=@restore_slave_max_heap_table_size; SET @@global.storage_engine=@restore_slave_storage_engine; - +SET @@global.sql_mode=@old_sql_mode_slave; # Put at the end since the test otherwise emptied the table. - +remove_file $MYSQLTEST_VARDIR/tmp/words.dat; +remove_file $MYSQLTEST_VARDIR/tmp/words2.dat; --echo call mtr.add_suppression("The table 't[12]' is full"); From 59e6d14c47aa87fcd61a60c8d4b73d66d7247557 Mon Sep 17 00:00:00 2001 From: Daniel Black Date: Thu, 5 Nov 2020 13:37:35 +1100 Subject: [PATCH 092/150] MDEV-24122: on previously MySQL-5.7 datadirs, adjust mysql.user column order MDEV-23201 correctly reordered columns in mysql.user table when upgrading from MySQL-5.7 Here we also correctly reorder columns in mysql.user table from an invalid order caused by an upgrade from MySQL-5.7 to MariaDB-before-MDEV-23201. --- mysql-test/r/mysql_upgrade.result | 130 +++++++++++++++++++++++++++- mysql-test/t/mysql_upgrade.test | 30 +++++++ scripts/mysql_system_tables_fix.sql | 24 +++-- 3 files changed, 177 insertions(+), 7 deletions(-) diff --git a/mysql-test/r/mysql_upgrade.result b/mysql-test/r/mysql_upgrade.result index a3e88765554..3961c7e8c27 100644 --- a/mysql-test/r/mysql_upgrade.result +++ b/mysql-test/r/mysql_upgrade.result @@ -711,9 +711,9 @@ user CREATE TABLE `user` ( `is_role` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N', `default_role` char(80) COLLATE utf8_bin NOT NULL DEFAULT '', `max_statement_time` decimal(12,6) NOT NULL DEFAULT 0.000000, - `password_last_changed` timestamp NULL DEFAULT NULL, + `password_last_changed` timestamp NOT NULL DEFAULT current_timestamp() ON UPDATE current_timestamp(), `password_lifetime` smallint(5) unsigned DEFAULT NULL, - `account_locked` enum('N','Y') COLLATE utf8_bin NOT NULL DEFAULT 'N', + `account_locked` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N', PRIMARY KEY (`Host`,`User`) ) ENGINE=MyISAM DEFAULT CHARSET=utf8 COLLATE=utf8_bin COMMENT='Users and global privileges' CREATE ROLE `aRole`; @@ -726,6 +726,132 @@ mysql.session N root N mysql.sys N aRole Y +DROP ROLE aRole; +# +# MDEV-24122: Fix previously MySQL-5.7 data directories that upgraded prior to MDEV-23201 +# +# +DROP TABLE IF EXISTS mysql.user; +FLUSH TABLES mysql.user; +ALTER TABLE mysql.user ADD is_role enum('N', 'Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL; +ALTER TABLE mysql.user ADD default_role char(80) binary DEFAULT '' NOT NULL; +ALTER TABLE mysql.user ADD max_statement_time decimal(12,6) DEFAULT 0 NOT NULL; +FLUSH PRIVILEGES; +Phase 1/7: Checking and upgrading mysql database +Processing databases +mysql +mysql.column_stats OK +mysql.columns_priv OK +mysql.db OK +mysql.event OK +mysql.func OK +mysql.gtid_slave_pos OK +mysql.help_category OK +mysql.help_keyword OK +mysql.help_relation OK +mysql.help_topic OK +mysql.host OK +mysql.index_stats OK +mysql.innodb_index_stats OK +mysql.innodb_table_stats OK +mysql.plugin OK +mysql.proc OK +mysql.procs_priv OK +mysql.proxies_priv OK +mysql.roles_mapping OK +mysql.servers OK +mysql.table_stats OK +mysql.tables_priv OK +mysql.time_zone OK +mysql.time_zone_leap_second OK +mysql.time_zone_name OK +mysql.time_zone_transition OK +mysql.time_zone_transition_type OK +mysql.user OK +Phase 2/7: Installing used storage engines... 
Skipped +Phase 3/7: Fixing views +Phase 4/7: Running 'mysql_fix_privilege_tables' +Phase 5/7: Fixing table and database names +Phase 6/7: Checking and upgrading tables +Processing databases +information_schema +mtr +mtr.global_suppressions OK +mtr.test_suppressions OK +performance_schema +test +Phase 7/7: Running 'FLUSH PRIVILEGES' +OK +SHOW CREATE TABLE mysql.user; +Table Create Table +user CREATE TABLE `user` ( + `Host` char(60) COLLATE utf8_bin NOT NULL DEFAULT '', + `User` char(80) COLLATE utf8_bin NOT NULL DEFAULT '', + `Password` char(41) CHARACTER SET latin1 COLLATE latin1_bin NOT NULL DEFAULT '', + `Select_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N', + `Insert_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N', + `Update_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N', + `Delete_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N', + `Create_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N', + `Drop_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N', + `Reload_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N', + `Shutdown_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N', + `Process_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N', + `File_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N', + `Grant_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N', + `References_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N', + `Index_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N', + `Alter_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N', + `Show_db_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N', + `Super_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N', + `Create_tmp_table_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N', + `Lock_tables_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N', + `Execute_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N', + `Repl_slave_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N', + `Repl_client_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N', + `Create_view_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N', + `Show_view_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N', + `Create_routine_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N', + `Alter_routine_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N', + `Create_user_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N', + `Event_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N', + `Trigger_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N', + `Create_tablespace_priv` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N', + `ssl_type` enum('','ANY','X509','SPECIFIED') CHARACTER SET utf8 NOT NULL DEFAULT '', + `ssl_cipher` blob NOT NULL, + `x509_issuer` blob NOT NULL, + `x509_subject` blob NOT NULL, + `max_questions` int(11) unsigned NOT NULL DEFAULT 0, + `max_updates` int(11) unsigned NOT NULL DEFAULT 0, + `max_connections` int(11) unsigned NOT NULL DEFAULT 0, + `max_user_connections` int(11) NOT NULL DEFAULT 0, + `plugin` char(64) CHARACTER SET latin1 NOT NULL DEFAULT '', + `authentication_string` text COLLATE utf8_bin NOT NULL, + `password_expired` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N', + `is_role` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N', + `default_role` char(80) COLLATE utf8_bin NOT NULL DEFAULT '', + `max_statement_time` decimal(12,6) NOT NULL DEFAULT 0.000000, + `password_last_changed` 
timestamp NOT NULL DEFAULT current_timestamp() ON UPDATE current_timestamp(), + `password_lifetime` smallint(5) unsigned DEFAULT NULL, + `account_locked` enum('N','Y') CHARACTER SET utf8 NOT NULL DEFAULT 'N', + PRIMARY KEY (`Host`,`User`) +) ENGINE=MyISAM DEFAULT CHARSET=utf8 COLLATE=utf8_bin COMMENT='Users and global privileges' +CREATE ROLE `aRole`; +SET DEFAULT ROLE aRole; +SHOW GRANTS; +Grants for root@localhost +GRANT aRole TO 'root'@'localhost' WITH ADMIN OPTION +GRANT ALL PRIVILEGES ON *.* TO 'root'@'localhost' WITH GRANT OPTION +GRANT PROXY ON ''@'%' TO 'root'@'localhost' WITH GRANT OPTION +GRANT USAGE ON *.* TO 'aRole' +SET DEFAULT ROLE aRole FOR 'root'@'localhost' +SET DEFAULT ROLE NONE; +SHOW GRANTS; +Grants for root@localhost +GRANT aRole TO 'root'@'localhost' WITH ADMIN OPTION +GRANT ALL PRIVILEGES ON *.* TO 'root'@'localhost' WITH GRANT OPTION +GRANT PROXY ON ''@'%' TO 'root'@'localhost' WITH GRANT OPTION +GRANT USAGE ON *.* TO 'aRole' DROP ROLE `aRole`; FLUSH PRIVILEGES; End of 10.2 tests diff --git a/mysql-test/t/mysql_upgrade.test b/mysql-test/t/mysql_upgrade.test index a6702f91aa6..c116af860a6 100644 --- a/mysql-test/t/mysql_upgrade.test +++ b/mysql-test/t/mysql_upgrade.test @@ -246,6 +246,36 @@ FLUSH PRIVILEGES; SET ROLE `aRole`; SELECT `User`, `is_role` FROM `mysql`.`user`; +DROP ROLE aRole; + +--echo # +--echo # MDEV-24122: Fix previously MySQL-5.7 data directories that upgraded prior to MDEV-23201 +--echo # +--echo # + +# For 10.4 merge - dropping the view. +# DROP VIEW IF EXISTS mysql.user; +DROP TABLE IF EXISTS mysql.user; +--copy_file std_data/mysql57user.frm $MYSQLD_DATADIR/mysql/user.frm +--copy_file std_data/mysql57user.MYI $MYSQLD_DATADIR/mysql/user.MYI +--copy_file std_data/mysql57user.MYD $MYSQLD_DATADIR/mysql/user.MYD +FLUSH TABLES mysql.user; + +# What prior to MDEV-23201 would of done: +ALTER TABLE mysql.user ADD is_role enum('N', 'Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL; +ALTER TABLE mysql.user ADD default_role char(80) binary DEFAULT '' NOT NULL; +ALTER TABLE mysql.user ADD max_statement_time decimal(12,6) DEFAULT 0 NOT NULL; +FLUSH PRIVILEGES; + +--exec $MYSQL_UPGRADE --force 2>&1 +SHOW CREATE TABLE mysql.user; + +CREATE ROLE `aRole`; +SET DEFAULT ROLE aRole; +SHOW GRANTS; +SET DEFAULT ROLE NONE; +SHOW GRANTS; + DROP ROLE `aRole`; --exec $MYSQL mysql < $MYSQLTEST_VARDIR/tmp/user.sql FLUSH PRIVILEGES; diff --git a/scripts/mysql_system_tables_fix.sql b/scripts/mysql_system_tables_fix.sql index 9a8ebaa119e..8baee4fef17 100644 --- a/scripts/mysql_system_tables_fix.sql +++ b/scripts/mysql_system_tables_fix.sql @@ -647,16 +647,30 @@ UPDATE user SET Create_tablespace_priv = Super_priv WHERE @hadCreateTablespacePr ALTER TABLE user ADD plugin char(64) CHARACTER SET latin1 DEFAULT '' NOT NULL AFTER max_user_connections, ADD authentication_string TEXT NOT NULL AFTER plugin; ALTER TABLE user CHANGE auth_string authentication_string TEXT NOT NULL; -ALTER TABLE user MODIFY plugin char(64) CHARACTER SET latin1 DEFAULT '' NOT NULL, - MODIFY authentication_string TEXT NOT NULL; + ALTER TABLE user ADD password_expired ENUM('N', 'Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL AFTER authentication_string; ALTER TABLE user ADD is_role enum('N', 'Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL AFTER password_expired; ALTER TABLE user ADD default_role char(80) binary DEFAULT '' NOT NULL AFTER is_role; ALTER TABLE user ADD max_statement_time decimal(12,6) DEFAULT 0 NOT NULL AFTER default_role; + -- Somewhere above, we ran ALTER TABLE user .... 
CONVERT TO CHARACTER SET utf8 COLLATE utf8_bin. --- we want password_expired column to have collation utf8_general_ci. -ALTER TABLE user MODIFY password_expired ENUM('N', 'Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL; -ALTER TABLE user MODIFY is_role enum('N', 'Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL; +-- we want password_expired column to have collation utf8_general_ci. +-- Order columns correctly that were not ordered until MDEV-23201 (ff8ffef3e1915d7a9caa07d9461cd8d47c4baf98) + +ALTER TABLE user MODIFY plugin char(64) CHARACTER SET latin1 DEFAULT '' NOT NULL AFTER max_user_connections, + MODIFY authentication_string TEXT NOT NULL AFTER plugin, + MODIFY password_expired ENUM('N', 'Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL AFTER authentication_string, + MODIFY is_role enum('N', 'Y') COLLATE utf8_general_ci DEFAULT 'N' NOT NULL AFTER password_expired, + MODIFY default_role char(80) binary DEFAULT '' NOT NULL AFTER is_role, + MODIFY max_statement_time decimal(12,6) DEFAULT 0 NOT NULL AFTER default_role, +-- MDEV-24122 formerly mysql5.7 users may have the following columns password_last_changed, +-- password_lifetime and account_locked. Ensure they are beyond the end of the user columns +-- used by MariaDB. MariaDB-10.4 will use these in the creation of mysql.global_priv. +-- password_last_changed has a DEFAULT/ON UPDATE of CURRENT_TIMESTAMP to keep track of +-- time until 10.4 added. + MODIFY IF EXISTS password_last_changed timestamp DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP AFTER max_statement_time, + MODIFY IF EXISTS password_lifetime smallint unsigned DEFAULT NULL AFTER password_last_changed, + MODIFY IF EXISTS account_locked enum('N', 'Y') CHARACTER SET utf8 DEFAULT 'N' NOT NULL after password_lifetime; -- Need to pre-fill mysql.proxies_priv with access for root even when upgrading from -- older versions From 4a7e62296a295758b128d20f6bbb0973b94c5193 Mon Sep 17 00:00:00 2001 From: Sergei Golubchik Date: Mon, 18 Jan 2021 11:22:48 +0100 Subject: [PATCH 093/150] don't allow `KILL QUERY ID USER xxx` --- mysql-test/r/kill.result | 5 +++++ mysql-test/t/kill.test | 6 ++++++ sql/sql_yacc.yy | 18 +++++++++++------- 3 files changed, 22 insertions(+), 7 deletions(-) diff --git a/mysql-test/r/kill.result b/mysql-test/r/kill.result index e3fbb830df5..bda6bddb7de 100644 --- a/mysql-test/r/kill.result +++ b/mysql-test/r/kill.result @@ -415,3 +415,8 @@ ALTER TABLE t2 DROP c; UNLOCK TABLES; DROP VIEW v1; DROP TABLE t1, t2; +# +# KILL QUERY ID USER +# +kill query id user 'foo'; +ERROR 42000: You have an error in your SQL syntax; check the manual that corresponds to your MariaDB server version for the right syntax to use near ''foo'' at line 1 diff --git a/mysql-test/t/kill.test b/mysql-test/t/kill.test index 78de8a784f3..6eb9d55f9b8 100644 --- a/mysql-test/t/kill.test +++ b/mysql-test/t/kill.test @@ -663,3 +663,9 @@ ALTER TABLE t2 DROP c; UNLOCK TABLES; DROP VIEW v1; DROP TABLE t1, t2; + +--echo # +--echo # KILL QUERY ID USER +--echo # +--error ER_PARSE_ERROR +kill query id user 'foo'; diff --git a/sql/sql_yacc.yy b/sql/sql_yacc.yy index a1616024fb8..6eb47f1e49f 100644 --- a/sql/sql_yacc.yy +++ b/sql/sql_yacc.yy @@ -1033,7 +1033,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, ulong *yystacksize); Currently there are 98 shift/reduce conflicts. We should not introduce new conflicts any more. */ -%expect 109 +%expect 115 /* Comments for TOKENS. 
@@ -13525,7 +13525,7 @@ kill: lex->sql_command= SQLCOM_KILL; lex->kill_type= KILL_TYPE_ID; } - kill_type kill_option kill_expr + kill_type kill_option { Lex->kill_signal= (killed_state) ($3 | $4); } @@ -13538,16 +13538,21 @@ kill_type: ; kill_option: - /* empty */ { $$= (int) KILL_CONNECTION; } - | CONNECTION_SYM { $$= (int) KILL_CONNECTION; } - | QUERY_SYM { $$= (int) KILL_QUERY; } - | QUERY_SYM ID_SYM + opt_connection kill_expr { $$= (int) KILL_CONNECTION; } + | QUERY_SYM kill_expr { $$= (int) KILL_QUERY; } + | QUERY_SYM ID_SYM expr { $$= (int) KILL_QUERY; Lex->kill_type= KILL_TYPE_QUERY; + Lex->value_list.push_front($3, thd->mem_root); } ; +opt_connection: + /* empty */ { } + | CONNECTION_SYM { } + ; + kill_expr: expr { @@ -13560,7 +13565,6 @@ kill_expr: } ; - shutdown: SHUTDOWN { Lex->sql_command= SQLCOM_SHUTDOWN; } ; From 990eb09333dcb2147ccffa9633c1b2bd246aea65 Mon Sep 17 00:00:00 2001 From: Sergei Golubchik Date: Mon, 18 Jan 2021 18:01:17 +0100 Subject: [PATCH 094/150] cleanup: fix and generalize handle_manager thread * provide an argument to the callback * don't ignore a callback request if it's already present in the queue * initialize mutex/cond/in_use flag before starting the thread, in case the first callback queueing request arrives before handle_manager had time to initialize * set/check abort_manager under a mutex, otherwise handle_manager thread might destroy LOCK_manager before stop_handle_manager released it * signal COND on queueing a callback, stop cond_wait on callback request * always start the thread, even if flush_time is 0 * but keep the old behavior in embedded (no replication, no galera) * style cleanups (e.g. remove volatile for a variable protected by a mutex) --- libmysqld/lib_sql.cc | 3 +- sql/sql_manager.cc | 87 ++++++++++++++++++++------------------------ sql/sql_manager.h | 2 +- 3 files changed, 43 insertions(+), 49 deletions(-) diff --git a/libmysqld/lib_sql.cc b/libmysqld/lib_sql.cc index 229e4e69e6a..ddb3f2c71aa 100644 --- a/libmysqld/lib_sql.cc +++ b/libmysqld/lib_sql.cc @@ -623,7 +623,8 @@ int init_embedded_server(int argc, char **argv, char **groups) (void) thr_setconcurrency(concurrency); // 10 by default - start_handle_manager(); + if (flush_time && flush_time != ~(ulong) 0L) + start_handle_manager(); // FIXME initialize binlog_filter and rpl_filter if not already done // corresponding delete is in clean_up() diff --git a/sql/sql_manager.cc b/sql/sql_manager.cc index 7fa9a78f06f..d57963cceb0 100644 --- a/sql/sql_manager.cc +++ b/sql/sql_manager.cc @@ -26,8 +26,8 @@ #include "sql_manager.h" #include "sql_base.h" // flush_tables -static bool volatile manager_thread_in_use; -static bool abort_manager; +static bool volatile manager_thread_in_use = 0; +static bool abort_manager = false; pthread_t manager_thread; mysql_mutex_t LOCK_manager; @@ -35,31 +35,31 @@ mysql_cond_t COND_manager; struct handler_cb { struct handler_cb *next; - void (*action)(void); + void (*action)(void *); + void *data; }; -static struct handler_cb * volatile cb_list; +static struct handler_cb *cb_list; // protected by LOCK_manager -bool mysql_manager_submit(void (*action)()) +bool mysql_manager_submit(void (*action)(void *), void *data) { bool result= FALSE; DBUG_ASSERT(manager_thread_in_use); - struct handler_cb * volatile *cb; + struct handler_cb **cb; mysql_mutex_lock(&LOCK_manager); cb= &cb_list; - while (*cb && (*cb)->action != action) + while (*cb) cb= &(*cb)->next; + *cb= (struct handler_cb *)my_malloc(sizeof(struct handler_cb), MYF(MY_WME)); if (!*cb) + result= TRUE; + 
else { - *cb= (struct handler_cb *)my_malloc(sizeof(struct handler_cb), MYF(MY_WME)); - if (!*cb) - result= TRUE; - else - { - (*cb)->next= NULL; - (*cb)->action= action; - } + (*cb)->next= NULL; + (*cb)->action= action; + (*cb)->data= data; } + mysql_cond_signal(&COND_manager); mysql_mutex_unlock(&LOCK_manager); return result; } @@ -69,18 +69,14 @@ pthread_handler_t handle_manager(void *arg __attribute__((unused))) int error = 0; struct timespec abstime; bool reset_flush_time = TRUE; - struct handler_cb *cb= NULL; my_thread_init(); DBUG_ENTER("handle_manager"); pthread_detach_this_thread(); manager_thread = pthread_self(); - mysql_cond_init(key_COND_manager, &COND_manager,NULL); - mysql_mutex_init(key_LOCK_manager, &LOCK_manager, NULL); - manager_thread_in_use = 1; - for (;;) + mysql_mutex_lock(&LOCK_manager); + while (!abort_manager) { - mysql_mutex_lock(&LOCK_manager); /* XXX: This will need to be made more general to handle different * polling needs. */ if (flush_time) @@ -90,40 +86,37 @@ pthread_handler_t handle_manager(void *arg __attribute__((unused))) set_timespec(abstime, flush_time); reset_flush_time = FALSE; } - while ((!error || error == EINTR) && !abort_manager) + while ((!error || error == EINTR) && !abort_manager && !cb_list) error= mysql_cond_timedwait(&COND_manager, &LOCK_manager, &abstime); + + if (error == ETIMEDOUT || error == ETIME) + { + tc_purge(); + error = 0; + reset_flush_time = TRUE; + } } else { - while ((!error || error == EINTR) && !abort_manager) + while ((!error || error == EINTR) && !abort_manager && !cb_list) error= mysql_cond_wait(&COND_manager, &LOCK_manager); } - if (cb == NULL) - { - cb= cb_list; - cb_list= NULL; - } + + struct handler_cb *cb= cb_list; + cb_list= NULL; mysql_mutex_unlock(&LOCK_manager); - if (abort_manager) - break; - - if (error == ETIMEDOUT || error == ETIME) - { - tc_purge(); - error = 0; - reset_flush_time = TRUE; - } - while (cb) { struct handler_cb *next= cb->next; - cb->action(); + cb->action(cb->data); my_free(cb); cb= next; } + mysql_mutex_lock(&LOCK_manager); } manager_thread_in_use = 0; + mysql_mutex_unlock(&LOCK_manager); mysql_mutex_destroy(&LOCK_manager); mysql_cond_destroy(&COND_manager); DBUG_LEAVE; // Can't use DBUG_RETURN after my_thread_end @@ -137,15 +130,15 @@ void start_handle_manager() { DBUG_ENTER("start_handle_manager"); abort_manager = false; - if (flush_time && flush_time != ~(ulong) 0L) { pthread_t hThread; - int error; - if ((error= mysql_thread_create(key_thread_handle_manager, - &hThread, &connection_attrib, - handle_manager, 0))) - sql_print_warning("Can't create handle_manager thread (errno= %d)", - error); + int err; + manager_thread_in_use = 1; + mysql_cond_init(key_COND_manager, &COND_manager,NULL); + mysql_mutex_init(key_LOCK_manager, &LOCK_manager, NULL); + if ((err= mysql_thread_create(key_thread_handle_manager, &hThread, + &connection_attrib, handle_manager, 0))) + sql_print_warning("Can't create handle_manager thread (errno: %M)", err); } DBUG_VOID_RETURN; } @@ -155,10 +148,10 @@ void start_handle_manager() void stop_handle_manager() { DBUG_ENTER("stop_handle_manager"); - abort_manager = true; if (manager_thread_in_use) { mysql_mutex_lock(&LOCK_manager); + abort_manager = true; DBUG_PRINT("quit", ("initiate shutdown of handle manager thread: %lu", (ulong)manager_thread)); mysql_cond_signal(&COND_manager); diff --git a/sql/sql_manager.h b/sql/sql_manager.h index 9c6c84450ed..f97d4a2cfc5 100644 --- a/sql/sql_manager.h +++ b/sql/sql_manager.h @@ -18,6 +18,6 @@ void start_handle_manager(); void 
stop_handle_manager(); -bool mysql_manager_submit(void (*action)()); +bool mysql_manager_submit(void (*action)(void *), void *data); #endif /* SQL_MANAGER_INCLUDED */ From 6a1cb449feb1b77e5ec94904c228d7c5477f528a Mon Sep 17 00:00:00 2001 From: Sergei Golubchik Date: Mon, 18 Jan 2021 18:02:16 +0100 Subject: [PATCH 095/150] cleanup: remove slave background thread, use handle_manager thread instead --- .../suite/perfschema/r/threads_mysql.result | 22 +-- sql/mysqld.cc | 15 +- sql/mysqld.h | 4 +- sql/slave.cc | 172 ++++-------------- 4 files changed, 46 insertions(+), 167 deletions(-) diff --git a/mysql-test/suite/perfschema/r/threads_mysql.result b/mysql-test/suite/perfschema/r/threads_mysql.result index 9f9c46fe6a6..169c22fb7a2 100644 --- a/mysql-test/suite/perfschema/r/threads_mysql.result +++ b/mysql-test/suite/perfschema/r/threads_mysql.result @@ -17,6 +17,16 @@ processlist_info NULL unified_parent_thread_id NULL role NULL instrumented YES +name thread/sql/manager +type BACKGROUND +processlist_user NULL +processlist_host NULL +processlist_db NULL +processlist_command NULL +processlist_info NULL +unified_parent_thread_id unified parent_thread_id +role NULL +instrumented YES name thread/sql/one_connection type FOREGROUND processlist_user root @@ -44,16 +54,6 @@ processlist_info NULL unified_parent_thread_id unified parent_thread_id role NULL instrumented YES -name thread/sql/slave_background -type BACKGROUND -processlist_user NULL -processlist_host NULL -processlist_db NULL -processlist_command NULL -processlist_info NULL -unified_parent_thread_id unified parent_thread_id -role NULL -instrumented YES CREATE TEMPORARY TABLE t1 AS SELECT thread_id FROM performance_schema.threads WHERE name LIKE 'thread/sql%'; @@ -113,7 +113,7 @@ WHERE t1.name LIKE 'thread/sql%' ORDER BY parent_thread_name, child_thread_name; parent_thread_name child_thread_name thread/sql/event_scheduler thread/sql/event_worker +thread/sql/main thread/sql/manager thread/sql/main thread/sql/one_connection thread/sql/main thread/sql/signal_handler -thread/sql/main thread/sql/slave_background thread/sql/one_connection thread/sql/event_scheduler diff --git a/sql/mysqld.cc b/sql/mysqld.cc index 8d67bc53164..c0bf69b38b9 100644 --- a/sql/mysqld.cc +++ b/sql/mysqld.cc @@ -385,7 +385,6 @@ static bool binlog_format_used= false; LEX_STRING opt_init_connect, opt_init_slave; mysql_cond_t COND_thread_cache; static mysql_cond_t COND_flush_thread_cache; -mysql_cond_t COND_slave_background; static DYNAMIC_ARRAY all_options; /* Global variables */ @@ -758,7 +757,7 @@ mysql_mutex_t LOCK_crypt, LOCK_global_system_variables, LOCK_user_conn, LOCK_slave_list, - LOCK_connection_count, LOCK_error_messages, LOCK_slave_background; + LOCK_connection_count, LOCK_error_messages; mysql_mutex_t LOCK_stats, LOCK_global_user_client_stats, LOCK_global_table_stats, LOCK_global_index_stats; @@ -947,8 +946,7 @@ PSI_mutex_key key_LOCK_stats, PSI_mutex_key key_LOCK_gtid_waiting; PSI_mutex_key key_LOCK_after_binlog_sync; -PSI_mutex_key key_LOCK_prepare_ordered, key_LOCK_commit_ordered, - key_LOCK_slave_background; +PSI_mutex_key key_LOCK_prepare_ordered, key_LOCK_commit_ordered; PSI_mutex_key key_TABLE_SHARE_LOCK_share; static PSI_mutex_info all_server_mutexes[]= @@ -1017,7 +1015,6 @@ static PSI_mutex_info all_server_mutexes[]= { &key_LOCK_prepare_ordered, "LOCK_prepare_ordered", PSI_FLAG_GLOBAL}, { &key_LOCK_after_binlog_sync, "LOCK_after_binlog_sync", PSI_FLAG_GLOBAL}, { &key_LOCK_commit_ordered, "LOCK_commit_ordered", PSI_FLAG_GLOBAL}, - { 
&key_LOCK_slave_background, "LOCK_slave_background", PSI_FLAG_GLOBAL}, { &key_LOG_INFO_lock, "LOG_INFO::lock", 0}, { &key_LOCK_thread_count, "LOCK_thread_count", PSI_FLAG_GLOBAL}, { &key_LOCK_thread_cache, "LOCK_thread_cache", PSI_FLAG_GLOBAL}, @@ -1074,7 +1071,7 @@ PSI_cond_key key_TC_LOG_MMAP_COND_queue_busy; PSI_cond_key key_COND_rpl_thread_queue, key_COND_rpl_thread, key_COND_rpl_thread_stop, key_COND_rpl_thread_pool, key_COND_parallel_entry, key_COND_group_commit_orderer, - key_COND_prepare_ordered, key_COND_slave_background; + key_COND_prepare_ordered; PSI_cond_key key_COND_wait_gtid, key_COND_gtid_ignore_duplicates; static PSI_cond_info all_server_conds[]= @@ -1124,7 +1121,6 @@ static PSI_cond_info all_server_conds[]= { &key_COND_parallel_entry, "COND_parallel_entry", 0}, { &key_COND_group_commit_orderer, "COND_group_commit_orderer", 0}, { &key_COND_prepare_ordered, "COND_prepare_ordered", 0}, - { &key_COND_slave_background, "COND_slave_background", 0}, { &key_COND_start_thread, "COND_start_thread", PSI_FLAG_GLOBAL}, { &key_COND_wait_gtid, "COND_wait_gtid", 0}, { &key_COND_gtid_ignore_duplicates, "COND_gtid_ignore_duplicates", 0} @@ -2379,8 +2375,6 @@ static void clean_up_mutexes() mysql_cond_destroy(&COND_prepare_ordered); mysql_mutex_destroy(&LOCK_after_binlog_sync); mysql_mutex_destroy(&LOCK_commit_ordered); - mysql_mutex_destroy(&LOCK_slave_background); - mysql_cond_destroy(&COND_slave_background); DBUG_VOID_RETURN; } @@ -4838,9 +4832,6 @@ static int init_thread_environment() MY_MUTEX_INIT_SLOW); mysql_mutex_init(key_LOCK_commit_ordered, &LOCK_commit_ordered, MY_MUTEX_INIT_SLOW); - mysql_mutex_init(key_LOCK_slave_background, &LOCK_slave_background, - MY_MUTEX_INIT_SLOW); - mysql_cond_init(key_COND_slave_background, &COND_slave_background, NULL); #ifdef HAVE_OPENSSL mysql_mutex_init(key_LOCK_des_key_file, diff --git a/sql/mysqld.h b/sql/mysqld.h index 2947734901c..64e5aef5946 100644 --- a/sql/mysqld.h +++ b/sql/mysqld.h @@ -568,8 +568,7 @@ extern mysql_mutex_t LOCK_error_log, LOCK_delayed_insert, LOCK_short_uuid_generator, LOCK_delayed_status, LOCK_delayed_create, LOCK_crypt, LOCK_timezone, LOCK_slave_list, LOCK_active_mi, LOCK_manager, LOCK_user_conn, - LOCK_prepared_stmt_count, LOCK_error_messages, LOCK_connection_count, - LOCK_slave_background; + LOCK_prepared_stmt_count, LOCK_error_messages, LOCK_connection_count; extern MYSQL_PLUGIN_IMPORT mysql_mutex_t LOCK_thread_count, LOCK_global_system_variables; extern mysql_mutex_t LOCK_start_thread; @@ -583,7 +582,6 @@ extern mysql_rwlock_t LOCK_grant, LOCK_sys_init_connect, LOCK_sys_init_slave; extern mysql_prlock_t LOCK_system_variables_hash; extern mysql_cond_t COND_thread_count, COND_start_thread; extern mysql_cond_t COND_manager; -extern mysql_cond_t COND_slave_background; extern int32 thread_running; extern int32 thread_count, service_thread_count; diff --git a/sql/slave.cc b/sql/slave.cc index 5685769bbfb..3124b2d10ab 100644 --- a/sql/slave.cc +++ b/sql/slave.cc @@ -60,6 +60,7 @@ #include "rpl_tblmap.h" #include "debug_sync.h" #include "rpl_parallel.h" +#include "sql_manager.h" #define FLAGSTR(V,F) ((V)&(F)?#F" ":"") @@ -279,8 +280,6 @@ static void init_slave_psi_keys(void) #endif /* HAVE_PSI_INTERFACE */ -static bool slave_background_thread_running; -static bool slave_background_thread_stop; static bool slave_background_thread_gtid_loaded; struct slave_background_kill_t { @@ -289,24 +288,15 @@ struct slave_background_kill_t { } *slave_background_kill_list; -pthread_handler_t -handle_slave_background(void *arg 
__attribute__((unused))) +static void bg_rpl_load_gtid_slave_state(void *) { - THD *thd; - PSI_stage_info old_stage; - bool stop; - - my_thread_init(); - thd= new THD(next_thread_id()); + THD *thd= new THD(next_thread_id()); thd->thread_stack= (char*) &thd; /* Set approximate stack start */ thd->system_thread = SYSTEM_THREAD_SLAVE_BACKGROUND; - thread_safe_increment32(&service_thread_count); thd->store_globals(); thd->security_ctx->skip_grants(); thd->set_command(COM_DAEMON); -#ifdef WITH_WSREP thd->variables.wsrep_on= 0; -#endif thd_proc_info(thd, "Loading slave GTID position from table"); if (rpl_load_gtid_slave_state(thd)) @@ -316,136 +306,34 @@ handle_slave_background(void *arg __attribute__((unused))) thd->get_stmt_da()->sql_errno(), thd->get_stmt_da()->message()); - mysql_mutex_lock(&LOCK_slave_background); + // hijacking global_rpl_thread_pool cond here - it's only once on startup + mysql_mutex_lock(&global_rpl_thread_pool.LOCK_rpl_thread_pool); slave_background_thread_gtid_loaded= true; - mysql_cond_broadcast(&COND_slave_background); - - THD_STAGE_INFO(thd, stage_slave_background_process_request); - do - { - slave_background_kill_t *kill_list; - - thd->ENTER_COND(&COND_slave_background, &LOCK_slave_background, - &stage_slave_background_wait_request, - &old_stage); - for (;;) - { - stop= abort_loop || thd->killed || slave_background_thread_stop; - kill_list= slave_background_kill_list; - if (stop || kill_list) - break; - mysql_cond_wait(&COND_slave_background, &LOCK_slave_background); - } - - slave_background_kill_list= NULL; - thd->EXIT_COND(&old_stage); - - while (kill_list) - { - slave_background_kill_t *p = kill_list; - THD *to_kill= p->to_kill; - kill_list= p->next; - - mysql_mutex_lock(&to_kill->LOCK_thd_data); - to_kill->awake(KILL_CONNECTION); - mysql_mutex_unlock(&to_kill->LOCK_thd_data); - mysql_mutex_lock(&to_kill->LOCK_wakeup_ready); - to_kill->rgi_slave->killed_for_retry= - rpl_group_info::RETRY_KILL_KILLED; - mysql_cond_broadcast(&to_kill->COND_wakeup_ready); - mysql_mutex_unlock(&to_kill->LOCK_wakeup_ready); - my_free(p); - } - mysql_mutex_lock(&LOCK_slave_background); - } while (!stop); - - slave_background_thread_running= false; - mysql_cond_broadcast(&COND_slave_background); - mysql_mutex_unlock(&LOCK_slave_background); - + mysql_cond_signal(&global_rpl_thread_pool.COND_rpl_thread_pool); + mysql_mutex_unlock(&global_rpl_thread_pool.LOCK_rpl_thread_pool); delete thd; - thread_safe_decrement32(&service_thread_count); - signal_thd_deleted(); - - my_thread_end(); - return 0; } +static void bg_slave_kill(void *victim) +{ + THD *to_kill= (THD *)victim; + mysql_mutex_lock(&to_kill->LOCK_thd_data); + to_kill->awake(KILL_CONNECTION); + mysql_mutex_unlock(&to_kill->LOCK_thd_data); + mysql_mutex_lock(&to_kill->LOCK_wakeup_ready); + to_kill->rgi_slave->killed_for_retry= rpl_group_info::RETRY_KILL_KILLED; + mysql_cond_broadcast(&to_kill->COND_wakeup_ready); + mysql_mutex_unlock(&to_kill->LOCK_wakeup_ready); +} - -void -slave_background_kill_request(THD *to_kill) +void slave_background_kill_request(THD *to_kill) { if (to_kill->rgi_slave->killed_for_retry) return; // Already deadlock killed. 
- slave_background_kill_t *p= - (slave_background_kill_t *)my_malloc(sizeof(*p), MYF(MY_WME)); - if (p) - { - p->to_kill= to_kill; - to_kill->rgi_slave->killed_for_retry= - rpl_group_info::RETRY_KILL_PENDING; - mysql_mutex_lock(&LOCK_slave_background); - p->next= slave_background_kill_list; - slave_background_kill_list= p; - mysql_cond_signal(&COND_slave_background); - mysql_mutex_unlock(&LOCK_slave_background); - } + to_kill->rgi_slave->killed_for_retry= rpl_group_info::RETRY_KILL_PENDING; + mysql_manager_submit(bg_slave_kill, to_kill); } - -/* - Start the slave background thread. - - This thread is currently used for two purposes: - - 1. To load the GTID state from mysql.gtid_slave_pos at server start; reading - from table requires valid THD, which is otherwise not available during - server init. - - 2. To kill worker thread transactions during parallel replication, when a - storage engine attempts to take an errorneous conflicting lock that would - cause a deadlock. Killing is done asynchroneously, as the kill may not - be safe within the context of a callback from inside storage engine - locking code. -*/ -static int -start_slave_background_thread() -{ - pthread_t th; - - slave_background_thread_running= true; - slave_background_thread_stop= false; - slave_background_thread_gtid_loaded= false; - if (mysql_thread_create(key_thread_slave_background, - &th, &connection_attrib, handle_slave_background, - NULL)) - { - sql_print_error("Failed to create thread while initialising slave"); - return 1; - } - - mysql_mutex_lock(&LOCK_slave_background); - while (!slave_background_thread_gtid_loaded) - mysql_cond_wait(&COND_slave_background, &LOCK_slave_background); - mysql_mutex_unlock(&LOCK_slave_background); - - return 0; -} - - -static void -stop_slave_background_thread() -{ - mysql_mutex_lock(&LOCK_slave_background); - slave_background_thread_stop= true; - mysql_cond_broadcast(&COND_slave_background); - while (slave_background_thread_running) - mysql_cond_wait(&COND_slave_background, &LOCK_slave_background); - mysql_mutex_unlock(&LOCK_slave_background); -} - - /* Initialize slave structures */ int init_slave() @@ -457,12 +345,19 @@ int init_slave() init_slave_psi_keys(); #endif - if (start_slave_background_thread()) - return 1; - if (global_rpl_thread_pool.init(opt_slave_parallel_threads)) return 1; + slave_background_thread_gtid_loaded= false; + mysql_manager_submit(bg_rpl_load_gtid_slave_state, NULL); + + // hijacking global_rpl_thread_pool cond here - it's only once on startup + mysql_mutex_lock(&global_rpl_thread_pool.LOCK_rpl_thread_pool); + while (!slave_background_thread_gtid_loaded) + mysql_cond_wait(&global_rpl_thread_pool.COND_rpl_thread_pool, + &global_rpl_thread_pool.LOCK_rpl_thread_pool); + mysql_mutex_unlock(&global_rpl_thread_pool.LOCK_rpl_thread_pool); + /* This is called when mysqld starts. Before client connections are accepted. However bootstrap may conflict with us if it does START SLAVE. 
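(Illustrative sketch, not part of the patch: with the generalized manager thread from the previous commit, a one-shot background job in the slave code is now just a callback plus an opaque argument handed to mysql_manager_submit(), instead of a dedicated thread with its own mutex, condition variable and shutdown protocol. Assuming the server headers, the calling convention looks like this; bg_report_example and its message are made-up names:)

    /* runs later on the manager thread, outside LOCK_manager */
    static void bg_report_example(void *arg)
    {
      sql_print_information("deferred job: %s", (const char *) arg);
    }

    /* caller side: queue the job and return immediately */
    mysql_manager_submit(bg_report_example, (void *) "load GTID state, kill a worker, ...");
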
@@ -1080,7 +975,6 @@ void slave_prepare_for_shutdown() mysql_mutex_lock(&LOCK_active_mi); master_info_index->free_connections(); mysql_mutex_unlock(&LOCK_active_mi); - stop_slave_background_thread(); } /* @@ -1111,8 +1005,6 @@ void end_slave() active_mi= 0; mysql_mutex_unlock(&LOCK_active_mi); - stop_slave_background_thread(); - global_rpl_thread_pool.destroy(); free_all_rpl_filters(); DBUG_VOID_RETURN; @@ -4335,9 +4227,7 @@ pthread_handler_t handle_slave_io(void *arg) goto err; } -#ifdef WITH_WSREP thd->variables.wsrep_on= 0; -#endif if (RUN_HOOK(binlog_relay_io, thread_start, (thd, mi))) { mi->report(ERROR_LEVEL, ER_SLAVE_FATAL_ERROR, NULL, From 5d1db34585bdedfcc1f6339c18ea2fce2cdb7ce3 Mon Sep 17 00:00:00 2001 From: Sergei Golubchik Date: Wed, 20 Jan 2021 15:22:26 +0100 Subject: [PATCH 096/150] cleanup: void hton::abort_transaction() and void wsrep_innobase_kill_one_trx() as their return values are never used. Also remove redundant cast and checks that are always true --- sql/handler.h | 2 +- storage/innobase/handler/ha_innodb.cc | 50 +++++++++--------------- storage/innobase/include/ha_prototypes.h | 4 +- 3 files changed, 22 insertions(+), 34 deletions(-) diff --git a/sql/handler.h b/sql/handler.h index 96f2836c921..6113b748696 100644 --- a/sql/handler.h +++ b/sql/handler.h @@ -1237,7 +1237,7 @@ struct handlerton enum handler_create_iterator_result (*create_iterator)(handlerton *hton, enum handler_iterator_type type, struct handler_iterator *fill_this_in); - int (*abort_transaction)(handlerton *hton, THD *bf_thd, + void (*abort_transaction)(handlerton *hton, THD *bf_thd, THD *victim_thd, my_bool signal); int (*set_checkpoint)(handlerton *hton, const XID* xid); int (*get_checkpoint)(handlerton *hton, XID* xid); diff --git a/storage/innobase/handler/ha_innodb.cc b/storage/innobase/handler/ha_innodb.cc index c8be848fabe..e475852cb90 100644 --- a/storage/innobase/handler/ha_innodb.cc +++ b/storage/innobase/handler/ha_innodb.cc @@ -167,11 +167,8 @@ wsrep_ws_handle(THD* thd, const trx_t* trx) { extern TC_LOG* tc_log; extern void wsrep_cleanup_transaction(THD *thd); -static int -wsrep_abort_transaction(handlerton* hton, THD *bf_thd, THD *victim_thd, - my_bool signal); -static void -wsrep_fake_trx_id(handlerton* hton, THD *thd); +static void wsrep_abort_transaction(handlerton*, THD *, THD *, my_bool); +static void wsrep_fake_trx_id(handlerton* hton, THD *thd); static int innobase_wsrep_set_checkpoint(handlerton* hton, const XID* xid); static int innobase_wsrep_get_checkpoint(handlerton* hton, XID* xid); #endif /* WITH_WSREP */ @@ -19507,35 +19504,27 @@ wsrep_abort_slave_trx( /*******************************************************************//** This function is used to kill one transaction in BF. */ UNIV_INTERN -int +void wsrep_innobase_kill_one_trx( /*========================*/ - void * const bf_thd_ptr, + MYSQL_THD const bf_thd, const trx_t * const bf_trx, trx_t *victim_trx, ibool signal) { + ut_ad(bf_thd); + ut_ad(victim_trx); ut_ad(lock_mutex_own()); ut_ad(trx_mutex_own(victim_trx)); - ut_ad(bf_thd_ptr); - ut_ad(victim_trx); DBUG_ENTER("wsrep_innobase_kill_one_trx"); - THD *bf_thd = bf_thd_ptr ? (THD*) bf_thd_ptr : NULL; THD *thd = (THD *) victim_trx->mysql_thd; - int64_t bf_seqno = (bf_thd) ? 
wsrep_thd_trx_seqno(bf_thd) : 0; + int64_t bf_seqno = wsrep_thd_trx_seqno(bf_thd); if (!thd) { DBUG_PRINT("wsrep", ("no thd for conflicting lock")); WSREP_WARN("no THD for trx: " TRX_ID_FMT, victim_trx->id); - DBUG_RETURN(1); - } - - if (!bf_thd) { - DBUG_PRINT("wsrep", ("no BF thd for conflicting lock")); - WSREP_WARN("no BF THD for trx: " TRX_ID_FMT, - bf_trx ? bf_trx->id : 0); - DBUG_RETURN(1); + DBUG_VOID_RETURN; } WSREP_LOG_CONFLICT(bf_thd, thd, TRUE); @@ -19566,7 +19555,7 @@ wsrep_innobase_kill_one_trx( WSREP_DEBUG("kill trx EXITING for " TRX_ID_FMT, victim_trx->id); wsrep_thd_UNLOCK(thd); - DBUG_RETURN(0); + DBUG_VOID_RETURN; } if (wsrep_thd_exec_mode(thd) != LOCAL_STATE) { @@ -19584,7 +19573,7 @@ wsrep_innobase_kill_one_trx( victim_trx->id); wsrep_thd_UNLOCK(thd); wsrep_thd_awake(thd, signal); - DBUG_RETURN(0); + DBUG_VOID_RETURN; break; case ABORTED: case ABORTING: // fall through @@ -19592,7 +19581,7 @@ wsrep_innobase_kill_one_trx( WSREP_DEBUG("victim " TRX_ID_FMT " in state %d", victim_trx->id, wsrep_thd_get_conflict_state(thd)); wsrep_thd_UNLOCK(thd); - DBUG_RETURN(0); + DBUG_VOID_RETURN; break; } @@ -19622,7 +19611,7 @@ wsrep_innobase_kill_one_trx( victim_trx->id); wsrep_thd_UNLOCK(thd); wsrep_thd_awake(thd, signal); - DBUG_RETURN(1); + DBUG_VOID_RETURN; break; case WSREP_OK: break; @@ -19692,7 +19681,7 @@ wsrep_innobase_kill_one_trx( wsrep_thd_UNLOCK(thd); wsrep_abort_slave_trx(bf_seqno, wsrep_thd_trx_seqno(thd)); - DBUG_RETURN(0); + DBUG_VOID_RETURN; } /* This will lock thd from proceeding after net_read() */ wsrep_thd_set_conflict_state(thd, ABORTING); @@ -19724,11 +19713,11 @@ wsrep_innobase_kill_one_trx( break; } - DBUG_RETURN(0); + DBUG_VOID_RETURN; } static -int +void wsrep_abort_transaction( /*====================*/ handlerton* hton, @@ -19736,7 +19725,7 @@ wsrep_abort_transaction( THD *victim_thd, my_bool signal) { - DBUG_ENTER("wsrep_innobase_abort_thd"); + DBUG_ENTER("wsrep_abort_transaction"); trx_t* victim_trx = thd_to_trx(victim_thd); trx_t* bf_trx = (bf_thd) ? thd_to_trx(bf_thd) : NULL; @@ -19749,12 +19738,11 @@ wsrep_abort_transaction( if (victim_trx) { lock_mutex_enter(); trx_mutex_enter(victim_trx); - int rcode = wsrep_innobase_kill_one_trx(bf_thd, bf_trx, - victim_trx, signal); + wsrep_innobase_kill_one_trx(bf_thd, bf_trx, victim_trx, signal); lock_mutex_exit(); trx_mutex_exit(victim_trx); wsrep_srv_conc_cancel_wait(victim_trx); - DBUG_RETURN(rcode); + DBUG_VOID_RETURN; } else { WSREP_DEBUG("victim does not have transaction"); wsrep_thd_LOCK(victim_thd); @@ -19763,7 +19751,7 @@ wsrep_abort_transaction( wsrep_thd_awake(victim_thd, signal); } - DBUG_RETURN(-1); + DBUG_VOID_RETURN; } static diff --git a/storage/innobase/include/ha_prototypes.h b/storage/innobase/include/ha_prototypes.h index 05dc3f57df7..3eab2135969 100644 --- a/storage/innobase/include/ha_prototypes.h +++ b/storage/innobase/include/ha_prototypes.h @@ -234,8 +234,8 @@ innobase_casedn_str( #ifdef WITH_WSREP UNIV_INTERN -int -wsrep_innobase_kill_one_trx(void * const thd_ptr, +void +wsrep_innobase_kill_one_trx(MYSQL_THD const thd_ptr, const trx_t * const bf_trx, trx_t *victim_trx, ibool signal); From 29bbcac0ee841faaa68eeb09c86ff825eabbe6b6 Mon Sep 17 00:00:00 2001 From: Sergei Golubchik Date: Wed, 20 Jan 2021 18:22:38 +0100 Subject: [PATCH 097/150] MDEV-23328 Server hang due to Galera lock conflict resolution mutex order violation here. 
when wsrep bf thread kills a conflicting trx, the stack is wsrep_thd_LOCK() wsrep_kill_victim() lock_rec_other_has_conflicting() lock_clust_rec_read_check_and_lock() row_search_mvcc() ha_innobase::index_read() ha_innobase::rnd_pos() handler::ha_rnd_pos() handler::rnd_pos_by_record() handler::ha_rnd_pos_by_record() Rows_log_event::find_row() Update_rows_log_event::do_exec_row() Rows_log_event::do_apply_event() Log_event::apply_event() wsrep_apply_events() and mutexes are taken in the order lock_sys->mutex -> victim_trx->mutex -> victim_thread->LOCK_thd_data When a normal KILL statement is executed, the stack is innobase_kill_query() kill_handlerton() plugin_foreach_with_mask() ha_kill_query() THD::awake() kill_one_thread() and mutexes are victim_thread->LOCK_thd_data -> lock_sys->mutex -> victim_trx->mutex To fix the mutex order violation we kill the victim thd asynchronously, from the manager thread --- sql/wsrep_mysqld.cc | 2 - storage/innobase/handler/ha_innodb.cc | 171 +++++++++++++++----------- 2 files changed, 100 insertions(+), 73 deletions(-) diff --git a/sql/wsrep_mysqld.cc b/sql/wsrep_mysqld.cc index d392d1c2a61..c033f7e1464 100644 --- a/sql/wsrep_mysqld.cc +++ b/sql/wsrep_mysqld.cc @@ -2762,9 +2762,7 @@ extern "C" void wsrep_thd_awake(THD *thd, my_bool signal) { if (signal) { - mysql_mutex_lock(&thd->LOCK_thd_data); thd->awake(KILL_QUERY); - mysql_mutex_unlock(&thd->LOCK_thd_data); } else { diff --git a/storage/innobase/handler/ha_innodb.cc b/storage/innobase/handler/ha_innodb.cc index e475852cb90..58a07e46be6 100644 --- a/storage/innobase/handler/ha_innodb.cc +++ b/storage/innobase/handler/ha_innodb.cc @@ -60,6 +60,7 @@ this program; if not, write to the Free Software Foundation, Inc., #include #include +#include /* Include necessary InnoDB headers */ #include "btr0btr.h" @@ -19501,61 +19502,57 @@ wsrep_abort_slave_trx( (long long)bf_seqno, (long long)victim_seqno); abort(); } -/*******************************************************************//** -This function is used to kill one transaction in BF. 
*/ -UNIV_INTERN -void -wsrep_innobase_kill_one_trx( -/*========================*/ - MYSQL_THD const bf_thd, - const trx_t * const bf_trx, - trx_t *victim_trx, - ibool signal) + +struct bg_wsrep_kill_trx_arg { + my_thread_id thd_id; + trx_id_t trx_id; + int64_t bf_seqno; + ibool signal; +}; + +static void bg_wsrep_kill_trx( + void *void_arg) { - ut_ad(bf_thd); - ut_ad(victim_trx); - ut_ad(lock_mutex_own()); - ut_ad(trx_mutex_own(victim_trx)); + bg_wsrep_kill_trx_arg *arg = (bg_wsrep_kill_trx_arg*)void_arg; + THD *thd = find_thread_by_id(arg->thd_id, false); + trx_t *victim_trx = NULL; + bool awake = false; + DBUG_ENTER("bg_wsrep_kill_trx"); - DBUG_ENTER("wsrep_innobase_kill_one_trx"); - THD *thd = (THD *) victim_trx->mysql_thd; - int64_t bf_seqno = wsrep_thd_trx_seqno(bf_thd); - - if (!thd) { - DBUG_PRINT("wsrep", ("no thd for conflicting lock")); - WSREP_WARN("no THD for trx: " TRX_ID_FMT, victim_trx->id); - DBUG_VOID_RETURN; + if (thd) { + victim_trx = thd_to_trx(thd); + lock_mutex_enter(); + trx_mutex_enter(victim_trx); + if (victim_trx->id != arg->trx_id) + { + trx_mutex_exit(victim_trx); + lock_mutex_exit(); + wsrep_thd_UNLOCK(thd); + victim_trx = NULL; + } } - WSREP_LOG_CONFLICT(bf_thd, thd, TRUE); + if (!victim_trx) { + /* it can happen that trx_id was meanwhile rolled back */ + DBUG_PRINT("wsrep", ("no thd for conflicting lock")); + goto ret; + } WSREP_DEBUG("BF kill (" ULINTPF ", seqno: " INT64PF "), victim: (%lu) trx: " TRX_ID_FMT, - signal, bf_seqno, + arg->signal, arg->bf_seqno, thd_get_thread_id(thd), victim_trx->id); WSREP_DEBUG("Aborting query: %s conf %d trx: %" PRId64, - (thd && wsrep_thd_query(thd)) ? wsrep_thd_query(thd) : "void", + (wsrep_thd_query(thd)) ? wsrep_thd_query(thd) : "void", wsrep_thd_conflict_state(thd, FALSE), wsrep_thd_ws_handle(thd)->trx_id); - wsrep_thd_LOCK(thd); - DBUG_EXECUTE_IF("sync.wsrep_after_BF_victim_lock", - { - const char act[]= - "now " - "wait_for signal.wsrep_after_BF_victim_lock"; - DBUG_ASSERT(!debug_sync_set_action(bf_thd, - STRING_WITH_LEN(act))); - };); - - if (wsrep_thd_query_state(thd) == QUERY_EXITING) { WSREP_DEBUG("kill trx EXITING for " TRX_ID_FMT, victim_trx->id); - wsrep_thd_UNLOCK(thd); - DBUG_VOID_RETURN; + goto ret_unlock; } if (wsrep_thd_exec_mode(thd) != LOCAL_STATE) { @@ -19571,18 +19568,13 @@ wsrep_innobase_kill_one_trx( case MUST_ABORT: WSREP_DEBUG("victim " TRX_ID_FMT " in MUST ABORT state", victim_trx->id); - wsrep_thd_UNLOCK(thd); - wsrep_thd_awake(thd, signal); - DBUG_VOID_RETURN; - break; + goto ret_awake; case ABORTED: case ABORTING: // fall through default: WSREP_DEBUG("victim " TRX_ID_FMT " in state %d", victim_trx->id, wsrep_thd_get_conflict_state(thd)); - wsrep_thd_UNLOCK(thd); - DBUG_VOID_RETURN; - break; + goto ret_unlock; } switch (wsrep_thd_query_state(thd)) { @@ -19595,12 +19587,12 @@ wsrep_innobase_kill_one_trx( victim_trx->id); if (wsrep_thd_exec_mode(thd) == REPL_RECV) { - wsrep_abort_slave_trx(bf_seqno, + wsrep_abort_slave_trx(arg->bf_seqno, wsrep_thd_trx_seqno(thd)); } else { wsrep_t *wsrep= get_wsrep(); rcode = wsrep->abort_pre_commit( - wsrep, bf_seqno, + wsrep, arg->bf_seqno, (wsrep_trx_id_t)wsrep_thd_ws_handle(thd)->trx_id ); @@ -19609,10 +19601,7 @@ wsrep_innobase_kill_one_trx( WSREP_DEBUG("cancel commit warning: " TRX_ID_FMT, victim_trx->id); - wsrep_thd_UNLOCK(thd); - wsrep_thd_awake(thd, signal); - DBUG_VOID_RETURN; - break; + goto ret_awake; case WSREP_OK: break; default: @@ -19625,12 +19614,9 @@ wsrep_innobase_kill_one_trx( * kill the lock holder first. 
*/ abort(); - break; } } - wsrep_thd_UNLOCK(thd); - wsrep_thd_awake(thd, signal); - break; + goto ret_awake; case QUERY_EXEC: /* it is possible that victim trx is itself waiting for some * other lock. We need to cancel this waiting @@ -19651,26 +19637,20 @@ wsrep_innobase_kill_one_trx( lock_cancel_waiting_and_release(wait_lock); } - wsrep_thd_UNLOCK(thd); - wsrep_thd_awake(thd, signal); } else { /* abort currently executing query */ DBUG_PRINT("wsrep",("sending KILL_QUERY to: %lu", thd_get_thread_id(thd))); WSREP_DEBUG("kill query for: %ld", thd_get_thread_id(thd)); - /* Note that innobase_kill_query will take lock_mutex - and trx_mutex */ - wsrep_thd_UNLOCK(thd); - wsrep_thd_awake(thd, signal); /* for BF thd, we need to prevent him from committing */ if (wsrep_thd_exec_mode(thd) == REPL_RECV) { - wsrep_abort_slave_trx(bf_seqno, + wsrep_abort_slave_trx(arg->bf_seqno, wsrep_thd_trx_seqno(thd)); } } - break; + goto ret_awake; case QUERY_IDLE: { WSREP_DEBUG("kill IDLE for " TRX_ID_FMT, victim_trx->id); @@ -19678,10 +19658,9 @@ wsrep_innobase_kill_one_trx( if (wsrep_thd_exec_mode(thd) == REPL_RECV) { WSREP_DEBUG("kill BF IDLE, seqno: %lld", (long long)wsrep_thd_trx_seqno(thd)); - wsrep_thd_UNLOCK(thd); - wsrep_abort_slave_trx(bf_seqno, + wsrep_abort_slave_trx(arg->bf_seqno, wsrep_thd_trx_seqno(thd)); - DBUG_VOID_RETURN; + goto ret_unlock; } /* This will lock thd from proceeding after net_read() */ wsrep_thd_set_conflict_state(thd, ABORTING); @@ -19702,17 +19681,67 @@ wsrep_innobase_kill_one_trx( DBUG_PRINT("wsrep",("signalling wsrep rollbacker")); WSREP_DEBUG("signaling aborter"); wsrep_unlock_rollback(); - wsrep_thd_UNLOCK(thd); - - break; + goto ret_unlock; } default: WSREP_WARN("bad wsrep query state: %d", wsrep_thd_query_state(thd)); - wsrep_thd_UNLOCK(thd); - break; + goto ret_unlock; } +ret_awake: + awake = true; + +ret_unlock: + trx_mutex_exit(victim_trx); + lock_mutex_exit(); + if (awake) + wsrep_thd_awake(thd, arg->signal); + wsrep_thd_UNLOCK(thd); + +ret: + free(arg); + DBUG_VOID_RETURN; + +} + +/*******************************************************************//** +This function is used to kill one transaction in BF. 
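(Sketch of the shape of this fix, using the names introduced above; it condenses what wsrep_innobase_kill_one_trx() and bg_wsrep_kill_trx() actually do and is not additional patch content. The BF thread, still holding lock_sys->mutex and the victim's trx mutex, copies plain identifiers only and never touches LOCK_thd_data:)

    bg_wsrep_kill_trx_arg *arg= (bg_wsrep_kill_trx_arg*) malloc(sizeof(*arg));
    arg->thd_id  = thd_get_thread_id(victim_trx->mysql_thd); /* plain values, */
    arg->trx_id  = victim_trx->id;                           /* no pointers   */
    arg->bf_seqno= wsrep_thd_trx_seqno((THD*) bf_thd);
    arg->signal  = signal;
    mysql_manager_submit(bg_wsrep_kill_trx, arg);

(The manager thread then re-resolves the victim with find_thread_by_id(), which locks the THD first and takes lock_sys->mutex and trx->mutex afterwards -- the same order as a regular KILL -- and re-checks arg->trx_id, so a transaction that was meanwhile rolled back is simply skipped.)
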
*/ +UNIV_INTERN +void +wsrep_innobase_kill_one_trx( +/*========================*/ + MYSQL_THD const bf_thd, + const trx_t * const bf_trx, + trx_t *victim_trx, + ibool signal) +{ + ut_ad(bf_thd); + ut_ad(victim_trx); + ut_ad(lock_mutex_own()); + ut_ad(trx_mutex_own(victim_trx)); + + bg_wsrep_kill_trx_arg *arg = (bg_wsrep_kill_trx_arg*)malloc(sizeof(*arg)); + arg->thd_id = thd_get_thread_id(victim_trx->mysql_thd); + arg->trx_id = victim_trx->id; + arg->bf_seqno = wsrep_thd_trx_seqno((THD*)bf_thd); + arg->signal = signal; + + DBUG_ENTER("wsrep_innobase_kill_one_trx"); + + WSREP_LOG_CONFLICT(bf_thd, victim_trx->mysql_thd, TRUE); + + DBUG_EXECUTE_IF("sync.wsrep_after_BF_victim_lock", + { + const char act[]= + "now " + "wait_for signal.wsrep_after_BF_victim_lock"; + DBUG_ASSERT(!debug_sync_set_action(bf_thd, + STRING_WITH_LEN(act))); + };); + + + mysql_manager_submit(bg_wsrep_kill_trx, arg); DBUG_VOID_RETURN; } From 1a99958545f923f39344abe8b57bc80a2072510d Mon Sep 17 00:00:00 2001 From: Sergei Golubchik Date: Fri, 22 Jan 2021 13:43:06 +0100 Subject: [PATCH 098/150] mtr: --client-gdb='' --- mysql-test/mysql-test-run.pl | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/mysql-test/mysql-test-run.pl b/mysql-test/mysql-test-run.pl index 7b73782eedc..05ceab98d7e 100755 --- a/mysql-test/mysql-test-run.pl +++ b/mysql-test/mysql-test-run.pl @@ -1323,7 +1323,7 @@ sub command_line_setup { 'rr' => \$opt_rr, 'rr-arg=s' => \@rr_record_args, 'rr-dir=s' => \$opt_rr_dir, - 'client-gdb' => \$opt_client_gdb, + 'client-gdb=s' => \$opt_client_gdb, 'manual-gdb' => \$opt_manual_gdb, 'manual-lldb' => \$opt_manual_lldb, 'boot-gdb' => \$opt_boot_gdb, @@ -1420,7 +1420,7 @@ sub command_line_setup { ); # fix options (that take an optional argument and *only* after = sign - my %fixopt = ( '--gdb' => '--gdb=#' ); + my %fixopt = ( '--gdb' => '--gdb=#', '--client-gdb' => '--client-gdb=#' ); @ARGV = map { $fixopt{$_} or $_ } @ARGV; GetOptions(%options) or usage("Can't read options"); usage("") if $opt_usage; @@ -6064,7 +6064,11 @@ sub gdb_arguments { $input = $input ? "< $input" : ""; if ($type eq 'client') { - mtr_tofile($gdb_init_file, "set args @$$args $input"); + mtr_tofile($gdb_init_file, + join("\n", + "set args @$$args $input", + split /;/, $opt_client_gdb || "" + )); } elsif ($opt_valgrind_mysqld) { my $v = $$exe; my $vargs = []; From c58d2c941c49550a85d610b1ad7b51cf0d4cd8db Mon Sep 17 00:00:00 2001 From: Daniel Black Date: Fri, 22 Jan 2021 16:03:07 +1100 Subject: [PATCH 099/150] MDEV-20939: Race condition between mysqldump import and InnoDB persistent statistics calculation mysqldump --system=stats and --system=timezones by default used ordinary INSERT statements populate EITS, innodb stats, and timezone tables. As these all have primary keys it could result in conflict. The behavior desired with --system= is to replace the tables. As such we assume --replace for the purposes of stats and timezone tables there if --insert-ignore isn't specified. 
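(A condensed sketch of the pattern the patch applies in dump_all_stats() and dump_all_timezones(); the names are the existing mysqldump globals and this is illustrative rather than additional patch content:)

    my_bool prev_replace= opt_replace_into;
    opt_replace_into|= !opt_ignore;   /* REPLACE INTO unless --insert-ignore */
    dump_table("innodb_table_stats", "mysql", NULL, 0);  /* ... and the rest */
    opt_replace_into= prev_replace;   /* ordinary tables keep their behaviour */

(So re-importing a --system=stats or --system=timezones dump overwrites rows that already exist under the same primary key instead of failing or racing with InnoDB's own statistics recalculation.)
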
--- client/mysqldump.c | 10 ++++++++-- mysql-test/r/mysqldump-system.result | 20 ++++++++++---------- 2 files changed, 18 insertions(+), 12 deletions(-) diff --git a/client/mysqldump.c b/client/mysqldump.c index 96bfd754d42..2eaec829867 100644 --- a/client/mysqldump.c +++ b/client/mysqldump.c @@ -4640,7 +4640,7 @@ static int dump_all_servers() static int dump_all_stats() { - my_bool prev_no_create_info; + my_bool prev_no_create_info, prev_opt_replace_into; if (mysql_select_db(mysql, "mysql")) { @@ -4648,6 +4648,8 @@ static int dump_all_stats() return 1; /* If --force */ } fprintf(md_result_file,"\nUSE mysql;\n"); + prev_opt_replace_into= opt_replace_into; + opt_replace_into|= !opt_ignore; prev_no_create_info= opt_no_create_info; opt_no_create_info= 1; /* don't overwrite recreate tables */ /* EITS added in 10.0.1 */ @@ -4666,6 +4668,7 @@ static int dump_all_stats() dump_table("innodb_table_stats", "mysql", NULL, 0); } opt_no_create_info= prev_no_create_info; + opt_replace_into= prev_opt_replace_into; return 0; } @@ -4676,12 +4679,14 @@ static int dump_all_stats() static int dump_all_timezones() { - my_bool opt_prev_no_create_info; + my_bool opt_prev_no_create_info, opt_prev_replace_into; if (mysql_select_db(mysql, "mysql")) { DB_error(mysql, "when selecting the database"); return 1; /* If --force */ } + opt_prev_replace_into= opt_replace_into; + opt_replace_into|= !opt_ignore; opt_prev_no_create_info= opt_no_create_info; opt_no_create_info= 1; fprintf(md_result_file,"\nUSE mysql;\n"); @@ -4691,6 +4696,7 @@ static int dump_all_timezones() dump_table("time_zone_transition", "mysql", NULL, 0); dump_table("time_zone_transition_type", "mysql", NULL, 0); opt_no_create_info= opt_prev_no_create_info; + opt_replace_into= opt_prev_replace_into; return 0; } diff --git a/mysql-test/r/mysqldump-system.result b/mysql-test/r/mysqldump-system.result index 88cf950c621..74cfc4e9882 100644 --- a/mysql-test/r/mysqldump-system.result +++ b/mysql-test/r/mysqldump-system.result @@ -85,31 +85,31 @@ USE mysql; LOCK TABLES `column_stats` WRITE; /*!40000 ALTER TABLE `column_stats` DISABLE KEYS */; -INSERT INTO `column_stats` VALUES ('mysql','tz','Time_zone_id','1','5',0.0000,4.0000,98.2500,0,NULL,NULL); +REPLACE INTO `column_stats` VALUES ('mysql','tz','Time_zone_id','1','5',0.0000,4.0000,98.2500,0,NULL,NULL); /*!40000 ALTER TABLE `column_stats` ENABLE KEYS */; UNLOCK TABLES; LOCK TABLES `index_stats` WRITE; /*!40000 ALTER TABLE `index_stats` DISABLE KEYS */; -INSERT INTO `index_stats` VALUES ('mysql','tz','PRIMARY',1,98.2500); +REPLACE INTO `index_stats` VALUES ('mysql','tz','PRIMARY',1,98.2500); /*!40000 ALTER TABLE `index_stats` ENABLE KEYS */; UNLOCK TABLES; LOCK TABLES `table_stats` WRITE; /*!40000 ALTER TABLE `table_stats` DISABLE KEYS */; -INSERT INTO `table_stats` VALUES ('mysql','tz',393); +REPLACE INTO `table_stats` VALUES ('mysql','tz',393); /*!40000 ALTER TABLE `table_stats` ENABLE KEYS */; UNLOCK TABLES; LOCK TABLES `innodb_index_stats` WRITE; /*!40000 ALTER TABLE `innodb_index_stats` DISABLE KEYS */; -INSERT INTO `innodb_index_stats` VALUES ('mysql','tz','PRIMARY','2019-12-31 21:00:00','n_diff_pfx01',4,1,'Time_zone_id'),('mysql','tz','PRIMARY','2019-12-31 21:00:00','n_diff_pfx02',393,1,'Time_zone_id,Transition_time'),('mysql','tz','PRIMARY','2019-12-31 21:00:00','n_leaf_pages',1,NULL,'Number of leaf pages in the index'),('mysql','tz','PRIMARY','2019-12-31 21:00:00','size',1,NULL,'Number of pages in the index'); +REPLACE INTO `innodb_index_stats` VALUES ('mysql','tz','PRIMARY','2019-12-31 
21:00:00','n_diff_pfx01',4,1,'Time_zone_id'),('mysql','tz','PRIMARY','2019-12-31 21:00:00','n_diff_pfx02',393,1,'Time_zone_id,Transition_time'),('mysql','tz','PRIMARY','2019-12-31 21:00:00','n_leaf_pages',1,NULL,'Number of leaf pages in the index'),('mysql','tz','PRIMARY','2019-12-31 21:00:00','size',1,NULL,'Number of pages in the index'); /*!40000 ALTER TABLE `innodb_index_stats` ENABLE KEYS */; UNLOCK TABLES; LOCK TABLES `innodb_table_stats` WRITE; /*!40000 ALTER TABLE `innodb_table_stats` DISABLE KEYS */; -INSERT INTO `innodb_table_stats` VALUES ('mysql','tz','2019-12-31 21:00:00',393,1,0); +REPLACE INTO `innodb_table_stats` VALUES ('mysql','tz','2019-12-31 21:00:00',393,1,0); /*!40000 ALTER TABLE `innodb_table_stats` ENABLE KEYS */; UNLOCK TABLES; @@ -117,31 +117,31 @@ USE mysql; LOCK TABLES `time_zone` WRITE; /*!40000 ALTER TABLE `time_zone` DISABLE KEYS */; -INSERT INTO `time_zone` VALUES (1,'N'),(2,'N'),(3,'N'),(4,'Y'),(5,'N'); +REPLACE INTO `time_zone` VALUES (1,'N'),(2,'N'),(3,'N'),(4,'Y'),(5,'N'); /*!40000 ALTER TABLE `time_zone` ENABLE KEYS */; UNLOCK TABLES; LOCK TABLES `time_zone_name` WRITE; /*!40000 ALTER TABLE `time_zone_name` DISABLE KEYS */; -INSERT INTO `time_zone_name` VALUES ('Europe/Moscow',3),('Japan',5),('leap/Europe/Moscow',4),('MET',1),('Universal',2),('UTC',2); +REPLACE INTO `time_zone_name` VALUES ('Europe/Moscow',3),('Japan',5),('leap/Europe/Moscow',4),('MET',1),('Universal',2),('UTC',2); /*!40000 ALTER TABLE `time_zone_name` ENABLE KEYS */; UNLOCK TABLES; LOCK TABLES `time_zone_leap_second` WRITE; /*!40000 ALTER TABLE `time_zone_leap_second` DISABLE KEYS */; -INSERT INTO `time_zone_leap_second` VALUES (78796800,1),(94694401,2),(126230402,3),(157766403,4),(189302404,5),(220924805,6),(252460806,7),(283996807,8),(315532808,9),(362793609,10),(394329610,11),(425865611,12),(489024012,13),(567993613,14),(631152014,15),(662688015,16),(709948816,17),(741484817,18),(773020818,19),(820454419,20),(867715220,21),(915148821,22); +REPLACE INTO `time_zone_leap_second` VALUES (78796800,1),(94694401,2),(126230402,3),(157766403,4),(189302404,5),(220924805,6),(252460806,7),(283996807,8),(315532808,9),(362793609,10),(394329610,11),(425865611,12),(489024012,13),(567993613,14),(631152014,15),(662688015,16),(709948816,17),(741484817,18),(773020818,19),(820454419,20),(867715220,21),(915148821,22); /*!40000 ALTER TABLE `time_zone_leap_second` ENABLE KEYS */; UNLOCK TABLES; LOCK TABLES `time_zone_transition` WRITE; /*!40000 ALTER TABLE `time_zone_transition` DISABLE KEYS */; -INSERT INTO `time_zone_transition` VALUES 
(1,-1693706400,0),(1,-1680483600,1),(1,-1663455600,2),(1,-1650150000,3),(1,-1632006000,2),(1,-1618700400,3),(1,-938905200,2),(1,-857257200,3),(1,-844556400,2),(1,-828226800,3),(1,-812502000,2),(1,-796777200,3),(1,228877200,2),(1,243997200,3),(1,260326800,2),(1,276051600,3),(1,291776400,2),(1,307501200,3),(1,323830800,2),(1,338950800,3),(1,354675600,2),(1,370400400,3),(1,386125200,2),(1,401850000,3),(1,417574800,2),(1,433299600,3),(1,449024400,2),(1,465354000,3),(1,481078800,2),(1,496803600,3),(1,512528400,2),(1,528253200,3),(1,543978000,2),(1,559702800,3),(1,575427600,2),(1,591152400,3),(1,606877200,2),(1,622602000,3),(1,638326800,2),(1,654656400,3),(1,670381200,2),(1,686106000,3),(1,701830800,2),(1,717555600,3),(1,733280400,2),(1,749005200,3),(1,764730000,2),(1,780454800,3),(1,796179600,2),(1,811904400,3),(1,828234000,2),(1,846378000,3),(1,859683600,2),(1,877827600,3),(1,891133200,2),(1,909277200,3),(1,922582800,2),(1,941331600,3),(1,954032400,2),(1,972781200,3),(1,985482000,2),(1,1004230800,3),(1,1017536400,2),(1,1035680400,3),(1,1048986000,2),(1,1067130000,3),(1,1080435600,2),(1,1099184400,3),(1,1111885200,2),(1,1130634000,3),(1,1143334800,2),(1,1162083600,3),(1,1174784400,2),(1,1193533200,3),(1,1206838800,2),(1,1224982800,3),(1,1238288400,2),(1,1256432400,3),(1,1269738000,2),(1,1288486800,3),(1,1301187600,2),(1,1319936400,3),(1,1332637200,2),(1,1351386000,3),(1,1364691600,2),(1,1382835600,3),(1,1396141200,2),(1,1414285200,3),(1,1427590800,2),(1,1445734800,3),(1,1459040400,2),(1,1477789200,3),(1,1490490000,2),(1,1509238800,3),(1,1521939600,2),(1,1540688400,3),(1,1553994000,2),(1,1572138000,3),(1,1585443600,2),(1,1603587600,3),(1,1616893200,2),(1,1635642000,3),(1,1648342800,2),(1,1667091600,3),(1,1679792400,2),(1,1698541200,3),(1,1711846800,2),(1,1729990800,3),(1,1743296400,2),(1,1761440400,3),(1,1774746000,2),(1,1792890000,3),(1,1806195600,2),(1,1824944400,3),(1,1837645200,2),(1,1856394000,3),(1,1869094800,2),(1,1887843600,3),(1,1901149200,2),(1,1919293200,3),(1,1932598800,2),(1,1950742800,3),(1,1964048400,2),(1,1982797200,3),(1,1995498000,2),(1,2014246800,3),(1,2026947600,2),(1,2045696400,3),(1,2058397200,2),(1,2077146000,3),(1,2090451600,2),(1,2108595600,3),(1,2121901200,2),(1,2140045200,3),(3,-1688265000,2),(3,-1656819048,1),(3,-1641353448,2),(3,-1627965048,3),(3,-1618716648,1),(3,-1596429048,3),(3,-1593829848,5),(3,-1589860800,4),(3,-1542427200,5),(3,-1539493200,6),(3,-1525323600,5),(3,-1522728000,4),(3,-1491188400,7),(3,-1247536800,4),(3,354920400,5),(3,370728000,4),(3,386456400,5),(3,402264000,4),(3,417992400,5),(3,433800000,4),(3,449614800,5),(3,465346800,8),(3,481071600,9),(3,496796400,8),(3,512521200,9),(3,528246000,8),(3,543970800,9),(3,559695600,8),(3,575420400,9),(3,591145200,8),(3,606870000,9),(3,622594800,8),(3,638319600,9),(3,654649200,8),(3,670374000,10),(3,686102400,11),(3,695779200,8),(3,701812800,5),(3,717534000,4),(3,733273200,9),(3,748998000,8),(3,764722800,9),(3,780447600,8),(3,796172400,9),(3,811897200,8),(3,828226800,9),(3,846370800,8),(3,859676400,9),(3,877820400,8),(3,891126000,9),(3,909270000,8),(3,922575600,9),(3,941324400,8),(3,954025200,9),(3,972774000,8),(3,985474800,9),(3,1004223600,8),(3,1017529200,9),(3,1035673200,8),(3,1048978800,9),(3,1067122800,8),(3,1080428400,9),(3,1099177200,8),(3,1111878000,9),(3,1130626800,8),(3,1143327600,9),(3,1162076400,8),(3,1174777200,9),(3,1193526000,8),(3,1206831600,9),(3,1224975600,8),(3,1238281200,9),(3,1256425200,8),(3,1269730800,9),(3,1288479600,8),(3,1301180400,9),(3,1319929200,8),(3,1332630000,9),(3,1351378800,8),(3,
1364684400,9),(3,1382828400,8),(3,1396134000,9),(3,1414278000,8),(3,1427583600,9),(3,1445727600,8),(3,1459033200,9),(3,1477782000,8),(3,1490482800,9),(3,1509231600,8),(3,1521932400,9),(3,1540681200,8),(3,1553986800,9),(3,1572130800,8),(3,1585436400,9),(3,1603580400,8),(3,1616886000,9),(3,1635634800,8),(3,1648335600,9),(3,1667084400,8),(3,1679785200,9),(3,1698534000,8),(3,1711839600,9),(3,1729983600,8),(3,1743289200,9),(3,1761433200,8),(3,1774738800,9),(3,1792882800,8),(3,1806188400,9),(3,1824937200,8),(3,1837638000,9),(3,1856386800,8),(3,1869087600,9),(3,1887836400,8),(3,1901142000,9),(3,1919286000,8),(3,1932591600,9),(3,1950735600,8),(3,1964041200,9),(3,1982790000,8),(3,1995490800,9),(3,2014239600,8),(3,2026940400,9),(3,2045689200,8),(3,2058390000,9),(3,2077138800,8),(3,2090444400,9),(3,2108588400,8),(3,2121894000,9),(3,2140038000,8),(4,-1688265000,2),(4,-1656819048,1),(4,-1641353448,2),(4,-1627965048,3),(4,-1618716648,1),(4,-1596429048,3),(4,-1593829848,5),(4,-1589860800,4),(4,-1542427200,5),(4,-1539493200,6),(4,-1525323600,5),(4,-1522728000,4),(4,-1491188400,7),(4,-1247536800,4),(4,354920409,5),(4,370728010,4),(4,386456410,5),(4,402264011,4),(4,417992411,5),(4,433800012,4),(4,449614812,5),(4,465346812,8),(4,481071612,9),(4,496796413,8),(4,512521213,9),(4,528246013,8),(4,543970813,9),(4,559695613,8),(4,575420414,9),(4,591145214,8),(4,606870014,9),(4,622594814,8),(4,638319615,9),(4,654649215,8),(4,670374016,10),(4,686102416,11),(4,695779216,8),(4,701812816,5),(4,717534017,4),(4,733273217,9),(4,748998018,8),(4,764722818,9),(4,780447619,8),(4,796172419,9),(4,811897219,8),(4,828226820,9),(4,846370820,8),(4,859676420,9),(4,877820421,8),(4,891126021,9),(4,909270021,8),(4,922575622,9),(4,941324422,8),(4,954025222,9),(4,972774022,8),(4,985474822,9),(4,1004223622,8),(4,1017529222,9),(4,1035673222,8),(4,1048978822,9),(4,1067122822,8),(4,1080428422,9),(4,1099177222,8),(4,1111878022,9),(4,1130626822,8),(4,1143327622,9),(4,1162076422,8),(4,1174777222,9),(4,1193526022,8),(4,1206831622,9),(4,1224975622,8),(4,1238281222,9),(4,1256425222,8),(4,1269730822,9),(4,1288479622,8),(4,1301180422,9),(4,1319929222,8),(4,1332630022,9),(4,1351378822,8),(4,1364684422,9),(4,1382828422,8),(4,1396134022,9),(4,1414278022,8),(4,1427583622,9),(4,1445727622,8),(4,1459033222,9),(4,1477782022,8),(4,1490482822,9),(4,1509231622,8),(4,1521932422,9),(4,1540681222,8),(4,1553986822,9),(4,1572130822,8),(4,1585436422,9),(4,1603580422,8),(4,1616886022,9),(4,1635634822,8),(4,1648335622,9),(4,1667084422,8),(4,1679785222,9),(4,1698534022,8),(4,1711839622,9),(4,1729983622,8),(4,1743289222,9),(4,1761433222,8),(4,1774738822,9),(4,1792882822,8),(4,1806188422,9),(4,1824937222,8),(4,1837638022,9),(4,1856386822,8),(4,1869087622,9),(4,1887836422,8),(4,1901142022,9),(4,1919286022,8),(4,1932591622,9),(4,1950735622,8),(4,1964041222,9),(4,1982790022,8),(4,1995490822,9),(4,2014239622,8),(4,2026940422,9),(4,2045689222,8),(4,2058390022,9),(4,2077138822,8),(4,2090444422,9),(4,2108588422,8),(4,2121894022,9),(4,2140038022,8),(5,-1009875600,1); +REPLACE INTO `time_zone_transition` VALUES 
(1,-1693706400,0),(1,-1680483600,1),(1,-1663455600,2),(1,-1650150000,3),(1,-1632006000,2),(1,-1618700400,3),(1,-938905200,2),(1,-857257200,3),(1,-844556400,2),(1,-828226800,3),(1,-812502000,2),(1,-796777200,3),(1,228877200,2),(1,243997200,3),(1,260326800,2),(1,276051600,3),(1,291776400,2),(1,307501200,3),(1,323830800,2),(1,338950800,3),(1,354675600,2),(1,370400400,3),(1,386125200,2),(1,401850000,3),(1,417574800,2),(1,433299600,3),(1,449024400,2),(1,465354000,3),(1,481078800,2),(1,496803600,3),(1,512528400,2),(1,528253200,3),(1,543978000,2),(1,559702800,3),(1,575427600,2),(1,591152400,3),(1,606877200,2),(1,622602000,3),(1,638326800,2),(1,654656400,3),(1,670381200,2),(1,686106000,3),(1,701830800,2),(1,717555600,3),(1,733280400,2),(1,749005200,3),(1,764730000,2),(1,780454800,3),(1,796179600,2),(1,811904400,3),(1,828234000,2),(1,846378000,3),(1,859683600,2),(1,877827600,3),(1,891133200,2),(1,909277200,3),(1,922582800,2),(1,941331600,3),(1,954032400,2),(1,972781200,3),(1,985482000,2),(1,1004230800,3),(1,1017536400,2),(1,1035680400,3),(1,1048986000,2),(1,1067130000,3),(1,1080435600,2),(1,1099184400,3),(1,1111885200,2),(1,1130634000,3),(1,1143334800,2),(1,1162083600,3),(1,1174784400,2),(1,1193533200,3),(1,1206838800,2),(1,1224982800,3),(1,1238288400,2),(1,1256432400,3),(1,1269738000,2),(1,1288486800,3),(1,1301187600,2),(1,1319936400,3),(1,1332637200,2),(1,1351386000,3),(1,1364691600,2),(1,1382835600,3),(1,1396141200,2),(1,1414285200,3),(1,1427590800,2),(1,1445734800,3),(1,1459040400,2),(1,1477789200,3),(1,1490490000,2),(1,1509238800,3),(1,1521939600,2),(1,1540688400,3),(1,1553994000,2),(1,1572138000,3),(1,1585443600,2),(1,1603587600,3),(1,1616893200,2),(1,1635642000,3),(1,1648342800,2),(1,1667091600,3),(1,1679792400,2),(1,1698541200,3),(1,1711846800,2),(1,1729990800,3),(1,1743296400,2),(1,1761440400,3),(1,1774746000,2),(1,1792890000,3),(1,1806195600,2),(1,1824944400,3),(1,1837645200,2),(1,1856394000,3),(1,1869094800,2),(1,1887843600,3),(1,1901149200,2),(1,1919293200,3),(1,1932598800,2),(1,1950742800,3),(1,1964048400,2),(1,1982797200,3),(1,1995498000,2),(1,2014246800,3),(1,2026947600,2),(1,2045696400,3),(1,2058397200,2),(1,2077146000,3),(1,2090451600,2),(1,2108595600,3),(1,2121901200,2),(1,2140045200,3),(3,-1688265000,2),(3,-1656819048,1),(3,-1641353448,2),(3,-1627965048,3),(3,-1618716648,1),(3,-1596429048,3),(3,-1593829848,5),(3,-1589860800,4),(3,-1542427200,5),(3,-1539493200,6),(3,-1525323600,5),(3,-1522728000,4),(3,-1491188400,7),(3,-1247536800,4),(3,354920400,5),(3,370728000,4),(3,386456400,5),(3,402264000,4),(3,417992400,5),(3,433800000,4),(3,449614800,5),(3,465346800,8),(3,481071600,9),(3,496796400,8),(3,512521200,9),(3,528246000,8),(3,543970800,9),(3,559695600,8),(3,575420400,9),(3,591145200,8),(3,606870000,9),(3,622594800,8),(3,638319600,9),(3,654649200,8),(3,670374000,10),(3,686102400,11),(3,695779200,8),(3,701812800,5),(3,717534000,4),(3,733273200,9),(3,748998000,8),(3,764722800,9),(3,780447600,8),(3,796172400,9),(3,811897200,8),(3,828226800,9),(3,846370800,8),(3,859676400,9),(3,877820400,8),(3,891126000,9),(3,909270000,8),(3,922575600,9),(3,941324400,8),(3,954025200,9),(3,972774000,8),(3,985474800,9),(3,1004223600,8),(3,1017529200,9),(3,1035673200,8),(3,1048978800,9),(3,1067122800,8),(3,1080428400,9),(3,1099177200,8),(3,1111878000,9),(3,1130626800,8),(3,1143327600,9),(3,1162076400,8),(3,1174777200,9),(3,1193526000,8),(3,1206831600,9),(3,1224975600,8),(3,1238281200,9),(3,1256425200,8),(3,1269730800,9),(3,1288479600,8),(3,1301180400,9),(3,1319929200,8),(3,1332630000,9),(3,1351378800,8),(3,
1364684400,9),(3,1382828400,8),(3,1396134000,9),(3,1414278000,8),(3,1427583600,9),(3,1445727600,8),(3,1459033200,9),(3,1477782000,8),(3,1490482800,9),(3,1509231600,8),(3,1521932400,9),(3,1540681200,8),(3,1553986800,9),(3,1572130800,8),(3,1585436400,9),(3,1603580400,8),(3,1616886000,9),(3,1635634800,8),(3,1648335600,9),(3,1667084400,8),(3,1679785200,9),(3,1698534000,8),(3,1711839600,9),(3,1729983600,8),(3,1743289200,9),(3,1761433200,8),(3,1774738800,9),(3,1792882800,8),(3,1806188400,9),(3,1824937200,8),(3,1837638000,9),(3,1856386800,8),(3,1869087600,9),(3,1887836400,8),(3,1901142000,9),(3,1919286000,8),(3,1932591600,9),(3,1950735600,8),(3,1964041200,9),(3,1982790000,8),(3,1995490800,9),(3,2014239600,8),(3,2026940400,9),(3,2045689200,8),(3,2058390000,9),(3,2077138800,8),(3,2090444400,9),(3,2108588400,8),(3,2121894000,9),(3,2140038000,8),(4,-1688265000,2),(4,-1656819048,1),(4,-1641353448,2),(4,-1627965048,3),(4,-1618716648,1),(4,-1596429048,3),(4,-1593829848,5),(4,-1589860800,4),(4,-1542427200,5),(4,-1539493200,6),(4,-1525323600,5),(4,-1522728000,4),(4,-1491188400,7),(4,-1247536800,4),(4,354920409,5),(4,370728010,4),(4,386456410,5),(4,402264011,4),(4,417992411,5),(4,433800012,4),(4,449614812,5),(4,465346812,8),(4,481071612,9),(4,496796413,8),(4,512521213,9),(4,528246013,8),(4,543970813,9),(4,559695613,8),(4,575420414,9),(4,591145214,8),(4,606870014,9),(4,622594814,8),(4,638319615,9),(4,654649215,8),(4,670374016,10),(4,686102416,11),(4,695779216,8),(4,701812816,5),(4,717534017,4),(4,733273217,9),(4,748998018,8),(4,764722818,9),(4,780447619,8),(4,796172419,9),(4,811897219,8),(4,828226820,9),(4,846370820,8),(4,859676420,9),(4,877820421,8),(4,891126021,9),(4,909270021,8),(4,922575622,9),(4,941324422,8),(4,954025222,9),(4,972774022,8),(4,985474822,9),(4,1004223622,8),(4,1017529222,9),(4,1035673222,8),(4,1048978822,9),(4,1067122822,8),(4,1080428422,9),(4,1099177222,8),(4,1111878022,9),(4,1130626822,8),(4,1143327622,9),(4,1162076422,8),(4,1174777222,9),(4,1193526022,8),(4,1206831622,9),(4,1224975622,8),(4,1238281222,9),(4,1256425222,8),(4,1269730822,9),(4,1288479622,8),(4,1301180422,9),(4,1319929222,8),(4,1332630022,9),(4,1351378822,8),(4,1364684422,9),(4,1382828422,8),(4,1396134022,9),(4,1414278022,8),(4,1427583622,9),(4,1445727622,8),(4,1459033222,9),(4,1477782022,8),(4,1490482822,9),(4,1509231622,8),(4,1521932422,9),(4,1540681222,8),(4,1553986822,9),(4,1572130822,8),(4,1585436422,9),(4,1603580422,8),(4,1616886022,9),(4,1635634822,8),(4,1648335622,9),(4,1667084422,8),(4,1679785222,9),(4,1698534022,8),(4,1711839622,9),(4,1729983622,8),(4,1743289222,9),(4,1761433222,8),(4,1774738822,9),(4,1792882822,8),(4,1806188422,9),(4,1824937222,8),(4,1837638022,9),(4,1856386822,8),(4,1869087622,9),(4,1887836422,8),(4,1901142022,9),(4,1919286022,8),(4,1932591622,9),(4,1950735622,8),(4,1964041222,9),(4,1982790022,8),(4,1995490822,9),(4,2014239622,8),(4,2026940422,9),(4,2045689222,8),(4,2058390022,9),(4,2077138822,8),(4,2090444422,9),(4,2108588422,8),(4,2121894022,9),(4,2140038022,8),(5,-1009875600,1); /*!40000 ALTER TABLE `time_zone_transition` ENABLE KEYS */; UNLOCK TABLES; LOCK TABLES `time_zone_transition_type` WRITE; /*!40000 ALTER TABLE `time_zone_transition_type` DISABLE KEYS */; -INSERT INTO `time_zone_transition_type` VALUES 
(1,0,7200,1,'MEST'),(1,1,3600,0,'MET'),(1,2,7200,1,'MEST'),(1,3,3600,0,'MET'),(2,0,0,0,'UTC'),(3,0,9000,0,'MMT'),(3,1,12648,1,'MST'),(3,2,9048,0,'MMT'),(3,3,16248,1,'MDST'),(3,4,10800,0,'MSK'),(3,5,14400,1,'MSD'),(3,6,18000,1,'MSD'),(3,7,7200,0,'EET'),(3,8,10800,0,'MSK'),(3,9,14400,1,'MSD'),(3,10,10800,1,'EEST'),(3,11,7200,0,'EET'),(4,0,9000,0,'MMT'),(4,1,12648,1,'MST'),(4,2,9048,0,'MMT'),(4,3,16248,1,'MDST'),(4,4,10800,0,'MSK'),(4,5,14400,1,'MSD'),(4,6,18000,1,'MSD'),(4,7,7200,0,'EET'),(4,8,10800,0,'MSK'),(4,9,14400,1,'MSD'),(4,10,10800,1,'EEST'),(4,11,7200,0,'EET'),(5,0,32400,0,'CJT'),(5,1,32400,0,'JST'); +REPLACE INTO `time_zone_transition_type` VALUES (1,0,7200,1,'MEST'),(1,1,3600,0,'MET'),(1,2,7200,1,'MEST'),(1,3,3600,0,'MET'),(2,0,0,0,'UTC'),(3,0,9000,0,'MMT'),(3,1,12648,1,'MST'),(3,2,9048,0,'MMT'),(3,3,16248,1,'MDST'),(3,4,10800,0,'MSK'),(3,5,14400,1,'MSD'),(3,6,18000,1,'MSD'),(3,7,7200,0,'EET'),(3,8,10800,0,'MSK'),(3,9,14400,1,'MSD'),(3,10,10800,1,'EEST'),(3,11,7200,0,'EET'),(4,0,9000,0,'MMT'),(4,1,12648,1,'MST'),(4,2,9048,0,'MMT'),(4,3,16248,1,'MDST'),(4,4,10800,0,'MSK'),(4,5,14400,1,'MSD'),(4,6,18000,1,'MSD'),(4,7,7200,0,'EET'),(4,8,10800,0,'MSK'),(4,9,14400,1,'MSD'),(4,10,10800,1,'EEST'),(4,11,7200,0,'EET'),(5,0,32400,0,'CJT'),(5,1,32400,0,'JST'); /*!40000 ALTER TABLE `time_zone_transition_type` ENABLE KEYS */; UNLOCK TABLES; /*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */; From cc2d6d1bb26cdcd649357ec23cd54dd28ddefaf3 Mon Sep 17 00:00:00 2001 From: Daniel Black Date: Mon, 25 Jan 2021 18:12:34 +1100 Subject: [PATCH 100/150] MDEV-20939: Race condition between mysqldump import and InnoDB persistent Update mysqldump man page for --system={stats,timezones} --- man/mysqldump.1 | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/man/mysqldump.1 b/man/mysqldump.1 index a7defd8bf47..2cfe308836a 100644 --- a/man/mysqldump.1 +++ b/man/mysqldump.1 @@ -2261,7 +2261,7 @@ servers \- remote (federated) servers as \fBCREATE SERVER\fR\&. .sp -1 .IP \(bu 2.3 .\} -stats \- statistics tables, InnoDB and Engine Independent Table Statistics (EITS), are dumped as \fBINSERT\fR/\fBREPLACE INTO\fR statements without (re)creating tables\&. +stats \- statistics tables, InnoDB and Engine Independent Table Statistics (EITS), are dumped as \fBREPLACE INTO\fR (or \fBINSERT IGNORE\fR if \fB\-\-insert\-into\fR is specified) statements without (re)creating tables\&. .RE .RS 4 .ie n \{\ @@ -2271,7 +2271,7 @@ stats \- statistics tables, InnoDB and Engine Independent Table Statistics (EITS .sp -1 .IP \(bu 2.3 .\} -timezones \- timezone related system tables dumped as \fBINSERT\fR/\fBREPLACE INTO\fR statements without (re)creating tables\&. +timezones \- timezone related system tables dumped as \fBREPLACE INTO\fR (or \fBINSERT IGNORE\fR if \fB\-\-insert\-into\fR is specified) statements without (re)creating tables\&. .RE .sp The format of the output is affected by \fB\-\-replace\fR and \fB\-\-insert\-into\fR\&. The \fB\-\-replace\fR option will output \fBCREATE OR REPLACE\fR From 8cdeee177d353e60f4a9cdebe6c851f9505d84b5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jan=20Lindstr=C3=B6m?= Date: Wed, 20 Jan 2021 09:38:20 +0200 Subject: [PATCH 101/150] MDEV-24509 : Warning: Memory not freed: 56 on SET @@global.wsrep_sst_auth It seems that memory is not freed when updated value is NULL or when wsrep is not initialized before shutdown. 
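The fix amounts to a simple ownership rule for the credential string: every update frees the previous copy, an empty or NULL value leaves the pointer unset instead of keeping the stale allocation, and shutdown releases it unconditionally, whether or not wsrep was ever initialized. A minimal sketch of that rule, in plain C with standard strdup()/free() standing in for the server's my_strdup()/my_free() wrappers (the names below are illustrative, not the server's API):

#include <stdlib.h>
#include <string.h>

static char *sst_auth_value = NULL;   /* stands in for the global wsrep_sst_auth */

/* Assign a new value: always release the old copy; treat NULL or "" as "unset". */
static void sst_auth_set(const char *value)
{
  free(sst_auth_value);
  sst_auth_value = (value && *value) ? strdup(value) : NULL;
}

/* Called at shutdown, regardless of whether wsrep was initialized. */
static void sst_auth_cleanup(void)
{
  free(sst_auth_value);
  sst_auth_value = NULL;
}

In the patch itself this corresponds to sst_auth_real_set() freeing and NULL-ing wsrep_sst_auth when the new value is empty, and to wsrep_sst_auth_free() being called from the shutdown paths in sql/mysqld.cc.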
--- .../suite/galera/r/galera_var_sst_auth.result | 16 ++++++---- .../suite/galera/t/galera_var_sst_auth.test | 29 ++++++++++++++++--- sql/mysqld.cc | 4 +++ sql/wsrep_sst.cc | 6 ++++ 4 files changed, 46 insertions(+), 9 deletions(-) diff --git a/mysql-test/suite/galera/r/galera_var_sst_auth.result b/mysql-test/suite/galera/r/galera_var_sst_auth.result index 1db83197870..89a27dce4f6 100644 --- a/mysql-test/suite/galera/r/galera_var_sst_auth.result +++ b/mysql-test/suite/galera/r/galera_var_sst_auth.result @@ -1,6 +1,3 @@ -# -# MDEV-10492: Assertion failure on shutdown when wsrep_sst_auth set in config -# SELECT @@global.wsrep_sst_auth; @@global.wsrep_sst_auth ******** @@ -8,5 +5,14 @@ SET @@global.wsrep_sst_auth='foo:bar'; SELECT @@global.wsrep_sst_auth; @@global.wsrep_sst_auth ******** -disconnect node_2; -disconnect node_1; +connection node_2; +SET @@global.wsrep_sst_auth= 'abcdefghijklmnopqrstuvwxyz'; +SELECT @@global.wsrep_sst_auth; +@@global.wsrep_sst_auth +******** +Shutdown node_2 +connection node_1; +connection node_2; +SELECT @@global.wsrep_sst_auth; +@@global.wsrep_sst_auth +******** diff --git a/mysql-test/suite/galera/t/galera_var_sst_auth.test b/mysql-test/suite/galera/t/galera_var_sst_auth.test index 5c9b3f5a61e..ad7f46620ad 100644 --- a/mysql-test/suite/galera/t/galera_var_sst_auth.test +++ b/mysql-test/suite/galera/t/galera_var_sst_auth.test @@ -1,12 +1,33 @@ --source include/galera_cluster.inc --source include/have_innodb.inc ---echo # ---echo # MDEV-10492: Assertion failure on shutdown when wsrep_sst_auth set in config ---echo # +# +# MDEV-10492: Assertion failure on shutdown when wsrep_sst_auth set in config +# SELECT @@global.wsrep_sst_auth; SET @@global.wsrep_sst_auth='foo:bar'; SELECT @@global.wsrep_sst_auth; ---source include/galera_end.inc +# +# MDEV-24509 Warning: Memory not freed: 56 on SET @@global.wsrep_sst_auth +# +--connection node_2 +SET @@global.wsrep_sst_auth= 'abcdefghijklmnopqrstuvwxyz'; +SELECT @@global.wsrep_sst_auth; +--echo Shutdown node_2 +--source include/shutdown_mysqld.inc + +# On node_1, verify that the node has left the cluster. +--connection node_1 +--let $wait_condition = SELECT VARIABLE_VALUE = 1 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size'; +--source include/wait_condition.inc + +# Restart node_2 +--connection node_2 +--source include/start_mysqld.inc +--let $wait_condition = SELECT VARIABLE_VALUE = 2 FROM INFORMATION_SCHEMA.GLOBAL_STATUS WHERE VARIABLE_NAME = 'wsrep_cluster_size'; +--source include/wait_condition.inc +SELECT @@global.wsrep_sst_auth; + + diff --git a/sql/mysqld.cc b/sql/mysqld.cc index c0bf69b38b9..55ed5a6a680 100644 --- a/sql/mysqld.cc +++ b/sql/mysqld.cc @@ -2011,8 +2011,11 @@ static void __cdecl kill_server(int sig_ptr) close_connections(); +#ifdef WITH_WSREP if (wsrep_inited == 1) wsrep_deinit(true); + wsrep_sst_auth_free(); +#endif /* WITH_WSREP */ if (sig != MYSQL_KILL_SIGNAL && sig != 0) @@ -2132,6 +2135,7 @@ extern "C" void unireg_abort(int exit_code) /* In bootstrap mode we deinitialize wsrep here. 
*/ if (opt_bootstrap && wsrep_inited) wsrep_deinit(true); + wsrep_sst_auth_free(); } #endif // WITH_WSREP diff --git a/sql/wsrep_sst.cc b/sql/wsrep_sst.cc index 65a3f971c62..e12a26efbb6 100644 --- a/sql/wsrep_sst.cc +++ b/sql/wsrep_sst.cc @@ -149,6 +149,12 @@ static bool sst_auth_real_set (const char* value) if (wsrep_sst_auth) { my_free((void*) wsrep_sst_auth); } wsrep_sst_auth= my_strdup(WSREP_SST_AUTH_MASK, MYF(0)); } + else + { + if (wsrep_sst_auth) { my_free((void*) wsrep_sst_auth); } + wsrep_sst_auth= NULL; + } + return 0; } return 1; From c207f04eccf2a1f35e6bac2b8146f6c5e0643857 Mon Sep 17 00:00:00 2001 From: Oleksandr Byelkin Date: Wed, 13 Jan 2021 18:36:48 +0100 Subject: [PATCH 102/150] MDEV-21785: sequences used as default by other table not dumped in right order by mysqldump Dump sequences first. This patch was made to keep it small and to keep the number of queries to the server the same. The order of tables in a dump cannot be changed (except that sequences come first) because mysql_list_tables uses SHOW TABLES and I used SHOW FULL TABLES. --- client/mysqldump.c | 79 +++++++++++++++---- .../suite/sql_sequence/mysqldump.result | 42 ++++++++++ mysql-test/suite/sql_sequence/mysqldump.test | 11 +++ 3 files changed, 115 insertions(+), 17 deletions(-) diff --git a/client/mysqldump.c b/client/mysqldump.c index a964f96437d..e8601e5f7d5 100644 --- a/client/mysqldump.c +++ b/client/mysqldump.c @@ -42,6 +42,11 @@ /* on merge conflict, bump to a higher version again */ #define DUMP_VERSION "10.19" +/** + First mysql version supporting sequences. +*/ +#define FIRST_SEQUENCE_VERSION 100300 + #include #include #include @@ -92,6 +97,11 @@ /* Max length GTID position that we will output. */ #define MAX_GTID_LENGTH 1024 +/* Dump sequence/tables control */ +#define DUMP_TABLE_ALL -1 +#define DUMP_TABLE_TABLE 0 +#define DUMP_TABLE_SEQUENCE 1 + static my_bool ignore_table_data(const uchar *hash_key, size_t len); static void add_load_option(DYNAMIC_STRING *str, const char *option, const char *option_value); @@ -3876,14 +3886,6 @@ static void dump_table(const char *table, const char *db, const uchar *hash_key, MYSQL_ROW row; DBUG_ENTER("dump_table"); - /* - Check does table has a sequence structure and if has apply different sql queries - */ - if (check_if_ignore_table(table, table_type) & IGNORE_SEQUENCE_TABLE) - { - get_sequence_structure(table, db); - DBUG_VOID_RETURN; - } /* Make sure you get the create table info before the following check for --no-data flag below. Otherwise, the create table info won't be printed.
@@ -4368,18 +4370,36 @@ err: } /* dump_table */ -static char *getTableName(int reset) +static char *getTableName(int reset, int want_sequences) { MYSQL_ROW row; if (!get_table_name_result) { - if (!(get_table_name_result= mysql_list_tables(mysql,NullS))) - return(NULL); + if (mysql_get_server_version(mysql) >= FIRST_SEQUENCE_VERSION) + { + const char *query= "SHOW FULL TABLES"; + if (mysql_query_with_error_report(mysql, 0, query)) + return (NULL); + + if (!(get_table_name_result= mysql_store_result(mysql))) + return (NULL); + } + else + { + if (!(get_table_name_result= mysql_list_tables(mysql,NullS))) + return(NULL); + } } if ((row= mysql_fetch_row(get_table_name_result))) - return((char*) row[0]); + { + if (want_sequences != DUMP_TABLE_ALL) + while (row && MY_TEST(strcmp(row[1], "SEQUENCE")) == want_sequences) + row= mysql_fetch_row(get_table_name_result); + if (row) + return((char*) row[0]); + } if (reset) mysql_data_seek(get_table_name_result,0); /* We want to read again */ else @@ -5312,7 +5332,7 @@ static int dump_all_tables_in_db(char *database) { DYNAMIC_STRING query; init_dynamic_string_checked(&query, "LOCK TABLES ", 256, 1024); - for (numrows= 0 ; (table= getTableName(1)) ; ) + for (numrows= 0 ; (table= getTableName(1, DUMP_TABLE_ALL)) ; ) { char *end= strmov(afterdot, table); if (include_table((uchar*) hash_key,end - hash_key)) @@ -5346,7 +5366,19 @@ static int dump_all_tables_in_db(char *database) DBUG_RETURN(1); } } - while ((table= getTableName(0))) + + if (mysql_get_server_version(mysql) >= FIRST_SEQUENCE_VERSION && + !opt_no_create_info) + { + // First process sequences + while ((table= getTableName(1, DUMP_TABLE_SEQUENCE))) + { + char *end= strmov(afterdot, table); + if (include_table((uchar*) hash_key, end - hash_key)) + get_sequence_structure(table, database); + } + } + while ((table= getTableName(0, DUMP_TABLE_TABLE))) { char *end= strmov(afterdot, table); if (include_table((uchar*) hash_key, end - hash_key)) @@ -5495,7 +5527,7 @@ static my_bool dump_all_views_in_db(char *database) { DYNAMIC_STRING query; init_dynamic_string_checked(&query, "LOCK TABLES ", 256, 1024); - for (numrows= 0 ; (table= getTableName(1)); ) + for (numrows= 0 ; (table= getTableName(1, DUMP_TABLE_TABLE)); ) { char *end= strmov(afterdot, table); if (include_table((uchar*) hash_key,end - hash_key)) @@ -5518,7 +5550,7 @@ static my_bool dump_all_views_in_db(char *database) else verbose_msg("-- dump_all_views_in_db : logs flushed successfully!\n"); } - while ((table= getTableName(0))) + while ((table= getTableName(0, DUMP_TABLE_TABLE))) { char *end= strmov(afterdot, table); if (include_table((uchar*) hash_key, end - hash_key)) @@ -5648,7 +5680,7 @@ static int get_sys_var_lower_case_table_names() static int dump_selected_tables(char *db, char **table_names, int tables) { - char table_buff[NAME_LEN*2+3]; + char table_buff[NAME_LEN*2+3], table_type[NAME_LEN]; DYNAMIC_STRING lock_tables_query; char **dump_tables, **pos, **end; int lower_case_table_names; @@ -5745,9 +5777,22 @@ static int dump_selected_tables(char *db, char **table_names, int tables) DBUG_RETURN(1); } } + + if (mysql_get_server_version(mysql) >= FIRST_SEQUENCE_VERSION) + { + /* Dump Sequence first */ + for (pos= dump_tables; pos < end; pos++) + { + DBUG_PRINT("info",("Dumping sequence(?) 
%s", *pos)); + if (check_if_ignore_table(*pos, table_type) & IGNORE_SEQUENCE_TABLE) + get_sequence_structure(*pos, db); + } + } /* Dump each selected table */ for (pos= dump_tables; pos < end; pos++) { + if (check_if_ignore_table(*pos, table_type) & IGNORE_SEQUENCE_TABLE) + continue; DBUG_PRINT("info",("Dumping table %s", *pos)); dump_table(*pos, db, NULL, 0); if (opt_dump_triggers && diff --git a/mysql-test/suite/sql_sequence/mysqldump.result b/mysql-test/suite/sql_sequence/mysqldump.result index e6aedb57ea6..fb023cc5e36 100644 --- a/mysql-test/suite/sql_sequence/mysqldump.result +++ b/mysql-test/suite/sql_sequence/mysqldump.result @@ -2,8 +2,11 @@ CREATE SEQUENCE a1 engine=aria; CREATE TABLE t1(a INT, KEY (a)) KEY_BLOCK_SIZE=1024; insert into t1 values (1),(2); CREATE SEQUENCE x1 engine=innodb; +# dump whole database CREATE SEQUENCE `a1` start with 1 minvalue 1 maxvalue 9223372036854775806 increment by 1 cache 1000 nocycle ENGINE=Aria; SELECT SETVAL(`a1`, 1, 0); +CREATE SEQUENCE `x1` start with 1 minvalue 1 maxvalue 9223372036854775806 increment by 1 cache 1000 nocycle ENGINE=InnoDB; +SELECT SETVAL(`x1`, 1, 0); /*!40101 SET @saved_cs_client = @@character_set_client */; /*!40101 SET character_set_client = utf8 */; CREATE TABLE `t1` ( @@ -12,8 +15,47 @@ CREATE TABLE `t1` ( ) ENGINE=MyISAM DEFAULT CHARSET=latin1 KEY_BLOCK_SIZE=1024; /*!40101 SET character_set_client = @saved_cs_client */; INSERT INTO `t1` VALUES (1),(2); +# dump by tables order 1 +CREATE SEQUENCE `a1` start with 1 minvalue 1 maxvalue 9223372036854775806 increment by 1 cache 1000 nocycle ENGINE=Aria; +SELECT SETVAL(`a1`, 1, 0); CREATE SEQUENCE `x1` start with 1 minvalue 1 maxvalue 9223372036854775806 increment by 1 cache 1000 nocycle ENGINE=InnoDB; SELECT SETVAL(`x1`, 1, 0); +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE `t1` ( + `a` int(11) DEFAULT NULL, + KEY `a` (`a`) +) ENGINE=MyISAM DEFAULT CHARSET=latin1 KEY_BLOCK_SIZE=1024; +/*!40101 SET character_set_client = @saved_cs_client */; +INSERT INTO `t1` VALUES (1),(2); +# dump by tables order 2 +CREATE SEQUENCE `a1` start with 1 minvalue 1 maxvalue 9223372036854775806 increment by 1 cache 1000 nocycle ENGINE=Aria; +SELECT SETVAL(`a1`, 1, 0); +CREATE SEQUENCE `x1` start with 1 minvalue 1 maxvalue 9223372036854775806 increment by 1 cache 1000 nocycle ENGINE=InnoDB; +SELECT SETVAL(`x1`, 1, 0); +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE `t1` ( + `a` int(11) DEFAULT NULL, + KEY `a` (`a`) +) ENGINE=MyISAM DEFAULT CHARSET=latin1 KEY_BLOCK_SIZE=1024; +/*!40101 SET character_set_client = @saved_cs_client */; +INSERT INTO `t1` VALUES (1),(2); +# dump by tables only tables +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE `t1` ( + `a` int(11) DEFAULT NULL, + KEY `a` (`a`) +) ENGINE=MyISAM DEFAULT CHARSET=latin1 KEY_BLOCK_SIZE=1024; +/*!40101 SET character_set_client = @saved_cs_client */; +INSERT INTO `t1` VALUES (1),(2); +# dump by tables only sequences +CREATE SEQUENCE `a1` start with 1 minvalue 1 maxvalue 9223372036854775806 increment by 1 cache 1000 nocycle ENGINE=Aria; +SELECT SETVAL(`a1`, 1, 0); +CREATE SEQUENCE `x1` start with 1 minvalue 1 maxvalue 9223372036854775806 increment by 1 cache 1000 nocycle ENGINE=InnoDB; +SELECT SETVAL(`x1`, 1, 0); +# end of dumps DROP TABLE a1,t1,x1; set default_storage_engine=InnoDB; create sequence t1; diff --git 
a/mysql-test/suite/sql_sequence/mysqldump.test b/mysql-test/suite/sql_sequence/mysqldump.test index 308f06d5e8d..d2afb2fd675 100644 --- a/mysql-test/suite/sql_sequence/mysqldump.test +++ b/mysql-test/suite/sql_sequence/mysqldump.test @@ -11,7 +11,18 @@ CREATE SEQUENCE a1 engine=aria; CREATE TABLE t1(a INT, KEY (a)) KEY_BLOCK_SIZE=1024; insert into t1 values (1),(2); CREATE SEQUENCE x1 engine=innodb; +--echo # dump whole database --exec $MYSQL_DUMP --compact test +--echo # dump by tables order 1 +--exec $MYSQL_DUMP --compact --tables test t1 a1 x1 +--echo # dump by tables order 2 +--exec $MYSQL_DUMP --compact --tables test a1 t1 x1 +--echo # dump by tables only tables +--exec $MYSQL_DUMP --compact --tables test t1 +--echo # dump by tables only sequences +--exec $MYSQL_DUMP --compact --tables test a1 x1 +--echo # end of dumps + DROP TABLE a1,t1,x1; # From 1398160a719394cff3e7e4ee214f51375e8825a1 Mon Sep 17 00:00:00 2001 From: Aleksey Midenkov Date: Tue, 26 Jan 2021 14:41:23 +0300 Subject: [PATCH 103/150] MDEV-24522 Assertion `inited==NONE' fails upon UPDATE on versioned table with unique blob Cause: no table->update_handler cloned at the moment of vers_insert_history_row(). update_handler is needed because there can't be several inited indexes at once in the same handler. First index is inited by QUICK_RANGE_SELECT::reset(). Then when history row is inserted check_duplicate_long_entry_key() is done and it requires another index. --- mysql-test/suite/versioning/r/update.result | 16 ++++++++++++++ mysql-test/suite/versioning/t/update.test | 24 +++++++++++++++++++++ sql/handler.h | 2 +- sql/sql_update.cc | 2 ++ 4 files changed, 43 insertions(+), 1 deletion(-) diff --git a/mysql-test/suite/versioning/r/update.result b/mysql-test/suite/versioning/r/update.result index fbb9f541b06..da893432749 100644 --- a/mysql-test/suite/versioning/r/update.result +++ b/mysql-test/suite/versioning/r/update.result @@ -399,3 +399,19 @@ a check_row(row_start, row_end) 1 HISTORICAL ROW 1 CURRENT ROW drop tables t1, t2, t3; +# +# MDEV-24522 Assertion `inited==NONE' fails upon UPDATE on versioned table with unique blob + +create table t1 (a int, b int, c text, unique(c), key (b)) engine=myisam with system versioning; +insert into t1 values (1, 1, 'foo'), (2, 11, 'bar'); +update t1 set a = 3 where b <= 9; +update t1 set a = 3 where b <= 10; +drop table t1; +create table t1 (a int, b int, c text, unique(c), key (b)) engine=myisam with system versioning; +create table t2 (a int, b int, c text, unique(c), key (b)) engine=myisam with system versioning; +insert into t1 values (1, 1, 'foo'), (2, 11, 'bar'); +insert into t2 values (1, 1, 'foo'), (2, 11, 'bar'); +update t1 set a = 3 where b <= 9; +update t2 set a = 3 where b <= 9; +update t1, t2 set t1.a = 3, t2.a = 3 where t1.b <= 10 and t2.b <= 10 and t1.b = t2.b; +drop tables t1, t2; diff --git a/mysql-test/suite/versioning/t/update.test b/mysql-test/suite/versioning/t/update.test index 7f99e307942..47a56a71bd3 100644 --- a/mysql-test/suite/versioning/t/update.test +++ b/mysql-test/suite/versioning/t/update.test @@ -326,4 +326,28 @@ select *, check_row(row_start, row_end) from t2 for system_time all order by row # cleanup drop tables t1, t2, t3; +--echo # +--echo # MDEV-24522 Assertion `inited==NONE' fails upon UPDATE on versioned table with unique blob +--echo +create table t1 (a int, b int, c text, unique(c), key (b)) engine=myisam with system versioning; +insert into t1 values (1, 1, 'foo'), (2, 11, 'bar'); + +update t1 set a = 3 where b <= 9; +update t1 set a = 3 where 
b <= 10; + +# cleanup +drop table t1; + +create table t1 (a int, b int, c text, unique(c), key (b)) engine=myisam with system versioning; +create table t2 (a int, b int, c text, unique(c), key (b)) engine=myisam with system versioning; +insert into t1 values (1, 1, 'foo'), (2, 11, 'bar'); +insert into t2 values (1, 1, 'foo'), (2, 11, 'bar'); + +update t1 set a = 3 where b <= 9; +update t2 set a = 3 where b <= 9; +update t1, t2 set t1.a = 3, t2.a = 3 where t1.b <= 10 and t2.b <= 10 and t1.b = t2.b; + +# cleanup +drop tables t1, t2; + source suite/versioning/common_finish.inc; diff --git a/sql/handler.h b/sql/handler.h index 0c8be2154a9..5dd3f6f5c5e 100644 --- a/sql/handler.h +++ b/sql/handler.h @@ -3191,7 +3191,7 @@ public: { cached_table_flags= table_flags(); } - /* ha_ methods: pubilc wrappers for private virtual API */ + /* ha_ methods: public wrappers for private virtual API */ int ha_open(TABLE *table, const char *name, int mode, uint test_if_locked, MEM_ROOT *mem_root= 0, List *partitions_to_open=NULL); diff --git a/sql/sql_update.cc b/sql/sql_update.cc index 7230c9c5f60..d64a96f5070 100644 --- a/sql/sql_update.cc +++ b/sql/sql_update.cc @@ -1108,6 +1108,7 @@ update_begin: { store_record(table, record[2]); table->mark_columns_per_binlog_row_image(); + table->clone_handler_for_update(); error= vers_insert_history_row(table); restore_record(table, record[2]); if (unlikely(error)) @@ -2599,6 +2600,7 @@ error: if (has_vers_fields && table->versioned(VERS_TIMESTAMP)) { store_record(table, record[2]); + table->clone_handler_for_update(); if (unlikely(error= vers_insert_history_row(table))) { restore_record(table, record[2]); From 900a14754a64388a79e02b52cd32dea013189868 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jan=20Lindstr=C3=B6m?= Date: Tue, 26 Jan 2021 14:21:33 +0200 Subject: [PATCH 104/150] Fix wsrep.variables --- mysql-test/suite/wsrep/r/variables.result | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mysql-test/suite/wsrep/r/variables.result b/mysql-test/suite/wsrep/r/variables.result index 1c427b34d2b..9ef1b3290af 100644 --- a/mysql-test/suite/wsrep/r/variables.result +++ b/mysql-test/suite/wsrep/r/variables.result @@ -142,7 +142,7 @@ SELECT @@global.wsrep_sst_auth; SET @@global.wsrep_sst_auth= ''; SELECT @@global.wsrep_sst_auth; @@global.wsrep_sst_auth - +NULL SET @@global.wsrep_sst_auth= NULL; SELECT @@global.wsrep_sst_auth; @@global.wsrep_sst_auth From 21809f9a450df1bc44cef36377f96b516ac4a9ae Mon Sep 17 00:00:00 2001 From: Nikita Malyavin Date: Tue, 29 Dec 2020 13:38:16 +1000 Subject: [PATCH 105/150] MDEV-17556 Assertion `bitmap_is_set_all(&table->s->all_set)' failed The assertion failed in handler::ha_reset upon SELECT under READ UNCOMMITTED from a table with an index on a virtual column. This was a debug-only failure, though the problem is much wider: * MY_BITMAP is a structure containing my_bitmap_map, the latter is a raw bitmap. * read_set, write_set and vcol_set of TABLE are the pointers to MY_BITMAP * The rest of MY_BITMAPs are stored in TABLE and TABLE_SHARE * The pointers to the stored MY_BITMAPs, like orig_read_set etc, and sometimes all_set and tmp_set, are assigned to the pointers. * Sometimes tmp_use_all_columns is used to substitute the raw bitmap directly with all_set.bitmap * Sometimes even bitmaps are directly modified, like in TABLE::update_virtual_field(): bitmap_clear_all(&tmp_set) is called.
The last three bullets in the list, when used together (which is mostly always) make the program flow cumbersome and impossible to follow, notwithstanding the errors they cause, like this MDEV-17556, where tmp_set pointer was assigned to read_set, write_set and vcol_set, then its bitmap was substituted with all_set.bitmap by dbug_tmp_use_all_columns() call, and then bitmap_clear_all(&tmp_set) was applied to all this. To untangle this knot, the rule should be applied: * Never substitute bitmaps! This patch is about this. orig_*, all_set bitmaps are never substituted already. This patch changes the following function prototypes: * tmp_use_all_columns, dbug_tmp_use_all_columns to accept MY_BITMAP** and to return MY_BITMAP * instead of my_bitmap_map* * tmp_restore_column_map, dbug_tmp_restore_column_maps to accept MY_BITMAP* instead of my_bitmap_map* These functions now will substitute read_set/write_set/vcol_set directly, and won't touch underlying bitmaps. --- plugin/feedback/sender_thread.cc | 2 +- sql/field.cc | 9 ++- sql/ha_partition.cc | 11 ++-- sql/item.cc | 8 +-- sql/item_cmpfunc.cc | 8 +-- sql/key.cc | 9 ++- sql/log_event.cc | 6 +- sql/opt_range.cc | 33 +++++----- sql/partition_info.cc | 4 +- sql/protocol.cc | 6 +- sql/sql_handler.cc | 5 +- sql/sql_select.cc | 30 +++++----- sql/sql_select.h | 18 +++--- sql/sql_sequence.cc | 13 ++-- sql/sql_show.cc | 7 +-- sql/sql_statistics.cc | 5 +- sql/table.h | 38 ++++++------ storage/archive/ha_archive.cc | 4 +- storage/cassandra/ha_cassandra.cc | 42 ++++++------- storage/connect/ha_connect.cc | 23 ++++--- storage/csv/ha_tina.cc | 9 ++- storage/federated/ha_federated.cc | 19 +++--- storage/federatedx/ha_federatedx.cc | 19 +++--- storage/innobase/handler/ha_innodb.cc | 8 +-- storage/innobase/handler/handler0alter.cc | 4 +- .../mroonga/lib/mrn_debug_column_access.cpp | 4 +- .../mroonga/lib/mrn_debug_column_access.hpp | 2 +- storage/oqgraph/ha_oqgraph.cc | 10 ++-- storage/perfschema/pfs_engine_table.cc | 23 +++---- storage/rocksdb/ha_rocksdb.cc | 11 ++-- storage/rocksdb/rdb_datadic.cc | 6 +- storage/sequence/sequence.cc | 4 +- storage/sphinx/ha_sphinx.cc | 4 +- storage/spider/ha_spider.cc | 12 ++-- storage/spider/spd_db_conn.cc | 60 +++++++++---------- storage/spider/spd_db_mysql.cc | 17 +++--- storage/tokudb/ha_tokudb.cc | 16 ++--- 37 files changed, 244 insertions(+), 265 deletions(-) diff --git a/plugin/feedback/sender_thread.cc b/plugin/feedback/sender_thread.cc index 943443f60fe..9a8e7775d01 100644 --- a/plugin/feedback/sender_thread.cc +++ b/plugin/feedback/sender_thread.cc @@ -47,7 +47,7 @@ static int table_to_string(TABLE *table, String *result) res= table->file->ha_rnd_init(1); - dbug_tmp_use_all_columns(table, table->read_set); + dbug_tmp_use_all_columns(table, &table->read_set); while(!res && !table->file->ha_rnd_next(table->record[0])) { diff --git a/sql/field.cc b/sql/field.cc index 571f4bfb1e5..76d9a3ccf95 100644 --- a/sql/field.cc +++ b/sql/field.cc @@ -7976,11 +7976,10 @@ uint Field_varstring::get_key_image(uchar *buff, uint length, { String val; uint local_char_length; - my_bitmap_map *old_map; - old_map= dbug_tmp_use_all_columns(table, table->read_set); + MY_BITMAP *old_map= dbug_tmp_use_all_columns(table, &table->read_set); val_str(&val, &val); - dbug_tmp_restore_column_map(table->read_set, old_map); + dbug_tmp_restore_column_map(&table->read_set, old_map); local_char_length= val.charpos(length / field_charset->mbmaxlen); if (local_char_length < val.length()) @@ -11496,7 +11495,7 @@ key_map Field::get_possible_keys() bool 
Field::validate_value_in_record_with_warn(THD *thd, const uchar *record) { - my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->read_set); + MY_BITMAP *old_map= dbug_tmp_use_all_columns(table, &table->read_set); bool rc; if ((rc= validate_value_in_record(thd, record))) { @@ -11508,7 +11507,7 @@ bool Field::validate_value_in_record_with_warn(THD *thd, const uchar *record) ER_THD(thd, ER_INVALID_DEFAULT_VALUE_FOR_FIELD), ErrConvString(&tmp).ptr(), field_name.str); } - dbug_tmp_restore_column_map(table->read_set, old_map); + dbug_tmp_restore_column_map(&table->read_set, old_map); return rc; } diff --git a/sql/ha_partition.cc b/sql/ha_partition.cc index 88385900e5e..bf9d6af997f 100644 --- a/sql/ha_partition.cc +++ b/sql/ha_partition.cc @@ -4278,7 +4278,7 @@ int ha_partition::write_row(uchar * buf) int error; longlong func_value; bool have_auto_increment= table->next_number_field && buf == table->record[0]; - my_bitmap_map *old_map; + MY_BITMAP *old_map; THD *thd= ha_thd(); sql_mode_t saved_sql_mode= thd->variables.sql_mode; bool saved_auto_inc_field_not_null= table->auto_increment_field_not_null; @@ -4320,9 +4320,9 @@ int ha_partition::write_row(uchar * buf) } } - old_map= dbug_tmp_use_all_columns(table, table->read_set); + old_map= dbug_tmp_use_all_columns(table, &table->read_set); error= m_part_info->get_partition_id(m_part_info, &part_id, &func_value); - dbug_tmp_restore_column_map(table->read_set, old_map); + dbug_tmp_restore_column_map(&table->read_set, old_map); if (unlikely(error)) { m_part_info->err_value= func_value; @@ -11191,13 +11191,12 @@ int ha_partition::bulk_update_row(const uchar *old_data, const uchar *new_data, int error= 0; uint32 part_id; longlong func_value; - my_bitmap_map *old_map; DBUG_ENTER("ha_partition::bulk_update_row"); - old_map= dbug_tmp_use_all_columns(table, table->read_set); + MY_BITMAP *old_map= dbug_tmp_use_all_columns(table, &table->read_set); error= m_part_info->get_partition_id(m_part_info, &part_id, &func_value); - dbug_tmp_restore_column_map(table->read_set, old_map); + dbug_tmp_restore_column_map(&table->read_set, old_map); if (unlikely(error)) { m_part_info->err_value= func_value; diff --git a/sql/item.cc b/sql/item.cc index a9f34787f86..cd81aca7e37 100644 --- a/sql/item.cc +++ b/sql/item.cc @@ -1624,9 +1624,9 @@ int Item::save_in_field_no_warnings(Field *field, bool no_conversions) Sql_mode_save sql_mode(thd); thd->variables.sql_mode&= ~(MODE_NO_ZERO_IN_DATE | MODE_NO_ZERO_DATE); thd->variables.sql_mode|= MODE_INVALID_DATES; - my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->write_set); + MY_BITMAP *old_map= dbug_tmp_use_all_columns(table, &table->write_set); res= save_in_field(field, no_conversions); - dbug_tmp_restore_column_map(table->write_set, old_map); + dbug_tmp_restore_column_map(&table->write_set, old_map); return res; } @@ -6051,7 +6051,7 @@ bool Item_field::fix_fields(THD *thd, Item **reference) Field *from_field= (Field *)not_found_field; bool outer_fixed= false; SELECT_LEX *select= thd->lex->current_select; - + if (select && select->in_tvc) { my_error(ER_FIELD_REFERENCE_IN_TVC, MYF(0), full_name()); @@ -6947,7 +6947,7 @@ Item *Item_string::make_odbc_literal(THD *thd, const LEX_CSTRING *typestr) } -static int save_int_value_in_field (Field *field, longlong nr, +static int save_int_value_in_field (Field *field, longlong nr, bool null_value, bool unsigned_flag) { if (null_value) diff --git a/sql/item_cmpfunc.cc b/sql/item_cmpfunc.cc index df5521f2a1a..ebb1c480abb 100644 --- a/sql/item_cmpfunc.cc +++ 
b/sql/item_cmpfunc.cc @@ -345,13 +345,13 @@ static bool convert_const_to_int(THD *thd, Item_field *field_item, TABLE *table= field->table; Sql_mode_save sql_mode(thd); Check_level_instant_set check_level_save(thd, CHECK_FIELD_IGNORE); - my_bitmap_map *old_maps[2] = { NULL, NULL }; + MY_BITMAP *old_maps[2] = { NULL, NULL }; ulonglong UNINIT_VAR(orig_field_val); /* original field value if valid */ /* table->read_set may not be set if we come here from a CREATE TABLE */ if (table && table->read_set) dbug_tmp_use_all_columns(table, old_maps, - table->read_set, table->write_set); + &table->read_set, &table->write_set); /* For comparison purposes allow invalid dates like 2000-01-32 */ thd->variables.sql_mode= (thd->variables.sql_mode & ~MODE_NO_ZERO_DATE) | MODE_INVALID_DATES; @@ -392,7 +392,7 @@ static bool convert_const_to_int(THD *thd, Item_field *field_item, DBUG_ASSERT(!result); } if (table && table->read_set) - dbug_tmp_restore_column_maps(table->read_set, table->write_set, old_maps); + dbug_tmp_restore_column_maps(&table->read_set, &table->write_set, old_maps); } return result; } @@ -3101,7 +3101,7 @@ bool Item_func_decode_oracle::fix_length_and_dec() /* Aggregate all THEN and ELSE expression types and collations when string result - + @param THD - current thd @param start - an element in args to start aggregating from */ diff --git a/sql/key.cc b/sql/key.cc index adff6975631..6f0a1112497 100644 --- a/sql/key.cc +++ b/sql/key.cc @@ -244,14 +244,13 @@ void key_restore(uchar *to_record, const uchar *from_key, KEY *key_info, else if (key_part->key_part_flag & HA_VAR_LENGTH_PART) { Field *field= key_part->field; - my_bitmap_map *old_map; my_ptrdiff_t ptrdiff= to_record - field->table->record[0]; field->move_field_offset(ptrdiff); key_length-= HA_KEY_BLOB_LENGTH; length= MY_MIN(key_length, key_part->length); - old_map= dbug_tmp_use_all_columns(field->table, field->table->write_set); + MY_BITMAP *old_map= dbug_tmp_use_all_columns(field->table, &field->table->write_set); field->set_key_image(from_key, length); - dbug_tmp_restore_column_map(field->table->write_set, old_map); + dbug_tmp_restore_column_map(&field->table->write_set, old_map); from_key+= HA_KEY_BLOB_LENGTH; field->move_field_offset(-ptrdiff); } @@ -419,7 +418,7 @@ void field_unpack(String *to, Field *field, const uchar *rec, uint max_length, void key_unpack(String *to, TABLE *table, KEY *key) { - my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->read_set); + MY_BITMAP *old_map= dbug_tmp_use_all_columns(table, &table->read_set); DBUG_ENTER("key_unpack"); to->length(0); @@ -443,7 +442,7 @@ void key_unpack(String *to, TABLE *table, KEY *key) field_unpack(to, key_part->field, table->record[0], key_part->length, MY_TEST(key_part->key_part_flag & HA_PART_KEY_SEG)); } - dbug_tmp_restore_column_map(table->read_set, old_map); + dbug_tmp_restore_column_map(&table->read_set, old_map); DBUG_VOID_RETURN; } diff --git a/sql/log_event.cc b/sql/log_event.cc index e344fc8894f..d072402a00b 100644 --- a/sql/log_event.cc +++ b/sql/log_event.cc @@ -13699,11 +13699,11 @@ int Rows_log_event::update_sequence() /* This event come from a setval function executed on the master. 
Update the sequence next_number and round, like we do with setval() */ - my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, - table->read_set); + MY_BITMAP *old_map= dbug_tmp_use_all_columns(table, + &table->read_set); longlong nextval= table->field[NEXT_FIELD_NO]->val_int(); longlong round= table->field[ROUND_FIELD_NO]->val_int(); - dbug_tmp_restore_column_map(table->read_set, old_map); + dbug_tmp_restore_column_map(&table->read_set, old_map); return table->s->sequence->set_value(table, nextval, round, 0) > 0; } diff --git a/sql/opt_range.cc b/sql/opt_range.cc index 2204500e3b5..30c74799b6f 100644 --- a/sql/opt_range.cc +++ b/sql/opt_range.cc @@ -3264,8 +3264,7 @@ bool calculate_cond_selectivity_for_table(THD *thd, TABLE *table, Item **cond) void store_key_image_to_rec(Field *field, uchar *ptr, uint len) { - /* Do the same as print_key() does */ - my_bitmap_map *old_map; + /* Do the same as print_key() does */ if (field->real_maybe_null()) { @@ -3277,10 +3276,10 @@ void store_key_image_to_rec(Field *field, uchar *ptr, uint len) field->set_notnull(); ptr++; } - old_map= dbug_tmp_use_all_columns(field->table, - field->table->write_set); + MY_BITMAP *old_map= dbug_tmp_use_all_columns(field->table, + &field->table->write_set); field->set_key_image(ptr, len); - dbug_tmp_restore_column_map(field->table->write_set, old_map); + dbug_tmp_restore_column_map(&field->table->write_set, old_map); } #ifdef WITH_PARTITION_STORAGE_ENGINE @@ -3495,7 +3494,7 @@ bool prune_partitions(THD *thd, TABLE *table, Item *pprune_cond) PART_PRUNE_PARAM prune_param; MEM_ROOT alloc; RANGE_OPT_PARAM *range_par= &prune_param.range_param; - my_bitmap_map *old_sets[2]; + MY_BITMAP *old_sets[2]; prune_param.part_info= part_info; init_sql_alloc(&alloc, "prune_partitions", @@ -3512,7 +3511,7 @@ bool prune_partitions(THD *thd, TABLE *table, Item *pprune_cond) } dbug_tmp_use_all_columns(table, old_sets, - table->read_set, table->write_set); + &table->read_set, &table->write_set); range_par->thd= thd; range_par->table= table; /* range_par->cond doesn't need initialization */ @@ -3609,7 +3608,7 @@ all_used: retval= FALSE; // some partitions are used mark_all_partitions_as_used(prune_param.part_info); end: - dbug_tmp_restore_column_maps(table->read_set, table->write_set, old_sets); + dbug_tmp_restore_column_maps(&table->read_set, &table->write_set, old_sets); thd->no_errors=0; thd->mem_root= range_par->old_root; free_root(&alloc,MYF(0)); // Return memory & allocator @@ -14852,8 +14851,8 @@ static void print_sel_arg_key(Field *field, const uchar *key, String *out) { TABLE *table= field->table; - my_bitmap_map *old_sets[2]; - dbug_tmp_use_all_columns(table, old_sets, table->read_set, table->write_set); + MY_BITMAP *old_sets[2]; + dbug_tmp_use_all_columns(table, old_sets, &table->read_set, &table->write_set); if (field->real_maybe_null()) { @@ -14873,7 +14872,7 @@ print_sel_arg_key(Field *field, const uchar *key, String *out) field->val_str(out); end: - dbug_tmp_restore_column_maps(table->read_set, table->write_set, old_sets); + dbug_tmp_restore_column_maps(&table->read_set, &table->write_set, old_sets); } @@ -14968,9 +14967,9 @@ print_key(KEY_PART *key_part, const uchar *key, uint used_length) const uchar *key_end= key+used_length; uint store_length; TABLE *table= key_part->field->table; - my_bitmap_map *old_sets[2]; + MY_BITMAP *old_sets[2]; - dbug_tmp_use_all_columns(table, old_sets, table->read_set, table->write_set); + dbug_tmp_use_all_columns(table, old_sets, &table->read_set, &table->write_set); for (; key < key_end; 
key+=store_length, key_part++) { @@ -14997,7 +14996,7 @@ print_key(KEY_PART *key_part, const uchar *key, uint used_length) if (key+store_length < key_end) fputc('/',DBUG_FILE); } - dbug_tmp_restore_column_maps(table->read_set, table->write_set, old_sets); + dbug_tmp_restore_column_maps(&table->read_set, &table->write_set, old_sets); } @@ -15005,16 +15004,16 @@ static void print_quick(QUICK_SELECT_I *quick, const key_map *needed_reg) { char buf[MAX_KEY/8+1]; TABLE *table; - my_bitmap_map *old_sets[2]; + MY_BITMAP *old_sets[2]; DBUG_ENTER("print_quick"); if (!quick) DBUG_VOID_RETURN; DBUG_LOCK_FILE; table= quick->head; - dbug_tmp_use_all_columns(table, old_sets, table->read_set, table->write_set); + dbug_tmp_use_all_columns(table, old_sets, &table->read_set, &table->write_set); quick->dbug_dump(0, TRUE); - dbug_tmp_restore_column_maps(table->read_set, table->write_set, old_sets); + dbug_tmp_restore_column_maps(&table->read_set, &table->write_set, old_sets); fprintf(DBUG_FILE,"other_keys: 0x%s:\n", needed_reg->print(buf)); diff --git a/sql/partition_info.cc b/sql/partition_info.cc index 9f08964e62c..a8459438be7 100644 --- a/sql/partition_info.cc +++ b/sql/partition_info.cc @@ -1449,13 +1449,13 @@ void partition_info::print_no_partition_found(TABLE *table_arg, myf errflag) buf_ptr= (char*)"from column_list"; else { - my_bitmap_map *old_map= dbug_tmp_use_all_columns(table_arg, table_arg->read_set); + MY_BITMAP *old_map= dbug_tmp_use_all_columns(table_arg, &table_arg->read_set); if (part_expr->null_value) buf_ptr= (char*)"NULL"; else longlong10_to_str(err_value, buf, part_expr->unsigned_flag ? 10 : -10); - dbug_tmp_restore_column_map(table_arg->read_set, old_map); + dbug_tmp_restore_column_map(&table_arg->read_set, old_map); } my_error(ER_NO_PARTITION_FOR_GIVEN_VALUE, errflag, buf_ptr); } diff --git a/sql/protocol.cc b/sql/protocol.cc index 3f4e251403b..26cc686ad0a 100644 --- a/sql/protocol.cc +++ b/sql/protocol.cc @@ -1251,15 +1251,15 @@ bool Protocol_text::store(Field *field) CHARSET_INFO *tocs= this->thd->variables.character_set_results; #ifdef DBUG_ASSERT_EXISTS TABLE *table= field->table; - my_bitmap_map *old_map= 0; + MY_BITMAP *old_map= 0; if (table->file) - old_map= dbug_tmp_use_all_columns(table, table->read_set); + old_map= dbug_tmp_use_all_columns(table, &table->read_set); #endif field->val_str(&str); #ifdef DBUG_ASSERT_EXISTS if (old_map) - dbug_tmp_restore_column_map(table->read_set, old_map); + dbug_tmp_restore_column_map(&table->read_set, old_map); #endif return store_string_aux(str.ptr(), str.length(), str.charset(), tocs); diff --git a/sql/sql_handler.cc b/sql/sql_handler.cc index 9fdfd612c96..967b85496ac 100644 --- a/sql/sql_handler.cc +++ b/sql/sql_handler.cc @@ -689,7 +689,6 @@ mysql_ha_fix_cond_and_key(SQL_HANDLER *handler, for (keypart_map= key_len=0 ; (item=it_ke++) ; key_part++) { - my_bitmap_map *old_map; /* note that 'item' can be changed by fix_fields() call */ if (item->fix_fields_if_needed_for_scalar(thd, it_ke.ref())) return 1; @@ -701,9 +700,9 @@ mysql_ha_fix_cond_and_key(SQL_HANDLER *handler, } if (!in_prepare) { - old_map= dbug_tmp_use_all_columns(table, table->write_set); + MY_BITMAP *old_map= dbug_tmp_use_all_columns(table, &table->write_set); (void) item->save_in_field(key_part->field, 1); - dbug_tmp_restore_column_map(table->write_set, old_map); + dbug_tmp_restore_column_map(&table->write_set, old_map); } key_len+= key_part->store_length; keypart_map= (keypart_map << 1) | 1; diff --git a/sql/sql_select.cc b/sql/sql_select.cc index 8baac124b71..7e084cafa05 
100644 --- a/sql/sql_select.cc +++ b/sql/sql_select.cc @@ -1770,7 +1770,7 @@ JOIN::optimize_inner() join->optimization_state == JOIN::OPTIMIZATION_PHASE_1_DONE && join->with_two_phase_optimization) continue; - /* + /* Do not push conditions from where into materialized inner tables of outer joins: this is not valid. */ @@ -1981,7 +1981,7 @@ setup_subq_exit: if (with_two_phase_optimization) optimization_state= JOIN::OPTIMIZATION_PHASE_1_DONE; else - { + { if (optimize_stage2()) DBUG_RETURN(1); } @@ -2001,11 +2001,11 @@ int JOIN::optimize_stage2() if (unlikely(thd->check_killed())) DBUG_RETURN(1); - + /* Generate an execution plan from the found optimal join order. */ if (get_best_combination()) DBUG_RETURN(1); - + if (select_lex->handle_derived(thd->lex, DT_OPTIMIZE)) DBUG_RETURN(1); @@ -3625,7 +3625,7 @@ bool JOIN::setup_subquery_caches() if (tmp_having) { DBUG_ASSERT(having == NULL); - if (!(tmp_having= + if (!(tmp_having= tmp_having->transform(thd, &Item::expr_cache_insert_transformer, NULL))) @@ -6476,7 +6476,7 @@ update_ref_and_keys(THD *thd, DYNAMIC_ARRAY *keyuse,JOIN_TAB *join_tab, Special treatment for ft-keys. */ -bool sort_and_filter_keyuse(THD *thd, DYNAMIC_ARRAY *keyuse, +bool sort_and_filter_keyuse(THD *thd, DYNAMIC_ARRAY *keyuse, bool skip_unprefixed_keyparts) { KEYUSE key_end, *prev, *save_pos, *use; @@ -7499,7 +7499,7 @@ best_access_path(JOIN *join, pos->loosescan_picker.loosescan_key= MAX_KEY; pos->use_join_buffer= best_uses_jbuf; pos->spl_plan= spl_plan; - + loose_scan_opt.save_to_position(s, loose_scan_pos); if (!best_key && @@ -9592,7 +9592,7 @@ bool JOIN::check_two_phase_optimization(THD *thd) return true; return false; } - + bool JOIN::inject_cond_into_where(Item *injected_cond) { @@ -9623,7 +9623,7 @@ bool JOIN::inject_cond_into_where(Item *injected_cond) and_args->push_back(elem, thd->mem_root); } } - + return false; } @@ -17252,7 +17252,7 @@ Field *create_tmp_field(THD *thd, TABLE *table,Item *item, Item::Type type, item->maybe_null= save_maybe_null; result->field_name= orig_item->name; } - } + } else if (table_cant_handle_bit_fields && field->field->type() == MYSQL_TYPE_BIT) { @@ -23314,7 +23314,7 @@ bool cp_buffer_from_ref(THD *thd, TABLE *table, TABLE_REF *ref) { Check_level_instant_set check_level_save(thd, CHECK_FIELD_IGNORE); - my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->write_set); + MY_BITMAP *old_map= dbug_tmp_use_all_columns(table, &table->write_set); bool result= 0; for (store_key **copy=ref->key_copy ; *copy ; copy++) @@ -23325,7 +23325,7 @@ cp_buffer_from_ref(THD *thd, TABLE *table, TABLE_REF *ref) break; } } - dbug_tmp_restore_column_map(table->write_set, old_map); + dbug_tmp_restore_column_map(&table->write_set, old_map); return result; } @@ -24997,7 +24997,7 @@ bool JOIN::rollup_init() { if (!(rollup.null_items[i]= new (thd->mem_root) Item_null_result(thd))) return true; - + List *rollup_fields= &rollup.fields[i]; rollup_fields->empty(); rollup.ref_pointer_arrays[i]= Ref_ptr_array(ref_array, all_fields.elements); @@ -25505,7 +25505,7 @@ bool JOIN_TAB::save_explain_data(Explain_table_access *eta, { JOIN_TAB *ctab= bush_children->start; /* table */ - size_t len= my_snprintf(table_name_buffer, + size_t len= my_snprintf(table_name_buffer, sizeof(table_name_buffer)-1, "", ctab->emb_sj_nest->sj_subq_pred->get_identifier()); @@ -26631,7 +26631,7 @@ void TABLE_LIST::print(THD *thd, table_map eliminated_tables, String *str, void st_select_lex::print(THD *thd, String *str, enum_query_type query_type) { DBUG_ASSERT(thd); - + if (tvc) { 
tvc->print(thd, str, query_type); diff --git a/sql/sql_select.h b/sql/sql_select.h index d207363a9ba..06cc86b5710 100644 --- a/sql/sql_select.h +++ b/sql/sql_select.h @@ -1920,8 +1920,8 @@ class store_key_field: public store_key enum store_key_result copy_inner() { TABLE *table= copy_field.to_field->table; - my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, - table->write_set); + MY_BITMAP *old_map= dbug_tmp_use_all_columns(table, + &table->write_set); /* It looks like the next statement is needed only for a simplified @@ -1932,7 +1932,7 @@ class store_key_field: public store_key bzero(copy_field.to_ptr,copy_field.to_length); copy_field.do_copy(©_field); - dbug_tmp_restore_column_map(table->write_set, old_map); + dbug_tmp_restore_column_map(&table->write_set, old_map); null_key= to_field->is_null(); return err != 0 ? STORE_KEY_FATAL : STORE_KEY_OK; } @@ -1967,8 +1967,8 @@ public: enum store_key_result copy_inner() { TABLE *table= to_field->table; - my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, - table->write_set); + MY_BITMAP *old_map= dbug_tmp_use_all_columns(table, + &table->write_set); int res= FALSE; /* @@ -1989,7 +1989,7 @@ public: */ if (!res && table->in_use->is_error()) res= 1; /* STORE_KEY_FATAL */ - dbug_tmp_restore_column_map(table->write_set, old_map); + dbug_tmp_restore_column_map(&table->write_set, old_map); null_key= to_field->is_null() || item->null_value; return ((err != 0 || res < 0 || res > 2) ? STORE_KEY_FATAL : (store_key_result) res); @@ -2025,8 +2025,8 @@ protected: { inited=1; TABLE *table= to_field->table; - my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, - table->write_set); + MY_BITMAP *old_map= dbug_tmp_use_all_columns(table, + &table->write_set); if ((res= item->save_in_field(to_field, 1))) { if (!err) @@ -2038,7 +2038,7 @@ protected: */ if (!err && to_field->table->in_use->is_error()) err= 1; /* STORE_KEY_FATAL */ - dbug_tmp_restore_column_map(table->write_set, old_map); + dbug_tmp_restore_column_map(&table->write_set, old_map); } null_key= to_field->is_null() || item->null_value; return (err > 2 ? 
STORE_KEY_FATAL : (store_key_result) err); diff --git a/sql/sql_sequence.cc b/sql/sql_sequence.cc index ffdb4b54c16..11125c0a619 100644 --- a/sql/sql_sequence.cc +++ b/sql/sql_sequence.cc @@ -136,7 +136,7 @@ bool sequence_definition::check_and_adjust(bool set_reserved_until) void sequence_definition::read_fields(TABLE *table) { - my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->read_set); + MY_BITMAP *old_map= dbug_tmp_use_all_columns(table, &table->read_set); reserved_until= table->field[0]->val_int(); min_value= table->field[1]->val_int(); max_value= table->field[2]->val_int(); @@ -145,7 +145,7 @@ void sequence_definition::read_fields(TABLE *table) cache= table->field[5]->val_int(); cycle= table->field[6]->val_int(); round= table->field[7]->val_int(); - dbug_tmp_restore_column_map(table->read_set, old_map); + dbug_tmp_restore_column_map(&table->read_set, old_map); used_fields= ~(uint) 0; print_dbug(); } @@ -157,7 +157,7 @@ void sequence_definition::read_fields(TABLE *table) void sequence_definition::store_fields(TABLE *table) { - my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->write_set); + MY_BITMAP *old_map= dbug_tmp_use_all_columns(table, &table->write_set); /* zero possible delete markers & null bits */ memcpy(table->record[0], table->s->default_values, table->s->null_bytes); @@ -170,7 +170,7 @@ void sequence_definition::store_fields(TABLE *table) table->field[6]->store((longlong) cycle != 0, 0); table->field[7]->store((longlong) round, 1); - dbug_tmp_restore_column_map(table->write_set, old_map); + dbug_tmp_restore_column_map(&table->write_set, old_map); print_dbug(); } @@ -527,12 +527,11 @@ int SEQUENCE::read_initial_values(TABLE *table) int SEQUENCE::read_stored_values(TABLE *table) { int error; - my_bitmap_map *save_read_set; DBUG_ENTER("SEQUENCE::read_stored_values"); - save_read_set= tmp_use_all_columns(table, table->read_set); + MY_BITMAP *save_read_set= tmp_use_all_columns(table, &table->read_set); error= table->file->ha_read_first_row(table->record[0], MAX_KEY); - tmp_restore_column_map(table->read_set, save_read_set); + tmp_restore_column_map(&table->read_set, save_read_set); if (unlikely(error)) { diff --git a/sql/sql_show.cc b/sql/sql_show.cc index 9a5141fa414..2bb52788dcd 100644 --- a/sql/sql_show.cc +++ b/sql/sql_show.cc @@ -2139,7 +2139,6 @@ int show_create_table(THD *thd, TABLE_LIST *table_list, String *packet, !foreign_db_mode; bool check_options= !(sql_mode & MODE_IGNORE_BAD_TABLE_OPTIONS) && !create_info_arg; - my_bitmap_map *old_map; handlerton *hton; int error= 0; DBUG_ENTER("show_create_table"); @@ -2206,7 +2205,7 @@ int show_create_table(THD *thd, TABLE_LIST *table_list, String *packet, We have to restore the read_set if we are called from insert in case of row based replication. */ - old_map= tmp_use_all_columns(table, table->read_set); + MY_BITMAP *old_map= tmp_use_all_columns(table, &table->read_set); bool not_the_first_field= false; for (ptr=table->field ; (field= *ptr); ptr++) @@ -2492,7 +2491,7 @@ int show_create_table(THD *thd, TABLE_LIST *table_list, String *packet, } } #endif - tmp_restore_column_map(table->read_set, old_map); + tmp_restore_column_map(&table->read_set, old_map); DBUG_RETURN(error); } @@ -5824,7 +5823,7 @@ static bool print_anchor_data_type(const Spvar_definition *def, Let's print it according to the current sql_mode. 
It will make output in line with the value in mysql.proc.param_list, so both I_S.XXX.DTD_IDENTIFIER and mysql.proc.param_list use the same notation: - default or Oracle, according to the sql_mode at the SP creation time. + default or Oracle, according to the sql_mode at the SP creation time. The caller must make sure to set thd->variables.sql_mode to the routine sql_mode. */ static bool print_anchor_dtd_identifier(THD *thd, const Spvar_definition *def, diff --git a/sql/sql_statistics.cc b/sql/sql_statistics.cc index a84e2449a55..13532f5f171 100644 --- a/sql/sql_statistics.cc +++ b/sql/sql_statistics.cc @@ -1043,9 +1043,8 @@ public: { char buff[MAX_FIELD_WIDTH]; String val(buff, sizeof(buff), &my_charset_bin); - my_bitmap_map *old_map; - old_map= dbug_tmp_use_all_columns(stat_table, stat_table->read_set); + MY_BITMAP *old_map= dbug_tmp_use_all_columns(stat_table, &stat_table->read_set); for (uint i= COLUMN_STAT_MIN_VALUE; i <= COLUMN_STAT_HISTOGRAM; i++) { Field *stat_field= stat_table->field[i]; @@ -1103,7 +1102,7 @@ public: } } } - dbug_tmp_restore_column_map(stat_table->read_set, old_map); + dbug_tmp_restore_column_map(&stat_table->read_set, old_map); } diff --git a/sql/table.h b/sql/table.h index 1134610284b..bcad713b06c 100644 --- a/sql/table.h +++ b/sql/table.h @@ -2987,25 +2987,25 @@ typedef struct st_open_table_list{ } OPEN_TABLE_LIST; -static inline my_bitmap_map *tmp_use_all_columns(TABLE *table, - MY_BITMAP *bitmap) +static inline MY_BITMAP *tmp_use_all_columns(TABLE *table, + MY_BITMAP **bitmap) { - my_bitmap_map *old= bitmap->bitmap; - bitmap->bitmap= table->s->all_set.bitmap; + MY_BITMAP *old= *bitmap; + *bitmap= &table->s->all_set; return old; } -static inline void tmp_restore_column_map(MY_BITMAP *bitmap, - my_bitmap_map *old) +static inline void tmp_restore_column_map(MY_BITMAP **bitmap, + MY_BITMAP *old) { - bitmap->bitmap= old; + *bitmap= old; } /* The following is only needed for debugging */ -static inline my_bitmap_map *dbug_tmp_use_all_columns(TABLE *table, - MY_BITMAP *bitmap) +static inline MY_BITMAP *dbug_tmp_use_all_columns(TABLE *table, + MY_BITMAP **bitmap) { #ifdef DBUG_ASSERT_EXISTS return tmp_use_all_columns(table, bitmap); @@ -3014,8 +3014,8 @@ static inline my_bitmap_map *dbug_tmp_use_all_columns(TABLE *table, #endif } -static inline void dbug_tmp_restore_column_map(MY_BITMAP *bitmap, - my_bitmap_map *old) +static inline void dbug_tmp_restore_column_map(MY_BITMAP **bitmap, + MY_BITMAP *old) { #ifdef DBUG_ASSERT_EXISTS tmp_restore_column_map(bitmap, old); @@ -3028,22 +3028,22 @@ static inline void dbug_tmp_restore_column_map(MY_BITMAP *bitmap, Provide for the possiblity of the read set being the same as the write set */ static inline void dbug_tmp_use_all_columns(TABLE *table, - my_bitmap_map **save, - MY_BITMAP *read_set, - MY_BITMAP *write_set) + MY_BITMAP **save, + MY_BITMAP **read_set, + MY_BITMAP **write_set) { #ifdef DBUG_ASSERT_EXISTS - save[0]= read_set->bitmap; - save[1]= write_set->bitmap; + save[0]= *read_set; + save[1]= *write_set; (void) tmp_use_all_columns(table, read_set); (void) tmp_use_all_columns(table, write_set); #endif } -static inline void dbug_tmp_restore_column_maps(MY_BITMAP *read_set, - MY_BITMAP *write_set, - my_bitmap_map **old) +static inline void dbug_tmp_restore_column_maps(MY_BITMAP **read_set, + MY_BITMAP **write_set, + MY_BITMAP **old) { #ifdef DBUG_ASSERT_EXISTS tmp_restore_column_map(read_set, old[0]); diff --git a/storage/archive/ha_archive.cc b/storage/archive/ha_archive.cc index c25a1c00c87..f540bbaa4f1 100644 --- 
a/storage/archive/ha_archive.cc +++ b/storage/archive/ha_archive.cc @@ -1547,7 +1547,7 @@ int ha_archive::optimize(THD* thd, HA_CHECK_OPT* check_opt) share->rows_recorded= 0; stats.auto_increment_value= 1; share->archive_write.auto_increment= 0; - my_bitmap_map *org_bitmap= tmp_use_all_columns(table, table->read_set); + MY_BITMAP *org_bitmap= tmp_use_all_columns(table, &table->read_set); while (!(rc= get_row(&archive, table->record[0]))) { @@ -1568,7 +1568,7 @@ int ha_archive::optimize(THD* thd, HA_CHECK_OPT* check_opt) } } - tmp_restore_column_map(table->read_set, org_bitmap); + tmp_restore_column_map(&table->read_set, org_bitmap); share->rows_recorded= (ha_rows)writer.rows; } diff --git a/storage/cassandra/ha_cassandra.cc b/storage/cassandra/ha_cassandra.cc index 4f9e207235f..56af4bfd40a 100644 --- a/storage/cassandra/ha_cassandra.cc +++ b/storage/cassandra/ha_cassandra.cc @@ -1641,18 +1641,18 @@ int ha_cassandra::index_read_map(uchar *buf, const uchar *key, char *cass_key; int cass_key_len; - my_bitmap_map *old_map; + MY_BITMAP *old_map; - old_map= dbug_tmp_use_all_columns(table, table->read_set); + old_map= dbug_tmp_use_all_columns(table, &table->read_set); if (rowkey_converter->mariadb_to_cassandra(&cass_key, &cass_key_len)) { /* We get here when making lookups like uuid_column='not-an-uuid' */ - dbug_tmp_restore_column_map(table->read_set, old_map); + dbug_tmp_restore_column_map(&table->read_set, old_map); DBUG_RETURN(HA_ERR_KEY_NOT_FOUND); } - dbug_tmp_restore_column_map(table->read_set, old_map); + dbug_tmp_restore_column_map(&table->read_set, old_map); bool found; if (se->get_slice(cass_key, cass_key_len, &found)) @@ -1726,8 +1726,8 @@ int ha_cassandra::read_cassandra_columns(bool unpack_pk) cassandra_to_mariadb() calls will use field->store(...) 
methods, which require that the column is in the table->write_set */ - my_bitmap_map *old_map; - old_map= dbug_tmp_use_all_columns(table, table->write_set); + MY_BITMAP *old_map; + old_map= dbug_tmp_use_all_columns(table, &table->write_set); /* Start with all fields being NULL */ for (field= table->field + 1; *field; field++) @@ -1848,7 +1848,7 @@ int ha_cassandra::read_cassandra_columns(bool unpack_pk) } err: - dbug_tmp_restore_column_map(table->write_set, old_map); + dbug_tmp_restore_column_map(&table->write_set, old_map); return res; } @@ -1933,7 +1933,7 @@ void ha_cassandra::free_dynamic_row(DYNAMIC_COLUMN_VALUE **vals, int ha_cassandra::write_row(uchar *buf) { - my_bitmap_map *old_map; + MY_BITMAP *old_map; int ires; DBUG_ENTER("ha_cassandra::write_row"); @@ -1943,7 +1943,7 @@ int ha_cassandra::write_row(uchar *buf) if (!doing_insert_batch) se->clear_insert_buffer(); - old_map= dbug_tmp_use_all_columns(table, table->read_set); + old_map= dbug_tmp_use_all_columns(table, &table->read_set); insert_lineno++; @@ -1954,7 +1954,7 @@ int ha_cassandra::write_row(uchar *buf) { my_error(ER_WARN_DATA_OUT_OF_RANGE, MYF(0), rowkey_converter->field->field_name.str, insert_lineno); - dbug_tmp_restore_column_map(table->read_set, old_map); + dbug_tmp_restore_column_map(&table->read_set, old_map); DBUG_RETURN(HA_ERR_INTERNAL_ERROR); } se->start_row_insert(cass_key, cass_key_len); @@ -1977,7 +1977,7 @@ int ha_cassandra::write_row(uchar *buf) free_dynamic_row(&vals, &names); if (rc) { - dbug_tmp_restore_column_map(table->read_set, old_map); + dbug_tmp_restore_column_map(&table->read_set, old_map); DBUG_RETURN(rc); } } @@ -1988,7 +1988,7 @@ int ha_cassandra::write_row(uchar *buf) { my_error(ER_WARN_DATA_OUT_OF_RANGE, MYF(0), field_converters[i]->field->field_name.str, insert_lineno); - dbug_tmp_restore_column_map(table->read_set, old_map); + dbug_tmp_restore_column_map(&table->read_set, old_map); DBUG_RETURN(HA_ERR_INTERNAL_ERROR); } se->add_insert_column(field_converters[i]->field->field_name.str, 0, @@ -1996,7 +1996,7 @@ int ha_cassandra::write_row(uchar *buf) } } - dbug_tmp_restore_column_map(table->read_set, old_map); + dbug_tmp_restore_column_map(&table->read_set, old_map); bool res; @@ -2263,8 +2263,8 @@ bool ha_cassandra::mrr_start_read() { uint key_len; - my_bitmap_map *old_map; - old_map= dbug_tmp_use_all_columns(table, table->read_set); + MY_BITMAP *old_map; + old_map= dbug_tmp_use_all_columns(table, &table->read_set); se->new_lookup_keys(); @@ -2288,7 +2288,7 @@ bool ha_cassandra::mrr_start_read() break; } - dbug_tmp_restore_column_map(table->read_set, old_map); + dbug_tmp_restore_column_map(&table->read_set, old_map); return se->multiget_slice(); } @@ -2366,7 +2366,7 @@ int ha_cassandra::update_row(const uchar *old_data, const uchar *new_data) LEX_STRING *oldnames, *names; uint oldcount, count; String oldvalcol, valcol; - my_bitmap_map *old_map; + MY_BITMAP *old_map; int res; DBUG_ENTER("ha_cassandra::update_row"); /* Currently, it is guaranteed that new_data == table->record[0] */ @@ -2374,7 +2374,7 @@ int ha_cassandra::update_row(const uchar *old_data, const uchar *new_data) /* For now, just rewrite the full record */ se->clear_insert_buffer(); - old_map= dbug_tmp_use_all_columns(table, table->read_set); + old_map= dbug_tmp_use_all_columns(table, &table->read_set); char *old_key; int old_key_len; @@ -2387,7 +2387,7 @@ int ha_cassandra::update_row(const uchar *old_data, const uchar *new_data) { my_error(ER_WARN_DATA_OUT_OF_RANGE, MYF(0), rowkey_converter->field->field_name.str, insert_lineno); 
- dbug_tmp_restore_column_map(table->read_set, old_map); + dbug_tmp_restore_column_map(&table->read_set, old_map); DBUG_RETURN(HA_ERR_INTERNAL_ERROR); } @@ -2450,7 +2450,7 @@ int ha_cassandra::update_row(const uchar *old_data, const uchar *new_data) { my_error(ER_WARN_DATA_OUT_OF_RANGE, MYF(0), field_converters[i]->field->field_name.str, insert_lineno); - dbug_tmp_restore_column_map(table->read_set, old_map); + dbug_tmp_restore_column_map(&table->read_set, old_map); DBUG_RETURN(HA_ERR_INTERNAL_ERROR); } se->add_insert_column(field_converters[i]->field->field_name.str, 0, @@ -2477,7 +2477,7 @@ int ha_cassandra::update_row(const uchar *old_data, const uchar *new_data) } } - dbug_tmp_restore_column_map(table->read_set, old_map); + dbug_tmp_restore_column_map(&table->read_set, old_map); res= se->do_insert(); diff --git a/storage/connect/ha_connect.cc b/storage/connect/ha_connect.cc index 74951090787..f7117af030d 100644 --- a/storage/connect/ha_connect.cc +++ b/storage/connect/ha_connect.cc @@ -1387,7 +1387,7 @@ PCSZ ha_connect::GetStringOption(PCSZ opname, PCSZ sdef) PTOS options= GetTableOptionStruct(); if (!stricmp(opname, "Connect")) { - LEX_CSTRING cnc= (tshp) ? tshp->connect_string + LEX_CSTRING cnc= (tshp) ? tshp->connect_string : table->s->connect_string; if (cnc.length) @@ -2157,7 +2157,6 @@ int ha_connect::MakeRecord(char *buf) int rc= 0; Field* *field; Field *fp; - my_bitmap_map *org_bitmap; CHARSET_INFO *charset= tdbp->data_charset(); //MY_BITMAP readmap; MY_BITMAP *map; @@ -2172,7 +2171,7 @@ int ha_connect::MakeRecord(char *buf) *table->def_read_set.bitmap, *table->def_write_set.bitmap); // Avoid asserts in field::store() for columns that are not updated - org_bitmap= dbug_tmp_use_all_columns(table, table->write_set); + MY_BITMAP *org_bitmap= dbug_tmp_use_all_columns(table, &table->write_set); // This is for variable_length rows memset(buf, 0, table->s->null_bytes); @@ -2199,7 +2198,7 @@ int ha_connect::MakeRecord(char *buf) continue; htrc("Column %s not found\n", fp->field_name.str); - dbug_tmp_restore_column_map(table->write_set, org_bitmap); + dbug_tmp_restore_column_map(&table->write_set, org_bitmap); DBUG_RETURN(HA_ERR_WRONG_IN_RECORD); } // endif colp @@ -2259,7 +2258,7 @@ int ha_connect::MakeRecord(char *buf) sprintf(buf, "Out of range value %.140s for column '%s' at row %ld", value->GetCharString(val), - fp->field_name.str, + fp->field_name.str, thd->get_stmt_da()->current_row_for_warning()); push_warning(thd, Sql_condition::WARN_LEVEL_WARN, 0, buf); @@ -2282,7 +2281,7 @@ int ha_connect::MakeRecord(char *buf) memcpy(buf, table->record[0], table->s->stored_rec_length); // This is copied from ha_tina and is necessary to avoid asserts - dbug_tmp_restore_column_map(table->write_set, org_bitmap); + dbug_tmp_restore_column_map(&table->write_set, org_bitmap); DBUG_RETURN(rc); } // end of MakeRecord @@ -2302,7 +2301,7 @@ int ha_connect::ScanRecord(PGLOBAL g, const uchar *) //PTDBASE tp= (PTDBASE)tdbp; String attribute(attr_buffer, sizeof(attr_buffer), table->s->table_charset); - my_bitmap_map *bmap= dbug_tmp_use_all_columns(table, table->read_set); + MY_BITMAP *bmap= dbug_tmp_use_all_columns(table, &table->read_set); const CHARSET_INFO *charset= tdbp->data_charset(); String data_charset_value(data_buffer, sizeof(data_buffer), charset); @@ -2424,7 +2423,7 @@ int ha_connect::ScanRecord(PGLOBAL g, const uchar *) } // endfor field err: - dbug_tmp_restore_column_map(table->read_set, bmap); + dbug_tmp_restore_column_map(&table->read_set, bmap); return rc; } // end of ScanRecord @@ 
-2472,7 +2471,7 @@ bool ha_connect::MakeKeyWhere(PGLOBAL g, PSTRG qry, OPVAL vop, char q, OPVAL op; Field *fp; const key_range *ranges[2]; - my_bitmap_map *old_map; + MY_BITMAP *old_map; KEY *kfp; KEY_PART_INFO *kpart; @@ -2489,7 +2488,7 @@ bool ha_connect::MakeKeyWhere(PGLOBAL g, PSTRG qry, OPVAL vop, char q, both= ranges[0] && ranges[1]; kfp= &table->key_info[active_index]; - old_map= dbug_tmp_use_all_columns(table, table->write_set); + old_map= dbug_tmp_use_all_columns(table, &table->write_set); for (i= 0; i <= 1; i++) { if (ranges[i] == NULL) @@ -2584,11 +2583,11 @@ bool ha_connect::MakeKeyWhere(PGLOBAL g, PSTRG qry, OPVAL vop, char q, if ((oom= qry->IsTruncated())) strcpy(g->Message, "Out of memory"); - dbug_tmp_restore_column_map(table->write_set, old_map); + dbug_tmp_restore_column_map(&table->write_set, old_map); return oom; err: - dbug_tmp_restore_column_map(table->write_set, old_map); + dbug_tmp_restore_column_map(&table->write_set, old_map); return true; } // end of MakeKeyWhere diff --git a/storage/csv/ha_tina.cc b/storage/csv/ha_tina.cc index 3a9bbc2766f..86b9d853424 100644 --- a/storage/csv/ha_tina.cc +++ b/storage/csv/ha_tina.cc @@ -528,7 +528,7 @@ int ha_tina::encode_quote(const uchar *buf) String attribute(attribute_buffer, sizeof(attribute_buffer), &my_charset_bin); bool ietf_quotes= table_share->option_struct->ietf_quotes; - my_bitmap_map *org_bitmap= dbug_tmp_use_all_columns(table, table->read_set); + MY_BITMAP *org_bitmap= dbug_tmp_use_all_columns(table, &table->read_set); buffer.length(0); for (Field **field=table->field ; *field ; field++) @@ -606,7 +606,7 @@ int ha_tina::encode_quote(const uchar *buf) //buffer.replace(buffer.length(), 0, "\n", 1); - dbug_tmp_restore_column_map(table->read_set, org_bitmap); + dbug_tmp_restore_column_map(&table->read_set, org_bitmap); return (buffer.length()); } @@ -659,7 +659,6 @@ int ha_tina::find_current_row(uchar *buf) { my_off_t end_offset, curr_offset= current_position; int eoln_len; - my_bitmap_map *org_bitmap; int error; bool read_all; bool ietf_quotes= table_share->option_struct->ietf_quotes; @@ -679,7 +678,7 @@ int ha_tina::find_current_row(uchar *buf) /* We must read all columns in case a table is opened for update */ read_all= !bitmap_is_clear_all(table->write_set); /* Avoid asserts in ::store() for columns that are not going to be updated */ - org_bitmap= dbug_tmp_use_all_columns(table, table->write_set); + MY_BITMAP *org_bitmap= dbug_tmp_use_all_columns(table, &table->write_set); error= HA_ERR_CRASHED_ON_USAGE; memset(buf, 0, table->s->null_bytes); @@ -857,7 +856,7 @@ int ha_tina::find_current_row(uchar *buf) error= 0; err: - dbug_tmp_restore_column_map(table->write_set, org_bitmap); + dbug_tmp_restore_column_map(&table->write_set, org_bitmap); DBUG_RETURN(error); } diff --git a/storage/federated/ha_federated.cc b/storage/federated/ha_federated.cc index 79e70ce5123..0190ad80fca 100644 --- a/storage/federated/ha_federated.cc +++ b/storage/federated/ha_federated.cc @@ -936,7 +936,7 @@ uint ha_federated::convert_row_to_internal_format(uchar *record, { ulong *lengths; Field **field; - my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->write_set); + MY_BITMAP *old_map= dbug_tmp_use_all_columns(table, &table->write_set); DBUG_ENTER("ha_federated::convert_row_to_internal_format"); lengths= mysql_fetch_lengths(result); @@ -965,7 +965,7 @@ uint ha_federated::convert_row_to_internal_format(uchar *record, } (*field)->move_field_offset(-old_ptr); } - dbug_tmp_restore_column_map(table->write_set, old_map); + 
dbug_tmp_restore_column_map(&table->write_set, old_map); DBUG_RETURN(0); } @@ -1293,14 +1293,13 @@ bool ha_federated::create_where_from_key(String *to, char tmpbuff[FEDERATED_QUERY_BUFFER_SIZE]; String tmp(tmpbuff, sizeof(tmpbuff), system_charset_info); const key_range *ranges[2]= { start_key, end_key }; - my_bitmap_map *old_map; DBUG_ENTER("ha_federated::create_where_from_key"); tmp.length(0); if (start_key == NULL && end_key == NULL) DBUG_RETURN(1); - old_map= dbug_tmp_use_all_columns(table, table->write_set); + MY_BITMAP *old_map= dbug_tmp_use_all_columns(table, &table->write_set); for (uint i= 0; i <= 1; i++) { bool needs_quotes; @@ -1477,7 +1476,7 @@ prepare_for_next_key_part: tmp.c_ptr_quick())); } } - dbug_tmp_restore_column_map(table->write_set, old_map); + dbug_tmp_restore_column_map(&table->write_set, old_map); if (both_not_null) if (tmp.append(STRING_WITH_LEN(") "))) @@ -1492,7 +1491,7 @@ prepare_for_next_key_part: DBUG_RETURN(0); err: - dbug_tmp_restore_column_map(table->write_set, old_map); + dbug_tmp_restore_column_map(&table->write_set, old_map); DBUG_RETURN(1); } @@ -1841,7 +1840,7 @@ int ha_federated::write_row(uchar *buf) String insert_field_value_string(insert_field_value_buffer, sizeof(insert_field_value_buffer), &my_charset_bin); - my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->read_set); + MY_BITMAP *old_map= dbug_tmp_use_all_columns(table, &table->read_set); DBUG_ENTER("ha_federated::write_row"); values_string.length(0); @@ -1895,7 +1894,7 @@ int ha_federated::write_row(uchar *buf) values_string.append(STRING_WITH_LEN(", ")); } } - dbug_tmp_restore_column_map(table->read_set, old_map); + dbug_tmp_restore_column_map(&table->read_set, old_map); /* if there were no fields, we don't want to add a closing paren @@ -2203,7 +2202,7 @@ int ha_federated::update_row(const uchar *old_data, const uchar *new_data) else { /* otherwise = */ - my_bitmap_map *old_map= tmp_use_all_columns(table, table->read_set); + MY_BITMAP *old_map= tmp_use_all_columns(table, &table->read_set); bool needs_quote= (*field)->str_needs_quotes(); (*field)->val_str(&field_value); if (needs_quote) @@ -2212,7 +2211,7 @@ int ha_federated::update_row(const uchar *old_data, const uchar *new_data) if (needs_quote) update_string.append(value_quote_char); field_value.length(0); - tmp_restore_column_map(table->read_set, old_map); + tmp_restore_column_map(&table->read_set, old_map); } update_string.append(STRING_WITH_LEN(", ")); } diff --git a/storage/federatedx/ha_federatedx.cc b/storage/federatedx/ha_federatedx.cc index 18756cff17c..9bf66d00f78 100644 --- a/storage/federatedx/ha_federatedx.cc +++ b/storage/federatedx/ha_federatedx.cc @@ -862,7 +862,7 @@ uint ha_federatedx::convert_row_to_internal_format(uchar *record, ulong *lengths; Field **field; int column= 0; - my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->write_set); + MY_BITMAP *old_map= dbug_tmp_use_all_columns(table, &table->write_set); Time_zone *saved_time_zone= table->in_use->variables.time_zone; DBUG_ENTER("ha_federatedx::convert_row_to_internal_format"); @@ -891,7 +891,7 @@ uint ha_federatedx::convert_row_to_internal_format(uchar *record, (*field)->move_field_offset(-old_ptr); } table->in_use->variables.time_zone= saved_time_zone; - dbug_tmp_restore_column_map(table->write_set, old_map); + dbug_tmp_restore_column_map(&table->write_set, old_map); DBUG_RETURN(0); } @@ -1220,7 +1220,6 @@ bool ha_federatedx::create_where_from_key(String *to, String tmp(tmpbuff, sizeof(tmpbuff), system_charset_info); const key_range 
*ranges[2]= { start_key, end_key }; Time_zone *saved_time_zone= table->in_use->variables.time_zone; - my_bitmap_map *old_map; DBUG_ENTER("ha_federatedx::create_where_from_key"); tmp.length(0); @@ -1228,7 +1227,7 @@ bool ha_federatedx::create_where_from_key(String *to, DBUG_RETURN(1); table->in_use->variables.time_zone= UTC; - old_map= dbug_tmp_use_all_columns(table, table->write_set); + MY_BITMAP *old_map= dbug_tmp_use_all_columns(table, &table->write_set); for (uint i= 0; i <= 1; i++) { bool needs_quotes; @@ -1404,7 +1403,7 @@ prepare_for_next_key_part: tmp.c_ptr_quick())); } } - dbug_tmp_restore_column_map(table->write_set, old_map); + dbug_tmp_restore_column_map(&table->write_set, old_map); table->in_use->variables.time_zone= saved_time_zone; if (both_not_null) @@ -1420,7 +1419,7 @@ prepare_for_next_key_part: DBUG_RETURN(0); err: - dbug_tmp_restore_column_map(table->write_set, old_map); + dbug_tmp_restore_column_map(&table->write_set, old_map); table->in_use->variables.time_zone= saved_time_zone; DBUG_RETURN(1); } @@ -1995,7 +1994,7 @@ int ha_federatedx::write_row(uchar *buf) sizeof(insert_field_value_buffer), &my_charset_bin); Time_zone *saved_time_zone= table->in_use->variables.time_zone; - my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->read_set); + MY_BITMAP *old_map= dbug_tmp_use_all_columns(table, &table->read_set); DBUG_ENTER("ha_federatedx::write_row"); table->in_use->variables.time_zone= UTC; @@ -2050,7 +2049,7 @@ int ha_federatedx::write_row(uchar *buf) values_string.append(STRING_WITH_LEN(", ")); } } - dbug_tmp_restore_column_map(table->read_set, old_map); + dbug_tmp_restore_column_map(&table->read_set, old_map); table->in_use->variables.time_zone= saved_time_zone; /* @@ -2375,7 +2374,7 @@ int ha_federatedx::update_row(const uchar *old_data, const uchar *new_data) else { /* otherwise = */ - my_bitmap_map *old_map= tmp_use_all_columns(table, table->read_set); + MY_BITMAP *old_map= tmp_use_all_columns(table, &table->read_set); bool needs_quote= (*field)->str_needs_quotes(); (*field)->val_str(&field_value); if (needs_quote) @@ -2384,7 +2383,7 @@ int ha_federatedx::update_row(const uchar *old_data, const uchar *new_data) if (needs_quote) update_string.append(value_quote_char); field_value.length(0); - tmp_restore_column_map(table->read_set, old_map); + tmp_restore_column_map(&table->read_set, old_map); } update_string.append(STRING_WITH_LEN(", ")); } diff --git a/storage/innobase/handler/ha_innodb.cc b/storage/innobase/handler/ha_innodb.cc index 848591234f4..f172273e11e 100644 --- a/storage/innobase/handler/ha_innodb.cc +++ b/storage/innobase/handler/ha_innodb.cc @@ -21126,11 +21126,11 @@ innobase_get_computed_value( field = dtuple_get_nth_v_field(row, col->v_pos); - my_bitmap_map* old_write_set = dbug_tmp_use_all_columns(mysql_table, mysql_table->write_set); - my_bitmap_map* old_read_set = dbug_tmp_use_all_columns(mysql_table, mysql_table->read_set); + MY_BITMAP *old_write_set = dbug_tmp_use_all_columns(mysql_table, &mysql_table->write_set); + MY_BITMAP *old_read_set = dbug_tmp_use_all_columns(mysql_table, &mysql_table->read_set); ret = mysql_table->update_virtual_field(mysql_table->field[col->m_col.ind]); - dbug_tmp_restore_column_map(mysql_table->read_set, old_read_set); - dbug_tmp_restore_column_map(mysql_table->write_set, old_write_set); + dbug_tmp_restore_column_map(&mysql_table->read_set, old_read_set); + dbug_tmp_restore_column_map(&mysql_table->write_set, old_write_set); if (ret != 0) { DBUG_RETURN(NULL); diff --git 
a/storage/innobase/handler/handler0alter.cc b/storage/innobase/handler/handler0alter.cc index 6d4e545248a..dddcb9ceae0 100644 --- a/storage/innobase/handler/handler0alter.cc +++ b/storage/innobase/handler/handler0alter.cc @@ -2397,9 +2397,9 @@ innobase_row_to_mysql( } } if (table->vfield) { - my_bitmap_map* old_vcol_set = tmp_use_all_columns(table, table->vcol_set); + MY_BITMAP *old_vcol_set = tmp_use_all_columns(table, &table->vcol_set); table->update_virtual_fields(table->file, VCOL_UPDATE_FOR_READ); - tmp_restore_column_map(table->vcol_set, old_vcol_set); + tmp_restore_column_map(&table->vcol_set, old_vcol_set); } } diff --git a/storage/mroonga/lib/mrn_debug_column_access.cpp b/storage/mroonga/lib/mrn_debug_column_access.cpp index 778300a33d6..5e875953b57 100644 --- a/storage/mroonga/lib/mrn_debug_column_access.cpp +++ b/storage/mroonga/lib/mrn_debug_column_access.cpp @@ -24,13 +24,13 @@ namespace mrn { : table_(table), bitmap_(bitmap) { #ifdef DBUG_ASSERT_EXISTS - map_ = dbug_tmp_use_all_columns(table_, bitmap_); + map_ = dbug_tmp_use_all_columns(table_, &bitmap_); #endif } DebugColumnAccess::~DebugColumnAccess() { #ifdef DBUG_ASSERT_EXISTS - dbug_tmp_restore_column_map(bitmap_, map_); + dbug_tmp_restore_column_map(&bitmap_, map_); #endif } } diff --git a/storage/mroonga/lib/mrn_debug_column_access.hpp b/storage/mroonga/lib/mrn_debug_column_access.hpp index 7c2fd60344e..77fe05f383c 100644 --- a/storage/mroonga/lib/mrn_debug_column_access.hpp +++ b/storage/mroonga/lib/mrn_debug_column_access.hpp @@ -27,7 +27,7 @@ namespace mrn { TABLE *table_; MY_BITMAP *bitmap_; #ifdef DBUG_ASSERT_EXISTS - my_bitmap_map *map_; + MY_BITMAP *map_; #endif public: DebugColumnAccess(TABLE *table, MY_BITMAP *bitmap); diff --git a/storage/oqgraph/ha_oqgraph.cc b/storage/oqgraph/ha_oqgraph.cc index 3cb47e204a0..398f48a80ac 100644 --- a/storage/oqgraph/ha_oqgraph.cc +++ b/storage/oqgraph/ha_oqgraph.cc @@ -908,7 +908,7 @@ int ha_oqgraph::index_read_idx(byte * buf, uint index, const byte * key, bmove_align(buf, table->s->default_values, table->s->reclength); key_restore(buf, (byte*) key, key_info, key_len); - my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->read_set); + MY_BITMAP *old_map= dbug_tmp_use_all_columns(table, &table->read_set); my_ptrdiff_t ptrdiff= buf - table->record[0]; if (ptrdiff) @@ -937,7 +937,7 @@ int ha_oqgraph::index_read_idx(byte * buf, uint index, const byte * key, field[1]->move_field_offset(-ptrdiff); field[2]->move_field_offset(-ptrdiff); } - dbug_tmp_restore_column_map(table->read_set, old_map); + dbug_tmp_restore_column_map(&table->read_set, old_map); return error_code(oqgraph::NO_MORE_DATA); } } @@ -962,7 +962,7 @@ int ha_oqgraph::index_read_idx(byte * buf, uint index, const byte * key, field[1]->move_field_offset(-ptrdiff); field[2]->move_field_offset(-ptrdiff); } - dbug_tmp_restore_column_map(table->read_set, old_map); + dbug_tmp_restore_column_map(&table->read_set, old_map); // Keep the latch around so we can use it in the query result later - // See fill_record(). 
@@ -995,7 +995,7 @@ int ha_oqgraph::fill_record(byte *record, const open_query::row &row) bmove_align(record, table->s->default_values, table->s->reclength); - my_bitmap_map *old_map= dbug_tmp_use_all_columns(table, table->write_set); + MY_BITMAP *old_map= dbug_tmp_use_all_columns(table, &table->write_set); my_ptrdiff_t ptrdiff= record - table->record[0]; if (ptrdiff) @@ -1071,7 +1071,7 @@ int ha_oqgraph::fill_record(byte *record, const open_query::row &row) field[4]->move_field_offset(-ptrdiff); field[5]->move_field_offset(-ptrdiff); } - dbug_tmp_restore_column_map(table->write_set, old_map); + dbug_tmp_restore_column_map(&table->write_set, old_map); return 0; } diff --git a/storage/perfschema/pfs_engine_table.cc b/storage/perfschema/pfs_engine_table.cc index 08ad7d31c88..46937d00ecf 100644 --- a/storage/perfschema/pfs_engine_table.cc +++ b/storage/perfschema/pfs_engine_table.cc @@ -188,17 +188,15 @@ ha_rows PFS_engine_table_share::get_row_count(void) const int PFS_engine_table_share::write_row(TABLE *table, unsigned char *buf, Field **fields) const { - my_bitmap_map *org_bitmap; - if (m_write_row == NULL) { return HA_ERR_WRONG_COMMAND; } /* We internally read from Fields to support the write interface */ - org_bitmap= dbug_tmp_use_all_columns(table, table->read_set); + MY_BITMAP *org_bitmap= dbug_tmp_use_all_columns(table, &table->read_set); int result= m_write_row(table, buf, fields); - dbug_tmp_restore_column_map(table->read_set, org_bitmap); + dbug_tmp_restore_column_map(&table->read_set, org_bitmap); return result; } @@ -256,7 +254,6 @@ int PFS_engine_table::read_row(TABLE *table, unsigned char *buf, Field **fields) { - my_bitmap_map *org_bitmap; Field *f; Field **fields_reset; @@ -264,7 +261,7 @@ int PFS_engine_table::read_row(TABLE *table, bool read_all= !bitmap_is_clear_all(table->write_set); /* We internally write to Fields to support the read interface */ - org_bitmap= dbug_tmp_use_all_columns(table, table->write_set); + MY_BITMAP *org_bitmap= dbug_tmp_use_all_columns(table, &table->write_set); /* Some callers of the storage engine interface do not honor the @@ -276,7 +273,7 @@ int PFS_engine_table::read_row(TABLE *table, f->reset(); int result= read_row_values(table, buf, fields, read_all); - dbug_tmp_restore_column_map(table->write_set, org_bitmap); + dbug_tmp_restore_column_map(&table->write_set, org_bitmap); return result; } @@ -294,12 +291,10 @@ int PFS_engine_table::update_row(TABLE *table, const unsigned char *new_buf, Field **fields) { - my_bitmap_map *org_bitmap; - /* We internally read from Fields to support the write interface */ - org_bitmap= dbug_tmp_use_all_columns(table, table->read_set); + MY_BITMAP *org_bitmap= dbug_tmp_use_all_columns(table, &table->read_set); int result= update_row_values(table, old_buf, new_buf, fields); - dbug_tmp_restore_column_map(table->read_set, org_bitmap); + dbug_tmp_restore_column_map(&table->read_set, org_bitmap); return result; } @@ -308,12 +303,10 @@ int PFS_engine_table::delete_row(TABLE *table, const unsigned char *buf, Field **fields) { - my_bitmap_map *org_bitmap; - /* We internally read from Fields to support the delete interface */ - org_bitmap= dbug_tmp_use_all_columns(table, table->read_set); + MY_BITMAP *org_bitmap= dbug_tmp_use_all_columns(table, &table->read_set); int result= delete_row_values(table, buf, fields); - dbug_tmp_restore_column_map(table->read_set, org_bitmap); + dbug_tmp_restore_column_map(&table->read_set, org_bitmap); return result; } diff --git a/storage/rocksdb/ha_rocksdb.cc 
b/storage/rocksdb/ha_rocksdb.cc index c4275054322..1922d6212f9 100644 --- a/storage/rocksdb/ha_rocksdb.cc +++ b/storage/rocksdb/ha_rocksdb.cc @@ -6116,8 +6116,7 @@ ulonglong ha_rocksdb::load_auto_incr_value_from_index() { Field *field = table->key_info[table->s->next_number_index].key_part[0].field; ulonglong max_val = rdb_get_int_col_max_value(field); - my_bitmap_map *const old_map = - dbug_tmp_use_all_columns(table, table->read_set); + MY_BITMAP *const old_map = dbug_tmp_use_all_columns(table, &table->read_set); last_val = field->val_int(); if (last_val != max_val) { last_val++; @@ -6132,7 +6131,7 @@ ulonglong ha_rocksdb::load_auto_incr_value_from_index() { } } #endif - dbug_tmp_restore_column_map(table->read_set, old_map); + dbug_tmp_restore_column_map(&table->read_set, old_map); } m_keyread_only = save_keyread_only; @@ -6169,15 +6168,15 @@ void ha_rocksdb::update_auto_incr_val_from_field() { field = table->key_info[table->s->next_number_index].key_part[0].field; max_val = rdb_get_int_col_max_value(field); - my_bitmap_map *const old_map = - dbug_tmp_use_all_columns(table, table->read_set); + MY_BITMAP *const old_map = + dbug_tmp_use_all_columns(table, &table->read_set); new_val = field->val_int(); // don't increment if we would wrap around if (new_val != max_val) { new_val++; } - dbug_tmp_restore_column_map(table->read_set, old_map); + dbug_tmp_restore_column_map(&table->read_set, old_map); // Only update if positive value was set for auto_incr column. if (new_val <= max_val) { diff --git a/storage/rocksdb/rdb_datadic.cc b/storage/rocksdb/rdb_datadic.cc index 0d43d4da5c4..3f9417385fe 100644 --- a/storage/rocksdb/rdb_datadic.cc +++ b/storage/rocksdb/rdb_datadic.cc @@ -1488,12 +1488,12 @@ void Rdb_key_def::pack_with_make_sort_key( DBUG_ASSERT(*dst != nullptr); const int max_len = fpi->m_max_image_len; - my_bitmap_map *old_map; + MY_BITMAP*old_map; old_map= dbug_tmp_use_all_columns(field->table, - field->table->read_set); + &field->table->read_set); field->sort_string(*dst, max_len); - dbug_tmp_restore_column_map(field->table->read_set, old_map); + dbug_tmp_restore_column_map(&field->table->read_set, old_map); *dst += max_len; } diff --git a/storage/sequence/sequence.cc b/storage/sequence/sequence.cc index b9f5d02bd51..8eae98955c3 100644 --- a/storage/sequence/sequence.cc +++ b/storage/sequence/sequence.cc @@ -115,13 +115,13 @@ THR_LOCK_DATA **ha_seq::store_lock(THD *thd, THR_LOCK_DATA **to, void ha_seq::set(unsigned char *buf) { - my_bitmap_map *old_map = dbug_tmp_use_all_columns(table, table->write_set); + MY_BITMAP *old_map = dbug_tmp_use_all_columns(table, &table->write_set); my_ptrdiff_t offset = (my_ptrdiff_t) (buf - table->record[0]); Field *field = table->field[0]; field->move_field_offset(offset); field->store(cur, true); field->move_field_offset(-offset); - dbug_tmp_restore_column_map(table->write_set, old_map); + dbug_tmp_restore_column_map(&table->write_set, old_map); } int ha_seq::rnd_init(bool scan) diff --git a/storage/sphinx/ha_sphinx.cc b/storage/sphinx/ha_sphinx.cc index 4092cfcb514..0e5ba383caf 100644 --- a/storage/sphinx/ha_sphinx.cc +++ b/storage/sphinx/ha_sphinx.cc @@ -3048,7 +3048,7 @@ int ha_sphinx::get_rec ( byte * buf, const byte *, uint ) } #if MYSQL_VERSION_ID>50100 - my_bitmap_map * org_bitmap = dbug_tmp_use_all_columns ( table, table->write_set ); + MY_BITMAP * org_bitmap = dbug_tmp_use_all_columns ( table, &table->write_set ); #endif Field ** field = table->field; @@ -3194,7 +3194,7 @@ int ha_sphinx::get_rec ( byte * buf, const byte *, uint ) m_iCurrentPos++; 
#if MYSQL_VERSION_ID > 50100 - dbug_tmp_restore_column_map ( table->write_set, org_bitmap ); + dbug_tmp_restore_column_map ( &table->write_set, org_bitmap ); #endif SPH_RET(0); diff --git a/storage/spider/ha_spider.cc b/storage/spider/ha_spider.cc index 2b993fa7e9c..0f222577515 100644 --- a/storage/spider/ha_spider.cc +++ b/storage/spider/ha_spider.cc @@ -9951,12 +9951,12 @@ int ha_spider::write_row( if (!table->auto_increment_field_not_null) { #ifndef DBUG_OFF - my_bitmap_map *tmp_map = - dbug_tmp_use_all_columns(table, table->write_set); + MY_BITMAP *tmp_map = + dbug_tmp_use_all_columns(table, &table->write_set); #endif table->next_number_field->store((longlong) 0, TRUE); #ifndef DBUG_OFF - dbug_tmp_restore_column_map(table->write_set, tmp_map); + dbug_tmp_restore_column_map(&table->write_set, tmp_map); #endif force_auto_increment = FALSE; table->file->insert_id_for_cur_row = 0; @@ -9964,13 +9964,13 @@ int ha_spider::write_row( } else if (auto_increment_mode == 2) { #ifndef DBUG_OFF - my_bitmap_map *tmp_map = - dbug_tmp_use_all_columns(table, table->write_set); + MY_BITMAP *tmp_map = + dbug_tmp_use_all_columns(table, &table->write_set); #endif table->next_number_field->store((longlong) 0, TRUE); table->auto_increment_field_not_null = FALSE; #ifndef DBUG_OFF - dbug_tmp_restore_column_map(table->write_set, tmp_map); + dbug_tmp_restore_column_map(&table->write_set, tmp_map); #endif force_auto_increment = FALSE; table->file->insert_id_for_cur_row = 0; diff --git a/storage/spider/spd_db_conn.cc b/storage/spider/spd_db_conn.cc index cea85b46657..424f22d46ee 100644 --- a/storage/spider/spd_db_conn.cc +++ b/storage/spider/spd_db_conn.cc @@ -1673,7 +1673,7 @@ int spider_db_append_key_where_internal( DBUG_PRINT("info", ("spider end_key_part_map=%lu", end_key_part_map)); #ifndef DBUG_OFF - my_bitmap_map *tmp_map = dbug_tmp_use_all_columns(table, table->read_set); + MY_BITMAP *tmp_map = dbug_tmp_use_all_columns(table, &table->read_set); #endif if (sql_kind == SPIDER_SQL_KIND_HANDLER) @@ -2481,7 +2481,7 @@ end: if (sql_kind == SPIDER_SQL_KIND_SQL) dbton_hdl->set_order_pos(sql_type); #ifndef DBUG_OFF - dbug_tmp_restore_column_map(table->read_set, tmp_map); + dbug_tmp_restore_column_map(&table->read_set, tmp_map); #endif DBUG_RETURN(0); } @@ -3002,8 +3002,8 @@ int spider_db_fetch_table( bitmap_is_set(table->write_set, (*field)->field_index) )) { #ifndef DBUG_OFF - my_bitmap_map *tmp_map = - dbug_tmp_use_all_columns(table, table->write_set); + MY_BITMAP *tmp_map = + dbug_tmp_use_all_columns(table, &table->write_set); #endif DBUG_PRINT("info", ("spider bitmap is set %s", SPIDER_field_name_str(*field))); @@ -3011,7 +3011,7 @@ int spider_db_fetch_table( spider_db_fetch_row(share, *field, row, ptr_diff))) DBUG_RETURN(error_num); #ifndef DBUG_OFF - dbug_tmp_restore_column_map(table->write_set, tmp_map); + dbug_tmp_restore_column_map(&table->write_set, tmp_map); #endif } else { DBUG_PRINT("info", ("spider bitmap is not set %s", @@ -3182,8 +3182,8 @@ int spider_db_fetch_key( bitmap_is_set(table->write_set, field->field_index) )) { #ifndef DBUG_OFF - my_bitmap_map *tmp_map = - dbug_tmp_use_all_columns(table, table->write_set); + MY_BITMAP *tmp_map = + dbug_tmp_use_all_columns(table, &table->write_set); #endif DBUG_PRINT("info", ("spider bitmap is set %s", SPIDER_field_name_str(field))); @@ -3191,7 +3191,7 @@ int spider_db_fetch_key( spider_db_fetch_row(share, field, row, ptr_diff))) DBUG_RETURN(error_num); #ifndef DBUG_OFF - dbug_tmp_restore_column_map(table->write_set, tmp_map); + 
dbug_tmp_restore_column_map(&table->write_set, tmp_map); #endif } row->next(); @@ -3306,15 +3306,15 @@ int spider_db_fetch_minimum_columns( bitmap_is_set(table->write_set, (*field)->field_index) )) { #ifndef DBUG_OFF - my_bitmap_map *tmp_map = - dbug_tmp_use_all_columns(table, table->write_set); + MY_BITMAP *tmp_map = + dbug_tmp_use_all_columns(table, &table->write_set); #endif DBUG_PRINT("info", ("spider bitmap is set %s", SPIDER_field_name_str(*field))); if ((error_num = spider_db_fetch_row(share, *field, row, ptr_diff))) DBUG_RETURN(error_num); #ifndef DBUG_OFF - dbug_tmp_restore_column_map(table->write_set, tmp_map); + dbug_tmp_restore_column_map(&table->write_set, tmp_map); #endif } row->next(); @@ -5472,8 +5472,8 @@ int spider_db_seek_tmp_table( bitmap_is_set(table->write_set, (*field)->field_index) )) { #ifndef DBUG_OFF - my_bitmap_map *tmp_map = - dbug_tmp_use_all_columns(table, table->write_set); + MY_BITMAP *tmp_map = + dbug_tmp_use_all_columns(table, &table->write_set); #endif DBUG_PRINT("info", ("spider bitmap is set %s", SPIDER_field_name_str(*field))); @@ -5481,7 +5481,7 @@ int spider_db_seek_tmp_table( spider_db_fetch_row(spider->share, *field, row, ptr_diff))) DBUG_RETURN(error_num); #ifndef DBUG_OFF - dbug_tmp_restore_column_map(table->write_set, tmp_map); + dbug_tmp_restore_column_map(&table->write_set, tmp_map); #endif } row->next(); @@ -5560,8 +5560,8 @@ int spider_db_seek_tmp_key( bitmap_is_set(table->write_set, field->field_index) )) { #ifndef DBUG_OFF - my_bitmap_map *tmp_map = - dbug_tmp_use_all_columns(table, table->write_set); + MY_BITMAP *tmp_map = + dbug_tmp_use_all_columns(table, &table->write_set); #endif DBUG_PRINT("info", ("spider bitmap is set %s", SPIDER_field_name_str(field))); @@ -5569,7 +5569,7 @@ int spider_db_seek_tmp_key( spider_db_fetch_row(spider->share, field, row, ptr_diff))) DBUG_RETURN(error_num); #ifndef DBUG_OFF - dbug_tmp_restore_column_map(table->write_set, tmp_map); + dbug_tmp_restore_column_map(&table->write_set, tmp_map); #endif } row->next(); @@ -5651,8 +5651,8 @@ int spider_db_seek_tmp_minimum_columns( bitmap_is_set(table->write_set, (*field)->field_index))); */ #ifndef DBUG_OFF - my_bitmap_map *tmp_map = - dbug_tmp_use_all_columns(table, table->write_set); + MY_BITMAP *tmp_map = + dbug_tmp_use_all_columns(table, &table->write_set); #endif DBUG_PRINT("info", ("spider bitmap is set %s", SPIDER_field_name_str(*field))); @@ -5661,7 +5661,7 @@ int spider_db_seek_tmp_minimum_columns( DBUG_RETURN(error_num); row->next(); #ifndef DBUG_OFF - dbug_tmp_restore_column_map(table->write_set, tmp_map); + dbug_tmp_restore_column_map(&table->write_set, tmp_map); #endif } else if (bitmap_is_set(table->read_set, (*field)->field_index)) @@ -9363,7 +9363,7 @@ int spider_db_open_item_string( { THD *thd = NULL; TABLE *table; - my_bitmap_map *saved_map; + MY_BITMAP *saved_map; Time_zone *saved_time_zone; String str_value; char tmp_buf[MAX_FIELD_WIDTH]; @@ -9392,7 +9392,7 @@ int spider_db_open_item_string( */ table = field->table; thd = table->in_use; - saved_map = dbug_tmp_use_all_columns(table, table->write_set); + saved_map = dbug_tmp_use_all_columns(table, &table->write_set); item->save_in_field(field, FALSE); saved_time_zone = thd->variables.time_zone; thd->variables.time_zone = UTC; @@ -9428,7 +9428,7 @@ end: if (thd) { thd->variables.time_zone = saved_time_zone; - dbug_tmp_restore_column_map(table->write_set, saved_map); + dbug_tmp_restore_column_map(&table->write_set, saved_map); } } @@ -9470,7 +9470,7 @@ int spider_db_open_item_int( { THD *thd = 
NULL; TABLE *table; - my_bitmap_map *saved_map; + MY_BITMAP *saved_map; Time_zone *saved_time_zone; String str_value; bool print_quoted_string; @@ -9498,7 +9498,7 @@ int spider_db_open_item_int( */ table = field->table; thd = table->in_use; - saved_map = dbug_tmp_use_all_columns(table, table->write_set); + saved_map = dbug_tmp_use_all_columns(table, &table->write_set); item->save_in_field(field, FALSE); saved_time_zone = thd->variables.time_zone; thd->variables.time_zone = UTC; @@ -9544,7 +9544,7 @@ end: if (thd) { thd->variables.time_zone = saved_time_zone; - dbug_tmp_restore_column_map(table->write_set, saved_map); + dbug_tmp_restore_column_map(&table->write_set, saved_map); } } @@ -9864,8 +9864,8 @@ int spider_db_udf_fetch_table( DBUG_RETURN(HA_ERR_END_OF_FILE); #ifndef DBUG_OFF - my_bitmap_map *tmp_map = - dbug_tmp_use_all_columns(table, table->write_set); + MY_BITMAP *tmp_map = + dbug_tmp_use_all_columns(table, &table->write_set); #endif for ( roop_count = 0, @@ -9878,7 +9878,7 @@ int spider_db_udf_fetch_table( spider_db_udf_fetch_row(trx, *field, row))) { #ifndef DBUG_OFF - dbug_tmp_restore_column_map(table->write_set, tmp_map); + dbug_tmp_restore_column_map(&table->write_set, tmp_map); #endif DBUG_RETURN(error_num); } @@ -9888,7 +9888,7 @@ int spider_db_udf_fetch_table( for (; roop_count < set_off; roop_count++, field++) (*field)->set_default(); #ifndef DBUG_OFF - dbug_tmp_restore_column_map(table->write_set, tmp_map); + dbug_tmp_restore_column_map(&table->write_set, tmp_map); #endif table->status = 0; DBUG_RETURN(0); diff --git a/storage/spider/spd_db_mysql.cc b/storage/spider/spd_db_mysql.cc index cac6b063ad0..ce6aef63fd8 100644 --- a/storage/spider/spd_db_mysql.cc +++ b/storage/spider/spd_db_mysql.cc @@ -8181,8 +8181,7 @@ int spider_mbase_handler::append_update_set( mysql_share->append_column_name(str, (*fields)->field_index); str->q_append(SPIDER_SQL_EQUAL_STR, SPIDER_SQL_EQUAL_LEN); #ifndef DBUG_OFF - my_bitmap_map *tmp_map = dbug_tmp_use_all_columns(table, - table->read_set); + MY_BITMAP *tmp_map = dbug_tmp_use_all_columns(table, &table->read_set); #endif if ( spider_db_mbase_utility-> @@ -8191,12 +8190,12 @@ int spider_mbase_handler::append_update_set( str->reserve(SPIDER_SQL_COMMA_LEN) ) { #ifndef DBUG_OFF - dbug_tmp_restore_column_map(table->read_set, tmp_map); + dbug_tmp_restore_column_map(&table->read_set, tmp_map); #endif DBUG_RETURN(HA_ERR_OUT_OF_MEM); } #ifndef DBUG_OFF - dbug_tmp_restore_column_map(table->read_set, tmp_map); + dbug_tmp_restore_column_map(&table->read_set, tmp_map); #endif } str->q_append(SPIDER_SQL_COMMA_STR, SPIDER_SQL_COMMA_LEN); @@ -10888,8 +10887,8 @@ int spider_mbase_handler::append_insert_values( bitmap_is_set(table->read_set, (*field)->field_index) ) { #ifndef DBUG_OFF - my_bitmap_map *tmp_map = - dbug_tmp_use_all_columns(table, table->read_set); + MY_BITMAP *tmp_map = + dbug_tmp_use_all_columns(table, &table->read_set); #endif add_value = TRUE; DBUG_PRINT("info",("spider is_null()=%s", @@ -10911,7 +10910,7 @@ int spider_mbase_handler::append_insert_values( if (str->reserve(SPIDER_SQL_NULL_LEN + SPIDER_SQL_COMMA_LEN)) { #ifndef DBUG_OFF - dbug_tmp_restore_column_map(table->read_set, tmp_map); + dbug_tmp_restore_column_map(&table->read_set, tmp_map); #endif str->length(0); DBUG_RETURN(HA_ERR_OUT_OF_MEM); @@ -10925,7 +10924,7 @@ int spider_mbase_handler::append_insert_values( str->reserve(SPIDER_SQL_COMMA_LEN) ) { #ifndef DBUG_OFF - dbug_tmp_restore_column_map(table->read_set, tmp_map); + dbug_tmp_restore_column_map(&table->read_set, tmp_map); 
#endif str->length(0); DBUG_RETURN(HA_ERR_OUT_OF_MEM); @@ -10933,7 +10932,7 @@ int spider_mbase_handler::append_insert_values( } str->q_append(SPIDER_SQL_COMMA_STR, SPIDER_SQL_COMMA_LEN); #ifndef DBUG_OFF - dbug_tmp_restore_column_map(table->read_set, tmp_map); + dbug_tmp_restore_column_map(&table->read_set, tmp_map); #endif } } diff --git a/storage/tokudb/ha_tokudb.cc b/storage/tokudb/ha_tokudb.cc index 2f13e4cdbb9..cc61aa85598 100644 --- a/storage/tokudb/ha_tokudb.cc +++ b/storage/tokudb/ha_tokudb.cc @@ -2313,7 +2313,7 @@ int ha_tokudb::pack_row_in_buff( int r = ENOSYS; memset((void *) row, 0, sizeof(*row)); - my_bitmap_map *old_map = dbug_tmp_use_all_columns(table, table->write_set); + MY_BITMAP *old_map = dbug_tmp_use_all_columns(table, &table->write_set); // Copy null bytes memcpy(row_buff, record, table_share->null_bytes); @@ -2362,7 +2362,7 @@ int ha_tokudb::pack_row_in_buff( row->size = (size_t) (var_field_data_ptr - row_buff); r = 0; - dbug_tmp_restore_column_map(table->write_set, old_map); + dbug_tmp_restore_column_map(&table->write_set, old_map); return r; } @@ -2758,7 +2758,7 @@ DBT* ha_tokudb::create_dbt_key_from_key( { uint32_t size = 0; uchar* tmp_buff = buff; - my_bitmap_map *old_map = dbug_tmp_use_all_columns(table, table->write_set); + MY_BITMAP *old_map = dbug_tmp_use_all_columns(table, &table->write_set); key->data = buff; @@ -2797,7 +2797,7 @@ DBT* ha_tokudb::create_dbt_key_from_key( key->size = size; DBUG_DUMP("key", (uchar *) key->data, key->size); - dbug_tmp_restore_column_map(table->write_set, old_map); + dbug_tmp_restore_column_map(&table->write_set, old_map); return key; } @@ -2890,7 +2890,7 @@ DBT* ha_tokudb::pack_key( KEY* key_info = &table->key_info[keynr]; KEY_PART_INFO* key_part = key_info->key_part; KEY_PART_INFO* end = key_part + key_info->user_defined_key_parts; - my_bitmap_map* old_map = dbug_tmp_use_all_columns(table, table->write_set); + MY_BITMAP* old_map = dbug_tmp_use_all_columns(table, &table->write_set); memset((void *) key, 0, sizeof(*key)); key->data = buff; @@ -2927,7 +2927,7 @@ DBT* ha_tokudb::pack_key( key->size = (buff - (uchar *) key->data); DBUG_DUMP("key", (uchar *) key->data, key->size); - dbug_tmp_restore_column_map(table->write_set, old_map); + dbug_tmp_restore_column_map(&table->write_set, old_map); DBUG_RETURN(key); } @@ -2955,7 +2955,7 @@ DBT* ha_tokudb::pack_ext_key( KEY* key_info = &table->key_info[keynr]; KEY_PART_INFO* key_part = key_info->key_part; KEY_PART_INFO* end = key_part + key_info->user_defined_key_parts; - my_bitmap_map* old_map = dbug_tmp_use_all_columns(table, table->write_set); + MY_BITMAP* old_map = dbug_tmp_use_all_columns(table, &table->write_set); memset((void *) key, 0, sizeof(*key)); key->data = buff; @@ -3034,7 +3034,7 @@ DBT* ha_tokudb::pack_ext_key( key->size = (buff - (uchar *) key->data); DBUG_DUMP("key", (uchar *) key->data, key->size); - dbug_tmp_restore_column_map(table->write_set, old_map); + dbug_tmp_restore_column_map(&table->write_set, old_map); DBUG_RETURN(key); } #endif // defined(TOKU_INCLUDE_EXTENDED_KEYS) && TOKU_INCLUDE_EXTENDED_KEYS From bdae8bb6fdb7e9c7875f9a3fff02eadadea50dab Mon Sep 17 00:00:00 2001 From: Igor Babaev Date: Mon, 25 Jan 2021 15:21:52 -0800 Subject: [PATCH 106/150] MDEV-24675 Server crash when table value constructor uses a subselect This patch actually fixes the bug MDEV-24675 and the bug MDEV-24618: Assertion failure when TVC uses a row in the context expecting scalar value The cause of these bugs is the same wrong call of the function that fixes value expressions in the 
value list of a table value constructor. The assertion failure happened when an expression in the value list is of the row type. In this case an error message was expected, but it was not issued because the function fix_fields_if_needed() was called to check fields of value expressions in a TVC instead of the function fix_fields_if_needed_for_scalar() that would also check that the value expressions are of a scalar type. The first bug happened when a table value expression used an expression returned by a single-row subselect. In this case the call of the fix_fields_if_needed_for_scalar virtual function must be provided with an address to which the single-row subselect has to be attached. Test cases were added for each of the bugs. Approved by Oleksandr Byelkin --- mysql-test/main/table_value_constr.result | 54 +++++++++++++++++++++++ mysql-test/main/table_value_constr.test | 49 ++++++++++++++++++++ sql/sql_tvc.cc | 4 +- 3 files changed, 105 insertions(+), 2 deletions(-) diff --git a/mysql-test/main/table_value_constr.result b/mysql-test/main/table_value_constr.result index b95f9289360..69c75ddab75 100644 --- a/mysql-test/main/table_value_constr.result +++ b/mysql-test/main/table_value_constr.result @@ -2621,3 +2621,57 @@ EXECUTE IMMEDIATE 'VALUES (?)' USING IGNORE; ERROR HY000: 'ignore' is not allowed in this context EXECUTE IMMEDIATE 'VALUES (?)' USING DEFAULT; ERROR HY000: 'default' is not allowed in this context +# +# MDEV-24675: TVC using subqueries +# +values((select 1)); +(select 1) +1 +values (2), ((select 1)); +2 +2 +1 +values ((select 1)), (2), ((select 3)); +(select 1) +1 +2 +3 +values ((select 1), 2), (3,4), (5, (select 6)); +(select 1) 2 +1 2 +3 4 +5 6 +create table t1 (a int, b int); +insert into t1 values (1,3), (2,3), (3,2), (1,2); +values((select max(a) from t1)); +(select max(a) from t1) +3 +values((select min(b) from t1)); +(select min(b) from t1) +2 +values ((select max(a) from t1), (select min(b) from t1)); +(select max(a) from t1) (select min(b) from t1) +3 2 +values((select * from (select max(b) from t1) as t)); +(select * from (select max(b) from t1) as t) +3 +drop table t1; +# +# MDEV-24618: TVC contains extra parenthesis for row expressions +# in value list +# +create table t1 (a int, b int); +insert into t1 values (1,3), (2,3); +insert into t1 values ((5,4)); +ERROR 21000: Operand should contain 1 column(s) +values ((1,2)); +ERROR 21000: Operand should contain 1 column(s) +select * from (values ((1,2))) dt; +ERROR 21000: Operand should contain 1 column(s) +values (1,2); +1 2 +1 2 +values ((select min(a), max(b) from t1)); +ERROR 21000: Operand should contain 1 column(s) +drop table t1; +End of 10.3 tests diff --git a/mysql-test/main/table_value_constr.test b/mysql-test/main/table_value_constr.test index 11d553f0b85..e8697bef589 100644 --- a/mysql-test/main/table_value_constr.test +++ b/mysql-test/main/table_value_constr.test @@ -1353,3 +1353,52 @@ VALUES (DEFAULT); EXECUTE IMMEDIATE 'VALUES (?)' USING IGNORE; --error ER_UNKNOWN_ERROR EXECUTE IMMEDIATE 'VALUES (?)' USING DEFAULT; + +--echo # +--echo # MDEV-24675: TVC using subqueries +--echo # + +values((select 1)); + +values (2), ((select 1)); + +values ((select 1)), (2), ((select 3)); + +values ((select 1), 2), (3,4), (5, (select 6)); + +create table t1 (a int, b int); +insert into t1 values (1,3), (2,3), (3,2), (1,2); + +values((select max(a) from t1)); + +values((select min(b) from t1)); + +values ((select max(a) from t1), (select min(b) from t1)); + +values((select * from (select max(b) from t1) as 
t)); + +drop table t1; + +--echo # +--echo # MDEV-24618: TVC contains extra parenthesis for row expressions +--echo # in value list +--echo # + +create table t1 (a int, b int); +insert into t1 values (1,3), (2,3); +--error ER_OPERAND_COLUMNS +insert into t1 values ((5,4)); + +--error ER_OPERAND_COLUMNS +values ((1,2)); + +--error ER_OPERAND_COLUMNS +select * from (values ((1,2))) dt; + +values (1,2); +--error ER_OPERAND_COLUMNS +values ((select min(a), max(b) from t1)); + +drop table t1; + +--echo End of 10.3 tests diff --git a/sql/sql_tvc.cc b/sql/sql_tvc.cc index 10a279b92ed..1f91539ff45 100644 --- a/sql/sql_tvc.cc +++ b/sql/sql_tvc.cc @@ -47,7 +47,7 @@ bool fix_fields_for_tvc(THD *thd, List_iterator_fast &li) while ((lst= li++)) { - List_iterator_fast it(*lst); + List_iterator it(*lst); Item *item; while ((item= it++)) @@ -59,7 +59,7 @@ bool fix_fields_for_tvc(THD *thd, List_iterator_fast &li) while replacing their values to NAME_CONST()s. So fix only those that have not been. */ - if (item->fix_fields_if_needed(thd, 0) || + if (item->fix_fields_if_needed_for_scalar(thd, it.ref()) || item->check_is_evaluable_expression_or_error()) DBUG_RETURN(true); } From c2c23e598d49fc991b34853d864c77d02741b61c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jan=20Lindstr=C3=B6m?= Date: Wed, 27 Jan 2021 09:11:46 +0200 Subject: [PATCH 107/150] Update galera.disabled.def file --- mysql-test/suite/galera/disabled.def | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/mysql-test/suite/galera/disabled.def b/mysql-test/suite/galera/disabled.def index 046feac5566..0185871eaa4 100644 --- a/mysql-test/suite/galera/disabled.def +++ b/mysql-test/suite/galera/disabled.def @@ -17,7 +17,9 @@ MW-286 : MDEV-18464 Killing thread can cause mutex deadlock if done concurrently MW-328A : MDEV-22666 galera.MW-328A MTR failed: "Semaphore wait has lasted > 600 seconds" and do not release port 16002 MW-328B : MDEV-22666 galera.MW-328A MTR failed: "Semaphore wait has lasted > 600 seconds" and do not release port 16002 MW-329 : MDEV-19962 Galera test failure on MW-329 +galera_FK_duplicate_client_insert : MDEV-24473: galera.galera_FK_duplicate_client_insert MTR failed: SIGABRT. InnoDB: Conflicting lock on table. 
Assertion failure in lock0lock.cc galera_as_slave_replication_bundle : MDEV-15785 OPTION_GTID_BEGIN is set in Gtid_log_event::do_apply_event() +galera_bf_abort_at_after_statement : MDEV-21557: galera_bf_abort_at_after_statement MTR failed: query 'reap' succeeded - should have failed with errno 1213 galera_bf_abort_group_commit : MDEV-18282 Galera test failure on galera.galera_bf_abort_group_commit galera_binlog_stmt_autoinc : MDEV-19959 Galera test failure on galera_binlog_stmt_autoinc galera_encrypt_tmp_files : Get error failed to enable encryption of temporary files @@ -35,12 +37,14 @@ galera_ssl_upgrade : MDEV-19950 Galera test failure on galera_ssl_upgrade galera_sst_mariabackup_encrypt_with_key : MDEV-21484 galera_sst_mariabackup_encrypt_with_key galera_toi_ddl_nonconflicting : MDEV-21518 galera.galera_toi_ddl_nonconflicting galera_toi_truncate : MDEV-22996 Hang on galera_toi_truncate test case +galera_var_ignore_apply_errors : MDEV-20451: Lock wait timeout exceeded in galera_var_ignore_apply_errors galera_var_node_address : MDEV-20485 Galera test failure galera_var_notify_cmd : MDEV-21905 Galera test galera_var_notify_cmd causes hang galera_var_reject_queries : assertion in inline_mysql_socket_send galera_var_replicate_myisam_on : MDEV-24062 Galera test failure on galera_var_replicate_myisam_on galera_var_retry_autocommit: MDEV-18181 Galera test failure on galera.galera_var_retry_autocommit galera_wan : MDEV-17259 Test failure on galera.galera_wan +mysql-wsrep#198 : MDEV-24446: galera.mysql-wsrep#198 MTR failed: query 'reap' failed: 2000: Unknown MySQL error partition : MDEV-19958 Galera test failure on galera.partition query_cache: MDEV-15805 Test failure on galera.query_cache sql_log_bin : MDEV-21491 galera.sql_log_bin From 5b93a483e413b9064d665a7742a96998f3ea410c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Wed, 27 Jan 2021 15:11:38 +0200 Subject: [PATCH 108/150] MDEV-24184 preparation: InnoDB RENAME TABLE recovery failure fil_rename_tablespace(): Do not write a redundant MLOG_FILE_RENAME2 record. The recovery bug will be fixed later. The problem is that we are invoking fil_op_replay_rename() too often, while we should skip any 'intermediate' names of a tablespace and only apply the very last rename for each tablespace identifier, and only if the tablespace name is not already correct. --- storage/innobase/fil/fil0fil.cc | 1 - 1 file changed, 1 deletion(-) diff --git a/storage/innobase/fil/fil0fil.cc b/storage/innobase/fil/fil0fil.cc index 328cd104e5d..c71dbeff421 100644 --- a/storage/innobase/fil/fil0fil.cc +++ b/storage/innobase/fil/fil0fil.cc @@ -3492,7 +3492,6 @@ func_exit: ut_ad(strchr(new_file_name, OS_PATH_SEPARATOR) != NULL); if (!recv_recovery_is_on()) { - fil_name_write_rename(id, old_file_name, new_file_name); log_mutex_enter(); } From 5fd3c7471e3e0673b50d309567c9747d36f09412 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Wed, 27 Jan 2021 16:43:29 +0200 Subject: [PATCH 109/150] MDEV-24709 Assertion !recv_no_ibuf_operations failed in ibuf_page_low() recv_recovery_from_checkpoint_start(): Clear the recv_no_ibuf_operations flag at the same time when we enabled writes to the log. The failure to clear the flag might have caused some missed change buffer merges, at least to the secondary index of SYS_TABLES that were accessed by trx_resurrect_table_locks() while the last recovery batch was in progress. Thanks to Thirunarayanan Balathandayuthapani for suggesting this fix. 
--- storage/innobase/log/log0recv.cc | 1 + 1 file changed, 1 insertion(+) diff --git a/storage/innobase/log/log0recv.cc b/storage/innobase/log/log0recv.cc index 95179ec2271..f8b2cb95a3e 100644 --- a/storage/innobase/log/log0recv.cc +++ b/storage/innobase/log/log0recv.cc @@ -4001,6 +4001,7 @@ skip_apply: mutex_enter(&recv_sys->mutex); recv_sys->apply_log_recs = TRUE; + recv_no_ibuf_operations = is_mariabackup_restore_or_export(); mutex_exit(&recv_sys->mutex); From cbc75e9948acd849956aa78e1d31ed4ec350c35f Mon Sep 17 00:00:00 2001 From: Rucha Deodhar Date: Thu, 21 Jan 2021 11:34:05 +0530 Subject: [PATCH 110/150] MDEV-20939: Race condition between mysqldump import and InnoDB persistent statistics calculation Analysis: When --replace or --insert-ignore is not given, dumping of mysql.innodb_index_stats and mysql.innodb_table_stats will result into race condition. Fix: Check if these options are present with --system=stats (because dumping under --system=stats is safe). Otherwise, dump only structure, ignoring data because innodb will recalculate data anyway. --- client/mysqldump.c | 14 ++ mysql-test/r/mysqldump.result | 267 ++++++++++++++++++++++++++++++++++ mysql-test/t/mysqldump.test | 23 +++ 3 files changed, 304 insertions(+) diff --git a/client/mysqldump.c b/client/mysqldump.c index 2eaec829867..ecca380777f 100644 --- a/client/mysqldump.c +++ b/client/mysqldump.c @@ -1052,6 +1052,20 @@ static int get_options(int *argc, char ***argv) if ((ho_error= handle_options(argc, argv, my_long_options, get_one_option))) return(ho_error); + /* + Dumping under --system=stats with --replace or --inser-ignore is safe and will not + retult into race condition. Otherwise dump only structure and ignore data by default + while dumping. + */ + if (!(opt_system & OPT_SYSTEM_STATS) && !(opt_ignore || opt_replace_into)) + { + if (my_hash_insert(&ignore_data, + (uchar*) my_strdup("mysql.innodb_index_stats", MYF(MY_WME))) || + my_hash_insert(&ignore_data, + (uchar*) my_strdup("mysql.innodb_table_stats", MYF(MY_WME)))) + return(EX_EOM); + } + if (opt_system & OPT_SYSTEM_ALL) opt_system|= ~0; diff --git a/mysql-test/r/mysqldump.result b/mysql-test/r/mysqldump.result index 974ff200e68..da80ff09db9 100644 --- a/mysql-test/r/mysqldump.result +++ b/mysql-test/r/mysqldump.result @@ -5714,4 +5714,271 @@ DELIMITER ; /*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */; DROP TABLE t1; +# +# MDEV-20939: Race condition between mysqldump import and InnoDB +# persistent statistics calculation +# +# +# Without --replace and --insert-ignore +# + +/*!40101 SET @OLD_CHARACTER_SET_CLIENT=@@CHARACTER_SET_CLIENT */; +/*!40101 SET @OLD_CHARACTER_SET_RESULTS=@@CHARACTER_SET_RESULTS */; +/*!40101 SET @OLD_COLLATION_CONNECTION=@@COLLATION_CONNECTION */; +/*!40101 SET NAMES utf8 */; +/*!40103 SET @OLD_TIME_ZONE=@@TIME_ZONE */; +/*!40103 SET TIME_ZONE='+00:00' */; +/*!40014 SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0 */; +/*!40014 SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0 */; +/*!40101 SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='NO_AUTO_VALUE_ON_ZERO' */; +/*!40111 SET @OLD_SQL_NOTES=@@SQL_NOTES, SQL_NOTES=0 */; +DROP TABLE IF EXISTS `innodb_index_stats`; +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE `innodb_index_stats` ( + `database_name` varchar(64) COLLATE utf8_bin NOT NULL, + `table_name` varchar(199) COLLATE utf8_bin NOT NULL, + `index_name` varchar(64) COLLATE utf8_bin NOT NULL, + `last_update` timestamp NOT NULL DEFAULT 
current_timestamp() ON UPDATE current_timestamp(), + `stat_name` varchar(64) COLLATE utf8_bin NOT NULL, + `stat_value` bigint(20) unsigned NOT NULL, + `sample_size` bigint(20) unsigned DEFAULT NULL, + `stat_description` varchar(1024) COLLATE utf8_bin NOT NULL, + PRIMARY KEY (`database_name`,`table_name`,`index_name`,`stat_name`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin STATS_PERSISTENT=0; +/*!40101 SET character_set_client = @saved_cs_client */; +DROP TABLE IF EXISTS `innodb_table_stats`; +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE `innodb_table_stats` ( + `database_name` varchar(64) COLLATE utf8_bin NOT NULL, + `table_name` varchar(199) COLLATE utf8_bin NOT NULL, + `last_update` timestamp NOT NULL DEFAULT current_timestamp() ON UPDATE current_timestamp(), + `n_rows` bigint(20) unsigned NOT NULL, + `clustered_index_size` bigint(20) unsigned NOT NULL, + `sum_of_other_index_sizes` bigint(20) unsigned NOT NULL, + PRIMARY KEY (`database_name`,`table_name`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin STATS_PERSISTENT=0; +/*!40101 SET character_set_client = @saved_cs_client */; +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE IF NOT EXISTS `general_log` ( + `event_time` timestamp(6) NOT NULL DEFAULT current_timestamp(6) ON UPDATE current_timestamp(6), + `user_host` mediumtext NOT NULL, + `thread_id` bigint(21) unsigned NOT NULL, + `server_id` int(10) unsigned NOT NULL, + `command_type` varchar(64) NOT NULL, + `argument` mediumtext NOT NULL +) ENGINE=CSV DEFAULT CHARSET=utf8 COMMENT='General log'; +/*!40101 SET character_set_client = @saved_cs_client */; +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE IF NOT EXISTS `slow_log` ( + `start_time` timestamp(6) NOT NULL DEFAULT current_timestamp(6) ON UPDATE current_timestamp(6), + `user_host` mediumtext NOT NULL, + `query_time` time(6) NOT NULL, + `lock_time` time(6) NOT NULL, + `rows_sent` int(11) NOT NULL, + `rows_examined` int(11) NOT NULL, + `db` varchar(512) NOT NULL, + `last_insert_id` int(11) NOT NULL, + `insert_id` int(11) NOT NULL, + `server_id` int(10) unsigned NOT NULL, + `sql_text` mediumtext NOT NULL, + `thread_id` bigint(21) unsigned NOT NULL, + `rows_affected` int(11) NOT NULL +) ENGINE=CSV DEFAULT CHARSET=utf8 COMMENT='Slow log'; +/*!40101 SET character_set_client = @saved_cs_client */; +/*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */; + +/*!40101 SET SQL_MODE=@OLD_SQL_MODE */; +/*!40014 SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS */; +/*!40014 SET UNIQUE_CHECKS=@OLD_UNIQUE_CHECKS */; +/*!40101 SET CHARACTER_SET_CLIENT=@OLD_CHARACTER_SET_CLIENT */; +/*!40101 SET CHARACTER_SET_RESULTS=@OLD_CHARACTER_SET_RESULTS */; +/*!40101 SET COLLATION_CONNECTION=@OLD_COLLATION_CONNECTION */; +/*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */; + +# +# With --replace +# + +/*!40101 SET @OLD_CHARACTER_SET_CLIENT=@@CHARACTER_SET_CLIENT */; +/*!40101 SET @OLD_CHARACTER_SET_RESULTS=@@CHARACTER_SET_RESULTS */; +/*!40101 SET @OLD_COLLATION_CONNECTION=@@COLLATION_CONNECTION */; +/*!40101 SET NAMES utf8 */; +/*!40103 SET @OLD_TIME_ZONE=@@TIME_ZONE */; +/*!40103 SET TIME_ZONE='+00:00' */; +/*!40014 SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0 */; +/*!40014 SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0 */; +/*!40101 SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='NO_AUTO_VALUE_ON_ZERO' */; +/*!40111 SET 
@OLD_SQL_NOTES=@@SQL_NOTES, SQL_NOTES=0 */; +DROP TABLE IF EXISTS `innodb_index_stats`; +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE `innodb_index_stats` ( + `database_name` varchar(64) COLLATE utf8_bin NOT NULL, + `table_name` varchar(199) COLLATE utf8_bin NOT NULL, + `index_name` varchar(64) COLLATE utf8_bin NOT NULL, + `last_update` timestamp NOT NULL DEFAULT current_timestamp() ON UPDATE current_timestamp(), + `stat_name` varchar(64) COLLATE utf8_bin NOT NULL, + `stat_value` bigint(20) unsigned NOT NULL, + `sample_size` bigint(20) unsigned DEFAULT NULL, + `stat_description` varchar(1024) COLLATE utf8_bin NOT NULL, + PRIMARY KEY (`database_name`,`table_name`,`index_name`,`stat_name`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin STATS_PERSISTENT=0; +/*!40101 SET character_set_client = @saved_cs_client */; + +LOCK TABLES `innodb_index_stats` WRITE; +/*!40000 ALTER TABLE `innodb_index_stats` DISABLE KEYS */; +/*!40000 ALTER TABLE `innodb_index_stats` ENABLE KEYS */; +UNLOCK TABLES; +DROP TABLE IF EXISTS `innodb_table_stats`; +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE `innodb_table_stats` ( + `database_name` varchar(64) COLLATE utf8_bin NOT NULL, + `table_name` varchar(199) COLLATE utf8_bin NOT NULL, + `last_update` timestamp NOT NULL DEFAULT current_timestamp() ON UPDATE current_timestamp(), + `n_rows` bigint(20) unsigned NOT NULL, + `clustered_index_size` bigint(20) unsigned NOT NULL, + `sum_of_other_index_sizes` bigint(20) unsigned NOT NULL, + PRIMARY KEY (`database_name`,`table_name`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin STATS_PERSISTENT=0; +/*!40101 SET character_set_client = @saved_cs_client */; + +LOCK TABLES `innodb_table_stats` WRITE; +/*!40000 ALTER TABLE `innodb_table_stats` DISABLE KEYS */; +/*!40000 ALTER TABLE `innodb_table_stats` ENABLE KEYS */; +UNLOCK TABLES; +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE IF NOT EXISTS `general_log` ( + `event_time` timestamp(6) NOT NULL DEFAULT current_timestamp(6) ON UPDATE current_timestamp(6), + `user_host` mediumtext NOT NULL, + `thread_id` bigint(21) unsigned NOT NULL, + `server_id` int(10) unsigned NOT NULL, + `command_type` varchar(64) NOT NULL, + `argument` mediumtext NOT NULL +) ENGINE=CSV DEFAULT CHARSET=utf8 COMMENT='General log'; +/*!40101 SET character_set_client = @saved_cs_client */; +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE IF NOT EXISTS `slow_log` ( + `start_time` timestamp(6) NOT NULL DEFAULT current_timestamp(6) ON UPDATE current_timestamp(6), + `user_host` mediumtext NOT NULL, + `query_time` time(6) NOT NULL, + `lock_time` time(6) NOT NULL, + `rows_sent` int(11) NOT NULL, + `rows_examined` int(11) NOT NULL, + `db` varchar(512) NOT NULL, + `last_insert_id` int(11) NOT NULL, + `insert_id` int(11) NOT NULL, + `server_id` int(10) unsigned NOT NULL, + `sql_text` mediumtext NOT NULL, + `thread_id` bigint(21) unsigned NOT NULL, + `rows_affected` int(11) NOT NULL +) ENGINE=CSV DEFAULT CHARSET=utf8 COMMENT='Slow log'; +/*!40101 SET character_set_client = @saved_cs_client */; +/*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */; + +/*!40101 SET SQL_MODE=@OLD_SQL_MODE */; +/*!40014 SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS */; +/*!40014 SET UNIQUE_CHECKS=@OLD_UNIQUE_CHECKS */; +/*!40101 SET 
CHARACTER_SET_CLIENT=@OLD_CHARACTER_SET_CLIENT */; +/*!40101 SET CHARACTER_SET_RESULTS=@OLD_CHARACTER_SET_RESULTS */; +/*!40101 SET COLLATION_CONNECTION=@OLD_COLLATION_CONNECTION */; +/*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */; + +# +# With --insert-ignore +# + +/*!40101 SET @OLD_CHARACTER_SET_CLIENT=@@CHARACTER_SET_CLIENT */; +/*!40101 SET @OLD_CHARACTER_SET_RESULTS=@@CHARACTER_SET_RESULTS */; +/*!40101 SET @OLD_COLLATION_CONNECTION=@@COLLATION_CONNECTION */; +/*!40101 SET NAMES utf8 */; +/*!40103 SET @OLD_TIME_ZONE=@@TIME_ZONE */; +/*!40103 SET TIME_ZONE='+00:00' */; +/*!40014 SET @OLD_UNIQUE_CHECKS=@@UNIQUE_CHECKS, UNIQUE_CHECKS=0 */; +/*!40014 SET @OLD_FOREIGN_KEY_CHECKS=@@FOREIGN_KEY_CHECKS, FOREIGN_KEY_CHECKS=0 */; +/*!40101 SET @OLD_SQL_MODE=@@SQL_MODE, SQL_MODE='NO_AUTO_VALUE_ON_ZERO' */; +/*!40111 SET @OLD_SQL_NOTES=@@SQL_NOTES, SQL_NOTES=0 */; +DROP TABLE IF EXISTS `innodb_index_stats`; +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE `innodb_index_stats` ( + `database_name` varchar(64) COLLATE utf8_bin NOT NULL, + `table_name` varchar(199) COLLATE utf8_bin NOT NULL, + `index_name` varchar(64) COLLATE utf8_bin NOT NULL, + `last_update` timestamp NOT NULL DEFAULT current_timestamp() ON UPDATE current_timestamp(), + `stat_name` varchar(64) COLLATE utf8_bin NOT NULL, + `stat_value` bigint(20) unsigned NOT NULL, + `sample_size` bigint(20) unsigned DEFAULT NULL, + `stat_description` varchar(1024) COLLATE utf8_bin NOT NULL, + PRIMARY KEY (`database_name`,`table_name`,`index_name`,`stat_name`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin STATS_PERSISTENT=0; +/*!40101 SET character_set_client = @saved_cs_client */; + +LOCK TABLES `innodb_index_stats` WRITE; +/*!40000 ALTER TABLE `innodb_index_stats` DISABLE KEYS */; +/*!40000 ALTER TABLE `innodb_index_stats` ENABLE KEYS */; +UNLOCK TABLES; +DROP TABLE IF EXISTS `innodb_table_stats`; +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE `innodb_table_stats` ( + `database_name` varchar(64) COLLATE utf8_bin NOT NULL, + `table_name` varchar(199) COLLATE utf8_bin NOT NULL, + `last_update` timestamp NOT NULL DEFAULT current_timestamp() ON UPDATE current_timestamp(), + `n_rows` bigint(20) unsigned NOT NULL, + `clustered_index_size` bigint(20) unsigned NOT NULL, + `sum_of_other_index_sizes` bigint(20) unsigned NOT NULL, + PRIMARY KEY (`database_name`,`table_name`) +) ENGINE=InnoDB DEFAULT CHARSET=utf8 COLLATE=utf8_bin STATS_PERSISTENT=0; +/*!40101 SET character_set_client = @saved_cs_client */; + +LOCK TABLES `innodb_table_stats` WRITE; +/*!40000 ALTER TABLE `innodb_table_stats` DISABLE KEYS */; +/*!40000 ALTER TABLE `innodb_table_stats` ENABLE KEYS */; +UNLOCK TABLES; +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE IF NOT EXISTS `general_log` ( + `event_time` timestamp(6) NOT NULL DEFAULT current_timestamp(6) ON UPDATE current_timestamp(6), + `user_host` mediumtext NOT NULL, + `thread_id` bigint(21) unsigned NOT NULL, + `server_id` int(10) unsigned NOT NULL, + `command_type` varchar(64) NOT NULL, + `argument` mediumtext NOT NULL +) ENGINE=CSV DEFAULT CHARSET=utf8 COMMENT='General log'; +/*!40101 SET character_set_client = @saved_cs_client */; +/*!40101 SET @saved_cs_client = @@character_set_client */; +/*!40101 SET character_set_client = utf8 */; +CREATE TABLE IF NOT EXISTS `slow_log` ( + `start_time` timestamp(6) NOT NULL DEFAULT 
current_timestamp(6) ON UPDATE current_timestamp(6), + `user_host` mediumtext NOT NULL, + `query_time` time(6) NOT NULL, + `lock_time` time(6) NOT NULL, + `rows_sent` int(11) NOT NULL, + `rows_examined` int(11) NOT NULL, + `db` varchar(512) NOT NULL, + `last_insert_id` int(11) NOT NULL, + `insert_id` int(11) NOT NULL, + `server_id` int(10) unsigned NOT NULL, + `sql_text` mediumtext NOT NULL, + `thread_id` bigint(21) unsigned NOT NULL, + `rows_affected` int(11) NOT NULL +) ENGINE=CSV DEFAULT CHARSET=utf8 COMMENT='Slow log'; +/*!40101 SET character_set_client = @saved_cs_client */; +/*!40103 SET TIME_ZONE=@OLD_TIME_ZONE */; + +/*!40101 SET SQL_MODE=@OLD_SQL_MODE */; +/*!40014 SET FOREIGN_KEY_CHECKS=@OLD_FOREIGN_KEY_CHECKS */; +/*!40014 SET UNIQUE_CHECKS=@OLD_UNIQUE_CHECKS */; +/*!40101 SET CHARACTER_SET_CLIENT=@OLD_CHARACTER_SET_CLIENT */; +/*!40101 SET CHARACTER_SET_RESULTS=@OLD_CHARACTER_SET_RESULTS */; +/*!40101 SET COLLATION_CONNECTION=@OLD_COLLATION_CONNECTION */; +/*!40111 SET SQL_NOTES=@OLD_SQL_NOTES */; + # End of 10.2 tests diff --git a/mysql-test/t/mysqldump.test b/mysql-test/t/mysqldump.test index 4478406b395..8249810d1e5 100644 --- a/mysql-test/t/mysqldump.test +++ b/mysql-test/t/mysqldump.test @@ -2735,4 +2735,27 @@ INSERT INTO t1 (a) VALUES (1),(2),(3); --exec $MYSQL_DUMP --default-character-set=utf8mb4 --triggers --no-data --no-create-info --add-drop-trigger --skip-comments --databases test DROP TABLE t1; +--echo # +--echo # MDEV-20939: Race condition between mysqldump import and InnoDB +--echo # persistent statistics calculation +--echo # + +--let $ignore= --ignore-table=mysql.proxies_priv --ignore-table=mysql.user --ignore-table=mysql.column_stats --ignore-table=mysql.columns_priv --ignore-table=mysql.db --ignore-table=mysql.event --ignore-table=mysql.func --ignore-table=mysql.gtid_slave_pos --ignore-table=mysql.help_category --ignore-table=mysql.help_keyword --ignore-table=mysql.help_relation --ignore-table=mysql.help_topic --ignore-table=mysql.host --ignore-table=mysql.index_stats --ignore-table=mysql.plugin --ignore-table=mysql.proc --ignore-table=mysql.procs_priv --ignore-table=mysql.roles_mapping --ignore-table=mysql.servers --ignore-table=mysql.table_stats --ignore-table=mysql.tables_priv --ignore-table=mysql.time_zone --ignore-table=mysql.time_zone_leap_second --ignore-table=mysql.time_zone_name --ignore-table=mysql.time_zone_transition --ignore-table=mysql.time_zone_transition_type --ignore-table=mysql.general_log --ignore-table=mysql.slow_log +--let $skip_opts= --skip-dump-date --skip-comments + +--echo # +--echo # Without --replace and --insert-ignore +--echo # +--exec $MYSQL_DUMP $ignore $skip_opts mysql + +--echo # +--echo # With --replace +--echo # +--exec $MYSQL_DUMP $ignore $skip_opts --replace mysql + +--echo # +--echo # With --insert-ignore +--echo # +--exec $MYSQL_DUMP $ignore $skip_opts --insert-ignore mysql + --echo # End of 10.2 tests From 75546dfbb14c9f821018732d5fa66ea0096088f6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jan=20Lindstr=C3=B6m?= Date: Wed, 27 Jan 2021 12:49:30 +0200 Subject: [PATCH 111/150] MDEV-24704 : Galera test failure on galera.galera_nopk_unicode Analysis: ========= Reason for test failure was a mutex deadlock between DeadlockChecker with stack Thread 6 (Thread 0xffff70066070 (LWP 24667)): 0 0x0000ffff784e850c in __lll_lock_wait (futex=futex@entry=0xffff04002258, private=0) at lowlevellock.c:46 1 0x0000ffff784e19f0 in __GI___pthread_mutex_lock (mutex=mutex@entry=0xffff04002258) at pthread_mutex_lock.c:135 2 0x0000aaaaac8cd014 in 
inline_mysql_mutex_lock (src_file=0xaaaaacea0f28 "/home/buildbot/buildbot/build/mariadb-10.2.37/sql/wsrep_thd.cc", src_line=762, that=0xffff04002258) at /home/buildbot/buildbot/build/mariadb-10.2.37/include/mysql/psi/mysql_thread.h:675 3 wsrep_thd_is_BF (thd=0xffff040009a8, sync=sync@entry=1 '\001') at /home/buildbot/buildbot/build/mariadb-10.2.37/sql/wsrep_thd.cc:762 4 0x0000aaaaacadce68 in lock_rec_has_to_wait (for_locking=false, lock_is_on_supremum=, lock2=0xffff628952d0, type_mode=291, trx=0xffff62894070) at /home/buildbot/buildbot/build/mariadb-10.2.37/storage/innobase/lock/lock0lock.cc:826 5 lock_has_to_wait (lock1=, lock2=0xffff628952d0) at /home/buildbot/buildbot/build/mariadb-10.2.37/storage/innobase/lock/lock0lock.cc:873 6 0x0000aaaaacadd0b0 in DeadlockChecker::search (this=this@entry=0xffff70061fe8) at /home/buildbot/buildbot/build/mariadb-10.2.37/storage/innobase/lock/lock0lock.cc:7142 7 0x0000aaaaacae2dd8 in DeadlockChecker::check_and_resolve (lock=lock@entry=0xffff62894120, trx=trx@entry=0xffff62894070) at /home/buildbot/buildbot/build/mariadb-10.2.37/storage/innobase/lock/lock0lock.cc:7286 8 0x0000aaaaacae3070 in lock_rec_enqueue_waiting (c_lock=0xffff628952d0, type_mode=type_mode@entry=3, block=block@entry=0xffff62076c40, heap_no=heap_no@entry=2, index=index@entry=0xffff4c076f28, thr=thr@entry=0xffff4c078810, prdt=prdt@entry=0x0) at /home/buildbot/buildbot/build/mariadb-10.2.37/storage/innobase/lock/lock0lock.cc:1796 9 0x0000aaaaacae3900 in lock_rec_lock_slow (thr=0xffff4c078810, index=0xffff4c076f28, heap_no=2, block=0xffff62076c40, mode=3, impl=0) at /home/buildbot/buildbot/build/mariadb-10.2.37/storage/innobase/lock/lock0lock.cc:2106 10 lock_rec_lock (impl=false, mode=3, block=0xffff62076c40, heap_no=2, index=0xffff4c076f28, thr=0xffff4c078810) at /home/buildbot/buildbot/build/mariadb-10.2.37/storage/innobase/lock/lock0lock.cc:2168 11 0x0000aaaaacae3ee8 in lock_sec_rec_read_check_and_lock (flags=flags@entry=0, block=block@entry=0xffff62076c40, rec=rec@entry=0xffff6240407f "\303\221\342\200\232\303\220\302\265\303\220\302\272\303\221\302\201\303\221\342\200\232", index=index@entry=0xffff4c076f28, offsets=0xffff4c080690, offsets@entry=0xffff70062a30, mode=LOCK_X, mode@entry=1653162096, gap_mode=0, gap_mode@entry=281470749427104, thr=thr@entry=0xffff4c078810) at /home/buildbot/buildbot/build/mariadb-10.2.37/storage/innobase/lock/lock0lock.cc:6082 12 0x0000aaaaacb684c4 in sel_set_rec_lock (pcur=0xaaaac841c270, pcur@entry=0xffff4c077d58, rec=0xffff6240407f "\303\221\342\200\232\303\220\302\265\303\220\302\272\303\221\302\201\303\221\342\200\232", rec@entry=0x28 , index=index@entry=0xffff4c076f28, offsets=0xffff70062a30, mode=281472334905456, type=281470749427104, thr=0xffff4c078810, thr@entry=0x9f, mtr=0x0, mtr@entry=0xffff70063928) at /home/buildbot/buildbot/build/mariadb-10.2.37/storage/innobase/row/row0sel.cc:1270 13 0x0000aaaaacb6bb64 in row_search_mvcc (buf=buf@entry=0xffff4c080690 "\376\026", mode=mode@entry=PAGE_CUR_GE, prebuilt=0xffff4c077b98, match_mode=match_mode@entry=1, direction=direction@entry=0) at /home/buildbot/buildbot/build/mariadb-10.2.37/storage/innobase/row/row0sel.cc:5181 14 0x0000aaaaacaae568 in ha_innobase::index_read (this=0xffff4c038a80, buf=0xffff4c080690 "\376\026", key_ptr=, key_len=768, find_flag=) at /home/buildbot/buildbot/build/mariadb-10.2.37/storage/innobase/handler/ha_innodb.cc:9393 15 0x0000aaaaac9201cc in handler::ha_index_read_map (this=0xffff4c038a80, buf=0xffff4c080690 "\376\026", key=0xffff4c07ccf8 "", 
keypart_map=keypart_map@entry=18446744073709551615, find_flag=find_flag@entry=HA_READ_KEY_EXACT) at /home/buildbot/buildbot/build/mariadb-10.2.37/sql/handler.cc:2718 16 0x0000aaaaac9f36b0 in Rows_log_event::find_row (this=this@entry=0xffff4c030098, rgi=rgi@entry=0xffff4c01b510) at /home/buildbot/buildbot/build/mariadb-10.2.37/sql/log_event.cc:13461 17 0x0000aaaaac9f3e44 in Update_rows_log_event::do_exec_row (this=0xffff4c030098, rgi=0xffff4c01b510) at /home/buildbot/buildbot/build/mariadb-10.2.37/sql/log_event.cc:13936 18 0x0000aaaaac9e7ee8 in Rows_log_event::do_apply_event (this=0xffff4c030098, rgi=0xffff4c01b510) at /home/buildbot/buildbot/build/mariadb-10.2.37/sql/log_event.cc:11101 19 0x0000aaaaac8ca4e8 in Log_event::apply_event (rgi=0xffff4c01b510, this=0xffff4c030098) at /home/buildbot/buildbot/build/mariadb-10.2.37/sql/log_event.h:1454 20 wsrep_apply_events (buf_len=0, events_buf=0x1, thd=0xffff4c0009a8) at /home/buildbot/buildbot/build/mariadb-10.2.37/sql/wsrep_applier.cc:164 21 wsrep_apply_cb (ctx=0xffff4c0009a8, buf=0x1, buf_len=18446743528248705000, flags=, meta=) at /home/buildbot/buildbot/build/mariadb-10.2.37/sql/wsrep_applier.cc:267 22 0x0000ffff7322d29c in galera::TrxHandle::apply (this=this@entry=0xffff4c027960, recv_ctx=recv_ctx@entry=0xffff4c0009a8, apply_cb=apply_cb@entry=0xaaaaac8c9fe8 , meta=...) at /home/buildbot/buildbot/build/galera/src/trx_handle.cpp:317 23 0x0000ffff73239664 in apply_trx_ws (recv_ctx=recv_ctx@entry=0xffff4c0009a8, apply_cb=0xaaaaac8c9fe8 , commit_cb=0xaaaaac8ca8d0 , trx=..., meta=...) at /home/buildbot/buildbot/build/galera/src/replicator_smm.cpp:34 24 0x0000ffff7323c0c4 in galera::ReplicatorSMM::apply_trx (this=this@entry=0xaaaac7c7ebc0, recv_ctx=recv_ctx@entry=0xffff4c0009a8, trx=trx@entry=0xffff4c027960) at /home/buildbot/buildbot/build/galera/src/replicator_smm.cpp:454 25 0x0000ffff7323e8b8 in galera::ReplicatorSMM::process_trx (this=0xaaaac7c7ebc0, recv_ctx=0xffff4c0009a8, trx=0xffff4c027960) at /home/buildbot/buildbot/build/galera/src/replicator_smm.cpp:1258 26 0x0000ffff73268f68 in galera::GcsActionSource::dispatch (this=this@entry=0xaaaac7c7f348, recv_ctx=recv_ctx@entry=0xffff4c0009a8, act=..., exit_loop=@0xffff7006535f: false) at /home/buildbot/buildbot/build/galera/src/gcs_action_source.cpp:115 27 0x0000ffff73269dd0 in galera::GcsActionSource::process (this=0xaaaac7c7f348, recv_ctx=0xffff4c0009a8, exit_loop=@0xffff7006535f: false) at /home/buildbot/buildbot/build/galera/src/gcs_action_source.cpp:180 28 0x0000ffff7323ef5c in galera::ReplicatorSMM::async_recv (this=0xaaaac7c7ebc0, recv_ctx=0xffff4c0009a8) at /home/buildbot/buildbot/build/galera/src/replicator_smm.cpp:362 29 0x0000ffff73217760 in galera_recv (gh=, recv_ctx=) at /home/buildbot/buildbot/build/galera/src/wsrep_provider.cpp:244 30 0x0000aaaaac8cb344 in wsrep_replication_process (thd=0xffff4c0009a8) at /home/buildbot/buildbot/build/mariadb-10.2.37/sql/wsrep_thd.cc:486 31 0x0000aaaaac8bc3a0 in start_wsrep_THD (arg=arg@entry=0xaaaac7cb3e38) at /home/buildbot/buildbot/build/mariadb-10.2.37/sql/wsrep_mysqld.cc:2173 32 0x0000aaaaaca89198 in pfs_spawn_thread (arg=) at /home/buildbot/buildbot/build/mariadb-10.2.37/storage/perfschema/pfs.cc:1869 33 0x0000ffff784defc4 in start_thread (arg=0xaaaaaca890d8 ) at pthread_create.c:335 34 0x0000ffff7821c3f0 in thread_start () at ../sysdeps/unix/sysv/linux/aarch64/clone.S:89 and background victim transaction kill with stack Thread 28 (Thread 0xffff485fa070 (LWP 24870)): 0 0x0000ffff784e530c in __pthread_cond_wait 
(cond=cond@entry=0xaaaac83e98e0, mutex=mutex@entry=0xaaaac83e98b0) at pthread_cond_wait.c:186 1 0x0000aaaaacb10788 in os_event::wait (this=0xaaaac83e98a0) at /home/buildbot/buildbot/build/mariadb-10.2.37/storage/innobase/os/os0event.cc:158 2 os_event::wait_low (reset_sig_count=2, this=0xaaaac83e98a0) at /home/buildbot/buildbot/build/mariadb-10.2.37/storage/innobase/os/os0event.cc:325 3 os_event_wait_low (event=0xaaaac83e98a0, reset_sig_count=) at /home/buildbot/buildbot/build/mariadb-10.2.37/storage/innobase/os/os0event.cc:507 4 0x0000aaaaacb98480 in sync_array_wait_event (arr=arr@entry=0xaaaac7dbb450, cell=@0xffff485f96e8: 0xaaaac7dbb560) at /home/buildbot/buildbot/build/mariadb-10.2.37/storage/innobase/sync/sync0arr.cc:471 5 0x0000aaaaacab53c8 in TTASEventMutex::enter (line=19524, filename=0xaaaaacf2ce40 "/home/buildbot/buildbot/build/mariadb-10.2.37/storage/innobase/handler/ha_innodb.cc", max_delay=, max_spins=0, this=0xaaaac83cc8c0) at /home/buildbot/buildbot/build/mariadb-10.2.37/storage/innobase/include/ib0mutex.h:516 6 PolicyMutex >::enter (this=0xaaaac83cc8c0, n_spins=, n_delay=, name=0xaaaaacf2ce40 "/home/buildbot/buildbot/build/mariadb-10.2.37/storage/innobase/handler/ha_innodb.cc", line=19524) at /home/buildbot/buildbot/build/mariadb-10.2.37/storage/innobase/include/ib0mutex.h:637 7 0x0000aaaaacaaa52c in bg_wsrep_kill_trx (void_arg=0xffff4c057430) at /home/buildbot/buildbot/build/mariadb-10.2.37/storage/innobase/handler/ha_innodb.cc:19524 8 0x0000aaaaac79e7f0 in handle_manager (arg=arg@entry=0x0) at /home/buildbot/buildbot/build/mariadb-10.2.37/sql/sql_manager.cc:112 9 0x0000aaaaaca89198 in pfs_spawn_thread (arg=) at /home/buildbot/buildbot/build/mariadb-10.2.37/storage/perfschema/pfs.cc:1869 10 0x0000ffff784defc4 in start_thread (arg=0xaaaaaca890d8 ) at pthread_create.c:335 11 0x0000ffff7821c3f0 in thread_start () at ../sysdeps/unix/sysv/linux/aarch64/clone.S:89 Fix: ==== Do not use THD::LOCK_thd_data mutex if we already hold lock_sys->mutex because it will cause mutexing order violation. Victim transaction holding conflicting locks can't be committed or rolled back while we hold lock_sys->mutex. Thus, it is safe to do wsrep_thd_is_BF call with no additional mutexes. --- storage/innobase/lock/lock0lock.cc | 19 ++++++++++++------- 1 file changed, 12 insertions(+), 7 deletions(-) diff --git a/storage/innobase/lock/lock0lock.cc b/storage/innobase/lock/lock0lock.cc index edd29066c97..4e5c8fbf0ec 100644 --- a/storage/innobase/lock/lock0lock.cc +++ b/storage/innobase/lock/lock0lock.cc @@ -658,6 +658,11 @@ static void wsrep_assert_no_bf_bf_wait( { ut_ad(!lock_rec1 || lock_get_type_low(lock_rec1) == LOCK_REC); ut_ad(lock_get_type_low(lock_rec2) == LOCK_REC); + ut_ad(lock_mutex_own()); + + /* Note that we are holding lock_sys->mutex, thus we should + not acquire THD::LOCK_thd_data mutex below to avoid mutexing + order violation. 
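+ Acquiring THD::LOCK_thd_data while already holding lock_sys->mutex
+ can deadlock against the background victim kill thread
+ (bg_wsrep_kill_trx, see MDEV-24704). The victim transaction holding
+ the conflicting lock cannot be committed or rolled back while we hold
+ lock_sys->mutex, so its BF status can safely be read below without
+ taking LOCK_thd_data.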
*/ if (!trx1->is_wsrep() || !lock_rec2->trx->is_wsrep()) return; @@ -673,12 +678,8 @@ static void wsrep_assert_no_bf_bf_wait( /* avoiding BF-BF conflict assert, if victim is already aborting or rolling back for replaying */ - wsrep_thd_LOCK(lock_rec2->trx->mysql_thd); - if (wsrep_trx_is_aborting(lock_rec2->trx->mysql_thd)) { - wsrep_thd_UNLOCK(lock_rec2->trx->mysql_thd); + if (wsrep_trx_is_aborting(lock_rec2->trx->mysql_thd)) return; - } - wsrep_thd_UNLOCK(lock_rec2->trx->mysql_thd); mtr_t mtr; @@ -735,6 +736,7 @@ lock_rec_has_to_wait( { ut_ad(trx && lock2); ut_ad(lock_get_type_low(lock2) == LOCK_REC); + ut_ad(lock_mutex_own()); if (trx != lock2->trx && !lock_mode_compatible(static_cast( @@ -821,9 +823,12 @@ lock_rec_has_to_wait( (brute force). If conflicting transaction is also wsrep high priority transaction we should avoid lock conflict because ordering of these transactions is already decided and - conflicting transaction will be later replayed. */ + conflicting transaction will be later replayed. Note + that thread holding conflicting lock can't be + committed or rolled back while we hold + lock_sys->mutex. */ if (trx->is_wsrep_UK_scan() - && wsrep_thd_is_BF(lock2->trx->mysql_thd, true)) { + && wsrep_thd_is_BF(lock2->trx->mysql_thd, false)) { return (FALSE); } From 7edd4294be4e59c19539167620ff140e3f5e7f58 Mon Sep 17 00:00:00 2001 From: Olivier Bertrand Date: Thu, 28 Jan 2021 01:02:29 +0100 Subject: [PATCH 112/150] - Continue BSON development modified: storage/connect/bson.cpp modified: storage/connect/bson.h modified: storage/connect/bsonudf.cpp modified: storage/connect/bsonudf.h modified: storage/connect/ha_connect.cc modified: storage/connect/jsonudf.cpp modified: storage/connect/mysql-test/connect/r/bson.result modified: storage/connect/mysql-test/connect/r/bson_udf.result modified: storage/connect/mysql-test/connect/t/bson_udf.inc modified: storage/connect/mysql-test/connect/t/bson_udf.test modified: storage/connect/mysql-test/connect/t/bson_udf2.inc modified: storage/connect/tabbson.cpp modified: storage/connect/tabbson.h --- storage/connect/bson.cpp | 30 +-- storage/connect/bson.h | 2 +- storage/connect/bsonudf.cpp | 215 +++++++++++++++--- storage/connect/bsonudf.h | 12 +- storage/connect/ha_connect.cc | 16 +- storage/connect/jsonudf.cpp | 4 +- .../connect/mysql-test/connect/r/bson.result | 2 +- .../mysql-test/connect/r/bson_udf.result | 51 +++-- .../connect/mysql-test/connect/t/bson_udf.inc | 2 + .../mysql-test/connect/t/bson_udf.test | 1 + .../mysql-test/connect/t/bson_udf2.inc | 2 + storage/connect/tabbson.cpp | 128 ++++++----- storage/connect/tabbson.h | 7 +- 13 files changed, 341 insertions(+), 131 deletions(-) diff --git a/storage/connect/bson.cpp b/storage/connect/bson.cpp index fc58303a73f..5731ce9eac5 100644 --- a/storage/connect/bson.cpp +++ b/storage/connect/bson.cpp @@ -37,6 +37,8 @@ #undef SE_CATCH // Does not work for Linux #endif +int GetJsonDefPrec(void); + #if defined(SE_CATCH) /**************************************************************************/ /* This is the support of catching C interrupts to prevent crashes. */ @@ -1722,14 +1724,22 @@ void BJSON::SetBigint(PBVAL vlp, longlong ll) /***********************************************************************/ /* Set the Value's value as the given DOUBLE. 
*/ /***********************************************************************/ -void BJSON::SetFloat(PBVAL vlp, double d, int nd) +void BJSON::SetFloat(PBVAL vlp, double d, int prec) { - double* dp = (double*)BsonSubAlloc(sizeof(double)); + int nd = MY_MIN((prec < 0) ? GetJsonDefPrec() : prec, 16); - *dp = d; - vlp->To_Val = MOF(dp); - vlp->Nd = MY_MIN(nd, 16); - vlp->Type = TYPE_DBL; + if (nd < 6 && d >= FLT_MIN && d <= FLT_MAX) { + vlp->F = (float)d; + vlp->Type = TYPE_FLOAT; + } else { + double* dp = (double*)BsonSubAlloc(sizeof(double)); + + *dp = d; + vlp->To_Val = MOF(dp); + vlp->Type = TYPE_DBL; + } // endif nd + + vlp->Nd = nd; } // end of SetFloat /***********************************************************************/ @@ -1746,13 +1756,7 @@ void BJSON::SetFloat(PBVAL vlp, PSZ s) for (--p; *p == '0'; nd--, p--); } // endif p - if (nd < 6 && d >= FLT_MIN && d <= FLT_MAX) { - vlp->F = (float)d; - vlp->Nd = nd; - vlp->Type = TYPE_FLOAT; - } else - SetFloat(vlp, d, nd); - + SetFloat(vlp, d, nd); } // end of SetFloat /***********************************************************************/ diff --git a/storage/connect/bson.h b/storage/connect/bson.h index 6eb6c019c1a..32a9c49b00a 100644 --- a/storage/connect/bson.h +++ b/storage/connect/bson.h @@ -149,7 +149,7 @@ public: void SetString(PBVAL vlp, PSZ s, int ci = 0); void SetInteger(PBVAL vlp, int n); void SetBigint(PBVAL vlp, longlong ll); - void SetFloat(PBVAL vlp, double f, int nd = 16); + void SetFloat(PBVAL vlp, double f, int nd = -1); void SetFloat(PBVAL vlp, PSZ s); void SetBool(PBVAL vlp, bool b); void Clear(PBVAL vlp) { vlp->N = 0; vlp->Nd = 0; vlp->Next = 0; } diff --git a/storage/connect/bsonudf.cpp b/storage/connect/bsonudf.cpp index 719b7d7509a..dd9f95bc4ba 100644 --- a/storage/connect/bsonudf.cpp +++ b/storage/connect/bsonudf.cpp @@ -27,11 +27,20 @@ #endif #define M 6 +int JsonDefPrec = -1; +int GetDefaultPrec(void); int IsArgJson(UDF_ARGS* args, uint i); void SetChanged(PBSON bsp); /* --------------------------------- JSON UDF ---------------------------------- */ +/*********************************************************************************/ +/* Replaces GetJsonGrpSize not usable when CONNECT is not installed. */ +/*********************************************************************************/ +int GetJsonDefPrec(void) { + return (JsonDefPrec < 0) ? GetDefaultPrec() : JsonDefPrec; +} /* end of GetJsonDefPrec */ + /*********************************************************************************/ /* Program for saving the status of the memory pools. */ /*********************************************************************************/ @@ -262,6 +271,7 @@ my_bool BJNX::SetArrayOptions(PGLOBAL g, char* p, int i, PSZ nm) return true; } // endif's +#if 0 // For calculated arrays, a local Value must be used switch (jnp->Op) { case OP_NUM: @@ -293,6 +303,7 @@ my_bool BJNX::SetArrayOptions(PGLOBAL g, char* p, int i, PSZ nm) if (jnp->Valp) MulVal = AllocateValue(g, jnp->Valp); +#endif // 0 return false; } // end of SetArrayOptions @@ -376,7 +387,7 @@ my_bool BJNX::ParseJpath(PGLOBAL g) } // endfor i, p Nod = i; - MulVal = AllocateValue(g, Value); +//MulVal = AllocateValue(g, Value); if (trace(1)) for (i = 0; i < Nod; i++) @@ -433,20 +444,42 @@ PSZ BJNX::MakeKey(UDF_ARGS *args, int i) } // end of MakeKey /*********************************************************************************/ -/* MakeJson: Serialize the json item and set value to it. */ +/* MakeJson: Make the Json tree to serialize. 
*/ /*********************************************************************************/ -PVAL BJNX::MakeJson(PGLOBAL g, PBVAL bvp) +PBVAL BJNX::MakeJson(PGLOBAL g, PBVAL bvp, int n) { - if (Value->IsTypeNum()) { - strcpy(g->Message, "Cannot make Json for a numeric value"); - Value->Reset(); - } else if (bvp->Type != TYPE_JAR && bvp->Type != TYPE_JOB) { - strcpy(g->Message, "Target is not an array or object"); - Value->Reset(); - } else - Value->SetValue_psz(Serialize(g, bvp, NULL, 0)); + PBVAL vlp, jvp = bvp; - return Value; + if (n < Nod -1) { + if (bvp->Type == TYPE_JAR) { + int ars = GetArraySize(bvp); + PJNODE jnp = &Nodes[n]; + + jvp = NewVal(TYPE_JAR); + jnp->Op = OP_EQ; + + for (int i = 0; i < ars; i++) { + jnp->Rank = i; + vlp = GetRowValue(g, bvp, n); + AddArrayValue(jvp, DupVal(vlp)); + } // endfor i + + jnp->Op = OP_XX; + jnp->Rank = 0; + } else if(bvp->Type == TYPE_JOB) { + jvp = NewVal(TYPE_JOB); + + for (PBPR prp = GetObject(bvp); prp; prp = GetNext(prp)) { + vlp = GetRowValue(g, GetVlp(prp), n + 1); + SetKeyValue(jvp, vlp, MZP(prp->Key)); + } // endfor prp + + } // endif Type + + } // endif n + + Jb = true; + return jvp; } // end of MakeJson /*********************************************************************************/ @@ -459,15 +492,18 @@ void BJNX::SetJsonValue(PGLOBAL g, PVAL vp, PBVAL vlp) if (Jb) { vp->SetValue_psz(Serialize(g, vlp, NULL, 0)); + Jb = false; } else switch (vlp->Type) { case TYPE_DTM: case TYPE_STRG: vp->SetValue_psz(GetString(vlp)); break; case TYPE_INTG: - case TYPE_BINT: vp->SetValue(GetInteger(vlp)); break; + case TYPE_BINT: + vp->SetValue(GetBigint(vlp)); + break; case TYPE_DBL: case TYPE_FLOAT: if (vp->IsTypeNum()) @@ -532,7 +568,7 @@ PVAL BJNX::GetColumnValue(PGLOBAL g, PBVAL row, int i) /*********************************************************************************/ /* GetRowValue: */ /*********************************************************************************/ -PBVAL BJNX::GetRowValue(PGLOBAL g, PBVAL row, int i, my_bool b) +PBVAL BJNX::GetRowValue(PGLOBAL g, PBVAL row, int i) { my_bool expd = false; PBVAL bap; @@ -544,9 +580,7 @@ PBVAL BJNX::GetRowValue(PGLOBAL g, PBVAL row, int i, my_bool b) vlp = NewVal(Value); return vlp; } else if (Nodes[i].Op == OP_XX) { - Jb = b; - // return DupVal(g, row); - return row; // or last line ??? + return MakeJson(g, row, i); } else if (Nodes[i].Op == OP_EXP) { PUSH_WARNING("Expand not supported by this function"); return NULL; @@ -611,14 +645,95 @@ PVAL BJNX::ExpandArray(PGLOBAL g, PBVAL arp, int n) } // end of ExpandArray /*********************************************************************************/ -/* CalculateArray: NIY */ +/* Get the value used for calculating the array. */ +/*********************************************************************************/ +PVAL BJNX::GetCalcValue(PGLOBAL g, PBVAL bap, int n) +{ + // For calculated arrays, a local Value must be used + int lng = 0; + short type, prec = 0; + bool b = n < Nod - 1; + PVAL valp; + PBVAL vlp, vp; + OPVAL op = Nodes[n].Op; + + switch (op) { + case OP_NUM: + type = TYPE_INT; + break; + case OP_ADD: + case OP_MULT: + if (!IsTypeNum(Buf_Type)) { + type = TYPE_INT; + prec = 0; + + for (vlp = GetArray(bap); vlp; vlp = GetNext(vlp)) { + vp = (b && IsJson(vlp)) ? 
GetRowValue(g, vlp, n + 1) : vlp; + + switch (vp->Type) { + case TYPE_BINT: + if (type == TYPE_INT) + type = TYPE_BIGINT; + + break; + case TYPE_DBL: + case TYPE_FLOAT: + type = TYPE_DOUBLE; + prec = MY_MAX(prec, vp->Nd); + break; + default: + break; + } // endswitch Type + + } // endfor vlp + + } else { + type = Buf_Type; + prec = GetPrecision(); + } // endif Buf_Type + + break; + case OP_SEP: + if (IsTypeChar(Buf_Type)) { + type = TYPE_DOUBLE; + prec = 2; + } else + type = Buf_Type; + + break; + case OP_MIN: + case OP_MAX: + type = Buf_Type; + lng = Long; + prec = GetPrecision(); + break; + case OP_CNC: + type = TYPE_STRING; + + if (IsTypeChar(Buf_Type)) { + lng = Long; + prec = GetPrecision(); + } else + lng = 512; + + break; + default: + break; + } // endswitch Op + + return valp = AllocateValue(g, type, lng, prec); +} // end of GetCalcValue + +/*********************************************************************************/ +/* CalculateArray */ /*********************************************************************************/ PVAL BJNX::CalculateArray(PGLOBAL g, PBVAL bap, int n) { int i, ars = GetArraySize(bap), nv = 0; bool err; OPVAL op = Nodes[n].Op; - PVAL val[2], vp = Nodes[n].Valp; + PVAL val[2], vp = GetCalcValue(g, bap, n); + PVAL mulval = AllocateValue(g, vp); PBVAL bvrp, bvp; BVAL bval; @@ -647,9 +762,9 @@ PVAL BJNX::CalculateArray(PGLOBAL g, PBVAL bap, int n) SetJsonValue(g, vp, bvp); continue; } else - SetJsonValue(g, MulVal, bvp); + SetJsonValue(g, mulval, bvp); - if (!MulVal->IsNull()) { + if (!mulval->IsNull()) { switch (op) { case OP_CNC: if (Nodes[n].CncVal) { @@ -657,18 +772,18 @@ PVAL BJNX::CalculateArray(PGLOBAL g, PBVAL bap, int n) err = vp->Compute(g, val, 1, op); } // endif CncVal - val[0] = MulVal; + val[0] = mulval; err = vp->Compute(g, val, 1, op); break; // case OP_NUM: case OP_SEP: - val[0] = Nodes[n].Valp; - val[1] = MulVal; + val[0] = vp; + val[1] = mulval; err = vp->Compute(g, val, 2, OP_ADD); break; default: - val[0] = Nodes[n].Valp; - val[1] = MulVal; + val[0] = vp; + val[1] = mulval; err = vp->Compute(g, val, 2, op); } // endswitch Op @@ -690,9 +805,9 @@ PVAL BJNX::CalculateArray(PGLOBAL g, PBVAL bap, int n) if (op == OP_SEP) { // Calculate average - MulVal->SetValue(nv); + mulval->SetValue(nv); val[0] = vp; - val[1] = MulVal; + val[1] = mulval; if (vp->Compute(g, val, 2, OP_DIV)) vp->Reset(); @@ -2698,6 +2813,45 @@ void bson_object_values_deinit(UDF_INIT* initid) JsonFreeMem((PGLOBAL)initid->ptr); } // end of bson_object_values_deinit +/*********************************************************************************/ +/* Set the value of JsonGrpSize. */ +/*********************************************************************************/ +my_bool bsonset_def_prec_init(UDF_INIT *initid, UDF_ARGS *args, char *message) +{ + if (args->arg_count != 1 || args->arg_type[0] != INT_RESULT) { + strcpy(message, "This function must have 1 integer argument"); + return true; + } else + return false; + +} // end of bsonset_def_prec_init + +long long bsonset_def_prec(UDF_INIT *initid, UDF_ARGS *args, char *, char *) +{ + long long n = *(long long*)args->args[0]; + + JsonDefPrec = (int)n; + return (long long)GetJsonDefPrec(); +} // end of bsonset_def_prec + +/*********************************************************************************/ +/* Get the value of JsonGrpSize. 
*/ +/*********************************************************************************/ +my_bool bsonget_def_prec_init(UDF_INIT *initid, UDF_ARGS *args, char *message) +{ + if (args->arg_count != 0) { + strcpy(message, "This function must have no arguments"); + return true; + } else + return false; + +} // end of bsonget_def_prec_init + +long long bsonget_def_prec(UDF_INIT *initid, UDF_ARGS *args, char *, char *) +{ + return (long long)GetJsonDefPrec(); +} // end of bsonget_def_prec + /*********************************************************************************/ /* Set the value of JsonGrpSize. */ /*********************************************************************************/ @@ -4714,7 +4868,8 @@ char *bson_serialize(UDF_INIT *initid, UDF_ARGS *args, char *result, BJNX bnx(bsp->G); PBVAL bvp = (args->arg_count == 1) ? (PBVAL)bsp->Jsp : (PBVAL)bsp->Top; - if (!(str = bnx.Serialize(g, bvp, bsp->Filename, bsp->Pretty))) +// if (!(str = bnx.Serialize(g, bvp, bsp->Filename, bsp->Pretty))) + if (!(str = bnx.Serialize(g, bvp, NULL, 0))) str = strcpy(result, g->Message); // Keep result of constant function @@ -5513,7 +5668,7 @@ void bbin_object_values_deinit(UDF_INIT* initid) my_bool bbin_get_item_init(UDF_INIT *initid, UDF_ARGS *args, char *message) { return bson_get_item_init(initid, args, message); -} // end of bbin_get_item_init +} // end of bbin_get_item_init char *bbin_get_item(UDF_INIT *initid, UDF_ARGS *args, char *result, unsigned long *res_length, char *is_null, char *error) diff --git a/storage/connect/bsonudf.h b/storage/connect/bsonudf.h index 01b9b9d55d5..bbfd1ceed80 100644 --- a/storage/connect/bsonudf.h +++ b/storage/connect/bsonudf.h @@ -81,6 +81,7 @@ typedef struct _jpn { extern uint JsonGrpSize; uint GetJsonGroupSize(void); + typedef class BJNX* PBJNX; /*********************************************************************************/ @@ -102,7 +103,7 @@ public: my_bool SetJpath(PGLOBAL g, char* path, my_bool jb = false); my_bool ParseJpath(PGLOBAL g); void ReadValue(PGLOBAL g); - PBVAL GetRowValue(PGLOBAL g, PBVAL row, int i, my_bool b = true); + PBVAL GetRowValue(PGLOBAL g, PBVAL row, int i); PBVAL GetJson(PGLOBAL g); my_bool CheckPath(PGLOBAL g); my_bool CheckPath(PGLOBAL g, UDF_ARGS* args, PBVAL jsp, PBVAL& jvp, int n); @@ -123,7 +124,8 @@ protected: PVAL GetColumnValue(PGLOBAL g, PBVAL row, int i); PVAL ExpandArray(PGLOBAL g, PBVAL arp, int n); PVAL CalculateArray(PGLOBAL g, PBVAL arp, int n); - PVAL MakeJson(PGLOBAL g, PBVAL bvp); + PVAL GetCalcValue(PGLOBAL g, PBVAL bap, int n); + PBVAL MakeJson(PGLOBAL g, PBVAL bvp, int n); void SetJsonValue(PGLOBAL g, PVAL vp, PBVAL vlp); PBVAL GetRow(PGLOBAL g); PBVAL MoveVal(PBVAL vlp); @@ -259,6 +261,12 @@ extern "C" { DllExport double bsonget_real(UDF_INIT*, UDF_ARGS*, char*, char*); DllExport void bsonget_real_deinit(UDF_INIT*); + DllExport my_bool bsonset_def_prec_init(UDF_INIT*, UDF_ARGS*, char*); + DllExport long long bsonset_def_prec(UDF_INIT*, UDF_ARGS*, char*, char*); + + DllExport my_bool bsonget_def_prec_init(UDF_INIT*, UDF_ARGS*, char*); + DllExport long long bsonget_def_prec(UDF_INIT*, UDF_ARGS*, char*, char*); + DllExport my_bool bsonset_grp_size_init(UDF_INIT*, UDF_ARGS*, char*); DllExport long long bsonset_grp_size(UDF_INIT*, UDF_ARGS*, char*, char*); diff --git a/storage/connect/ha_connect.cc b/storage/connect/ha_connect.cc index 9b40b5c9a13..69646e22e30 100644 --- a/storage/connect/ha_connect.cc +++ b/storage/connect/ha_connect.cc @@ -170,7 +170,7 @@ #define JSONMAX 10 // JSON Default max grp size extern 
"C" { - char version[]= "Version 1.07.0002 December 25, 2020"; + char version[]= "Version 1.07.0002 January 27, 2021"; #if defined(__WIN__) char compver[]= "Version 1.07.0002 " __DATE__ " " __TIME__; char slash= '\\'; @@ -255,6 +255,7 @@ USETEMP UseTemp(void); int GetConvSize(void); TYPCONV GetTypeConv(void); int GetDefaultDepth(void); +int GetDefaultPrec(void); bool JsonAllPath(void); char *GetJsonNull(void); uint GetJsonGrpSize(void); @@ -420,9 +421,15 @@ static MYSQL_THDVAR_INT(default_depth, "Default depth used by Json, XML and Mongo discovery", NULL, NULL, 5, -1, 16, 1); // Defaults to 5 +// Default precision for doubles +static MYSQL_THDVAR_INT(default_prec, + PLUGIN_VAR_RQCMDARG, + "Default precision used for doubles", + NULL, NULL, 6, 0, 16, 1); // Defaults to 6 + // Estimate max number of rows for JSON aggregate functions static MYSQL_THDVAR_UINT(json_grp_size, - PLUGIN_VAR_RQCMDARG, // opt + PLUGIN_VAR_RQCMDARG, // opt "max number of rows for JSON aggregate functions.", NULL, NULL, JSONMAX, 1, INT_MAX, 1); @@ -495,6 +502,7 @@ TYPCONV GetTypeConv(void) {return (TYPCONV)THDVAR(current_thd, type_conv);} char *GetJsonNull(void) {return connect_hton ? THDVAR(current_thd, json_null) : NULL;} int GetDefaultDepth(void) {return THDVAR(current_thd, default_depth);} +int GetDefaultPrec(void) {return THDVAR(current_thd, default_prec);} uint GetJsonGrpSize(void) {return connect_hton ? THDVAR(current_thd, json_grp_size) : 10;} size_t GetWorkSize(void) {return (size_t)THDVAR(current_thd, work_size);} @@ -4834,6 +4842,7 @@ int ha_connect::start_stmt(THD *thd, thr_lock_type lock_type) lock.cc by lock_external() and unlock_external() in lock.cc; the section "locking functions for mysql" in lock.cc; copy_data_between_tables() in sql_table.cc. + */ int ha_connect::external_lock(THD *thd, int lock_type) { @@ -7445,7 +7454,8 @@ static struct st_mysql_sys_var* connect_system_variables[]= { MYSQL_SYSVAR(json_null), MYSQL_SYSVAR(json_all_path), MYSQL_SYSVAR(default_depth), - MYSQL_SYSVAR(json_grp_size), + MYSQL_SYSVAR(default_prec), + MYSQL_SYSVAR(json_grp_size), #if defined(JAVA_SUPPORT) MYSQL_SYSVAR(jvm_path), MYSQL_SYSVAR(class_path), diff --git a/storage/connect/jsonudf.cpp b/storage/connect/jsonudf.cpp index 53818cbe00b..49a36407cec 100644 --- a/storage/connect/jsonudf.cpp +++ b/storage/connect/jsonudf.cpp @@ -354,9 +354,11 @@ void JSNX::SetJsonValue(PGLOBAL g, PVAL vp, PJVAL val) vp->SetValue_psz(val->GetString(g)); break; case TYPE_INTG: - case TYPE_BINT: vp->SetValue(val->GetInteger()); break; + case TYPE_BINT: + vp->SetValue(val->GetBigint()); + break; case TYPE_DBL: if (vp->IsTypeNum()) vp->SetValue(val->GetFloat()); diff --git a/storage/connect/mysql-test/connect/r/bson.result b/storage/connect/mysql-test/connect/r/bson.result index fd15e020aac..98abdbb8744 100644 --- a/storage/connect/mysql-test/connect/r/bson.result +++ b/storage/connect/mysql-test/connect/r/bson.result @@ -279,7 +279,7 @@ SELECT * FROM t1; WHO WEEKS SUMS SUM AVGS SUMAVG AVGSUM AVGAVG AVERAGE Joe 3, 4, 5 69.00+83.00+26.00 178.00 17.25+16.60+13.00 46.85 59.33 15.62 16.18 Beth 3, 4, 5 16.00+32.00+32.00 80.00 16.00+16.00+16.00 48.00 26.67 16.00 16.00 -Janet 3, 4, 5 55.00+17.00+57.00 129.00 18.33+17.00+14.25 49.58 43.00 16.53 16.12 +Janet 3, 4, 5 55.00+17.00+57.00 129.00 18.33+17.00+14.25 49.58 43.00 16.53 16.13 DROP TABLE t1; # # Expand expense in 3 one week tables diff --git a/storage/connect/mysql-test/connect/r/bson_udf.result b/storage/connect/mysql-test/connect/r/bson_udf.result index a0b93f2e547..14b3629e4b1 100644 --- 
a/storage/connect/mysql-test/connect/r/bson_udf.result +++ b/storage/connect/mysql-test/connect/r/bson_udf.result @@ -176,6 +176,9 @@ Value List # # Test UDF's with column arguments # +SELECT Bsonset_Def_Prec(2); +Bsonset_Def_Prec(2) +2 CREATE TABLE t2 ( ISBN CHAR(15), @@ -213,7 +216,7 @@ SALARY DOUBLE(8,2) NOT NULL FLAG=52 ) ENGINE=CONNECT TABLE_TYPE=FIX BLOCK_SIZE=8 FILE_NAME='employee.dat' ENDING=1; SELECT Bson_Make_Object(SERIALNO, NAME, TITLE, SALARY) FROM t3 WHERE NAME = 'MERCHANT'; Bson_Make_Object(SERIALNO, NAME, TITLE, SALARY) -{"SERIALNO":"78943","NAME":"MERCHANT","TITLE":"SALESMAN","SALARY":8700.0000000000000000} +{"SERIALNO":"78943","NAME":"MERCHANT","TITLE":"SALESMAN","SALARY":8700.00} SELECT DEPARTMENT, Bson_Array_Grp(NAME) FROM t3 GROUP BY DEPARTMENT; DEPARTMENT Bson_Array_Grp(NAME) 0021 ["STRONG","SHORTSIGHT"] @@ -249,30 +252,30 @@ Bson_Make_Object(DEPARTMENT, Bson_Array_Grp(NAME) json_NAMES) {"DEPARTMENT":"2452","NAMES":["BIGHEAD","ORELLY","BIGHORN","SMITH","CHERRY"]} SELECT Bson_Make_Object(DEPARTMENT, Bson_Array_Grp(Bson_Make_Object(SERIALNO, NAME, TITLE, SALARY)) json_EMPLOYES) FROM t3 GROUP BY DEPARTMENT; Bson_Make_Object(DEPARTMENT, Bson_Array_Grp(Bson_Make_Object(SERIALNO, NAME, TITLE, SALARY)) json_EMPLOYES) -{"DEPARTMENT":"0021","EMPLOYES":[{"SERIALNO":"87777","NAME":"STRONG","TITLE":"DIRECTOR","SALARY":23000.0000000000000000},{"SERIALNO":"22222","NAME":"SHORTSIGHT","TITLE":"SECRETARY","SALARY":5500.0000000000000000}]} -{"DEPARTMENT":"0318","EMPLOYES":[{"SERIALNO":"74200","NAME":"BANCROFT","TITLE":"SALESMAN","SALARY":9600.0000000000000000},{"SERIALNO":"24888","NAME":"PLUMHEAD","TITLE":"TYPIST","SALARY":2800.0000000000000000},{"SERIALNO":"27845","NAME":"HONEY","TITLE":"SECRETARY","SALARY":4900.0000000000000000},{"SERIALNO":"73452","NAME":"TONGHO","TITLE":"ENGINEER","SALARY":6800.0000000000000000},{"SERIALNO":"74234","NAME":"WALTER","TITLE":"ENGINEER","SALARY":7400.0000000000000000},{"SERIALNO":"77777","NAME":"SHRINKY","TITLE":"ADMINISTRATOR","SALARY":7500.0000000000000000},{"SERIALNO":"70012","NAME":"WERTHER","TITLE":"DIRECTOR","SALARY":14500.0000000000000000},{"SERIALNO":"78943","NAME":"MERCHANT","TITLE":"SALESMAN","SALARY":8700.0000000000000000},{"SERIALNO":"73111","NAME":"WHEELFOR","TITLE":"SALESMAN","SALARY":10030.0000000000000000}]} -{"DEPARTMENT":"0319","EMPLOYES":[{"SERIALNO":"76543","NAME":"BULLOZER","TITLE":"SALESMAN","SALARY":14800.0000000000000000},{"SERIALNO":"40567","NAME":"QUINN","TITLE":"DIRECTOR","SALARY":14000.0000000000000000},{"SERIALNO":"00137","NAME":"BROWNY","TITLE":"ENGINEER","SALARY":10500.0000000000000000},{"SERIALNO":"12345","NAME":"KITTY","TITLE":"TYPIST","SALARY":3000.4499999999998181},{"SERIALNO":"33333","NAME":"MONAPENNY","TITLE":"SECRETARY","SALARY":3800.0000000000000000},{"SERIALNO":"00023","NAME":"MARTIN","TITLE":"ENGINEER","SALARY":10000.0000000000000000},{"SERIALNO":"07654","NAME":"FUNNIGUY","TITLE":"ADMINISTRATOR","SALARY":8500.0000000000000000},{"SERIALNO":"45678","NAME":"BUGHAPPY","TITLE":"PROGRAMMER","SALARY":8500.0000000000000000},{"SERIALNO":"56789","NAME":"FODDERMAN","TITLE":"SALESMAN","SALARY":7000.0000000000000000},{"SERIALNO":"55555","NAME":"MESSIFUL","TITLE":"SECRETARY","SALARY":5000.5000000000000000},{"SERIALNO":"98765","NAME":"GOOSEPEN","TITLE":"ADMINISTRATOR","SALARY":4700.0000000000000000}]} 
-{"DEPARTMENT":"2452","EMPLOYES":[{"SERIALNO":"34567","NAME":"BIGHEAD","TITLE":"SCIENTIST","SALARY":8000.0000000000000000},{"SERIALNO":"31416","NAME":"ORELLY","TITLE":"ENGINEER","SALARY":13400.0000000000000000},{"SERIALNO":"36666","NAME":"BIGHORN","TITLE":"SCIENTIST","SALARY":11000.0000000000000000},{"SERIALNO":"02345","NAME":"SMITH","TITLE":"ENGINEER","SALARY":9000.0000000000000000},{"SERIALNO":"11111","NAME":"CHERRY","TITLE":"SECRETARY","SALARY":4500.0000000000000000}]} +{"DEPARTMENT":"0021","EMPLOYES":[{"SERIALNO":"87777","NAME":"STRONG","TITLE":"DIRECTOR","SALARY":23000.00},{"SERIALNO":"22222","NAME":"SHORTSIGHT","TITLE":"SECRETARY","SALARY":5500.00}]} +{"DEPARTMENT":"0318","EMPLOYES":[{"SERIALNO":"74200","NAME":"BANCROFT","TITLE":"SALESMAN","SALARY":9600.00},{"SERIALNO":"24888","NAME":"PLUMHEAD","TITLE":"TYPIST","SALARY":2800.00},{"SERIALNO":"27845","NAME":"HONEY","TITLE":"SECRETARY","SALARY":4900.00},{"SERIALNO":"73452","NAME":"TONGHO","TITLE":"ENGINEER","SALARY":6800.00},{"SERIALNO":"74234","NAME":"WALTER","TITLE":"ENGINEER","SALARY":7400.00},{"SERIALNO":"77777","NAME":"SHRINKY","TITLE":"ADMINISTRATOR","SALARY":7500.00},{"SERIALNO":"70012","NAME":"WERTHER","TITLE":"DIRECTOR","SALARY":14500.00},{"SERIALNO":"78943","NAME":"MERCHANT","TITLE":"SALESMAN","SALARY":8700.00},{"SERIALNO":"73111","NAME":"WHEELFOR","TITLE":"SALESMAN","SALARY":10030.00}]} +{"DEPARTMENT":"0319","EMPLOYES":[{"SERIALNO":"76543","NAME":"BULLOZER","TITLE":"SALESMAN","SALARY":14800.00},{"SERIALNO":"40567","NAME":"QUINN","TITLE":"DIRECTOR","SALARY":14000.00},{"SERIALNO":"00137","NAME":"BROWNY","TITLE":"ENGINEER","SALARY":10500.00},{"SERIALNO":"12345","NAME":"KITTY","TITLE":"TYPIST","SALARY":3000.45},{"SERIALNO":"33333","NAME":"MONAPENNY","TITLE":"SECRETARY","SALARY":3800.00},{"SERIALNO":"00023","NAME":"MARTIN","TITLE":"ENGINEER","SALARY":10000.00},{"SERIALNO":"07654","NAME":"FUNNIGUY","TITLE":"ADMINISTRATOR","SALARY":8500.00},{"SERIALNO":"45678","NAME":"BUGHAPPY","TITLE":"PROGRAMMER","SALARY":8500.00},{"SERIALNO":"56789","NAME":"FODDERMAN","TITLE":"SALESMAN","SALARY":7000.00},{"SERIALNO":"55555","NAME":"MESSIFUL","TITLE":"SECRETARY","SALARY":5000.50},{"SERIALNO":"98765","NAME":"GOOSEPEN","TITLE":"ADMINISTRATOR","SALARY":4700.00}]} +{"DEPARTMENT":"2452","EMPLOYES":[{"SERIALNO":"34567","NAME":"BIGHEAD","TITLE":"SCIENTIST","SALARY":8000.00},{"SERIALNO":"31416","NAME":"ORELLY","TITLE":"ENGINEER","SALARY":13400.00},{"SERIALNO":"36666","NAME":"BIGHORN","TITLE":"SCIENTIST","SALARY":11000.00},{"SERIALNO":"02345","NAME":"SMITH","TITLE":"ENGINEER","SALARY":9000.00},{"SERIALNO":"11111","NAME":"CHERRY","TITLE":"SECRETARY","SALARY":4500.00}]} SELECT Bson_Make_Object(DEPARTMENT, TITLE, Bson_Array_Grp(Bson_Make_Object(SERIALNO, NAME, SALARY)) json_EMPLOYES) FROM t3 GROUP BY DEPARTMENT, TITLE; Bson_Make_Object(DEPARTMENT, TITLE, Bson_Array_Grp(Bson_Make_Object(SERIALNO, NAME, SALARY)) json_EMPLOYES) -{"DEPARTMENT":"0021","TITLE":"DIRECTOR","EMPLOYES":[{"SERIALNO":"87777","NAME":"STRONG","SALARY":23000.0000000000000000}]} -{"DEPARTMENT":"0021","TITLE":"SECRETARY","EMPLOYES":[{"SERIALNO":"22222","NAME":"SHORTSIGHT","SALARY":5500.0000000000000000}]} -{"DEPARTMENT":"0318","TITLE":"ADMINISTRATOR","EMPLOYES":[{"SERIALNO":"77777","NAME":"SHRINKY","SALARY":7500.0000000000000000}]} -{"DEPARTMENT":"0318","TITLE":"DIRECTOR","EMPLOYES":[{"SERIALNO":"70012","NAME":"WERTHER","SALARY":14500.0000000000000000}]} 
-{"DEPARTMENT":"0318","TITLE":"ENGINEER","EMPLOYES":[{"SERIALNO":"73452","NAME":"TONGHO","SALARY":6800.0000000000000000},{"SERIALNO":"74234","NAME":"WALTER","SALARY":7400.0000000000000000}]} -{"DEPARTMENT":"0318","TITLE":"SALESMAN","EMPLOYES":[{"SERIALNO":"74200","NAME":"BANCROFT","SALARY":9600.0000000000000000},{"SERIALNO":"78943","NAME":"MERCHANT","SALARY":8700.0000000000000000},{"SERIALNO":"73111","NAME":"WHEELFOR","SALARY":10030.0000000000000000}]} -{"DEPARTMENT":"0318","TITLE":"SECRETARY","EMPLOYES":[{"SERIALNO":"27845","NAME":"HONEY","SALARY":4900.0000000000000000}]} -{"DEPARTMENT":"0318","TITLE":"TYPIST","EMPLOYES":[{"SERIALNO":"24888","NAME":"PLUMHEAD","SALARY":2800.0000000000000000}]} -{"DEPARTMENT":"0319","TITLE":"ADMINISTRATOR","EMPLOYES":[{"SERIALNO":"98765","NAME":"GOOSEPEN","SALARY":4700.0000000000000000},{"SERIALNO":"07654","NAME":"FUNNIGUY","SALARY":8500.0000000000000000}]} -{"DEPARTMENT":"0319","TITLE":"DIRECTOR","EMPLOYES":[{"SERIALNO":"40567","NAME":"QUINN","SALARY":14000.0000000000000000}]} -{"DEPARTMENT":"0319","TITLE":"ENGINEER","EMPLOYES":[{"SERIALNO":"00023","NAME":"MARTIN","SALARY":10000.0000000000000000},{"SERIALNO":"00137","NAME":"BROWNY","SALARY":10500.0000000000000000}]} -{"DEPARTMENT":"0319","TITLE":"PROGRAMMER","EMPLOYES":[{"SERIALNO":"45678","NAME":"BUGHAPPY","SALARY":8500.0000000000000000}]} -{"DEPARTMENT":"0319","TITLE":"SALESMAN","EMPLOYES":[{"SERIALNO":"76543","NAME":"BULLOZER","SALARY":14800.0000000000000000},{"SERIALNO":"56789","NAME":"FODDERMAN","SALARY":7000.0000000000000000}]} -{"DEPARTMENT":"0319","TITLE":"SECRETARY","EMPLOYES":[{"SERIALNO":"33333","NAME":"MONAPENNY","SALARY":3800.0000000000000000},{"SERIALNO":"55555","NAME":"MESSIFUL","SALARY":5000.5000000000000000}]} -{"DEPARTMENT":"0319","TITLE":"TYPIST","EMPLOYES":[{"SERIALNO":"12345","NAME":"KITTY","SALARY":3000.4499999999998181}]} -{"DEPARTMENT":"2452","TITLE":"ENGINEER","EMPLOYES":[{"SERIALNO":"31416","NAME":"ORELLY","SALARY":13400.0000000000000000},{"SERIALNO":"02345","NAME":"SMITH","SALARY":9000.0000000000000000}]} -{"DEPARTMENT":"2452","TITLE":"SCIENTIST","EMPLOYES":[{"SERIALNO":"34567","NAME":"BIGHEAD","SALARY":8000.0000000000000000},{"SERIALNO":"36666","NAME":"BIGHORN","SALARY":11000.0000000000000000}]} -{"DEPARTMENT":"2452","TITLE":"SECRETARY","EMPLOYES":[{"SERIALNO":"11111","NAME":"CHERRY","SALARY":4500.0000000000000000}]} +{"DEPARTMENT":"0021","TITLE":"DIRECTOR","EMPLOYES":[{"SERIALNO":"87777","NAME":"STRONG","SALARY":23000.00}]} +{"DEPARTMENT":"0021","TITLE":"SECRETARY","EMPLOYES":[{"SERIALNO":"22222","NAME":"SHORTSIGHT","SALARY":5500.00}]} +{"DEPARTMENT":"0318","TITLE":"ADMINISTRATOR","EMPLOYES":[{"SERIALNO":"77777","NAME":"SHRINKY","SALARY":7500.00}]} +{"DEPARTMENT":"0318","TITLE":"DIRECTOR","EMPLOYES":[{"SERIALNO":"70012","NAME":"WERTHER","SALARY":14500.00}]} +{"DEPARTMENT":"0318","TITLE":"ENGINEER","EMPLOYES":[{"SERIALNO":"73452","NAME":"TONGHO","SALARY":6800.00},{"SERIALNO":"74234","NAME":"WALTER","SALARY":7400.00}]} +{"DEPARTMENT":"0318","TITLE":"SALESMAN","EMPLOYES":[{"SERIALNO":"74200","NAME":"BANCROFT","SALARY":9600.00},{"SERIALNO":"78943","NAME":"MERCHANT","SALARY":8700.00},{"SERIALNO":"73111","NAME":"WHEELFOR","SALARY":10030.00}]} +{"DEPARTMENT":"0318","TITLE":"SECRETARY","EMPLOYES":[{"SERIALNO":"27845","NAME":"HONEY","SALARY":4900.00}]} +{"DEPARTMENT":"0318","TITLE":"TYPIST","EMPLOYES":[{"SERIALNO":"24888","NAME":"PLUMHEAD","SALARY":2800.00}]} 
+{"DEPARTMENT":"0319","TITLE":"ADMINISTRATOR","EMPLOYES":[{"SERIALNO":"98765","NAME":"GOOSEPEN","SALARY":4700.00},{"SERIALNO":"07654","NAME":"FUNNIGUY","SALARY":8500.00}]} +{"DEPARTMENT":"0319","TITLE":"DIRECTOR","EMPLOYES":[{"SERIALNO":"40567","NAME":"QUINN","SALARY":14000.00}]} +{"DEPARTMENT":"0319","TITLE":"ENGINEER","EMPLOYES":[{"SERIALNO":"00023","NAME":"MARTIN","SALARY":10000.00},{"SERIALNO":"00137","NAME":"BROWNY","SALARY":10500.00}]} +{"DEPARTMENT":"0319","TITLE":"PROGRAMMER","EMPLOYES":[{"SERIALNO":"45678","NAME":"BUGHAPPY","SALARY":8500.00}]} +{"DEPARTMENT":"0319","TITLE":"SALESMAN","EMPLOYES":[{"SERIALNO":"76543","NAME":"BULLOZER","SALARY":14800.00},{"SERIALNO":"56789","NAME":"FODDERMAN","SALARY":7000.00}]} +{"DEPARTMENT":"0319","TITLE":"SECRETARY","EMPLOYES":[{"SERIALNO":"33333","NAME":"MONAPENNY","SALARY":3800.00},{"SERIALNO":"55555","NAME":"MESSIFUL","SALARY":5000.50}]} +{"DEPARTMENT":"0319","TITLE":"TYPIST","EMPLOYES":[{"SERIALNO":"12345","NAME":"KITTY","SALARY":3000.45}]} +{"DEPARTMENT":"2452","TITLE":"ENGINEER","EMPLOYES":[{"SERIALNO":"31416","NAME":"ORELLY","SALARY":13400.00},{"SERIALNO":"02345","NAME":"SMITH","SALARY":9000.00}]} +{"DEPARTMENT":"2452","TITLE":"SCIENTIST","EMPLOYES":[{"SERIALNO":"34567","NAME":"BIGHEAD","SALARY":8000.00},{"SERIALNO":"36666","NAME":"BIGHORN","SALARY":11000.00}]} +{"DEPARTMENT":"2452","TITLE":"SECRETARY","EMPLOYES":[{"SERIALNO":"11111","NAME":"CHERRY","SALARY":4500.00}]} SELECT Bson_Object_Grp(SALARY) FROM t3; ERROR HY000: Can't initialize function 'bson_object_grp'; This function requires 2 arguments (key, value) SELECT Bson_Object_Grp(NAME, SALARY) FROM t3; @@ -325,7 +328,7 @@ BsonGet_String(Bson_Make_Array(45,28,36,45,89),'3') 45 SELECT BsonGet_String(Bson_Make_Array(45,28,36,45,89),'["+"]') "list",'=' as "egal",BsonGet_String(Bson_Make_Array(45,28,36,45,89),'[+]') "sum"; list egal sum -45+28+36+45+89 = 243.00 +45+28+36+45+89 = 243 SELECT BsonGet_String(Bson_Make_Array(Bson_Make_Array(45,28),Bson_Make_Array(36,45,89)),'1.0'); BsonGet_String(Bson_Make_Array(Bson_Make_Array(45,28),Bson_Make_Array(36,45,89)),'1.0') 36 diff --git a/storage/connect/mysql-test/connect/t/bson_udf.inc b/storage/connect/mysql-test/connect/t/bson_udf.inc index 366f48e5861..c4722722ef7 100644 --- a/storage/connect/mysql-test/connect/t/bson_udf.inc +++ b/storage/connect/mysql-test/connect/t/bson_udf.inc @@ -22,6 +22,8 @@ if (!$HA_CONNECT_SO) { --eval CREATE FUNCTION bson_object_delete RETURNS STRING SONAME '$HA_CONNECT_SO'; --eval CREATE FUNCTION bson_object_list RETURNS STRING SONAME '$HA_CONNECT_SO'; --eval CREATE FUNCTION bson_object_values RETURNS STRING SONAME '$HA_CONNECT_SO'; +--eval CREATE FUNCTION bsonset_def_prec RETURNS INTEGER SONAME '$HA_CONNECT_SO'; +--eval CREATE FUNCTION bsonget_def_prec RETURNS INTEGER SONAME '$HA_CONNECT_SO'; --eval CREATE FUNCTION bsonset_grp_size RETURNS INTEGER SONAME '$HA_CONNECT_SO'; --eval CREATE FUNCTION bsonget_grp_size RETURNS INTEGER SONAME '$HA_CONNECT_SO'; --eval CREATE AGGREGATE FUNCTION bson_array_grp RETURNS STRING SONAME '$HA_CONNECT_SO'; diff --git a/storage/connect/mysql-test/connect/t/bson_udf.test b/storage/connect/mysql-test/connect/t/bson_udf.test index 84a3db6d061..0da2de38864 100644 --- a/storage/connect/mysql-test/connect/t/bson_udf.test +++ b/storage/connect/mysql-test/connect/t/bson_udf.test @@ -77,6 +77,7 @@ SELECT Bson_Object_Values('{"One":1,"Two":2,"Three":3}') "Value List"; --echo # --echo # Test UDF's with column arguments --echo # +SELECT Bsonset_Def_Prec(2); CREATE TABLE t2 ( ISBN CHAR(15), diff 
--git a/storage/connect/mysql-test/connect/t/bson_udf2.inc b/storage/connect/mysql-test/connect/t/bson_udf2.inc index ceddf8b0632..d06d7fac435 100644 --- a/storage/connect/mysql-test/connect/t/bson_udf2.inc +++ b/storage/connect/mysql-test/connect/t/bson_udf2.inc @@ -13,6 +13,8 @@ DROP FUNCTION bson_object_add; DROP FUNCTION bson_object_delete; DROP FUNCTION bson_object_list; DROP FUNCTION bson_object_values; +DROP FUNCTION bsonset_def_prec; +DROP FUNCTION bsonget_def_prec; DROP FUNCTION bsonset_grp_size; DROP FUNCTION bsonget_grp_size; DROP FUNCTION bson_array_grp; diff --git a/storage/connect/tabbson.cpp b/storage/connect/tabbson.cpp index ba4380c5f89..db63b8e78db 100644 --- a/storage/connect/tabbson.cpp +++ b/storage/connect/tabbson.cpp @@ -719,7 +719,10 @@ void BCUTIL::SetJsonValue(PGLOBAL g, PVAL vp, PBVAL jvp) if (jvp) { vp->SetNull(false); - switch (jvp->Type) { + if (Jb) { + vp->SetValue_psz(Serialize(g, jvp, NULL, 0)); + Jb = false; + } else switch (jvp->Type) { case TYPE_STRG: case TYPE_INTG: case TYPE_BINT: @@ -727,29 +730,29 @@ void BCUTIL::SetJsonValue(PGLOBAL g, PVAL vp, PBVAL jvp) case TYPE_DTM: case TYPE_FLOAT: switch (vp->GetType()) { - case TYPE_STRING: - case TYPE_DATE: - case TYPE_DECIM: - vp->SetValue_psz(GetString(jvp)); - break; - case TYPE_INT: - case TYPE_SHORT: - case TYPE_TINY: - vp->SetValue(GetInteger(jvp)); - break; - case TYPE_BIGINT: - vp->SetValue(GetBigint(jvp)); - break; - case TYPE_DOUBLE: - vp->SetValue(GetDouble(jvp)); + case TYPE_STRING: + case TYPE_DATE: + case TYPE_DECIM: + vp->SetValue_psz(GetString(jvp)); + break; + case TYPE_INT: + case TYPE_SHORT: + case TYPE_TINY: + vp->SetValue(GetInteger(jvp)); + break; + case TYPE_BIGINT: + vp->SetValue(GetBigint(jvp)); + break; + case TYPE_DOUBLE: + vp->SetValue(GetDouble(jvp)); - if (jvp->Type == TYPE_DBL || jvp->Type == TYPE_FLOAT) - vp->SetPrec(jvp->Nd); + if (jvp->Type == TYPE_DBL || jvp->Type == TYPE_FLOAT) + vp->SetPrec(jvp->Nd); - break; - default: - sprintf(G->Message, "Unsupported column type %d", vp->GetType()); - throw 888; + break; + default: + sprintf(G->Message, "Unsupported column type %d", vp->GetType()); + throw 888; } // endswitch Type break; @@ -780,53 +783,59 @@ void BCUTIL::SetJsonValue(PGLOBAL g, PVAL vp, PBVAL jvp) /***********************************************************************/ /* MakeJson: Serialize the json item and set value to it. */ /***********************************************************************/ -PVAL BCUTIL::MakeBson(PGLOBAL g, PBVAL jsp) +PBVAL BCUTIL::MakeBson(PGLOBAL g, PBVAL jsp, int n) { - if (Cp->Value->IsTypeNum()) { - strcpy(g->Message, "Cannot make Json for a numeric column"); + PBVAL vlp, jvp = jsp; - if (!Cp->Warned) { - PushWarning(g, Tp); - Cp->Warned = true; - } // endif Warned + if (n < Cp->Nod - 1) { + if (jsp->Type == TYPE_JAR) { + int ars = GetArraySize(jsp); + PJNODE jnp = &Cp->Nodes[n]; - Cp->Value->Reset(); -#if 0 - } else if (Value->GetType() == TYPE_BIN) { - if ((unsigned)Value->GetClen() >= sizeof(BSON)) { - ulong len = Tjp->Lrecl ? 
Tjp->Lrecl : 500; - PBSON bsp = JbinAlloc(g, NULL, len, jsp); + jvp = NewVal(TYPE_JAR); + jnp->Op = OP_EQ; - strcat(bsp->Msg, " column"); - ((BINVAL*)Value)->SetBinValue(bsp, sizeof(BSON)); - } else { - strcpy(g->Message, "Column size too small"); - Value->SetValue_char(NULL, 0); - } // endif Clen -#endif // 0 - } else - Cp->Value->SetValue_psz(Serialize(g, jsp, NULL, 0)); + for (int i = 0; i < ars; i++) { + jnp->Rank = i; + vlp = GetRowValue(g, jsp, n); + AddArrayValue(jvp,DupVal(vlp)); + } // endfor i - return Cp->Value; -} // end of MakeJson + jnp->Op = OP_XX; + jnp->Rank = 0; + } else if (jsp->Type == TYPE_JOB) { + jvp = NewVal(TYPE_JOB); + + for (PBPR prp = GetObject(jsp); prp; prp = GetNext(prp)) { + vlp = GetRowValue(g, GetVlp(prp), n + 1); + SetKeyValue(jvp, vlp, MZP(prp->Key)); + } // endfor prp + + } // endif Type + + } // endif's + + Jb = true; + return jvp; +} // end of MakeBson /***********************************************************************/ -/* GetColumnValue: */ +/* GetRowValue: */ /***********************************************************************/ -PVAL BCUTIL::GetColumnValue(PGLOBAL g, PBVAL row, int i) +PBVAL BCUTIL::GetRowValue(PGLOBAL g, PBVAL row, int i) { int nod = Cp->Nod, n = nod - 1; JNODE *nodes = Cp->Nodes; - PVAL value = Cp->Value; PBVAL arp; PBVAL bvp = NULL; for (; i < nod && row; i++) { if (nodes[i].Op == OP_NUM) { - value->SetValue(row->Type == TYPE_JAR ? GetSize(row) : 1); - return(value); + bvp = NewVal(TYPE_INT); + bvp->N = (row->Type == TYPE_JAR) ? GetSize(row) : 1; + return(bvp); } else if (nodes[i].Op == OP_XX) { - return MakeBson(g, row); + return MakeBson(g, row, i); } else switch (row->Type) { case TYPE_JOB: if (!nodes[i].Key) { @@ -847,9 +856,9 @@ PVAL BCUTIL::GetColumnValue(PGLOBAL g, PBVAL row, int i) if (nodes[i].Op == OP_EQ) bvp = GetArrayValue(arp, nodes[i].Rank); else if (nodes[i].Op == OP_EXP) - return ExpandArray(g, arp, i); + return NewVal(ExpandArray(g, arp, i)); else - return CalculateArray(g, arp, i); + return NewVal(CalculateArray(g, arp, i)); } else { // Unexpected array, unwrap it as [0] @@ -871,6 +880,17 @@ PVAL BCUTIL::GetColumnValue(PGLOBAL g, PBVAL row, int i) } // endfor i + return bvp; +} // end of GetColumnValue + +/***********************************************************************/ +/* GetColumnValue: */ +/***********************************************************************/ +PVAL BCUTIL::GetColumnValue(PGLOBAL g, PBVAL row, int i) +{ + PVAL value = Cp->Value; + PBVAL bvp = GetRowValue(g, row, i); + SetJsonValue(g, value, bvp); return value; } // end of GetColumnValue diff --git a/storage/connect/tabbson.h b/storage/connect/tabbson.h index bb3f32bd945..adb02dd28e4 100644 --- a/storage/connect/tabbson.h +++ b/storage/connect/tabbson.h @@ -127,11 +127,13 @@ protected: class BCUTIL : public BTUTIL { public: // Constructor - BCUTIL(PGLOBAL G, PBSCOL cp, TDBBSN* tp) : BTUTIL(G, tp) { Cp = cp; } + BCUTIL(PGLOBAL G, PBSCOL cp, TDBBSN* tp) : BTUTIL(G, tp) + { Cp = cp; Jb = false; } // Utility functions void SetJsonValue(PGLOBAL g, PVAL vp, PBVAL jvp); - PVAL MakeBson(PGLOBAL g, PBVAL jsp); + PBVAL MakeBson(PGLOBAL g, PBVAL jsp, int n); + PBVAL GetRowValue(PGLOBAL g, PBVAL row, int i); PVAL GetColumnValue(PGLOBAL g, PBVAL row, int i); PVAL ExpandArray(PGLOBAL g, PBVAL arp, int n); PVAL CalculateArray(PGLOBAL g, PBVAL arp, int n); @@ -140,6 +142,7 @@ public: protected: // Member PBSCOL Cp; + bool Jb; }; // end of class BCUTIL /* -------------------------- TDBBSN class --------------------------- */ From 
3edad542988a78b22034e7c42a9fc5e5334bb849 Mon Sep 17 00:00:00 2001 From: Mingli Yu Date: Mon, 25 Jan 2021 19:01:06 -0800 Subject: [PATCH 113/150] MDEV-24131: unittest stacktrace-t fails to compile (OpenBSD) This was because OpenBSD (and others) can be missing HAVE_BACKTRACE / HAVE_BACKTRACE_FD which is the condition for my_safe_print_str to be defined. Fixes: /prj/tmp/work/cortexa57-poky-linux-musl/mariadb/10.5.8-r0/recipe-sysroot-native/usr/bin/aarch64-poky-linux-musl/../../libexec/aarch64-poky-linux-musl/gcc/aarch64-poky-linux-musl/10.2.0/ld.bfd: /usr/src/debug/mariadb/10.5.8-r0/mariadb-10.5.8/unittest/mysys/stacktrace-t.c:36: undefined reference to `my_safe_print_str' Signed-off-by: Mingli Yu --- unittest/mysys/stacktrace-t.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/unittest/mysys/stacktrace-t.c b/unittest/mysys/stacktrace-t.c index 8fa0db15b36..d8408f80d76 100644 --- a/unittest/mysys/stacktrace-t.c +++ b/unittest/mysys/stacktrace-t.c @@ -29,6 +29,7 @@ void test_my_safe_print_str() memcpy(b_stack, "LEGAL", 6); memcpy(b_bss, "LEGAL", 6); +#ifdef HAVE_STACKTRACE #ifndef __SANITIZE_ADDRESS__ fprintf(stderr, "\n===== stack =====\n"); my_safe_print_str(b_stack, 65535); @@ -48,6 +49,7 @@ void test_my_safe_print_str() fprintf(stderr, "\n===== (const char*) 1 =====\n"); my_safe_print_str((const char*)1, 5); #endif /*__SANITIZE_ADDRESS__*/ +#endif /*HAVE_STACKTRACE*/ free(b_heap); From dd0b844a9cb30cc5adc1cfb57e75755d339323bf Mon Sep 17 00:00:00 2001 From: Stepan Patryshev Date: Wed, 27 Jan 2021 17:11:49 +0200 Subject: [PATCH 114/150] MDEV-24699: Added wait condition to make sure table t2 is replicated to node_1. --- mysql-test/suite/galera/t/galera_truncate.test | 3 +++ 1 file changed, 3 insertions(+) diff --git a/mysql-test/suite/galera/t/galera_truncate.test b/mysql-test/suite/galera/t/galera_truncate.test index 3c3ee56a23f..f490943db7c 100644 --- a/mysql-test/suite/galera/t/galera_truncate.test +++ b/mysql-test/suite/galera/t/galera_truncate.test @@ -32,6 +32,9 @@ CREATE TABLE t2 (f1 VARCHAR(255)) Engine=InnoDB; INSERT INTO t2 VALUES ('abc'); --connection node_1 +--let $wait_condition = SELECT COUNT(*) = 1 FROM INFORMATION_SCHEMA.INNODB_SYS_TABLES WHERE NAME LIKE 'test/t2'; +--source include/wait_condition.inc + TRUNCATE TABLE t2; --connection node_2 From 20f6c403eb976a6dd25cb58d0ce17f6da2566253 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jan=20Lindstr=C3=B6m?= Date: Thu, 28 Jan 2021 10:31:57 +0200 Subject: [PATCH 115/150] MDEV-20717 : Plugin system variables and activation options can break "mysqld --wsrep_recover" Problem is that not all plugins are loaded when wsrep_recover is executed. Thus, we allow unknown system variables and extra system variables during wsrep_recover. Any unknown system variables would still be caught when the server starts up normally after the SST. --- sql/mysqld.cc | 19 +++++++++++++++---- 1 file changed, 15 insertions(+), 4 deletions(-) diff --git a/sql/mysqld.cc b/sql/mysqld.cc index 55ed5a6a680..4bfac5c20d1 100644 --- a/sql/mysqld.cc +++ b/sql/mysqld.cc @@ -5417,6 +5417,10 @@ static int init_server_components() that there are unprocessed options. 
*/ my_getopt_skip_unknown= 0; +#ifdef WITH_WSREP + if (wsrep_recovery) + my_getopt_skip_unknown= TRUE; +#endif if ((ho_error= handle_options(&remaining_argc, &remaining_argv, no_opts, mysqld_get_one_option))) @@ -5426,12 +5430,19 @@ remaining_argv--; my_getopt_skip_unknown= TRUE; - if (remaining_argc > 1) +#ifdef WITH_WSREP + if (!wsrep_recovery) { - fprintf(stderr, "%s: Too many arguments (first extra is '%s').\n", - my_progname, remaining_argv[1]); - unireg_abort(1); +#endif + if (remaining_argc > 1) + { + fprintf(stderr, "%s: Too many arguments (first extra is '%s').\n", + my_progname, remaining_argv[1]); + unireg_abort(1); + } +#ifdef WITH_WSREP } +#endif } if (opt_abort) From 3a89ae3364ce415caed2ab5b008a454ce6204f59 Mon Sep 17 00:00:00 2001 From: Oleksandr Byelkin Date: Thu, 28 Jan 2021 11:22:54 +0100 Subject: [PATCH 116/150] last CC 3.1 --- libmariadb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libmariadb b/libmariadb index e3824422064..e62ff462c58 160000 --- a/libmariadb +++ b/libmariadb @@ -1 +1 @@ -Subproject commit e38244220646a7e95c9be22576460aa7a4eb715f +Subproject commit e62ff462c58ce154596a0f1da9e79cd4395396e3 From 33ede50f207552df835d7606f990fa9ccc4e0d12 Mon Sep 17 00:00:00 2001 From: Sergei Petrunia Date: Thu, 28 Jan 2021 20:46:13 +0300 Subject: [PATCH 117/150] MDEV-22251: get_key_scans_params: Conditional jump or move depends on uninitialised value Apply a fix based on the patch by Varun Gupta: PARAM::is_ror_scan might be used uninitialized when check_quick_select() is invoked for a "degenerate" SEL_ARG tree (e.g. one having type SEL_ARG::IMPOSSIBLE). Make check_quick_select() always initialize PARAM::is_ror_scan. --- mysql-test/r/range.result | 17 +++++++++++++++++ mysql-test/r/range_mrr_icp.result | 17 +++++++++++++++++ mysql-test/t/range.test | 14 ++++++++++++++ sql/opt_range.cc | 1 + 4 files changed, 49 insertions(+) diff --git a/mysql-test/r/range.result b/mysql-test/r/range.result index 1d07cb04c06..6a3850c0ed9 100644 --- a/mysql-test/r/range.result +++ b/mysql-test/r/range.result @@ -3184,5 +3184,22 @@ SELECT * FROM t1 JOIN t2 ON (t2.code = t1.b) WHERE t1.a NOT IN ('baz', 'qux') OR id a b code num DROP TABLE t1, t2; # +# MDEV-22251: get_key_scans_params: Conditional jump or move depends on uninitialised value +# +create table t1 (pk int, i int, v int, primary key (pk), key(v)); +insert into t1 (pk,i,v) values (1,1,2),(2,2,4),(3,3,6),(4,4,8),(5,5,10),(6,6,12),(7,7,14),(8,8,16); +create table t2 (a int, b int); +insert into t2 values (1,2),(2,4); +EXPLAIN +select * from t1 inner join t2 on ( t2.b = t1.v or t2.a = t1.pk); +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t2 ALL NULL NULL NULL NULL 2 +1 SIMPLE t1 ALL PRIMARY,v NULL NULL NULL 8 Range checked for each record (index map: 0x3) +select * from t1 inner join t2 on ( t2.b = t1.v or t2.a = t1.pk); +pk i v a b +1 1 2 1 2 +2 2 4 2 4 +drop table t1, t2; +# # End of 10.2 tests # diff --git a/mysql-test/r/range_mrr_icp.result b/mysql-test/r/range_mrr_icp.result index f3203fea73d..24f42f34ce5 100644 --- a/mysql-test/r/range_mrr_icp.result +++ b/mysql-test/r/range_mrr_icp.result @@ -3196,6 +3196,23 @@ SELECT * FROM t1 JOIN t2 ON (t2.code = t1.b) WHERE t1.a NOT IN ('baz', 'qux') OR id a b code num DROP TABLE t1, t2; # +# MDEV-22251: get_key_scans_params: Conditional jump or move depends on uninitialised value +# +create table t1 (pk int, i int, v int, primary key (pk), key(v)); +insert into t1 (pk,i,v) values 
(1,1,2),(2,2,4),(3,3,6),(4,4,8),(5,5,10),(6,6,12),(7,7,14),(8,8,16); +create table t2 (a int, b int); +insert into t2 values (1,2),(2,4); +EXPLAIN +select * from t1 inner join t2 on ( t2.b = t1.v or t2.a = t1.pk); +id select_type table type possible_keys key key_len ref rows Extra +1 SIMPLE t2 ALL NULL NULL NULL NULL 2 +1 SIMPLE t1 ALL PRIMARY,v NULL NULL NULL 8 Range checked for each record (index map: 0x3) +select * from t1 inner join t2 on ( t2.b = t1.v or t2.a = t1.pk); +pk i v a b +1 1 2 1 2 +2 2 4 2 4 +drop table t1, t2; +# # End of 10.2 tests # set optimizer_switch=@mrr_icp_extra_tmp; diff --git a/mysql-test/t/range.test b/mysql-test/t/range.test index 2f55889afec..890377ed977 100644 --- a/mysql-test/t/range.test +++ b/mysql-test/t/range.test @@ -2217,6 +2217,20 @@ SELECT * FROM t1 JOIN t2 ON (t2.code = t1.b) WHERE t1.a NOT IN ('baz', 'qux') OR DROP TABLE t1, t2; + +--echo # +--echo # MDEV-22251: get_key_scans_params: Conditional jump or move depends on uninitialised value +--echo # + +create table t1 (pk int, i int, v int, primary key (pk), key(v)); +insert into t1 (pk,i,v) values (1,1,2),(2,2,4),(3,3,6),(4,4,8),(5,5,10),(6,6,12),(7,7,14),(8,8,16); +create table t2 (a int, b int); +insert into t2 values (1,2),(2,4); +EXPLAIN +select * from t1 inner join t2 on ( t2.b = t1.v or t2.a = t1.pk); +select * from t1 inner join t2 on ( t2.b = t1.v or t2.a = t1.pk); +drop table t1, t2; + --echo # --echo # End of 10.2 tests --echo # diff --git a/sql/opt_range.cc b/sql/opt_range.cc index 7785c768fbc..f3f184367c9 100644 --- a/sql/opt_range.cc +++ b/sql/opt_range.cc @@ -10385,6 +10385,7 @@ ha_rows check_quick_select(PARAM *param, uint idx, bool index_only, uint keynr= param->real_keynr[idx]; DBUG_ENTER("check_quick_select"); + param->is_ror_scan= FALSE; /* Handle cases when we don't have a valid non-empty list of range */ if (!tree) DBUG_RETURN(HA_POS_ERROR); From 848a1a613c103209e4f2fb5d02572237d46832a1 Mon Sep 17 00:00:00 2001 From: Olivier Bertrand Date: Thu, 28 Jan 2021 19:54:24 +0100 Subject: [PATCH 118/150] Fix decimal problems in bson udf's --- storage/connect/bson.cpp | 46 +++--- storage/connect/bsonudf.cpp | 156 +++++++++--------- .../connect/mysql-test/connect/r/bson.result | 2 +- .../mysql-test/connect/r/bson_udf.result | 48 ++---- 4 files changed, 115 insertions(+), 137 deletions(-) diff --git a/storage/connect/bson.cpp b/storage/connect/bson.cpp index 5731ce9eac5..6c979498286 100644 --- a/storage/connect/bson.cpp +++ b/storage/connect/bson.cpp @@ -1501,31 +1501,27 @@ double BJSON::GetDouble(PBVAL vp) PBVAL vlp = (vp->Type == TYPE_JVAL) ? MVP(vp->To_Val) : vp; switch (vlp->Type) { - case TYPE_DBL: - d = *(double*)MP(vlp->To_Val); - break; - case TYPE_BINT: - d = (double)*(longlong*)MP(vlp->To_Val); - break; - case TYPE_INTG: - d = (double)vlp->N; - break; - case TYPE_FLOAT: - { char buf[32]; - int n = (vlp->Nd) ? vlp->Nd : 5; - - sprintf(buf, "%.*f", n, vlp->F); - d = atof(buf); - } break; - case TYPE_DTM: - case TYPE_STRG: - d = atof(MZP(vlp->To_Val)); - break; - case TYPE_BOOL: - d = (vlp->B) ? 1.0 : 0.0; - break; - default: - d = 0.0; + case TYPE_DBL: + d = *(double*)MP(vlp->To_Val); + break; + case TYPE_BINT: + d = (double)*(longlong*)MP(vlp->To_Val); + break; + case TYPE_INTG: + d = (double)vlp->N; + break; + case TYPE_FLOAT: + d = (double)vlp->F; + break; + case TYPE_DTM: + case TYPE_STRG: + d = atof(MZP(vlp->To_Val)); + break; + case TYPE_BOOL: + d = (vlp->B) ? 
1.0 : 0.0; + break; + default: + d = 0.0; } // endswitch Type return d; diff --git a/storage/connect/bsonudf.cpp b/storage/connect/bsonudf.cpp index dd9f95bc4ba..b8f4f9699d4 100644 --- a/storage/connect/bsonudf.cpp +++ b/storage/connect/bsonudf.cpp @@ -116,7 +116,7 @@ BJNX::BJNX(PGLOBAL g) : BDOC(g) Value = NULL; MulVal = NULL; Jpath = NULL; - Buf_Type = TYPE_NULL; + Buf_Type = TYPE_STRING; Long = len; Prec = 0; Nod = 0; @@ -171,10 +171,9 @@ BJNX::BJNX(PGLOBAL g, PBVAL row, int type, int len, int prec, my_bool wr) : BDOC my_bool BJNX::SetJpath(PGLOBAL g, char* path, my_bool jb) { // Check Value was allocated - if (!Value) - return true; + if (Value) + Value->SetNullable(true); - Value->SetNullable(true); Jpath = path; // Parse the json path @@ -697,8 +696,10 @@ PVAL BJNX::GetCalcValue(PGLOBAL g, PBVAL bap, int n) if (IsTypeChar(Buf_Type)) { type = TYPE_DOUBLE; prec = 2; - } else + } else { type = Buf_Type; + prec = GetPrecision(); + } // endif Buf_Type break; case OP_MIN: @@ -711,7 +712,7 @@ PVAL BJNX::GetCalcValue(PGLOBAL g, PBVAL bap, int n) type = TYPE_STRING; if (IsTypeChar(Buf_Type)) { - lng = Long; + lng = (Long) ? Long : 512; prec = GetPrecision(); } else lng = 512; @@ -740,79 +741,87 @@ PVAL BJNX::CalculateArray(PGLOBAL g, PBVAL bap, int n) vp->Reset(); xtrc(1, "CalculateArray size=%d op=%d\n", ars, op); - for (i = 0; i < ars; i++) { - bvrp = GetArrayValue(bap, i); - xtrc(1, "i=%d nv=%d\n", i, nv); + try { + for (i = 0; i < ars; i++) { + bvrp = GetArrayValue(bap, i); + xtrc(1, "i=%d nv=%d\n", i, nv); - if (!IsValueNull(bvrp) || (op == OP_CNC && GetJsonNull())) { - if (IsValueNull(bvrp)) { - SetString(bvrp, NewStr(GetJsonNull()), 0); - bvp = bvrp; - } else if (n < Nod - 1 && IsJson(bvrp)) { - SetValue(&bval, GetColumnValue(g, bvrp, n + 1)); - bvp = &bval; - } else - bvp = bvrp; + if (!IsValueNull(bvrp) || (op == OP_CNC && GetJsonNull())) { + if (IsValueNull(bvrp)) { + SetString(bvrp, NewStr(GetJsonNull()), 0); + bvp = bvrp; + } else if (n < Nod - 1 && IsJson(bvrp)) { + SetValue(&bval, GetColumnValue(g, bvrp, n + 1)); + bvp = &bval; + } else + bvp = bvrp; - if (trace(1)) - htrc("bvp=%s null=%d\n", - GetString(bvp), IsValueNull(bvp) ? 1 : 0); + if (trace(1)) + htrc("bvp=%s null=%d\n", + GetString(bvp), IsValueNull(bvp) ? 1 : 0); - if (!nv++) { - SetJsonValue(g, vp, bvp); - continue; - } else - SetJsonValue(g, mulval, bvp); + if (!nv++) { + SetJsonValue(g, vp, bvp); + continue; + } else + SetJsonValue(g, mulval, bvp); - if (!mulval->IsNull()) { - switch (op) { - case OP_CNC: - if (Nodes[n].CncVal) { - val[0] = Nodes[n].CncVal; + if (!mulval->IsNull()) { + switch (op) { + case OP_CNC: + if (Nodes[n].CncVal) { + val[0] = Nodes[n].CncVal; + err = vp->Compute(g, val, 1, op); + } // endif CncVal + + val[0] = mulval; err = vp->Compute(g, val, 1, op); - } // endif CncVal + break; + // case OP_NUM: + case OP_SEP: + val[0] = vp; + val[1] = mulval; + err = vp->Compute(g, val, 2, OP_ADD); + break; + default: + val[0] = vp; + val[1] = mulval; + err = vp->Compute(g, val, 2, op); + } // endswitch Op - val[0] = mulval; - err = vp->Compute(g, val, 1, op); - break; - // case OP_NUM: - case OP_SEP: - val[0] = vp; - val[1] = mulval; - err = vp->Compute(g, val, 2, OP_ADD); - break; - default: - val[0] = vp; - val[1] = mulval; - err = vp->Compute(g, val, 2, op); - } // endswitch Op + if (err) + vp->Reset(); - if (err) - vp->Reset(); + if (trace(1)) { + char buf(32); - if (trace(1)) { - char buf(32); + htrc("vp='%s' err=%d\n", + vp->GetCharString(&buf), err ? 
1 : 0); + } // endif trace - htrc("vp='%s' err=%d\n", - vp->GetCharString(&buf), err ? 1 : 0); - } // endif trace + } // endif Zero - } // endif Zero + } // endif jvrp - } // endif jvrp + } // endfor i - } // endfor i + if (op == OP_SEP) { + // Calculate average + mulval->SetValue(nv); + val[0] = vp; + val[1] = mulval; - if (op == OP_SEP) { - // Calculate average - mulval->SetValue(nv); - val[0] = vp; - val[1] = mulval; + if (vp->Compute(g, val, 2, OP_DIV)) + vp->Reset(); - if (vp->Compute(g, val, 2, OP_DIV)) - vp->Reset(); + } // endif Op - } // endif Op + } catch (int n) { + xtrc(1, "Exception %d: %s\n", n, g->Message); + PUSH_WARNING(g->Message); + } catch (const char* msg) { + strcpy(g->Message, msg); + } // end catch return vp; } // end of CalculateArray @@ -4024,8 +4033,8 @@ double bsonget_real(UDF_INIT *initid, UDF_ARGS *args, char *p, *path; double d; PBVAL jsp, jvp; - PBJNX bxp = NULL; PGLOBAL g = (PGLOBAL)initid->ptr; + BJNX bnx(g); if (g->N) { if (!g->Activityp) { @@ -4044,8 +4053,6 @@ double bsonget_real(UDF_INIT *initid, UDF_ARGS *args, *is_null = 1; return 0.0; } else { - BJNX bnx(g); - jvp = bnx.MakeValue(args, 0); if ((p = bnx.GetString(jvp))) { @@ -4068,21 +4075,22 @@ double bsonget_real(UDF_INIT *initid, UDF_ARGS *args, jsp = (PBVAL)g->Xchk; path = MakePSZ(g, args, 1); - bxp = new(g) BJNX(g, jsp, TYPE_DOUBLE); +//bxp = new(g) BJNX(g, jsp, TYPE_DOUBLE, 32, jsp->Nd); - if (bxp->SetJpath(g, path)) { + if (bnx.SetJpath(g, path)) { PUSH_WARNING(g->Message); *is_null = 1; return 0.0; } else - bxp->ReadValue(g); + jvp = bnx.GetRowValue(g, jsp, 0); - if (bxp->GetValue()->IsNull()) { + if (!jvp || bnx.IsValueNull(jvp)) { *is_null = 1; return 0.0; - } // endif IsNull - - d = bxp->GetValue()->GetFloatValue(); + } else if (args->arg_count == 2) { + d = atof(bnx.GetString(jvp)); + } else + d = bnx.GetDouble(jvp); if (initid->const_item) { // Keep result of constant function diff --git a/storage/connect/mysql-test/connect/r/bson.result b/storage/connect/mysql-test/connect/r/bson.result index 98abdbb8744..fd15e020aac 100644 --- a/storage/connect/mysql-test/connect/r/bson.result +++ b/storage/connect/mysql-test/connect/r/bson.result @@ -279,7 +279,7 @@ SELECT * FROM t1; WHO WEEKS SUMS SUM AVGS SUMAVG AVGSUM AVGAVG AVERAGE Joe 3, 4, 5 69.00+83.00+26.00 178.00 17.25+16.60+13.00 46.85 59.33 15.62 16.18 Beth 3, 4, 5 16.00+32.00+32.00 80.00 16.00+16.00+16.00 48.00 26.67 16.00 16.00 -Janet 3, 4, 5 55.00+17.00+57.00 129.00 18.33+17.00+14.25 49.58 43.00 16.53 16.13 +Janet 3, 4, 5 55.00+17.00+57.00 129.00 18.33+17.00+14.25 49.58 43.00 16.53 16.12 DROP TABLE t1; # # Expand expense in 3 one week tables diff --git a/storage/connect/mysql-test/connect/r/bson_udf.result b/storage/connect/mysql-test/connect/r/bson_udf.result index 14b3629e4b1..4ec1f0c87fd 100644 --- a/storage/connect/mysql-test/connect/r/bson_udf.result +++ b/storage/connect/mysql-test/connect/r/bson_udf.result @@ -606,14 +606,12 @@ Bson_File('test/fx.json', 0) [{"_id":5,"type":"food","item":"beer","taste":"light","price":5.65,"ratings":[5,8,9]},{"_id":6,"type":"car","item":"roadster","mileage":56000,"ratings":[6,9]},{"_id":7,"type":"food","item":"meat","origin":"argentina","ratings":[2,4]},{"_id":8,"type":"furniture","item":"table","size":{"W":60,"L":80,"H":40},"ratings":[5,8,7]}] SELECT Bson_File('test/fx.json', '0'); Bson_File('test/fx.json', '0') -NULL -Warnings: -Warning 1105 +{"_id":5,"type":"food","item":"beer","taste":"light","price":5.65,"ratings":[5,8,9]} SELECT Bson_File('test/fx.json', '[?]'); Bson_File('test/fx.json', 
'[?]') NULL Warnings: -Warning 1105 +Warning 1105 Invalid function specification ? SELECT BsonGet_String(Bson_File('test/fx.json'), '1.*'); BsonGet_String(Bson_File('test/fx.json'), '1.*') {"_id":6,"type":"car","item":"roadster","mileage":56000,"ratings":[6,9]} @@ -628,57 +626,33 @@ Price 5.65 SELECT Bson_Array_Add(Bson_File('test/fx.json', '2'), 6, 'ratings'); Bson_Array_Add(Bson_File('test/fx.json', '2'), 6, 'ratings') -NULL -Warnings: -Warning 1105 -Warning 1105 No sub-item at 'ratings' +{"_id":7,"type":"food","item":"meat","origin":"argentina","ratings":[2,4,6]} SELECT Bson_Array_Add(Bson_File('test/fx.json', '2'), 6, 1, 'ratings'); Bson_Array_Add(Bson_File('test/fx.json', '2'), 6, 1, 'ratings') -NULL -Warnings: -Warning 1105 -Warning 1105 No sub-item at 'ratings' +{"_id":7,"type":"food","item":"meat","origin":"argentina","ratings":[2,6,4]} SELECT Bson_Array_Add(Bson_File('test/fx.json', '2'), 6, 'ratings', 1); Bson_Array_Add(Bson_File('test/fx.json', '2'), 6, 'ratings', 1) -NULL -Warnings: -Warning 1105 -Warning 1105 No sub-item at 'ratings' +{"_id":7,"type":"food","item":"meat","origin":"argentina","ratings":[2,6,4]} SELECT Bson_Array_Add(Bson_File('test/fx.json', '2.ratings'), 6, 0); Bson_Array_Add(Bson_File('test/fx.json', '2.ratings'), 6, 0) -[6,null] -Warnings: -Warning 1105 +[6,2,4] SELECT Bson_Array_Delete(Bson_File('test/fx.json', '2'), 'ratings', 1); Bson_Array_Delete(Bson_File('test/fx.json', '2'), 'ratings', 1) -NULL -Warnings: -Warning 1105 -Warning 1105 No sub-item at 'ratings' +{"_id":7,"type":"food","item":"meat","origin":"argentina","ratings":[2]} SELECT Bson_Object_Add(Bson_File('test/fx.json', '2'), 'france' origin); Bson_Object_Add(Bson_File('test/fx.json', '2'), 'france' origin) -NULL -Warnings: -Warning 1105 -Warning 1105 First argument target is not an object +{"_id":7,"type":"food","item":"meat","origin":"france","ratings":[2,4]} SELECT Bson_Object_Add(Bson_File('test/fx.json', '2'), 70 H, 'size'); Bson_Object_Add(Bson_File('test/fx.json', '2'), 70 H, 'size') -NULL +{"_id":7,"type":"food","item":"meat","origin":"argentina","ratings":[2,4]} Warnings: -Warning 1105 Warning 1105 No sub-item at 'size' SELECT Bson_Object_Add(Bson_File('test/fx.json', '3'), 70 H, 'size'); Bson_Object_Add(Bson_File('test/fx.json', '3'), 70 H, 'size') -NULL -Warnings: -Warning 1105 -Warning 1105 No sub-item at 'size' +{"_id":8,"type":"furniture","item":"table","size":{"W":60,"L":80,"H":70},"ratings":[5,8,7]} SELECT Bson_Object_List(Bson_File('test/fx.json', '3.size')); Bson_Object_List(Bson_File('test/fx.json', '3.size')) -NULL -Warnings: -Warning 1105 -Warning 1105 First argument is not an object +["W","L","H"] # # Testing new functions # From 17867608a2c3a13c909a2362ec5ee2a5a41547c1 Mon Sep 17 00:00:00 2001 From: Oleksandr Byelkin Date: Fri, 29 Jan 2021 11:18:06 +0100 Subject: [PATCH 119/150] ASAN heap-use-after-free in Item_exists_subselect::is_top_level_item check that we can do type casting --- sql/item_cmpfunc.cc | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/sql/item_cmpfunc.cc b/sql/item_cmpfunc.cc index d16c7413f0a..d5b89f13f04 100644 --- a/sql/item_cmpfunc.cc +++ b/sql/item_cmpfunc.cc @@ -1234,7 +1234,9 @@ longlong Item_func_truth::val_int() bool Item_in_optimizer::is_top_level_item() { - return ((Item_in_subselect *)args[1])->is_top_level_item(); + if (!invisible_mode()) + return ((Item_in_subselect *)args[1])->is_top_level_item(); + return false; } From 40868c4765383549d1fe5650e6bce1ca5078b875 Mon Sep 17 00:00:00 2001 From: Oleksandr Byelkin Date: Thu, 
28 Jan 2021 18:30:32 +0100 Subject: [PATCH 120/150] fix warnings returned by gcc v10.0 --- storage/connect/bson.cpp | 5 ++--- storage/connect/bson.h | 2 +- storage/connect/bsonudf.cpp | 8 +++++--- storage/connect/filamtxt.cpp | 2 +- storage/connect/jsonudf.cpp | 4 +++- 5 files changed, 12 insertions(+), 9 deletions(-) diff --git a/storage/connect/bson.cpp b/storage/connect/bson.cpp index fc58303a73f..7728d488a00 100644 --- a/storage/connect/bson.cpp +++ b/storage/connect/bson.cpp @@ -1199,15 +1199,14 @@ void BJSON::SetArrayValue(PBVAL bap, PBVAL nvp, int n) { CheckType(bap, TYPE_JAR); int i = 0; - PBVAL bvp = NULL, pvp = NULL; + PBVAL bvp = NULL; if (bap->To_Val) for (bvp = GetArray(bap); bvp; i++, bvp = GetNext(bvp)) if (i == n) { SetValueVal(bvp, nvp); return; - } else - pvp = bvp; + } if (!bvp) AddArrayValue(bap, MOF(nvp)); diff --git a/storage/connect/bson.h b/storage/connect/bson.h index 6eb6c019c1a..5420c6f2f36 100644 --- a/storage/connect/bson.h +++ b/storage/connect/bson.h @@ -105,7 +105,7 @@ public: PSZ GetArrayText(PGLOBAL g, PBVAL bap, PSTRG text); void MergeArray(PBVAL bap1,PBVAL bap2); bool DeleteValue(PBVAL bap, int n); - void AddArrayValue(PBVAL bap, OFFSET nvp = NULL, int* x = NULL); + void AddArrayValue(PBVAL bap, OFFSET nvp = 0, int* x = NULL); inline void AddArrayValue(PBVAL bap, PBVAL nvp = NULL, int* x = NULL) {AddArrayValue(bap, MOF(nvp), x);} void SetArrayValue(PBVAL bap, PBVAL nvp, int n); diff --git a/storage/connect/bsonudf.cpp b/storage/connect/bsonudf.cpp index 719b7d7509a..533d641219e 100644 --- a/storage/connect/bsonudf.cpp +++ b/storage/connect/bsonudf.cpp @@ -30,6 +30,8 @@ int IsArgJson(UDF_ARGS* args, uint i); void SetChanged(PBSON bsp); +static PBSON BbinAlloc(PGLOBAL g, ulong len, PBVAL jsp); + /* --------------------------------- JSON UDF ---------------------------------- */ /*********************************************************************************/ @@ -50,7 +52,7 @@ inline void JsonFreeMem(PGLOBAL g) { /*********************************************************************************/ /* Allocate and initialize a BSON structure. */ /*********************************************************************************/ -PBSON BbinAlloc(PGLOBAL g, ulong len, PBVAL jsp) +static PBSON BbinAlloc(PGLOBAL g, ulong len, PBVAL jsp) { PBSON bsp = (PBSON)PlgDBSubAlloc(g, NULL, sizeof(BSON)); @@ -415,7 +417,7 @@ PSZ BJNX::MakeKey(UDF_ARGS *args, int i) } // endif *s if (n < 1) - return NewStr("Key"); + return NewStr((PSZ)"Key"); if (!b) { p = (PSZ)BsonSubAlloc(n + 1); @@ -429,7 +431,7 @@ PSZ BJNX::MakeKey(UDF_ARGS *args, int i) return NewStr((PSZ)s); } // endif count - return NewStr("Key"); + return NewStr((PSZ)"Key"); } // end of MakeKey /*********************************************************************************/ diff --git a/storage/connect/filamtxt.cpp b/storage/connect/filamtxt.cpp index 3c2c49de8b7..30ce19a5d37 100644 --- a/storage/connect/filamtxt.cpp +++ b/storage/connect/filamtxt.cpp @@ -1734,7 +1734,7 @@ bool BINFAM::OpenTableFile(PGLOBAL g) { /*********************************************************************/ return AllocateBuffer(g); } // end of OpenTableFile -#endif 0 +#endif // 0 /***********************************************************************/ /* Allocate the line buffer. 
For mode Delete a bigger buffer has to */ diff --git a/storage/connect/jsonudf.cpp b/storage/connect/jsonudf.cpp index 53818cbe00b..6669b7af5d8 100644 --- a/storage/connect/jsonudf.cpp +++ b/storage/connect/jsonudf.cpp @@ -34,6 +34,8 @@ static char *bin_handle_item(UDF_INIT *initid, UDF_ARGS *args, char *result, static PJSON JsonNew(PGLOBAL g, JTYP type); static PJVAL JvalNew(PGLOBAL g, JTYP type, void *vp = NULL); static PJSNX JsnxNew(PGLOBAL g, PJSON jsp, int type, int len = 64); +uint GetJsonGroupSize(void); +static void SetChanged(PBSON bsp); uint JsonGrpSize = 10; @@ -1155,7 +1157,7 @@ PBSON JbinAlloc(PGLOBAL g, UDF_ARGS *args, ulong len, PJSON jsp) /*********************************************************************************/ /* Set the BSON chain as changed. */ /*********************************************************************************/ -void SetChanged(PBSON bsp) +static void SetChanged(PBSON bsp) { if (bsp->Bsp) SetChanged(bsp->Bsp); From 496f7090a825ac7ee54a6b5f9700e5f261e4bce0 Mon Sep 17 00:00:00 2001 From: Oleksandr Byelkin Date: Fri, 29 Jan 2021 12:35:17 +0100 Subject: [PATCH 121/150] Fix of warnings on aarch64 like: bson.cpp:1775:3: error: case label value is less than minimum value for type [-Werror] case TYPE_NULL: bson.cpp:1776:7: error: statement will never be executed [-Werror=switch-unreachable] b = true; --- storage/connect/bson.cpp | 6 +++--- storage/connect/bsonudf.cpp | 4 ++-- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/storage/connect/bson.cpp b/storage/connect/bson.cpp index 7728d488a00..f6a8db67d42 100644 --- a/storage/connect/bson.cpp +++ b/storage/connect/bson.cpp @@ -767,7 +767,7 @@ bool BDOC::SerializeValue(PBVAL jvp, bool b) case TYPE_DBL: sprintf(buf, "%.*lf", jvp->Nd, *(double*)MakePtr(Base, jvp->To_Val)); return jp->WriteStr(buf); - case TYPE_NULL: + case (char)TYPE_NULL: return jp->WriteStr("null"); case TYPE_JVAL: return SerializeValue(MVP(jvp->To_Val)); @@ -1557,7 +1557,7 @@ PSZ BJSON::GetString(PBVAL vp, char* buff) case TYPE_BOOL: p = (PSZ)((vlp->B) ? 
"true" : "false"); break; - case TYPE_NULL: + case (char)TYPE_NULL: p = (PSZ)"null"; break; default: @@ -1772,7 +1772,7 @@ bool BJSON::IsValueNull(PBVAL vlp) bool b; switch (vlp->Type) { - case TYPE_NULL: + case (char)TYPE_NULL: b = true; break; case TYPE_JOB: diff --git a/storage/connect/bsonudf.cpp b/storage/connect/bsonudf.cpp index 533d641219e..4145e21deb5 100644 --- a/storage/connect/bsonudf.cpp +++ b/storage/connect/bsonudf.cpp @@ -491,7 +491,7 @@ void BJNX::SetJsonValue(PGLOBAL g, PVAL vp, PBVAL vlp) case TYPE_JOB: vp->SetValue_psz(GetObjectText(g, vlp, NULL)); break; - case TYPE_NULL: + case (char)TYPE_NULL: vp->SetNull(true); default: vp->Reset(); @@ -1320,7 +1320,7 @@ my_bool BJNX::CompareValues(PGLOBAL g, PBVAL v1, PBVAL v2) b = (v1->B == v2->B); break; - case TYPE_NULL: + case (char)TYPE_NULL: b = (v2->Type == TYPE_NULL); break; default: From 9c84852809214e97cf91327a798204c1b745881e Mon Sep 17 00:00:00 2001 From: Oleksandr Byelkin Date: Fri, 29 Jan 2021 12:42:34 +0100 Subject: [PATCH 122/150] fix of warning on windows --- storage/connect/mysql-test/connect/r/xml.result | 3 +-- storage/connect/mysql-test/connect/t/xml.test | 1 + 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/storage/connect/mysql-test/connect/r/xml.result b/storage/connect/mysql-test/connect/r/xml.result index 599c35cb1ed..575c903bbbc 100644 --- a/storage/connect/mysql-test/connect/r/xml.result +++ b/storage/connect/mysql-test/connect/r/xml.result @@ -374,8 +374,7 @@ INSERT INTO t1 VALUES (_cp1251 0xC0C1C2C3); Warnings: Level Warning Code 1105 -Message Com error: Impossible d'enregistrer le caractre dans le codage iso-8859-1. - +Message warning about characters outside of iso-8859-1 INSERT INTO t1 VALUES ('&<>"\''); SELECT node, hex(node) FROM t1; node &<>"' diff --git a/storage/connect/mysql-test/connect/t/xml.test b/storage/connect/mysql-test/connect/t/xml.test index 0fdf8e90b6e..e837ec79604 100644 --- a/storage/connect/mysql-test/connect/t/xml.test +++ b/storage/connect/mysql-test/connect/t/xml.test @@ -300,6 +300,7 @@ CREATE TABLE t1 (node VARCHAR(50)) ENGINE=connect TABLE_TYPE=xml FILE_NAME='t1.xml' OPTION_LIST='xmlsup=domdoc,rownode=line,encoding=iso-8859-1'; INSERT INTO t1 VALUES (_latin1 0xC0C1C2C3); +--replace_regex /.*iso-8859-1.*/warning about characters outside of iso-8859-1/ INSERT INTO t1 VALUES (_cp1251 0xC0C1C2C3); INSERT INTO t1 VALUES ('&<>"\''); SELECT node, hex(node) FROM t1; From 7ab30f5d95e1214e52b631d01a21494c6eb33c2f Mon Sep 17 00:00:00 2001 From: Olivier Bertrand Date: Fri, 29 Jan 2021 15:45:08 +0100 Subject: [PATCH 123/150] Update bson_get_item modified: bsonudf.cpp --- storage/connect/bsonudf.cpp | 17 ++++++++--------- 1 file changed, 8 insertions(+), 9 deletions(-) diff --git a/storage/connect/bsonudf.cpp b/storage/connect/bsonudf.cpp index b8f4f9699d4..0a9d3813aeb 100644 --- a/storage/connect/bsonudf.cpp +++ b/storage/connect/bsonudf.cpp @@ -3693,8 +3693,8 @@ char *bson_get_item(UDF_INIT *initid, UDF_ARGS *args, char *result, { char *path, *str = NULL; PBVAL jvp; - PBJNX bxp = NULL; PGLOBAL g = (PGLOBAL)initid->ptr; + BJNX bnx(g, NULL, TYPE_STRING, initid->max_length); if (g->N) { str = (char*)g->Activityp; @@ -3707,8 +3707,6 @@ char *bson_get_item(UDF_INIT *initid, UDF_ARGS *args, char *result, PUSH_WARNING("CheckMemory error"); goto fin; } else { - BJNX bnx(g); - jvp = bnx.MakeValue(args, 0, true); if (g->Mrr) { // First argument is a constant @@ -3722,16 +3720,16 @@ char *bson_get_item(UDF_INIT *initid, UDF_ARGS *args, char *result, jvp = (PBVAL)g->Xchk; path = 
MakePSZ(g, args, 1); - bxp = new(g) BJNX(g, jvp, TYPE_STRING, initid->max_length); - if (bxp->SetJpath(g, path, true)) { - PUSH_WARNING(g->Message); + if (bnx.SetJpath(g, path, true)) { goto fin; } else - bxp->ReadValue(g); + jvp = bnx.GetRowValue(g, jvp, 0); - if (!bxp->GetValue()->IsNull()) - str = bxp->GetValue()->GetCharValue(); + if (!bnx.IsJson(jvp)) { + strcpy(g->Message, "Not a Json item"); + } else + str = bnx.Serialize(g, jvp, NULL, 0); if (initid->const_item) // Keep result of constant function @@ -3739,6 +3737,7 @@ char *bson_get_item(UDF_INIT *initid, UDF_ARGS *args, char *result, fin: if (!str) { + PUSH_WARNING(g->Message); *is_null = 1; *res_length = 0; } else From 4a4171286654cfc43d958ef1a26ceb37333c5623 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Vicen=C8=9Biu=20Ciorbaru?= Date: Thu, 28 Jan 2021 23:29:43 +0200 Subject: [PATCH 124/150] Skip TokuDB within autobake-deb.sh Skipping the package within debian/rules won't work because starting with Debian 10, the helper scripts read the control file before the recipe. --- debian/autobake-deb.sh | 6 ++++++ debian/rules | 5 ----- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/debian/autobake-deb.sh b/debian/autobake-deb.sh index ddef69bb350..5cf2398575a 100755 --- a/debian/autobake-deb.sh +++ b/debian/autobake-deb.sh @@ -91,6 +91,12 @@ then sed '/Package: mariadb-plugin-rocksdb/,/^$/d' -i debian/control fi +## Skip TokuDB if arch is not amd64 +if [[ ! $(dpkg-architecture -q DEB_BUILD_ARCH) =~ amd64 ]] +then + sed '/Package: mariadb-plugin-tokudb/,/^$/d' -i debian/control +fi + # Always remove aws plugin, see -DNOT_FOR_DISTRIBUTION in CMakeLists.txt sed '/Package: mariadb-plugin-aws-key-management-10.2/,/^$/d' -i debian/control diff --git a/debian/rules b/debian/rules index 809911d4179..35d5adf3f8a 100755 --- a/debian/rules +++ b/debian/rules @@ -112,11 +112,6 @@ override_dh_auto_install: dh_testdir dh_testroot -# Skip TokuDB if arch is not amd64 -ifneq ($(ARCH), amd64) - sed -i -e "/Package: mariadb-plugin-tokudb/,+16d" debian/control -endif - # Copy systemd files to a location available for dh_installinit cp $(BUILDDIR)/support-files/mariadb.service debian/mariadb-server-10.2.mariadb.service cp $(BUILDDIR)/support-files/mariadb@.service debian/mariadb-server-10.2.mariadb@.service From c2aecb05751ee7319722cbe4c65b974e1f10fbbb Mon Sep 17 00:00:00 2001 From: Olivier Bertrand Date: Sat, 30 Jan 2021 12:07:37 +0100 Subject: [PATCH 125/150] Fix failed test bson and xml --- storage/connect/bson.cpp | 1 - storage/connect/bson.h | 3 +-- storage/connect/mysql-test/connect/r/xml.result | 2 +- 3 files changed, 2 insertions(+), 4 deletions(-) diff --git a/storage/connect/bson.cpp b/storage/connect/bson.cpp index 6c979498286..a494cf3166b 100644 --- a/storage/connect/bson.cpp +++ b/storage/connect/bson.cpp @@ -886,7 +886,6 @@ PBPR BJSON::NewPair(OFFSET key, int type) PBPR bpp = (PBPR)BsonSubAlloc(sizeof(BPAIR)); bpp->Key = key; - bpp->Vlp.Ktp = TYPE_STRG; bpp->Vlp.Type = type; bpp->Vlp.To_Val = 0; bpp->Vlp.Nd = 0; diff --git a/storage/connect/bson.h b/storage/connect/bson.h index 32a9c49b00a..4da86aeac67 100644 --- a/storage/connect/bson.h +++ b/storage/connect/bson.h @@ -37,8 +37,7 @@ typedef struct _jvalue { bool B; // A boolean value True or false (0) }; short Nd; // Number of decimals - char Type; // The value type - char Ktp; // The key type + short Type; // The value type OFFSET Next; // Offset to the next value in array } BVAL, *PBVAL; // end of struct BVALUE diff --git a/storage/connect/mysql-test/connect/r/xml.result 
b/storage/connect/mysql-test/connect/r/xml.result index 599c35cb1ed..6a0c9db27b3 100644 --- a/storage/connect/mysql-test/connect/r/xml.result +++ b/storage/connect/mysql-test/connect/r/xml.result @@ -374,7 +374,7 @@ INSERT INTO t1 VALUES (_cp1251 0xC0C1C2C3); Warnings: Level Warning Code 1105 -Message Com error: Impossible d'enregistrer le caractre dans le codage iso-8859-1. +Message Com error: Unable to save character to 'iso-8859-1' encoding. INSERT INTO t1 VALUES ('&<>"\''); SELECT node, hex(node) FROM t1; From ff5186fd2b1ed06ed40fe3d817b41326e3f943d5 Mon Sep 17 00:00:00 2001 From: Elena Stepanova Date: Fri, 29 Jan 2021 19:31:07 +0200 Subject: [PATCH 126/150] List of unstable tests for 10.2.37 release Test code modifications and new failures from buildbot only registered for the main suite. The rest was updated partially, based on the status of existing JIRA items --- mysql-test/unstable-tests | 294 ++++++++++++++------------------------ 1 file changed, 111 insertions(+), 183 deletions(-) diff --git a/mysql-test/unstable-tests b/mysql-test/unstable-tests index 7eac636dc5d..286c99cba14 100644 --- a/mysql-test/unstable-tests +++ b/mysql-test/unstable-tests @@ -23,28 +23,31 @@ # ############################################################################## # -# Based on bb-10.2-release 72eea39d4c4a8bcadccfdac457e61abc7b618ff8 -# Fri Oct 30 12:58:16 2020 +0200 -# MDEV-23991 fixup: Initialize the memory +# Based on 10.2 9c84852809214e97cf91327a798204c1b745881e +# for main suite changes and failures, and +# bb-10.2-release 72eea39d4c4a8bcadccfdac457e61abc7b618ff8 +# for the rest -main.alter_table : Modified in 10.2.35 -main.alter_table_trans : MDEV-12084 - timeout +main.alter_table_trans : MDEV-12084 - Timeout main.analyze_stmt_slow_query_log : MDEV-12237 - Wrong result -main.aria_icp_debug : Added in 10.2.35 main.auth_named_pipe : MDEV-14724 - System error 2 -main.blackhole : Modified in 10.2.35 -main.bootstrap_innodb : Added in 10.2.35 main.connect : MDEV-17282 - Wrong result main.connect2 : MDEV-13885 - Server crash -main.count_distinct2 : MDEV-11768 - timeout +main.create : Modified in 10.2.37 main.create_delayed : MDEV-10605 - failed with timeout main.create_drop_event : MDEV-16271 - Wrong result +main.cte_nonrecursive : Modified in 10.2.37 +main.cte_nonrecursive_not_embedded : Added in 10.2.37 +main.cte_recursive : Modified in 10.2.37 main.ctype_ucs : MDEV-17681 - Data too long for column main.ctype_upgrade : MDEV-16945 - Error upon mysql_upgrade main.ctype_utf16 : MDEV-10675: timeout or extra warnings -main.ctype_utf8 : Modified in 10.2.35 +main.ctype_utf8mb4 : Modified in 10.2.37 +main.ctype_utf8mb4_heap : Include file modified in 10.2.37 +main.ctype_utf8mb4_innodb : Include file modified in 10.2.37 +main.ctype_utf8mb4_myisam : Include file modified in 10.2.37 main.debug_sync : MDEV-10607 - internal error -main.derived_opt : MDEV-11768 - timeout +main.derived_cond_pushdown : Modified in 10.2.37 main.dirty_close : MDEV-19368 - mysqltest failed but provided no output main.distinct : MDEV-14194 - Crash main.drop_bad_db_type : MDEV-15676 - Wrong result @@ -52,104 +55,92 @@ main.events_2 : MDEV-13277 - Crash main.events_bugs : MDEV-12892 - Crash main.events_restart : MDEV-12236 - Server shutdown problem main.events_slowlog : MDEV-12821 - Wrong result -main.fast_prefix_index_fetch_innodb : Modified in 10.2.35 main.flush : MDEV-19368 - mysqltest failed but provided no output -main.func_gconcat : MDEV-21379 - Valgrind warnings -main.func_json : Modified in 10.2.35 -main.func_test : Modified 
in 10.2.35 +main.func_gconcat : MDEV-21379 - Valgrind warnings; modified in 10.2.37 +main.func_like : Modified in 10.2.37 main.gis : MDEV-13411 - wrong result on P8 +main.gis-json : Modified in 10.2.37 main.gis_notembedded : MDEV-21264 - Wrong result with different default charset -main.grant : Modified in 10.2.35 -main.grant5 : Modified in 10.2.35 +main.group_by : Modified in 10.2.37 main.host_cache_size_functionality : MDEV-10606 - sporadic failure on shutdown main.index_intersect_innodb : MDEV-10643 - failed with timeout main.index_merge_innodb : MDEV-7142 - Plan mismatch -main.information_schema : Modified in 10.2.35 -main.innodb_ext_key : Modified in 10.2.35 -main.innodb_icp_debug : Added in 10.2.35 +main.information_schema : Modified in 10.2.37 +main.innodb_mrr_cpk : MDEV-24737 - Server crash main.innodb_mysql_lock : MDEV-7861 - sporadic lock detection failure (Fixed in 10.3+, MDEV-22947) +main.kill : Modified in 10.2.37 main.kill-2 : MDEV-13257 - Wrong result main.kill_processlist-6619 : MDEV-10793 - Wrong result -main.limit_rows_examined : Modified in 10.2.35 main.loaddata : MDEV-19368 - mysqltest failed but provided no output main.locale : MDEV-20521 - Missing warning -main.lock_view : Added in 10.2.35 +main.lock_tables_lost_commit : MDEV-24624 - Timeout +main.lock_view : Modified in 10.2.37 main.log_slow : MDEV-13263 - Wrong result -main.log_tables : Modified in 10.2.35 main.log_tables-big : MDEV-13408 - wrong result main.mdev-504 : MDEV-15171 - warning main.mdev375 : MDEV-10607 - sporadic "can't connect" main.merge : MDEV-10607 - sporadic "can't connect" -main.multi_update_big : Modified in 10.2.35 -main.myisam_icp_debug : Added in 10.2.35 +main.myisam : Modified in 10.2.37 main.mysql_client_test : MDEV-19369 - error: 5888, status: 23, errno: 2 main.mysql_client_test_comp : MDEV-16641 - Error in exec main.mysql_client_test_nonblock : CONC-208 - Error on Power; MDEV-15096 - exec failed -main.mysql_upgrade : Modified in 10.2.35 +main.mysql_upgrade : Modified in 10.2.37 main.mysql_upgrade_noengine : MDEV-14355 - Wrong result -main.mysqlbinlog_row_minimal : Modified in 10.2.35 -main.mysqld--help : Modified in 10.2.35 main.mysqld_option_err : MDEV-21571 - Crash on bootstrap -main.mysqldump : MDEV-14800 - Stack smashing detected -main.mysqlhotcopy_myisam : MDEV-10995 - Hang on debug +main.mysqldump : Modified in 10.2.37 +main.mysqldump-system : Added in 10.2.37 main.mysqlslap : MDEV-11801 - timeout main.mysqltest : MDEV-9269 - fails on Alpha; MDEV-13887 - Wrong result main.old-mode : MDEV-19373 - Wrong result main.openssl_6975 : MDEV-17184 - Failures with OpenSSL 1.1.1 -main.order_by : Modified in 10.2.35 +main.order_by : Modified in 10.2.37 main.order_by_optimizer_innodb : MDEV-10683 - Wrong result -main.parser : Modified in 10.2.35 -main.partition : Modified in 10.2.35 main.partition_debug_sync : MDEV-15669 - Deadlock found when trying to get lock main.partition_innodb_plugin : MDEV-12901 - Valgrind warnings main.partition_innodb_semi_consistent : MDEV-19411 - Failed to start mysqld.1 -main.plugin_innodb : Modified in 10.2.35 -main.pool_of_threads : Modified in 10.2.35 -main.precedence : Added in 10.2.35 -main.precedence_bugs : Added in 10.2.35 -main.processlist_notembedded : Modified in 10.2.35 +main.precedence : Modified in 10.2.37 +main.processlist_notembedded : MDEV-23752 - Not explainable command main.ps : MDEV-11017 - sporadic wrong Prepared_stmt_count -main.query_cache : MDEV-16180 - Wrong result +main.ps_show_log : Added in 10.2.37 +main.query_cache : MDEV-16180 - Wrong 
result; modified in 10.2.37 main.query_cache_debug : MDEV-15281 - Query cache is disabled -main.range : Modified in 10.2.35 +main.range : Modified in 10.2.37 main.range_innodb : MDEV-23371 - Server crash main.range_vs_index_merge_innodb : MDEV-15283 - Server has gone away main.set_statement : MDEV-13183 - Wrong result main.set_statement_notembedded : MDEV-19414 - Wrong result main.shm : MDEV-12727 - Mismatch, ERROR 2013 main.show_explain : MDEV-10674 - Wrong result code +main.skip_grants : Modified in 10.2.37 +main.sp : Modified in 10.2.37 main.sp-security : MDEV-10607 - sporadic "can't connect" +main.sp-ucs2 : Modified in 10.2.37 main.sp_notembedded : MDEV-10607 - internal error main.ssl : MDEV-17184 - Failures with OpenSSL 1.1.1 main.ssl_ca : MDEV-10895 - SSL connection error on Power main.ssl_cipher : MDEV-17184 - Failures with OpenSSL 1.1.1 main.ssl_timeout : MDEV-11244 - Crash +main.stat_tables : Modified in 10.2.37 main.stat_tables_par_innodb : MDEV-14155 - Wrong rounding main.status : MDEV-13255 - Wrong result -main.subselect4 : Modified in 10.2.35 -main.subselect_innodb : MDEV-10614 - Sporadic wrong results; modified in 10.2.35 -main.sum_distinct-big : Modified in 10.2.35 +main.subselect_innodb : MDEV-10614 - Sporadic wrong results main.tc_heuristic_recover : MDEV-14189 - Wrong result -main.temp_table_symlink : MDEV-24058 - Wrong error code; added in 10.2.35 +main.temp_table_symlink : MDEV-24058 - Wrong error code main.type_blob : MDEV-15195 - Wrong result -main.type_date : Modified in 10.2.35 -main.type_datetime : Modified in 10.2.35 main.type_datetime_hires : MDEV-10687 - Timeout -main.type_newdecimal : Modified in 10.2.35 main.type_temporal_innodb : MDEV-24025 - Wrong result -main.udf : Modified in 10.2.35 +main.type_year : Modified in 10.2.37 +main.user_limits : Modified in 10.2.37 main.userstat : MDEV-12904 - SSL errors -main.view : Modified in 10.2.35 +main.view : Modified in 10.2.37 main.wait_timeout : MDEV-19023 - Lost connection to MySQL server during query -main.win : Modified in 10.2.35 -main.xa : MDEV-11769 - lock wait timeout +main.xa : MDEV-11769 - lock wait timeout; modified in 10.2.37 #----------------------------------------------------------------------- -archive.archive_bitfield : MDEV-11771 - table is marked as crashed -archive.archive_symlink : MDEV-12170 - unexpected error on rmdir -archive.discover : MDEV-10510 - Table is marked as crashed -archive.mysqlhotcopy_archive : MDEV-10995 - Hang on debug +archive.archive_bitfield : MDEV-11771 - table is marked as crashed +archive.archive_symlink : MDEV-12170 - unexpected error on rmdir +archive.discover : MDEV-10510 - Table is marked as crashed #----------------------------------------------------------------------- @@ -157,18 +148,14 @@ archive-test_sql_discovery.discover : MDEV-16817 - Table marked as crashed #----------------------------------------------------------------------- -binlog.binlog_commit_wait : MDEV-10150 - Mismatch -binlog.binlog_killed : MDEV-12925 - Wrong result -binlog.binlog_max_extension : MDEV-19762 - Crash on shutdown -binlog.binlog_mysqlbinlog_row : Modified in 10.2.35 -binlog.binlog_mysqlbinlog_row_frag : Modified in 10.2.35 -binlog.binlog_no_uniqfile_crash : MDEV-24078 - Server crash upon shutdown -binlog.binlog_recover_checksum_error : Added in 10.2.35 -binlog.binlog_show_binlog_event_random_pos : Modified in 10.2.35 -binlog.binlog_stm_mix_innodb_myisam : MDEV-24057 - Wrong result -binlog.binlog_xa_recover : MDEV-8517 - Extra checkpoint -binlog.flashback-largebinlog : MDEV-19764 - Out of 
memory -binlog.load_data_stm_view : MDEV-16948 - Wrong result +binlog.binlog_commit_wait : MDEV-10150 - Mismatch +binlog.binlog_killed : MDEV-12925 - Wrong result +binlog.binlog_max_extension : MDEV-19762 - Crash on shutdown +binlog.binlog_no_uniqfile_crash : MDEV-24078 - Server crash upon shutdown +binlog.binlog_stm_mix_innodb_myisam : MDEV-24057 - Wrong result +binlog.binlog_xa_recover : MDEV-12908 - Extra checkpoint +binlog.flashback-largebinlog : MDEV-19764 - Out of memory +binlog.load_data_stm_view : MDEV-16948 - Wrong result #----------------------------------------------------------------------- @@ -193,10 +180,9 @@ binlog_encryption.rpl_typeconv : MDEV-14362 - Lost con #----------------------------------------------------------------------- -connect.pivot : MDEV-14803 - Failed to discover table -connect.updelx : Modified in 10.2.35 -connect.vcol : MDEV-12374 - Fails on Windows -connect.zip : MDEV-13884 - Wrong result +connect.pivot : MDEV-14803 - Failed to discover table +connect.vcol : MDEV-12374 - Fails on Windows +connect.zip : MDEV-13884 - Wrong result #----------------------------------------------------------------------- @@ -204,40 +190,34 @@ disks.disks_notembedded : MDEV-21587 - Wrong result #----------------------------------------------------------------------- -encryption.create_or_replace : Modified in 10.2.35 -encryption.create_or_replace_big : Added in 10.2.35 -encryption.debug_key_management : MDEV-13841 - Timeout -encryption.encrypt_and_grep : MDEV-13765 - Wrong result -encryption.innochecksum : MDEV-13644 - Assertion failure -encryption.innodb-bad-key-change2 : MDEV-19118 - Can't connect through socket -encryption.innodb-compressed-blob : MDEV-14728 - Unable to get certificate -encryption.innodb-discard-import : MDEV-19113 - Timeout -encryption.innodb-encryption-alter : MDEV-13566 - Lock wait timeout -encryption.innodb-first-page-read : MDEV-14356 - Timeout in wait condition -encryption.innodb-force-corrupt : MDEV-17286 - SSL error -encryption.innodb-missing-key : MDEV-17286 - SSL error -encryption.innodb-page_encryption : MDEV-10641 - mutex problem -encryption.innodb-page_encryption_compression : Modified in 10.2.35 -encryption.innodb-page_encryption_log_encryption : Modified in 10.2.35 -encryption.innodb-read-only : MDEV-16563 - Crash on startup -encryption.innodb-redo-badkey : MDEV-12898 - Server hang on startup -encryption.innodb-remove-encryption : MDEV-16493 - Timeout in wait condition -encryption.innodb-spatial-index : MDEV-13746 - Wrong result -encryption.innodb_encrypt_key_rotation_age : MDEV-19763 - Timeout -encryption.innodb_encrypt_log : MDEV-13725 - Wrong result -encryption.innodb_encrypt_log_corruption : MDEV-14379 - Server crash -encryption.innodb_encrypt_temporary_tables : MDEV-20142 - Wrong result -encryption.innodb_encryption : MDEV-14728 - Unable to get certificate; MDEV-15675 - Timeout -encryption.innodb_encryption-page-compression : MDEV-12630 - crash or assertion failure -encryption.innodb_encryption_discard_import : MDEV-16116 - Wrong result -encryption.innodb_encryption_is : MDEV-12898 - Server hang on startup -encryption.innodb_encryption_row_compressed : MDEV-16113 - Crash -encryption.innodb_first_page : MDEV-10689 - Crash -encryption.innodb_onlinealter_encryption : MDEV-17287 - SIGABRT on server restart -encryption.innodb_scrub : MDEV-8139 - scrubbing tests need fixing (Fixed in 10.5+) -encryption.innodb_scrub_background : MDEV-8139 - scrubbing tests need fixing (Fixed in 10.5+) -encryption.innodb_scrub_compressed : MDEV-8139 - scrubbing 
tests need fixing (Fixed in 10.5+) -encryption.tempfiles_encrypted : Added in 10.2.35 +encryption.create_or_replace : MDEV-24081 - Lock wait timeout exceeded +encryption.debug_key_management : MDEV-13841 - Timeout +encryption.encrypt_and_grep : MDEV-13765 - Wrong result +encryption.innochecksum : MDEV-13644 - Assertion failure +encryption.innodb-bad-key-change2 : MDEV-19118 - Can't connect through socket +encryption.innodb-compressed-blob : MDEV-14728 - Unable to get certificate +encryption.innodb-discard-import : MDEV-19113 - Timeout +encryption.innodb-encryption-alter : MDEV-13566 - Lock wait timeout +encryption.innodb-first-page-read : MDEV-14356 - Timeout in wait condition +encryption.innodb-force-corrupt : MDEV-17286 - SSL error +encryption.innodb-missing-key : MDEV-17286 - SSL error +encryption.innodb-page_encryption : MDEV-10641 - mutex problem +encryption.innodb-read-only : MDEV-16563 - Crash on startup +encryption.innodb-remove-encryption : MDEV-16493 - Timeout in wait condition +encryption.innodb-spatial-index : MDEV-13746 - Wrong result +encryption.innodb_encrypt_key_rotation_age : MDEV-19763 - Timeout +encryption.innodb_encrypt_log : MDEV-13725 - Wrong result +encryption.innodb_encrypt_log_corruption : MDEV-14379 - Server crash +encryption.innodb_encrypt_temporary_tables : MDEV-20142 - Wrong result +encryption.innodb_encryption : MDEV-14728 - Unable to get certificate; MDEV-15675 - Timeout +encryption.innodb_encryption-page-compression : MDEV-12630 - crash or assertion failure +encryption.innodb_encryption_discard_import : MDEV-16116 - Wrong result +encryption.innodb_encryption_row_compressed : MDEV-16113 - Crash +encryption.innodb_first_page : MDEV-10689 - Crash +encryption.innodb_onlinealter_encryption : MDEV-17287 - SIGABRT on server restart +encryption.innodb_scrub : MDEV-8139 - scrubbing tests need fixing (Fixed in 10.5+) +encryption.innodb_scrub_background : MDEV-8139 - scrubbing tests need fixing (Fixed in 10.5+) +encryption.innodb_scrub_compressed : MDEV-8139 - scrubbing tests need fixing (Fixed in 10.5+) #----------------------------------------------------------------------- @@ -285,33 +265,20 @@ galera_3nodes.* : Suite is not stable yet #----------------------------------------------------------------------- -gcol.gcol_keys_innodb : Include file modified in 10.2.35 -gcol.gcol_keys_myisam : Include file modified in 10.2.35 -gcol.gcol_partition_innodb : Include file modified in 10.2.35 -gcol.gcol_update : Include file modified in 10.2.35 -gcol.innodb_virtual_basic : MDEV-16950 - Failing assertion -gcol.innodb_virtual_debug : MDEV-23111 - Server crash -gcol.innodb_virtual_debug_purge : Include file modified in 10.2.35 -gcol.innodb_virtual_fk : Modified in 10.2.35 -gcol.innodb_virtual_index : Modified in 10.2.35 -gcol.innodb_virtual_purge : Include file modified in 10.2.35 +gcol.innodb_virtual_basic : MDEV-16950 - Failing assertion +gcol.innodb_virtual_debug : MDEV-23111 - Server crash #----------------------------------------------------------------------- innodb.101_compatibility : MDEV-13891 - Wrong result innodb.alter_copy : MDEV-16181 - Assertion failure innodb.alter_crash : MDEV-16944 - The process cannot access the file -innodb.alter_table : Modified in 10.2.35 innodb.binlog_consistent : MDEV-10618 - Server fails to start innodb.blob-crash : MDEV-20481 - Failures upon recovery innodb.doublewrite : MDEV-12905 - Server crash -innodb.foreign-keys : Modified in 10.2.35 -innodb.foreign_key : Modified in 10.2.35 innodb.group_commit_crash : MDEV-14191 - InnoDB 
registration failed innodb.group_commit_crash_no_optimize_thread : MDEV-11770 - Checksum mismatch innodb.ibuf_not_empty : MDEV-19021 - Wrong result -innodb.innodb : Modified in 10.2.35 -innodb.innodb-64k : Modified in 10.2.35 innodb.innodb-64k-crash : MDEV-13872 - Failure and crash on startup innodb.innodb-alter-debug : MDEV-13182 - InnoDB: adjusting FSP_SPACE_FLAGS innodb.innodb-alter-table : MDEV-10619 - Testcase timeout @@ -319,18 +286,15 @@ innodb.innodb-blob : MDEV-12053 - Client crash innodb.innodb-change-buffer-recovery : MDEV-19115 - Lost connection to MySQL server during query innodb.innodb-fk : MDEV-13832 - Assertion failure on shutdown innodb.innodb-get-fk : MDEV-13276 - Server crash -innodb.innodb-index : Include file modified in 10.2.35 -innodb.innodb-index-debug : Include file modified in 10.2.35 innodb.innodb-index-online : MDEV-14809 - Cannot save statistics innodb.innodb-page_compression_default : MDEV-13644 - Assertion failure innodb.innodb-page_compression_lzma : MDEV-14353 - Wrong result innodb.innodb-page_compression_zip : MDEV-10641 - mutex problem innodb.innodb-table-online : MDEV-13894 - Wrong result +innodb.innodb-ucs2 : MDEV-24505 - Assertion failure innodb.innodb-wl5522-1 : MDEV-22945 - Server crash innodb.innodb-wl5522-debug : MDEV-14200 - Wrong errno innodb.innodb_buffer_pool_dump_pct : MDEV-20139 - Timeout in wait_condition.inc -innodb.innodb_buffer_pool_resize_with_chunks : MDEV-16964 - Assertion failure -innodb.innodb_bug14147491 : MDEV-11808 - Index is corrupt innodb.innodb_bug30423 : MDEV-7311 - Wrong result innodb.innodb_bug47167 : MDEV-20524 - Table 'user' is marked as crashed and should be repaired innodb.innodb_bug48024 : MDEV-14352 - Assertion failure @@ -339,14 +303,11 @@ innodb.innodb_defrag_stats_many_tables : MDEV-14198 - Table is full innodb.innodb_information_schema : MDEV-8851 - Wrong result innodb.innodb_max_recordsize_32k : MDEV-14801 - Operation failed innodb.innodb_max_recordsize_64k : MDEV-15203 - Wrong result -innodb.innodb_monitor : MDEV-10939 - Testcase timeout innodb.innodb_mysql : MDEV-19873 - Wrong result innodb.innodb_stats : MDEV-10682 - wrong result -innodb.innodb_stats_drop_locked : Modified in 10.2.35 innodb.innodb_stats_persistent : MDEV-21567 - Wrong result in execution plan innodb.innodb_stats_persistent_debug : MDEV-14801 - Operation failed innodb.innodb_sys_semaphore_waits : MDEV-10331 - Semaphore wait -innodb.innodb_trx_weight : Configuration deleted in 10.2.35 innodb.innodb_zip_innochecksum2 : MDEV-13882 - Warning: difficult to find free blocks innodb.log_corruption : MDEV-13251 - Wrong result innodb.log_data_file_size : MDEV-14204 - Server failed to start @@ -357,24 +318,19 @@ innodb.purge_secondary : MDEV-15681 - Wrong result innodb.purge_thread_shutdown : MDEV-13792 - Wrong result innodb.read_only_recovery : MDEV-13886 - Server crash innodb.recovery_shutdown : MDEV-15671 - Checksum mismatch in datafile -innodb.row_format_redundant : MDEV-15192 - Trying to access missing tablespace; modified in 10.2.35 -innodb.stats_persistent : Added in 10.2.35 +innodb.row_format_redundant : MDEV-15192 - Trying to access missing tablespace innodb.table_definition_cache_debug : MDEV-14206 - Extra warning innodb.table_flags : MDEV-13572 - Wrong result; MDEV-19374 - Server failed to start innodb.temp_table_savepoint : MDEV-24077 - Assertion failure innodb.temporary_table : MDEV-13265 - Wrong result -innodb.truncate : Modified in 10.2.35 innodb.undo_truncate : MDEV-17340 - Server hung innodb.undo_truncate_recover : MDEV-17679 - MySQL 
server has gone away -innodb.update-cascade : Combinations added in 10.2.35 innodb.update_time : MDEV-14804 - Wrong result innodb.xa_recovery : MDEV-15279 - mysqld got exception #----------------------------------------------------------------------- -innodb_fts.basic : Modified in 10.2.35 innodb_fts.fulltext2 : MDEV-24074 - Server crash -innodb_fts.innodb_fts_misc_1 : Modified in 10.2.35 innodb_fts.innodb_fts_misc_debug : MDEV-14156 - Unexpected warning innodb_fts.innodb_fts_plugin : MDEV-13888 - Errors in server log innodb_fts.innodb_fts_stopword_charset : MDEV-13259 - Table crashed @@ -383,13 +339,11 @@ innodb_fts.sync_ddl : MDEV-21568 - Errno: 2000 #----------------------------------------------------------------------- innodb_gis.gis_split_nan : MDEV-21678 - Cannot get geometry object -innodb_gis.rtree_add_index : Include file modified in 10.2.35 -innodb_gis.rtree_compress : Include file modified in 10.2.35 innodb_gis.rtree_concurrent_srch : MDEV-15284 - Wrong result with embedded -innodb_gis.rtree_purge : MDEV-15275 - Timeout; include file modified in 10.2.35 +innodb_gis.rtree_purge : MDEV-15275 - Timeout innodb_gis.rtree_recovery : MDEV-15274 - Error on check innodb_gis.rtree_split : MDEV-14208 - Too many arguments -innodb_gis.rtree_undo : MDEV-14456 - Timeout in include file; include file modified in 10.2.35 +innodb_gis.rtree_undo : MDEV-14456 - Timeout in include file innodb_gis.types : MDEV-15679 - Table is marked as crashed #----------------------------------------------------------------------- @@ -412,24 +366,18 @@ maria.maria : MDEV-14430 - Extra warning #----------------------------------------------------------------------- -mariabackup.apply-log-only : MDEV-20135 - Timeout -mariabackup.backup_ssl : MDEV-24073 - Server crash upon shutdown -mariabackup.data_directory : MDEV-15270 - Error on exec -mariabackup.ddl_incremental_encrypted : Added in 10.2.35 -mariabackup.full_backup : MDEV-16571 - Wrong result -mariabackup.huge_lsn : MDEV-17286 - SSL error -mariabackup.incremental_backup : MDEV-21222 - Memory allocation failure -mariabackup.incremental_ddl_during_backup : Modified in 10.2.35 -mariabackup.incremental_encrypted : MDEV-15667 - Timeout -mariabackup.innodb_redo_overwrite : Added in 10.2.35 -mariabackup.mdev-14447 : MDEV-15201 - Timeout -mariabackup.mlog_index_load : Modified in 10.2.35 -mariabackup.partial_exclude : MDEV-15270 - Error on exec -mariabackup.rpl_slave_info : Added in 10.2.35 -mariabackup.unencrypted_page_compressed : Include file modified in 10.2.35 -mariabackup.xb_compressed_encrypted : MDEV-14812 - Segmentation fault -mariabackup.xb_page_compress : MDEV-14810 - status: 1, errno: 11 -mariabackup.xb_partition : MDEV-17584 - Crash on shutdown +mariabackup.apply-log-only : MDEV-20135 - Timeout +mariabackup.backup_ssl : MDEV-24073 - Server crash upon shutdown +mariabackup.data_directory : MDEV-15270 - Error on exec +mariabackup.full_backup : MDEV-16571 - Wrong result +mariabackup.huge_lsn : MDEV-17286 - SSL error +mariabackup.incremental_backup : MDEV-21222 - Memory allocation failure +mariabackup.incremental_encrypted : MDEV-15667 - Timeout +mariabackup.mdev-14447 : MDEV-15201 - Timeout +mariabackup.partial_exclude : MDEV-15270 - Error on exec +mariabackup.xb_compressed_encrypted : MDEV-14812 - Segmentation fault +mariabackup.xb_page_compress : MDEV-14810 - status: 1, errno: 11 +mariabackup.xb_partition : MDEV-17584 - Crash on shutdown #----------------------------------------------------------------------- @@ -453,8 +401,6 @@ multi_source.info_logs : 
MDEV-12629 - Valgrind, MDEV-10042 - Wrong result, MDE multi_source.mdev-8874 : MDEV-19415 - AddressSanitizer: heap-use-after-free multi_source.mdev-9544 : MDEV-19415 - AddressSanitizer: heap-use-after-free multi_source.multisource : MDEV-10417 - Fails on Mips -multi_source.reset_slave : MDEV-10690 - Wrong result -multi_source.simple : MDEV-4633 - Wrong result multi_source.status_vars : MDEV-4632 - failed while waiting for Slave_received_heartbeats #----------------------------------------------------------------------- @@ -503,7 +449,7 @@ perfschema_stress.* : MDEV-10996 - Not maintained plugins.feedback_plugin_send : MDEV-7932, MDEV-11118 - Connection problems and such plugins.processlist : MDEV-16574 - Wrong result -plugins.server_audit : MDEV-9562 - crashes on sol10-sparc; modified in 10.2.35 +plugins.server_audit : MDEV-9562 - crashes on sol10-sparc plugins.thread_pool_server_audit : MDEV-14295 - Wrong result #----------------------------------------------------------------------- @@ -553,7 +499,6 @@ rpl.rpl_auto_increment : MDEV-10417 - Fails on Mips rpl.rpl_auto_increment_bug45679 : MDEV-10417 - Fails on Mips rpl.rpl_auto_increment_update_failure : MDEV-10625 - warnings in error log rpl.rpl_binlog_errors : MDEV-12742 - Crash -rpl.rpl_binlog_index : Modified in 10.2.35 rpl.rpl_checksum_cache : MDEV-22510 - Server crash rpl.rpl_colSize : MDEV-16112 - Server crash rpl.rpl_ctype_latin1 : MDEV-14813 - Wrong result on Mac @@ -563,13 +508,10 @@ rpl.rpl_domain_id_filter_master_crash : MDEV-19043 - Warnings/errors rpl.rpl_domain_id_filter_restart : MDEV-10684 - Wrong result rpl.rpl_drop_db_fail : MDEV-16898 - Slave fails to start rpl.rpl_extra_col_master_innodb : MDEV-16570 - Extra warning -rpl.rpl_filter_tables_dynamic : Modified in 10.2.35 -rpl.rpl_filter_wild_tables_dynamic : Modified in 10.2.35 rpl.rpl_flushlog_loop : MDEV-21570 - Server crash rpl.rpl_get_lock : MDEV-19368 - mysqltest failed but provided no output rpl.rpl_gtid_basic : MDEV-10681 - server startup problem -rpl.rpl_gtid_crash : MDEV-13643 - Lost connection; modified in 10.2.35 -rpl.rpl_gtid_delete_domain : MDEV-14463 - Timeout; MDEV-23103 - Could not delete gtid domain; modified in 10.2.35 +rpl.rpl_gtid_crash : MDEV-13643 - Lost connection rpl.rpl_gtid_errorhandling : MDEV-13261 - Crash rpl.rpl_gtid_mdev9033 : MDEV-10680 - warnings rpl.rpl_gtid_reconnect : MDEV-14497 - Crash @@ -587,12 +529,9 @@ rpl.rpl_mariadb_slave_capability : MDEV-11018 - Extra lines in binlog rpl.rpl_mdev12179 : MDEV-19043 - Warnings/errors rpl.rpl_mdev6020 : MDEV-23426 - Server crash, ASAN failures; MDEV-15272 - Server crash rpl.rpl_mixed_mixing_engines : MDEV-21266 - Timeout -rpl.rpl_mysql_upgrade : Modified in 10.2.35 rpl.rpl_non_direct_row_mixing_engines : MDEV-16561 - Timeout in master_pos_wait rpl.rpl_parallel : MDEV-10653 - Timeouts -rpl.rpl_parallel2 : Modified in 10.2.35 rpl.rpl_parallel_conflicts : MDEV-15272 - Server crash -rpl.rpl_parallel_mdev6589 : MDEV-12979 - Assertion failure rpl.rpl_parallel_multilevel2 : MDEV-14723 - Timeout rpl.rpl_parallel_optimistic : MDEV-15278 - Failed to sync with master rpl.rpl_parallel_optimistic_nobinlog : MDEV-15278 - Failed to sync with master @@ -613,19 +552,15 @@ rpl.rpl_row_img_eng_noblob : MDEV-13875 - command "diff_files" failed rpl.rpl_row_index_choice : MDEV-15196 - Slave crash rpl.rpl_row_sp001 : MDEV-9329 - Fails on Ubuntu/s390x rpl.rpl_row_until : MDEV-14052 - Master will not send events with checksum -rpl.rpl_semi_sync : MDEV-11220 - Wrong result -rpl.rpl_semi_sync_after_sync : MDEV-14366 - Wrong 
result -rpl.rpl_semi_sync_after_sync_row : MDEV-14366 - Wrong result rpl.rpl_semi_sync_event_after_sync : MDEV-11806 - warnings -rpl.rpl_semi_sync_uninstall_plugin : MDEV-7140 - Assorted failures +rpl.rpl_semi_sync_uninstall_plugin : MDEV-24561 - Wrong usage of mutex; MDEV-7140 - Assorted failures rpl.rpl_semi_sync_wait_point : MDEV-11807 - timeout in wait condition rpl.rpl_show_slave_hosts : MDEV-10681 - Crash rpl.rpl_skip_replication : MDEV-23372 - Extra warning -rpl.rpl_slave_grp_exec : MDEV-10514 - Deadlock; ; modified in 10.2.35 rpl.rpl_slave_load_tmpdir_not_exist : MDEV-23372 - Extra warning rpl.rpl_slow_query_log : MDEV-13250 - Test abort rpl.rpl_sp_effects : MDEV-13249 - Crash -rpl.rpl_start_stop_slave : MDEV-13567 - Sync slave timeout; modified in 10.2.35 +rpl.rpl_start_stop_slave : MDEV-13567 - Sync slave timeout rpl.rpl_stm_relay_ign_space : MDEV-14360 - Test assertion rpl.rpl_stm_stop_middle_group : MDEV-13791 - Server crash rpl.rpl_sync : MDEV-10633 - Database page corruption @@ -635,7 +570,6 @@ rpl.rpl_trigger : MDEV-18055 - Wrong result rpl.rpl_truncate_3innodb : MDEV-19454 - Sporadic syntax error rpl.rpl_user_variables : MDEV-20522 - Wrong result rpl.sec_behind_master-5114 : MDEV-13878 - Wrong result -rpl.show_status_stop_slave_race-7126 : Modified in 10.2.35 #----------------------------------------------------------------------- @@ -682,14 +616,9 @@ sys_vars.innodb_buffer_pool_dump_at_shutdown_basic : MDEV-14280 - Unexpected err sys_vars.innodb_checksum_algorithm_basic : MDEV-21568 - Errno: 2000 sys_vars.keep_files_on_create_basic : MDEV-10676 - timeout sys_vars.log_slow_admin_statements_func : MDEV-12235 - Server crash -sys_vars.replicate_do_db_basic : Modified in 10.2.35 -sys_vars.rpl_init_slave_func : Modified in 10.2.35 -sys_vars.session_track_system_variables_basic : Modified in 10.2.35 sys_vars.slow_query_log_func : MDEV-14273 - Wrong result sys_vars.thread_cache_size_func : MDEV-11775 - Wrong result sys_vars.wait_timeout_func : MDEV-12896 - Wrong result -sys_vars.wsrep_cluster_address_basic : Modified in 10.2.35 -sys_vars.wsrep_on_basic : Configuration deleted in 10.2.35 #----------------------------------------------------------------------- @@ -721,7 +650,7 @@ tokudb_alter_table.hcad_all_add2 : MDEV-15269 - Timeout #----------------------------------------------------------------------- -tokudb_backup.* : MDEV-11001 - Missing include file +tokudb_backup.* : MDEV-11001 - Missing include file (Won't fix) #----------------------------------------------------------------------- @@ -738,11 +667,11 @@ tokudb_parts.partition_alter4_tokudb : MDEV-12640 - Lost connection #----------------------------------------------------------------------- -tokudb_rpl.* : MDEV-11001 - Missing include file +tokudb_rpl.* : MDEV-11001 - Missing include file (Won't fix) #----------------------------------------------------------------------- -tokudb_sys_vars.* : MDEV-11001 - Missing include file +tokudb_sys_vars.* : MDEV-11001 - Missing include file (Won't fix) #----------------------------------------------------------------------- @@ -760,14 +689,13 @@ unit.mf_iocache : MDEV-20952 - Buffer overflow vcol.not_supported : MDEV-10639 - Testcase timeout vcol.vcol_keys_innodb : MDEV-10639 - Testcase timeout -vcol.vcol_misc : MDEV-16651 - Wrong error message; modified in 10.2.35 +vcol.vcol_misc : MDEV-16651 - Wrong error message #----------------------------------------------------------------------- wsrep.foreign_key : MDEV-14725 - WSREP has not yet prepared node wsrep.mdev_6832 : MDEV-14195 - 
Check testcase failed wsrep.pool_of_threads : MDEV-17345 - WSREP has not yet prepared node for application use -wsrep.variables : Modified in 10.2.35 #----------------------------------------------------------------------- From c88fcf07d9f9478166f1fc981286f6e758668fea Mon Sep 17 00:00:00 2001 From: Nikita Malyavin Date: Fri, 8 Jan 2021 22:09:26 +1000 Subject: [PATCH 127/150] fixup MDEV-17556: fix mroonga --- storage/mroonga/ha_mroonga.cpp | 28 +++++++++---------- .../mroonga/lib/mrn_debug_column_access.cpp | 6 ++-- .../mroonga/lib/mrn_debug_column_access.hpp | 4 +-- 3 files changed, 19 insertions(+), 19 deletions(-) diff --git a/storage/mroonga/ha_mroonga.cpp b/storage/mroonga/ha_mroonga.cpp index 4e084400ed6..c2bff4dd56b 100644 --- a/storage/mroonga/ha_mroonga.cpp +++ b/storage/mroonga/ha_mroonga.cpp @@ -5917,7 +5917,7 @@ int ha_mroonga::wrapper_write_row_index(uchar *buf) DBUG_RETURN(0); } - mrn::DebugColumnAccess debug_column_access(table, table->read_set); + mrn::DebugColumnAccess debug_column_access(table, &table->read_set); uint i; uint n_keys = table->s->keys; for (i = 0; i < n_keys; i++) { @@ -5992,7 +5992,7 @@ int ha_mroonga::storage_write_row(uchar *buf) DBUG_RETURN(error); } - mrn::DebugColumnAccess debug_column_access(table, table->read_set); + mrn::DebugColumnAccess debug_column_access(table, &table->read_set); for (i = 0; i < n_columns; i++) { Field *field = table->field[i]; @@ -6273,7 +6273,7 @@ int ha_mroonga::storage_write_row_multiple_column_indexes(uchar *buf, int error = 0; - mrn::DebugColumnAccess debug_column_access(table, table->read_set); + mrn::DebugColumnAccess debug_column_access(table, &table->read_set); uint i; uint n_keys = table->s->keys; for (i = 0; i < n_keys; i++) { @@ -6567,7 +6567,7 @@ int ha_mroonga::wrapper_update_row_index(const uchar *old_data, DBUG_RETURN(0); } - mrn::DebugColumnAccess debug_column_access(table, table->read_set); + mrn::DebugColumnAccess debug_column_access(table, &table->read_set); uint i; uint n_keys = table->s->keys; for (i = 0; i < n_keys; i++) { @@ -6688,7 +6688,7 @@ int ha_mroonga::storage_update_row(const uchar *old_data, grn_obj new_value; GRN_VOID_INIT(&new_value); { - mrn::DebugColumnAccess debug_column_access(table, table->read_set); + mrn::DebugColumnAccess debug_column_access(table, &table->read_set); generic_store_bulk(field, &new_value); } grn_obj casted_value; @@ -6717,7 +6717,7 @@ int ha_mroonga::storage_update_row(const uchar *old_data, storage_store_fields_for_prep_update(old_data, new_data, record_id); { mrn::Lock lock(&(share->record_mutex), have_unique_index()); - mrn::DebugColumnAccess debug_column_access(table, table->read_set); + mrn::DebugColumnAccess debug_column_access(table, &table->read_set); if ((error = storage_prepare_delete_row_unique_indexes(old_data, record_id))) { DBUG_RETURN(error); @@ -6742,7 +6742,7 @@ int ha_mroonga::storage_update_row(const uchar *old_data, #endif if (bitmap_is_set(table->write_set, field->field_index)) { - mrn::DebugColumnAccess debug_column_access(table, table->read_set); + mrn::DebugColumnAccess debug_column_access(table, &table->read_set); DBUG_PRINT("info", ("mroonga: update column %d(%d)",i,field->field_index)); if (field->is_null()) continue; @@ -6819,7 +6819,7 @@ int ha_mroonga::storage_update_row(const uchar *old_data, if (table->found_next_number_field && !table->s->next_number_keypart && new_data == table->record[0]) { - mrn::DebugColumnAccess debug_column_access(table, table->read_set); + mrn::DebugColumnAccess debug_column_access(table, &table->read_set); 
Field_num *field = (Field_num *) table->found_next_number_field; if (field->unsigned_flag || field->val_int() > 0) { MRN_LONG_TERM_SHARE *long_term_share = share->long_term_share; @@ -6876,7 +6876,7 @@ int ha_mroonga::storage_update_row_index(const uchar *old_data, my_ptrdiff_t ptr_diff = PTR_BYTE_DIFF(old_data, table->record[0]); - mrn::DebugColumnAccess debug_column_access(table, table->read_set); + mrn::DebugColumnAccess debug_column_access(table, &table->read_set); uint i; uint n_keys = table->s->keys; mrn_change_encoding(ctx, NULL); @@ -7092,7 +7092,7 @@ int ha_mroonga::wrapper_delete_row_index(const uchar *buf) DBUG_RETURN(0); } - mrn::DebugColumnAccess debug_column_access(table, table->read_set); + mrn::DebugColumnAccess debug_column_access(table, &table->read_set); uint i; uint n_keys = table->s->keys; for (i = 0; i < n_keys; i++) { @@ -7243,7 +7243,7 @@ int ha_mroonga::storage_delete_row_index(const uchar *buf) GRN_TEXT_INIT(&key, 0); GRN_TEXT_INIT(&encoded_key, 0); - mrn::DebugColumnAccess debug_column_access(table, table->read_set); + mrn::DebugColumnAccess debug_column_access(table, &table->read_set); uint i; uint n_keys = table->s->keys; mrn_change_encoding(ctx, NULL); @@ -11434,7 +11434,7 @@ void ha_mroonga::storage_store_fields(uchar *buf, grn_id record_id) } } - mrn::DebugColumnAccess debug_column_access(table, table->write_set); + mrn::DebugColumnAccess debug_column_access(table, &table->write_set); DBUG_PRINT("info", ("mroonga: store column %d(%d)",i,field->field_index)); field->move_field_offset(ptr_diff); if (strcmp(MRN_COLUMN_NAME_ID, column_name) == 0) { @@ -11499,7 +11499,7 @@ void ha_mroonga::storage_store_fields_for_prep_update(const uchar *old_data, ) #endif ) { - mrn::DebugColumnAccess debug_column_access(table, table->write_set); + mrn::DebugColumnAccess debug_column_access(table, &table->write_set); DBUG_PRINT("info", ("mroonga: store column %d(%d)",i,field->field_index)); grn_obj value; GRN_OBJ_INIT(&value, GRN_BULK, 0, grn_obj_get_range(ctx, grn_columns[i])); @@ -11535,7 +11535,7 @@ void ha_mroonga::storage_store_fields_by_index(uchar *buf) if (KEY_N_KEY_PARTS(key_info) == 1) { my_ptrdiff_t ptr_diff = PTR_BYTE_DIFF(buf, table->record[0]); Field *field = key_info->key_part->field; - mrn::DebugColumnAccess debug_column_access(table, table->write_set); + mrn::DebugColumnAccess debug_column_access(table, &table->write_set); field->move_field_offset(ptr_diff); storage_store_field(field, (const char *)key, key_length); field->move_field_offset(-ptr_diff); diff --git a/storage/mroonga/lib/mrn_debug_column_access.cpp b/storage/mroonga/lib/mrn_debug_column_access.cpp index 5e875953b57..cb2ce7e35ca 100644 --- a/storage/mroonga/lib/mrn_debug_column_access.cpp +++ b/storage/mroonga/lib/mrn_debug_column_access.cpp @@ -20,17 +20,17 @@ #include "mrn_debug_column_access.hpp" namespace mrn { - DebugColumnAccess::DebugColumnAccess(TABLE *table, MY_BITMAP *bitmap) + DebugColumnAccess::DebugColumnAccess(TABLE *table, MY_BITMAP **bitmap) : table_(table), bitmap_(bitmap) { #ifdef DBUG_ASSERT_EXISTS - map_ = dbug_tmp_use_all_columns(table_, &bitmap_); + map_ = dbug_tmp_use_all_columns(table_, bitmap_); #endif } DebugColumnAccess::~DebugColumnAccess() { #ifdef DBUG_ASSERT_EXISTS - dbug_tmp_restore_column_map(&bitmap_, map_); + dbug_tmp_restore_column_map(bitmap_, map_); #endif } } diff --git a/storage/mroonga/lib/mrn_debug_column_access.hpp b/storage/mroonga/lib/mrn_debug_column_access.hpp index 77fe05f383c..954e04135f8 100644 --- a/storage/mroonga/lib/mrn_debug_column_access.hpp 
+++ b/storage/mroonga/lib/mrn_debug_column_access.hpp @@ -25,12 +25,12 @@ namespace mrn { class DebugColumnAccess { TABLE *table_; - MY_BITMAP *bitmap_; + MY_BITMAP **bitmap_; #ifdef DBUG_ASSERT_EXISTS MY_BITMAP *map_; #endif public: - DebugColumnAccess(TABLE *table, MY_BITMAP *bitmap); + DebugColumnAccess(TABLE *table, MY_BITMAP **bitmap); ~DebugColumnAccess(); }; } From 59eda73eff1a22ac0373d818bc802c05e82b5449 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20M=C3=A4kel=C3=A4?= Date: Mon, 1 Feb 2021 13:17:17 +0200 Subject: [PATCH 128/150] MDEV-24751: member call on fil_system.temp_space in innodb_shutdown() innodb_shutdown(): Check that fil_system.temp_space is not null before invoking a member function. This regression was caused by the merge commit fa1aef39ebc7d84d24d4e3d2124f982526632ee9 of MDEV-24340 (commit 1eb59c307de163b507efade1fc372d8ff2bb94b7). --- storage/innobase/srv/srv0start.cc | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/storage/innobase/srv/srv0start.cc b/storage/innobase/srv/srv0start.cc index f7ea8c985f6..dc5eee0793b 100644 --- a/storage/innobase/srv/srv0start.cc +++ b/storage/innobase/srv/srv0start.cc @@ -3,7 +3,7 @@ Copyright (c) 1996, 2017, Oracle and/or its affiliates. All rights reserved. Copyright (c) 2008, Google Inc. Copyright (c) 2009, Percona Inc. -Copyright (c) 2013, 2020, MariaDB Corporation. +Copyright (c) 2013, 2021, MariaDB Corporation. Portions of this file contain modifications contributed and copyrighted by Google, Inc. Those modifications are gratefully acknowledged and are described @@ -2656,7 +2656,9 @@ void innodb_shutdown() srv_sys_space.shutdown(); if (srv_tmp_space.get_sanity_check_status()) { - fil_system.temp_space->close(); + if (fil_system.temp_space) { + fil_system.temp_space->close(); + } srv_tmp_space.delete_files(); } srv_tmp_space.shutdown(); From bbbe7e781f1309f1125569fb7eeb3727b0cc6505 Mon Sep 17 00:00:00 2001 From: Oleksandr Byelkin Date: Sun, 31 Jan 2021 21:51:50 +0100 Subject: [PATCH 129/150] sync changes in oracle parser --- sql/sql_yacc_ora.yy | 82 ++++++++++++++++++++++++--------------------- 1 file changed, 43 insertions(+), 39 deletions(-) diff --git a/sql/sql_yacc_ora.yy b/sql/sql_yacc_ora.yy index 503cb7dcf1b..eaeaf2e5e7e 100644 --- a/sql/sql_yacc_ora.yy +++ b/sql/sql_yacc_ora.yy @@ -288,7 +288,7 @@ bool my_yyoverflow(short **a, YYSTYPE **b, size_t *yystacksize); /* We should not introduce any further shift/reduce conflicts. */ -%expect 63 +%expect 83 /* Comments for TOKENS. 
@@ -1258,9 +1258,9 @@ bool my_yyoverflow(short **a, YYSTYPE **b, size_t *yystacksize); %type int_type real_type -%type type_with_opt_collate field_type +%type field_type field_type_all qualified_field_type - sp_param_type_with_opt_collate + sp_param_type sp_param_field_type sp_param_field_type_string field_type_numeric @@ -3044,7 +3044,7 @@ sp_param_name: ; sp_param_name_and_type: - sp_param_name sp_param_type_with_opt_collate + sp_param_name sp_param_type { if (unlikely(Lex->sp_param_fill_definition($$= $1))) MYSQL_YYABORT; @@ -3088,7 +3088,7 @@ sp_pdparams: ; sp_pdparam: - sp_param_name sp_opt_inout sp_param_type_with_opt_collate + sp_param_name sp_opt_inout sp_param_type { $1->mode= $2; if (unlikely(Lex->sp_param_fill_definition($1))) @@ -3273,7 +3273,7 @@ row_field_name: ; row_field_definition: - row_field_name type_with_opt_collate + row_field_name field_type ; row_field_definition_list: @@ -3307,7 +3307,7 @@ sp_decl_idents_init_vars: sp_decl_vars: sp_decl_idents_init_vars - type_with_opt_collate + field_type sp_opt_default { if (unlikely(Lex->sp_variable_declarations_finalize(thd, $1, @@ -6764,19 +6764,26 @@ column_default_expr: } ; +field_type: field_type_all + { + Lex->map_data_type(Lex_ident_sys(), &($$= $1)); + Lex->last_field->set_attributes($$, Lex->charset); + } + ; + qualified_field_type: - field_type + field_type_all { Lex->map_data_type(Lex_ident_sys(), &($$= $1)); } - | sp_decl_ident '.' field_type + | sp_decl_ident '.' field_type_all { if (Lex->map_data_type($1, &($$= $3))) MYSQL_YYABORT; } ; -field_type: +field_type_all: field_type_numeric | field_type_temporal | field_type_string @@ -7319,30 +7326,10 @@ with_or_without_system: ; -type_with_opt_collate: - field_type opt_collate +sp_param_type: + sp_param_field_type { Lex->map_data_type(Lex_ident_sys(), &($$= $1)); - - if ($2) - { - if (unlikely(!(Lex->charset= merge_charset_and_collation(Lex->charset, $2)))) - MYSQL_YYABORT; - } - Lex->last_field->set_attributes($$, Lex->charset); - } - ; - -sp_param_type_with_opt_collate: - sp_param_field_type opt_collate - { - Lex->map_data_type(Lex_ident_sys(), &($$= $1)); - - if ($2) - { - if (unlikely(!(Lex->charset= merge_charset_and_collation(Lex->charset, $2)))) - MYSQL_YYABORT; - } Lex->last_field->set_attributes($$, Lex->charset); } ; @@ -7420,6 +7407,12 @@ charset_or_alias: } ; +collate: COLLATE_SYM collation_name_or_default + { + Lex->charset= $2; + } + ; + opt_binary: /* empty */ { bincmp_collation(NULL, false); } | binary {} @@ -7430,6 +7423,13 @@ binary: | charset_or_alias opt_bin_mod { bincmp_collation($1, $2); } | BINARY { bincmp_collation(NULL, true); } | BINARY charset_or_alias { bincmp_collation($2, true); } + | charset_or_alias collate + { + if (!my_charset_same(Lex->charset, $1)) + my_yyabort_error((ER_COLLATION_CHARSET_MISMATCH, MYF(0), + Lex->charset->name, $1->csname)); + } + | collate { } ; opt_bin_mod: @@ -14730,7 +14730,7 @@ kill: lex->sql_command= SQLCOM_KILL; lex->kill_type= KILL_TYPE_ID; } - kill_type kill_option kill_expr + kill_type kill_option { Lex->kill_signal= (killed_state) ($3 | $4); } @@ -14743,16 +14743,21 @@ kill_type: ; kill_option: - /* empty */ { $$= (int) KILL_CONNECTION; } - | CONNECTION_SYM { $$= (int) KILL_CONNECTION; } - | QUERY_SYM { $$= (int) KILL_QUERY; } - | QUERY_SYM ID_SYM + opt_connection kill_expr { $$= (int) KILL_CONNECTION; } + | QUERY_SYM kill_expr { $$= (int) KILL_QUERY; } + | QUERY_SYM ID_SYM expr { $$= (int) KILL_QUERY; Lex->kill_type= KILL_TYPE_QUERY; + Lex->value_list.push_front($3, thd->mem_root); } ; +opt_connection: + 
/* empty */ { } + | CONNECTION_SYM { } + ; + kill_expr: expr { @@ -14765,7 +14770,6 @@ kill_expr: } ; - shutdown: SHUTDOWN { Lex->sql_command= SQLCOM_SHUTDOWN; } ; @@ -18025,7 +18029,7 @@ sf_return_type: &empty_clex_str, thd->variables.collation_database); } - sp_param_type_with_opt_collate + sp_param_type { if (unlikely(Lex->sphead->fill_field_definition(thd, Lex->last_field))) From a7b6943ee4bd740bb42e7259abbe911273c752e3 Mon Sep 17 00:00:00 2001 From: Sergei Golubchik Date: Sun, 31 Jan 2021 19:42:14 +0100 Subject: [PATCH 130/150] CONNECT: compiler warnings --- storage/connect/array.cpp | 2 ++ storage/connect/colblk.cpp | 3 ++- storage/connect/filamzip.cpp | 4 ++-- storage/connect/libdoc.cpp | 2 +- storage/connect/tabfmt.cpp | 21 ++++++++++++++------- storage/connect/tabvir.cpp | 4 ++-- 6 files changed, 23 insertions(+), 13 deletions(-) diff --git a/storage/connect/array.cpp b/storage/connect/array.cpp index 84a686cc145..3c736941b6f 100644 --- a/storage/connect/array.cpp +++ b/storage/connect/array.cpp @@ -599,10 +599,12 @@ int ARRAY::Convert(PGLOBAL g, int k, PVAL vp) /* Converting STRING to DATE can be done according to date format. */ /*********************************************************************/ if (Type == TYPE_DATE && ovblp->GetType() == TYPE_STRING && vp) + { if (((DTVAL*)Value)->SetFormat(g, vp)) return TYPE_ERROR; else b = true; // Sort the new array on date internal values + } /*********************************************************************/ /* Do the actual conversion. */ diff --git a/storage/connect/colblk.cpp b/storage/connect/colblk.cpp index a9cf43f3d96..e42d9703ad7 100644 --- a/storage/connect/colblk.cpp +++ b/storage/connect/colblk.cpp @@ -79,7 +79,7 @@ COLBLK::COLBLK(PCOL col1, PTDB tdbp) if (trace(2)) htrc(" copying COLBLK %s from %p to %p\n", Name, col1, this); - if (tdbp) + if (tdbp) { // Attach the new column to the table block if (!tdbp->GetColumns()) tdbp->SetColumns(this); @@ -88,6 +88,7 @@ COLBLK::COLBLK(PCOL col1, PTDB tdbp) colp->Next = this; } // endelse + } } // end of COLBLK copy constructor diff --git a/storage/connect/filamzip.cpp b/storage/connect/filamzip.cpp index 8968078e9b1..79599382693 100644 --- a/storage/connect/filamzip.cpp +++ b/storage/connect/filamzip.cpp @@ -147,7 +147,6 @@ static bool ZipFile(PGLOBAL g, ZIPUTIL *zutp, PCSZ fn, PCSZ entry, char *buf) static bool ZipFiles(PGLOBAL g, ZIPUTIL *zutp, PCSZ pat, char *buf) { char filename[_MAX_PATH]; - int rc; /*********************************************************************/ /* pat is a multiple file name with wildcard characters */ @@ -155,6 +154,7 @@ static bool ZipFiles(PGLOBAL g, ZIPUTIL *zutp, PCSZ pat, char *buf) strcpy(filename, pat); #if defined(__WIN__) + int rc; char drive[_MAX_DRIVE], direc[_MAX_DIR]; WIN32_FIND_DATA FileData; HANDLE hSearch; @@ -1207,7 +1207,7 @@ int UZDFAM::Cardinality(PGLOBAL g) return 1; int card = -1; - int len = GetFileLength(g); + GetFileLength(g); card = Records; diff --git a/storage/connect/libdoc.cpp b/storage/connect/libdoc.cpp index 58b0267bd6d..0966477cbfd 100644 --- a/storage/connect/libdoc.cpp +++ b/storage/connect/libdoc.cpp @@ -378,7 +378,7 @@ bool LIBXMLDOC::Initialize(PGLOBAL g, PCSZ entry, bool zipped) if (zipped && InitZip(g, entry)) return true; - int n = xmlKeepBlanksDefault(1); + xmlKeepBlanksDefault(1); return MakeNSlist(g); } // end of Initialize diff --git a/storage/connect/tabfmt.cpp b/storage/connect/tabfmt.cpp index 9a1e43dd798..e592d0016bf 100644 --- a/storage/connect/tabfmt.cpp +++ b/storage/connect/tabfmt.cpp @@ 
-311,12 +311,13 @@ PQRYRES CSVColumns(PGLOBAL g, PCSZ dp, PTOS topt, bool info) } else if (*p == q) { if (phase == 0) { - if (blank) + if (blank) { if (++nerr > mxr) { sprintf(g->Message, MSG(MISPLACED_QUOTE), num_read); goto err; } else goto skip; + } n = 0; phase = digit = 1; @@ -341,12 +342,13 @@ PQRYRES CSVColumns(PGLOBAL g, PCSZ dp, PTOS topt, bool info) goto skip; } else { - if (phase == 2) + if (phase == 2) { if (++nerr > mxr) { sprintf(g->Message, MSG(MISPLACED_QUOTE), num_read); goto err; } else goto skip; + } // isdigit cannot be used here because of debug assert if (!strchr("0123456789", *p)) { @@ -362,12 +364,13 @@ PQRYRES CSVColumns(PGLOBAL g, PCSZ dp, PTOS topt, bool info) blank = 1; } // endif's *p - if (phase == 1) + if (phase == 1) { if (++nerr > mxr) { sprintf(g->Message, MSG(UNBALANCE_QUOTE), num_read); goto err; } else goto skip; + } if (n) { len[i] = MY_MAX(len[i], n); @@ -741,7 +744,7 @@ bool TDBCSV::OpenDB(PGLOBAL g) int i, len; PCSVCOL colp; - if (!Fields) // May have been set in TABFMT::OpenDB + if (!Fields) { // May have been set in TABFMT::OpenDB if (Mode != MODE_UPDATE && Mode != MODE_INSERT) { for (colp = (PCSVCOL)Columns; colp; colp = (PCSVCOL)colp->Next) if (!colp->IsSpecial() && !colp->IsVirtual()) @@ -754,6 +757,7 @@ bool TDBCSV::OpenDB(PGLOBAL g) for (cdp = tdp->GetCols(); cdp; cdp = cdp->GetNext()) if (!cdp->IsSpecial() && !cdp->IsVirtual()) Fields++; + } Offset = (int*)PlugSubAlloc(g, NULL, sizeof(int) * Fields); Fldlen = (int*)PlugSubAlloc(g, NULL, sizeof(int) * Fields); @@ -774,7 +778,7 @@ bool TDBCSV::OpenDB(PGLOBAL g) } // endfor i - if (Field) + if (Field) { // Prepare writing fields if (Mode != MODE_UPDATE) { for (colp = (PCSVCOL)Columns; colp; colp = (PCSVCOL)colp->Next) @@ -797,6 +801,7 @@ bool TDBCSV::OpenDB(PGLOBAL g) Fldlen[i] = len; Fldtyp[i] = IsTypeNum(cdp->GetType()); } // endif cdp + } } // endif Use @@ -1046,7 +1051,7 @@ bool TDBCSV::PrepareWriting(PGLOBAL g) if (i) strcat(To_Line, sep); - if (Field[i]) + if (Field[i]) { if (!strlen(Field[i])) { // Generally null fields are not quoted if (Quoted > 2) @@ -1054,7 +1059,7 @@ bool TDBCSV::PrepareWriting(PGLOBAL g) strcat(strcat(To_Line, qot), qot); } else if (Qot && (strchr(Field[i], Sep) || *Field[i] == Qot - || Quoted > 1 || (Quoted == 1 && !Fldtyp[i]))) + || Quoted > 1 || (Quoted == 1 && !Fldtyp[i]))) { if (strchr(Field[i], Qot)) { // Field contains quotes that must be doubled int j, k = strlen(To_Line), n = strlen(Field[i]); @@ -1072,9 +1077,11 @@ bool TDBCSV::PrepareWriting(PGLOBAL g) To_Line[k] = '\0'; } else strcat(strcat(strcat(To_Line, qot), Field[i]), qot); + } else strcat(To_Line, Field[i]); + } } // endfor i diff --git a/storage/connect/tabvir.cpp b/storage/connect/tabvir.cpp index c78a8f531f6..2fdb7f64744 100644 --- a/storage/connect/tabvir.cpp +++ b/storage/connect/tabvir.cpp @@ -168,13 +168,13 @@ int TDBVIR::TestFilter(PFIL filp, bool nop) } // endswitch op if (!nop) switch (op) { - case OP_LT: l1--; + case OP_LT: l1--; /* fall through */ case OP_LE: limit = l1; break; default: ok = false; } // endswitch op else switch (op) { - case OP_GE: l1--; + case OP_GE: l1--; /* fall through */ case OP_GT: limit = l1; break; default: ok = false; } // endswitch op From 251b52190070095e4c65ffb0ae545d49330a02b2 Mon Sep 17 00:00:00 2001 From: Sergei Golubchik Date: Mon, 1 Feb 2021 13:44:50 +0100 Subject: [PATCH 131/150] main.mysqldump test isn't that big and it is that important to be run every time --- mysql-test/main/mysqldump.result | 6 +----- mysql-test/main/mysqldump.test | 14 
-------------- 2 files changed, 1 insertion(+), 19 deletions(-) diff --git a/mysql-test/main/mysqldump.result b/mysql-test/main/mysqldump.result index d8b24e4f5e7..76105ab0236 100644 --- a/mysql-test/main/mysqldump.result +++ b/mysql-test/main/mysqldump.result @@ -1,14 +1,10 @@ call mtr.add_suppression("@003f.frm' \\(errno: 22\\)"); +call mtr.add_suppression("Unsafe statement written to the binary log using statement format since BINLOG_FORMAT = STATEMENT"); # Bug#37938 Test "mysqldump" lacks various insert statements # Turn off concurrent inserts to avoid random errors # NOTE: We reset the variable back to saved value at the end of test SET @OLD_CONCURRENT_INSERT = @@GLOBAL.CONCURRENT_INSERT; SET @@GLOBAL.CONCURRENT_INSERT = 0; -DROP TABLE IF EXISTS t1, `"t"1`, t1aa, t2, t2aa, t3; -drop database if exists mysqldump_test_db; -drop database if exists db1; -drop database if exists db2; -drop view if exists v1, v2, v3; CREATE TABLE t1(a INT, KEY (a)) KEY_BLOCK_SIZE=1024; INSERT INTO t1 VALUES (1), (2); diff --git a/mysql-test/main/mysqldump.test b/mysql-test/main/mysqldump.test index 7362c0d1aea..3e88ff3bc39 100644 --- a/mysql-test/main/mysqldump.test +++ b/mysql-test/main/mysqldump.test @@ -19,12 +19,7 @@ let collation=utf8_unicode_ci; # There are tables in 'mysql' database of type innodb --source include/have_innodb.inc -# This test is slow on buildbot. ---source include/big_test.inc - -disable_query_log; call mtr.add_suppression("Unsafe statement written to the binary log using statement format since BINLOG_FORMAT = STATEMENT"); -enable_query_log; --echo # Bug#37938 Test "mysqldump" lacks various insert statements --echo # Turn off concurrent inserts to avoid random errors @@ -32,15 +27,6 @@ enable_query_log; SET @OLD_CONCURRENT_INSERT = @@GLOBAL.CONCURRENT_INSERT; SET @@GLOBAL.CONCURRENT_INSERT = 0; - ---disable_warnings -DROP TABLE IF EXISTS t1, `"t"1`, t1aa, t2, t2aa, t3; -drop database if exists mysqldump_test_db; -drop database if exists db1; -drop database if exists db2; -drop view if exists v1, v2, v3; ---enable_warnings - # XML output CREATE TABLE t1(a INT, KEY (a)) KEY_BLOCK_SIZE=1024; From 571294f954779c195763d996c3235ec979b4cdd9 Mon Sep 17 00:00:00 2001 From: Olivier Bertrand Date: Tue, 2 Feb 2021 00:03:07 +0100 Subject: [PATCH 132/150] Fix failed test bson and xml --- storage/connect/tabjson.cpp | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/storage/connect/tabjson.cpp b/storage/connect/tabjson.cpp index 19f721f692b..fb5a64c7d55 100644 --- a/storage/connect/tabjson.cpp +++ b/storage/connect/tabjson.cpp @@ -1024,8 +1024,14 @@ bool TDBJSN::OpenDB(PGLOBAL g) /* Lrecl is Ok. */ /*********************************************************************/ size_t linelen = Lrecl; + MODE mode = Mode; - //To_Line = (char*)PlugSubAlloc(g, NULL, linelen); + // Buffer must be allocated in g->Sarea + Mode = MODE_ANY; + Txfp->AllocateBuffer(g); + Mode = mode; + + //To_Line = (char*)PlugSubAlloc(g, NULL, linelen); //memset(To_Line, 0, linelen); To_Line = Txfp->GetBuf(); xtrc(1, "OpenJSN: R%hd mode=%d To_Line=%p\n", Tdb_No, Mode, To_Line); From 2676c9aad79b66705420922d393a9f498a2a6693 Mon Sep 17 00:00:00 2001 From: Sergei Golubchik Date: Mon, 1 Feb 2021 16:23:49 +0100 Subject: [PATCH 133/150] galera fixes related to THD::LOCK_thd_kill Since 2017 (c2118a08b1) THD::awake() no longer requires LOCK_thd_data. It uses LOCK_thd_kill, and this latter mutex is used to prevent a thread of dying, not LOCK_thd_data as before. 
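As a rough illustration of the convention this change relies on (a sketch only, not code taken from the patch): a caller that wants to wake a victim thread is expected to pin it with LOCK_thd_kill through the wsrep_thd_kill_LOCK/UNLOCK wrappers added below, much as wsrep_abort_transaction() does later in this series. The wrapper and service names come from the diff; the function name wake_victim_sketch and the assumption of a plugin-side compilation unit where <mysql/service_wsrep.h> and THD are visible are illustrative only.

    #include <mysql/service_wsrep.h>   /* declares the wrappers introduced below */

    /* Sketch: pin the victim with LOCK_thd_kill, then wake it.             */
    /* LOCK_thd_data is no longer taken around THD::awake().                */
    static void wake_victim_sketch(THD *victim_thd)
    {
      wsrep_thd_kill_LOCK(victim_thd);    /* victim cannot be destroyed now  */
      wsrep_thd_awake(victim_thd, true);  /* signals KILL_QUERY to the victim */
      wsrep_thd_kill_UNLOCK(victim_thd);  /* victim may be freed again        */
    }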
--- include/mysql/service_wsrep.h | 6 ++++++ include/service_versions.h | 2 +- sql/sql_plugin_services.ic | 4 +++- sql/wsrep_mysqld.cc | 17 ++++++++++++----- 4 files changed, 22 insertions(+), 7 deletions(-) diff --git a/include/mysql/service_wsrep.h b/include/mysql/service_wsrep.h index e9c3b0fa86a..671ef515135 100644 --- a/include/mysql/service_wsrep.h +++ b/include/mysql/service_wsrep.h @@ -119,6 +119,8 @@ extern struct wsrep_service_st { my_bool (*wsrep_thd_is_applier_func)(MYSQL_THD); void (*wsrep_report_bf_lock_wait_func)(MYSQL_THD thd, unsigned long long trx_id); + void (*wsrep_thd_kill_LOCK_func)(THD *thd); + void (*wsrep_thd_kill_UNLOCK_func)(THD *thd); } *wsrep_service; #ifdef MYSQL_DYNAMIC_PLUGIN @@ -143,6 +145,8 @@ extern struct wsrep_service_st { #define wsrep_run_wsrep_commit(T,A) wsrep_service->wsrep_run_wsrep_commit_func(T,A) #define wsrep_thd_LOCK(T) wsrep_service->wsrep_thd_LOCK_func(T) #define wsrep_thd_UNLOCK(T) wsrep_service->wsrep_thd_UNLOCK_func(T) +#define wsrep_thd_kill_LOCK(T) wsrep_service->wsrep_thd_kill_LOCK_func(T) +#define wsrep_thd_kill_UNLOCK(T) wsrep_service->wsrep_thd_kill_UNLOCK_func(T) #define wsrep_thd_awake(T,S) wsrep_service->wsrep_thd_awake_func(T,S) #define wsrep_thd_conflict_state(T,S) wsrep_service->wsrep_thd_conflict_state_func(T,S) #define wsrep_thd_conflict_state_str(T) wsrep_service->wsrep_thd_conflict_state_str_func(T) @@ -226,6 +230,8 @@ void wsrep_lock_rollback(); void wsrep_post_commit(THD* thd, bool all); void wsrep_thd_LOCK(THD *thd); void wsrep_thd_UNLOCK(THD *thd); +void wsrep_thd_kill_LOCK(THD *thd); +void wsrep_thd_kill_UNLOCK(THD *thd); void wsrep_thd_awake(THD *thd, my_bool signal); void wsrep_thd_set_conflict_state(THD *thd, enum wsrep_conflict_state state); bool wsrep_thd_ignore_table(THD *thd); diff --git a/include/service_versions.h b/include/service_versions.h index 6e138fab5a4..09ba702a225 100644 --- a/include/service_versions.h +++ b/include/service_versions.h @@ -41,4 +41,4 @@ #define VERSION_thd_specifics 0x0100 #define VERSION_thd_timezone 0x0100 #define VERSION_thd_wait 0x0100 -#define VERSION_wsrep 0x0202 +#define VERSION_wsrep 0x0203 diff --git a/sql/sql_plugin_services.ic b/sql/sql_plugin_services.ic index a97aaed831a..784ba5c9068 100644 --- a/sql/sql_plugin_services.ic +++ b/sql/sql_plugin_services.ic @@ -187,7 +187,9 @@ static struct wsrep_service_st wsrep_handler = { wsrep_unlock_rollback, wsrep_set_data_home_dir, wsrep_thd_is_applier, - wsrep_report_bf_lock_wait + wsrep_report_bf_lock_wait, + wsrep_thd_kill_LOCK, + wsrep_thd_kill_UNLOCK }; static struct thd_specifics_service_st thd_specifics_handler= diff --git a/sql/wsrep_mysqld.cc b/sql/wsrep_mysqld.cc index de234770788..f57be3a8611 100644 --- a/sql/wsrep_mysqld.cc +++ b/sql/wsrep_mysqld.cc @@ -2383,12 +2383,7 @@ void wsrep_close_client_connections(my_bool wait_to_end, THD *except_caller_thd) /* instead of wsrep_close_thread() we do now soft kill by THD::awake */ - mysql_mutex_lock(&tmp->LOCK_thd_data); - tmp->awake(KILL_CONNECTION); - - mysql_mutex_unlock(&tmp->LOCK_thd_data); - } mysql_mutex_unlock(&LOCK_thread_count); @@ -2676,6 +2671,18 @@ void wsrep_thd_UNLOCK(THD *thd) } +void wsrep_thd_kill_LOCK(THD *thd) +{ + mysql_mutex_lock(&thd->LOCK_thd_kill); +} + + +void wsrep_thd_kill_UNLOCK(THD *thd) +{ + mysql_mutex_unlock(&thd->LOCK_thd_kill); +} + + extern "C" time_t wsrep_thd_query_start(THD *thd) { return thd->query_start(); From b3cecb7bfc88a72661e61b3c8ce3e6101ac4e9b5 Mon Sep 17 00:00:00 2001 From: Oleksandr Byelkin Date: Tue, 2 Feb 2021 10:37:54 +0100 
Subject: [PATCH 134/150] Revert "Fix of warnings on aarch64 like:" Fixed by the author in other way (char -> short) This reverts commit 496f7090a825ac7ee54a6b5f9700e5f261e4bce0. --- storage/connect/bson.cpp | 6 +++--- storage/connect/bsonudf.cpp | 4 ++-- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/storage/connect/bson.cpp b/storage/connect/bson.cpp index fa24300f91f..7665d8520ab 100644 --- a/storage/connect/bson.cpp +++ b/storage/connect/bson.cpp @@ -769,7 +769,7 @@ bool BDOC::SerializeValue(PBVAL jvp, bool b) case TYPE_DBL: sprintf(buf, "%.*lf", jvp->Nd, *(double*)MakePtr(Base, jvp->To_Val)); return jp->WriteStr(buf); - case (char)TYPE_NULL: + case TYPE_NULL: return jp->WriteStr("null"); case TYPE_JVAL: return SerializeValue(MVP(jvp->To_Val)); @@ -1554,7 +1554,7 @@ PSZ BJSON::GetString(PBVAL vp, char* buff) case TYPE_BOOL: p = (PSZ)((vlp->B) ? "true" : "false"); break; - case (char)TYPE_NULL: + case TYPE_NULL: p = (PSZ)"null"; break; default: @@ -1771,7 +1771,7 @@ bool BJSON::IsValueNull(PBVAL vlp) bool b; switch (vlp->Type) { - case (char)TYPE_NULL: + case TYPE_NULL: b = true; break; case TYPE_JOB: diff --git a/storage/connect/bsonudf.cpp b/storage/connect/bsonudf.cpp index a71cd2f0229..30cb69d1775 100644 --- a/storage/connect/bsonudf.cpp +++ b/storage/connect/bsonudf.cpp @@ -526,7 +526,7 @@ void BJNX::SetJsonValue(PGLOBAL g, PVAL vp, PBVAL vlp) case TYPE_JOB: vp->SetValue_psz(GetObjectText(g, vlp, NULL)); break; - case (char)TYPE_NULL: + case TYPE_NULL: vp->SetNull(true); default: vp->Reset(); @@ -1444,7 +1444,7 @@ my_bool BJNX::CompareValues(PGLOBAL g, PBVAL v1, PBVAL v2) b = (v1->B == v2->B); break; - case (char)TYPE_NULL: + case TYPE_NULL: b = (v2->Type == TYPE_NULL); break; default: From 37e24970cbbcca4102094d177eee570b3338262a Mon Sep 17 00:00:00 2001 From: Sergei Golubchik Date: Tue, 2 Feb 2021 10:02:48 +0100 Subject: [PATCH 135/150] merge --- sql/wsrep_mysqld.cc | 2 +- storage/innobase/handler/ha_innodb.cc | 8 ++++++-- 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/sql/wsrep_mysqld.cc b/sql/wsrep_mysqld.cc index f57be3a8611..67077741d9b 100644 --- a/sql/wsrep_mysqld.cc +++ b/sql/wsrep_mysqld.cc @@ -2747,7 +2747,7 @@ extern "C" void wsrep_thd_awake(THD *thd, my_bool signal) { if (signal) { - thd->awake(KILL_QUERY); + thd->awake_no_mutex(KILL_QUERY); } else { diff --git a/storage/innobase/handler/ha_innodb.cc b/storage/innobase/handler/ha_innodb.cc index 01f5ad7173a..81a3f5c0da3 100644 --- a/storage/innobase/handler/ha_innodb.cc +++ b/storage/innobase/handler/ha_innodb.cc @@ -18767,15 +18767,17 @@ static void bg_wsrep_kill_trx( DBUG_ENTER("bg_wsrep_kill_trx"); if (thd) { + wsrep_thd_LOCK(thd); victim_trx = thd_to_trx(thd); lock_mutex_enter(); trx_mutex_enter(victim_trx); + wsrep_thd_UNLOCK(thd); if (victim_trx->id != arg->trx_id) { trx_mutex_exit(victim_trx); lock_mutex_exit(); - wsrep_thd_UNLOCK(thd); victim_trx = NULL; + wsrep_thd_kill_UNLOCK(thd); } } @@ -18944,7 +18946,7 @@ ret_unlock: lock_mutex_exit(); if (awake) wsrep_thd_awake(thd, arg->signal); - wsrep_thd_UNLOCK(thd); + wsrep_thd_kill_UNLOCK(thd); ret: free(arg); @@ -19021,10 +19023,12 @@ wsrep_abort_transaction( DBUG_VOID_RETURN; } else { WSREP_DEBUG("victim does not have transaction"); + wsrep_thd_kill_LOCK(victim_thd); wsrep_thd_LOCK(victim_thd); wsrep_thd_set_conflict_state(victim_thd, MUST_ABORT); wsrep_thd_UNLOCK(victim_thd); wsrep_thd_awake(victim_thd, signal); + wsrep_thd_kill_UNLOCK(victim_thd); } DBUG_VOID_RETURN; From 6212cf86a246326adae6a1b56df0ebbdf45a305f Mon Sep 17 00:00:00 
2001 From: Sergei Golubchik Date: Tue, 2 Feb 2021 14:08:07 +0100 Subject: [PATCH 136/150] galera fixes related to THD::LOCK_thd_kill win --- sql/wsrep_dummy.cc | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/sql/wsrep_dummy.cc b/sql/wsrep_dummy.cc index 2f12b089939..90069b72ce7 100644 --- a/sql/wsrep_dummy.cc +++ b/sql/wsrep_dummy.cc @@ -92,6 +92,12 @@ void wsrep_thd_LOCK(THD *) void wsrep_thd_UNLOCK(THD *) { } +void wsrep_thd_kill_LOCK(THD *) +{ } + +void wsrep_thd_kill_UNLOCK(THD *) +{ } + void wsrep_thd_awake(THD *, my_bool) { } From 716b0b44f8ec169de5b89e24c46434f64d58de90 Mon Sep 17 00:00:00 2001 From: Oleksandr Byelkin Date: Tue, 2 Feb 2021 10:49:13 +0100 Subject: [PATCH 137/150] Fix compiler warnings of the new connect engine. --- storage/connect/bsonudf.cpp | 1 + 1 file changed, 1 insertion(+) diff --git a/storage/connect/bsonudf.cpp b/storage/connect/bsonudf.cpp index 30cb69d1775..18382c7a273 100644 --- a/storage/connect/bsonudf.cpp +++ b/storage/connect/bsonudf.cpp @@ -31,6 +31,7 @@ int JsonDefPrec = -1; int GetDefaultPrec(void); int IsArgJson(UDF_ARGS* args, uint i); void SetChanged(PBSON bsp); +int GetJsonDefPrec(void); static PBSON BbinAlloc(PGLOBAL g, ulong len, PBVAL jsp); From 87bf594bc505cef0cc5c3aa436bb134cc8810e86 Mon Sep 17 00:00:00 2001 From: Oleksandr Byelkin Date: Wed, 3 Feb 2021 10:44:44 +0100 Subject: [PATCH 138/150] Fix of random crashes of connect engine (probably depend on addresses used)- --- storage/connect/plugutil.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/storage/connect/plugutil.cpp b/storage/connect/plugutil.cpp index 389613351bb..9e1f006d605 100644 --- a/storage/connect/plugutil.cpp +++ b/storage/connect/plugutil.cpp @@ -628,7 +628,7 @@ size_t MakeOff(void* memp, void* ptr) DoThrow(999); } // endif ptr #endif // _DEBUG || DEVELOPMENT - return (size_t)((char*)ptr - (size_t)memp); + return (size_t)(((char*)ptr) - ((char*)memp)); } else return 0; From c04ae0d365f4e65bf6c0ccc01c25d597e269d47e Mon Sep 17 00:00:00 2001 From: Oleksandr Byelkin Date: Wed, 3 Feb 2021 15:35:32 +0100 Subject: [PATCH 139/150] Fix of crashes of connect engine. Use size_t everywhere and remove suspicious expression. --- storage/connect/bson.cpp | 32 ++++++++++++++++---------------- storage/connect/bson.h | 14 +++++++------- storage/connect/bsonudf.cpp | 2 +- 3 files changed, 24 insertions(+), 24 deletions(-) diff --git a/storage/connect/bson.cpp b/storage/connect/bson.cpp index 7665d8520ab..3c33551cb68 100644 --- a/storage/connect/bson.cpp +++ b/storage/connect/bson.cpp @@ -82,7 +82,7 @@ BDOC::BDOC(PGLOBAL G) : BJSON(G, NULL) /***********************************************************************/ PBVAL BDOC::ParseJson(PGLOBAL g, char* js, size_t lng) { - int i; + size_t i; bool b = false, ptyp = (bool *)pty; PBVAL bvp = NULL; @@ -185,7 +185,7 @@ PBVAL BDOC::ParseJson(PGLOBAL g, char* js, size_t lng) /***********************************************************************/ /* Parse several items as being in an array. */ /***********************************************************************/ -OFFSET BDOC::ParseAsArray(int& i) { +OFFSET BDOC::ParseAsArray(size_t& i) { if (pty[0] && (!pretty || pretty > 2)) { OFFSET jsp; @@ -202,7 +202,7 @@ OFFSET BDOC::ParseAsArray(int& i) { /***********************************************************************/ /* Parse a JSON Array. 
*/ /***********************************************************************/ -OFFSET BDOC::ParseArray(int& i) +OFFSET BDOC::ParseArray(size_t& i) { int level = 0; bool b = (!i); @@ -214,7 +214,7 @@ OFFSET BDOC::ParseArray(int& i) switch (s[i]) { case ',': if (level < 2) { - sprintf(G->Message, "Unexpected ',' near %.*s", ARGS); + sprintf(G->Message, "Unexpected ',' near %.*s", (int) ARGS); throw 1; } else level = 1; @@ -222,7 +222,7 @@ OFFSET BDOC::ParseArray(int& i) break; case ']': if (level == 1) { - sprintf(G->Message, "Unexpected ',]' near %.*s", ARGS); + sprintf(G->Message, "Unexpected ',]' near %.*s", (int) ARGS); throw 1; } // endif level @@ -236,7 +236,7 @@ OFFSET BDOC::ParseArray(int& i) break; default: if (level == 2) { - sprintf(G->Message, "Unexpected value near %.*s", ARGS); + sprintf(G->Message, "Unexpected value near %.*s", (int) ARGS); throw 1; } else if (lastvlp) { vlp = ParseValue(i, NewVal()); @@ -260,7 +260,7 @@ OFFSET BDOC::ParseArray(int& i) /***********************************************************************/ /* Parse a JSON Object. */ /***********************************************************************/ -OFFSET BDOC::ParseObject(int& i) +OFFSET BDOC::ParseObject(size_t& i) { OFFSET key; int level = 0; @@ -283,7 +283,7 @@ OFFSET BDOC::ParseObject(int& i) level = 2; } else { - sprintf(G->Message, "misplaced string near %.*s", ARGS); + sprintf(G->Message, "misplaced string near %.*s", (int) ARGS); throw 2; } // endif level @@ -293,14 +293,14 @@ OFFSET BDOC::ParseObject(int& i) ParseValue(++i, GetVlp(lastbpp)); level = 3; } else { - sprintf(G->Message, "Unexpected ':' near %.*s", ARGS); + sprintf(G->Message, "Unexpected ':' near %.*s", (int) ARGS); throw 2; } // endif level break; case ',': if (level < 3) { - sprintf(G->Message, "Unexpected ',' near %.*s", ARGS); + sprintf(G->Message, "Unexpected ',' near %.*s", (int) ARGS); throw 2; } else level = 1; @@ -308,7 +308,7 @@ OFFSET BDOC::ParseObject(int& i) break; case '}': if (!(level == 0 || level == 3)) { - sprintf(G->Message, "Unexpected '}' near %.*s", ARGS); + sprintf(G->Message, "Unexpected '}' near %.*s", (int) ARGS); throw 2; } // endif level @@ -321,7 +321,7 @@ OFFSET BDOC::ParseObject(int& i) break; default: sprintf(G->Message, "Unexpected character '%c' near %.*s", - s[i], ARGS); + s[i], (int) ARGS); throw 2; }; // endswitch s[i] @@ -332,7 +332,7 @@ OFFSET BDOC::ParseObject(int& i) /***********************************************************************/ /* Parse a JSON Value. */ /***********************************************************************/ -PBVAL BDOC::ParseValue(int& i, PBVAL bvp) +PBVAL BDOC::ParseValue(size_t& i, PBVAL bvp) { for (; i < len; i++) switch (s[i]) { @@ -398,14 +398,14 @@ suite: return bvp; err: - sprintf(G->Message, "Unexpected character '%c' near %.*s", s[i], ARGS); + sprintf(G->Message, "Unexpected character '%c' near %.*s", s[i], (int) ARGS); throw 3; } // end of ParseValue /***********************************************************************/ /* Unescape and parse a JSON string. */ /***********************************************************************/ -OFFSET BDOC::ParseString(int& i) +OFFSET BDOC::ParseString(size_t& i) { uchar* p; int n = 0; @@ -492,7 +492,7 @@ throw("Unexpected EOF in String"); /***********************************************************************/ /* Parse a JSON numeric value. 
*/ /***********************************************************************/ -void BDOC::ParseNumeric(int& i, PBVAL vlp) +void BDOC::ParseNumeric(size_t& i, PBVAL vlp) { char buf[50]; int n = 0; diff --git a/storage/connect/bson.h b/storage/connect/bson.h index d13ded72eb1..acc36e8e0ed 100644 --- a/storage/connect/bson.h +++ b/storage/connect/bson.h @@ -184,12 +184,12 @@ public: PSZ Serialize(PGLOBAL g, PBVAL bvp, char* fn, int pretty); protected: - OFFSET ParseArray(int& i); - OFFSET ParseObject(int& i); - PBVAL ParseValue(int& i, PBVAL bvp); - OFFSET ParseString(int& i); - void ParseNumeric(int& i, PBVAL bvp); - OFFSET ParseAsArray(int& i); + OFFSET ParseArray(size_t& i); + OFFSET ParseObject(size_t& i); + PBVAL ParseValue(size_t& i, PBVAL bvp); + OFFSET ParseString(size_t& i); + void ParseNumeric(size_t& i, PBVAL bvp); + OFFSET ParseAsArray(size_t& i); bool SerializeArray(OFFSET arp, bool b); bool SerializeObject(OFFSET obp); bool SerializeValue(PBVAL vp, bool b = false); @@ -197,7 +197,7 @@ protected: // Members used when parsing and serializing JOUT* jp; // Used with serialize char* s; // The Json string to parse - int len; // The Json string length + size_t len; // The Json string length int pretty; // The pretty style of the file to parse bool pty[3]; // Used to guess what pretty is bool comma; // True if Pretty = 1 diff --git a/storage/connect/bsonudf.cpp b/storage/connect/bsonudf.cpp index 18382c7a273..f377e578399 100644 --- a/storage/connect/bsonudf.cpp +++ b/storage/connect/bsonudf.cpp @@ -1670,7 +1670,7 @@ PBVAL BJNX::ParseJsonFile(PGLOBAL g, char *fn, int& pty, size_t& len) len = (size_t)mm.lenL; if (mm.lenH) - len += ((size_t)mm.lenH * 0x000000001LL); + len += mm.lenH; memory = (char *)mm.memory; From dc31627c2d3100ecb45ef71d8040fedff202a2f7 Mon Sep 17 00:00:00 2001 From: Olivier Bertrand Date: Fri, 5 Feb 2021 11:24:05 +0100 Subject: [PATCH 140/150] Fix of connect engine crashes --- storage/connect/bsonudf.cpp | 2 ++ 1 file changed, 2 insertions(+) diff --git a/storage/connect/bsonudf.cpp b/storage/connect/bsonudf.cpp index f377e578399..0ef2f8ca9e6 100644 --- a/storage/connect/bsonudf.cpp +++ b/storage/connect/bsonudf.cpp @@ -3710,6 +3710,7 @@ char *bson_get_item(UDF_INIT *initid, UDF_ARGS *args, char *result, PUSH_WARNING("CheckMemory error"); goto fin; } else { + bnx.Reset(); jvp = bnx.MakeValue(args, 0, true); if (g->Mrr) { // First argument is a constant @@ -4055,6 +4056,7 @@ double bsonget_real(UDF_INIT *initid, UDF_ARGS *args, *is_null = 1; return 0.0; } else { + bnx.Reset(); jvp = bnx.MakeValue(args, 0); if ((p = bnx.GetString(jvp))) { From e7d9c1d498d0b86ef773f21a95f60af26b2dce75 Mon Sep 17 00:00:00 2001 From: Olivier Bertrand Date: Fri, 5 Feb 2021 18:33:36 +0100 Subject: [PATCH 141/150] Fix connect engine ppc64 fail --- storage/connect/bsonudf.cpp | 2 +- storage/connect/mysql-test/connect/r/bson_udf.result | 12 ++++++------ 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/storage/connect/bsonudf.cpp b/storage/connect/bsonudf.cpp index 0ef2f8ca9e6..29fe0a6bf22 100644 --- a/storage/connect/bsonudf.cpp +++ b/storage/connect/bsonudf.cpp @@ -3022,7 +3022,7 @@ void bson_object_grp_add(UDF_INIT *initid, UDF_ARGS *args, char*, char*) PBVAL bop = (PBVAL)g->Activityp; if (g->N-- > 0) - bxp->SetKeyValue(bop, bxp->MakeValue(args, 0), MakePSZ(g, args, 1)); + bxp->SetKeyValue(bop, bxp->MakeValue(args, 1), MakePSZ(g, args, 0)); } // end of bson_object_grp_add diff --git a/storage/connect/mysql-test/connect/r/bson_udf.result 
b/storage/connect/mysql-test/connect/r/bson_udf.result index 4ec1f0c87fd..fef55f7d3d9 100644 --- a/storage/connect/mysql-test/connect/r/bson_udf.result +++ b/storage/connect/mysql-test/connect/r/bson_udf.result @@ -280,13 +280,13 @@ SELECT Bson_Object_Grp(SALARY) FROM t3; ERROR HY000: Can't initialize function 'bson_object_grp'; This function requires 2 arguments (key, value) SELECT Bson_Object_Grp(NAME, SALARY) FROM t3; Bson_Object_Grp(NAME, SALARY) -{"":"MARTIN","ffffp@":"KITTY"} +{"BANCROFT":9600.00,"SMITH":9000.00,"MERCHANT":8700.00,"FUNNIGUY":8500.00,"BUGHAPPY":8500.00,"BIGHEAD":8000.00,"SHRINKY":7500.00,"WALTER":7400.00,"FODDERMAN":7000.00,"TONGHO":6800.00,"SHORTSIGHT":5500.00,"MESSIFUL":5000.50,"HONEY":4900.00,"GOOSEPEN":4700.00,"CHERRY":4500.00,"MONAPENNY":3800.00,"KITTY":3000.45,"PLUMHEAD":2800.00,"STRONG":23000.00,"BULLOZER":14800.00,"WERTHER":14500.00,"QUINN":14000.00,"ORELLY":13400.00,"BIGHORN":11000.00,"BROWNY":10500.00,"WHEELFOR":10030.00,"MARTIN":10000.00} SELECT Bson_Make_Object(DEPARTMENT, Bson_Object_Grp(NAME, SALARY) "Json_SALARIES") FROM t3 GROUP BY DEPARTMENT; Bson_Make_Object(DEPARTMENT, Bson_Object_Grp(NAME, SALARY) "Json_SALARIES") -{"DEPARTMENT":"0021","SALARIES":{"":"SHORTSIGHT"}} -{"DEPARTMENT":"0318","SALARIES":{"":"WHEELFOR"}} -{"DEPARTMENT":"0319","SALARIES":{"":"GOOSEPEN","ffffp@":"KITTY"}} -{"DEPARTMENT":"2452","SALARIES":{"":"CHERRY"}} +{"DEPARTMENT":"0021","SALARIES":{"STRONG":23000.00,"SHORTSIGHT":5500.00}} +{"DEPARTMENT":"0318","SALARIES":{"BANCROFT":9600.00,"PLUMHEAD":2800.00,"HONEY":4900.00,"TONGHO":6800.00,"WALTER":7400.00,"SHRINKY":7500.00,"WERTHER":14500.00,"MERCHANT":8700.00,"WHEELFOR":10030.00}} +{"DEPARTMENT":"0319","SALARIES":{"BULLOZER":14800.00,"QUINN":14000.00,"BROWNY":10500.00,"KITTY":3000.45,"MONAPENNY":3800.00,"MARTIN":10000.00,"FUNNIGUY":8500.00,"BUGHAPPY":8500.00,"FODDERMAN":7000.00,"MESSIFUL":5000.50,"GOOSEPEN":4700.00}} +{"DEPARTMENT":"2452","SALARIES":{"BIGHEAD":8000.00,"ORELLY":13400.00,"BIGHORN":11000.00,"SMITH":9000.00,"CHERRY":4500.00}} SELECT Bson_Array_Grp(NAME) FROM t3; Bson_Array_Grp(NAME) ["BANCROFT","SMITH","MERCHANT","FUNNIGUY","BUGHAPPY","BIGHEAD","SHRINKY","WALTER","FODDERMAN","TONGHO","SHORTSIGHT","MESSIFUL","HONEY","GOOSEPEN","CHERRY","MONAPENNY","KITTY","PLUMHEAD","STRONG","BULLOZER","WERTHER","QUINN","ORELLY","BIGHORN","BROWNY","WHEELFOR","MARTIN"] @@ -303,7 +303,7 @@ Bson_Object_Key(name, title) {"WHEELFOR":"SALESMAN"} SELECT Bson_Object_Grp(name, title) FROM t3 WHERE DEPARTMENT = 318; Bson_Object_Grp(name, title) -{"SALESMAN":"WHEELFOR","ADMINISTRATOR":"SHRINKY","ENGINEER":"TONGHO","SECRETARY":"HONEY","TYPIST":"PLUMHEAD","DIRECTOR":"WERTHER"} +{"BANCROFT":"SALESMAN","MERCHANT":"SALESMAN","SHRINKY":"ADMINISTRATOR","WALTER":"ENGINEER","TONGHO":"ENGINEER","HONEY":"SECRETARY","PLUMHEAD":"TYPIST","WERTHER":"DIRECTOR","WHEELFOR":"SALESMAN"} # # Test value getting UDF's # From 691f93d6d17603f11a0c90f64e94b7ce9d187db4 Mon Sep 17 00:00:00 2001 From: Elena Stepanova Date: Sat, 6 Feb 2021 21:02:44 +0200 Subject: [PATCH 142/150] List of unstable tests for 10.3.28 release Test code modifications and new failures from buildbot registered only for the main suite. 
The rest was updated partially, based on the status of existing JIRA items --- mysql-test/unstable-tests | 281 ++++++++++++++------------------------ 1 file changed, 101 insertions(+), 180 deletions(-) diff --git a/mysql-test/unstable-tests b/mysql-test/unstable-tests index 56ef121cff0..55e1db5a5af 100644 --- a/mysql-test/unstable-tests +++ b/mysql-test/unstable-tests @@ -23,150 +23,144 @@ # ############################################################################## # -# Based on 10.3 794f66513967891520ec432123dcff8270871b93 +# Based on bb-10.3-release 6f93df1c2 (Merge branch '10.2' into 10.3) +# for main suite changes and failures, and +# 10.3 794f66513967891520ec432123dcff8270871b93 +# for the rest -main.alter_table : Modified in 10.3.26 main.alter_table_trans : MDEV-12084 - timeout main.analyze_stmt_slow_query_log : MDEV-12237 - Wrong result -main.aria_icp_debug : Added in 10.3.26 main.auth_named_pipe : MDEV-14724 - System error 2 -main.blackhole : Modified in 10.3.26 -main.bootstrap_innodb : Added in 10.3.26 +main.auto_increment_ranges_innodb : Modified in 10.3.28 main.connect : MDEV-17282 - Wrong result main.connect2 : MDEV-13885 - Server crash -main.count_distinct2 : MDEV-11768 - timeout +main.create : Modified in 10.3.28 main.create_delayed : MDEV-10605 - failed with timeout main.create_drop_event : MDEV-16271 - Wrong result -main.ctype_filename : Modified in 10.3.26 +main.cte_nonrecursive : Modified in 10.3.28 +main.cte_nonrecursive_not_embedded : Added in 10.3.28 +main.cte_recursive : Modified in 10.3.28 main.ctype_ucs : MDEV-17681 - Data too long for column main.ctype_upgrade : MDEV-16945 - Error upon mysql_upgrade main.ctype_utf16 : MDEV-10675: timeout or extra warnings main.ctype_utf16le : MDEV-10675: timeout or extra warnings -main.ctype_utf8 : Modified in 10.3.26 -main.ctype_utf8mb4_innodb : MDEV-17744 - Timeout; MDEV-18567 - ASAN use-after-poison +main.ctype_utf8mb4 : Modified in 10.3.28 +main.ctype_utf8mb4_heap : Include file modified in 10.3.28 +main.ctype_utf8mb4_innodb : MDEV-17744 - Timeout; MDEV-18567 - ASAN use-after-poison; include file modified in 10.3.28 +main.ctype_utf8mb4_myisam : Include file modified in 10.3.28 main.debug_sync : MDEV-10607 - internal error -main.derived_cond_pushdown : MDEV-20532 - Floating point differences -main.derived_opt : MDEV-11768 - timeout +main.derived_cond_pushdown : MDEV-20532 - Floating point differences; modified in 10.3.28 main.dirty_close : MDEV-19368 - mysqltest failed but provided no output main.distinct : MDEV-14194 - Crash main.drop_bad_db_type : MDEV-15676 - Wrong result main.dyncol : MDEV-19455 - Extra warning +main.empty_string_literal : Modified in 10.3.28 main.events_2 : MDEV-13277 - Crash main.events_bugs : MDEV-12892 - Crash main.events_restart : MDEV-12236 - Server shutdown problem main.events_slowlog : MDEV-12821 - Wrong result -main.fast_prefix_index_fetch_innodb : Modified in 10.3.26 main.flush : MDEV-19368 - mysqltest failed but provided no output -main.func_gconcat : MDEV-21379 - Valgrind warnings -main.func_json : Modified in 10.3.26 -main.func_math : MDEV-20532 - Floating point differences; modified in 10.3.26 -main.func_test : Modified in 10.3.26 +main.func_gconcat : MDEV-21379 - Valgrind warnings; modified in 10.3.28 +main.func_like : Modified in 10.3.28 +main.func_math : MDEV-20532 - Floating point differences main.gis : MDEV-13411 - wrong result on P8 +main.gis-json : Modified in 10.3.28 main.gis_notembedded : MDEV-21264 - Wrong result with different default charset -main.grant : Modified in 10.3.26 
-main.grant5 : Modified in 10.3.26 +main.group_by : Modified in 10.3.28 main.host_cache_size_functionality : MDEV-10606 - sporadic failure on shutdown main.index_intersect_innodb : MDEV-10643 - failed with timeout main.index_merge_innodb : MDEV-7142 - Plan mismatch -main.information_schema : Modified in 10.3.26 -main.innodb_ext_key : Modified in 10.3.26 -main.innodb_icp_debug : Added in 10.3.26 -main.invisible_field : Modified in 10.3.26 +main.information_schema : Modified in 10.3.28 +main.innodb_mrr_cpk : MDEV-24737 - Server crash main.invisible_field_grant_completely : MDEV-22254 - Syscall param write points to uninitialised bytes main.join_cache : MDEV-17743 - Bad address from storage engine MyISAM -main.kill : Modified in 10.3.26 +main.kill : MDEV-24801 - Wrong errno on reap; modified in 10.3.28 main.kill-2 : MDEV-13257 - Wrong result main.kill_processlist-6619 : MDEV-10793 - Wrong result -main.limit_rows_examined : Modified in 10.3.26 main.loaddata : MDEV-19368 - mysqltest failed but provided no output main.locale : MDEV-20521 - Missing warning -main.lock_view : Added in 10.3.26 +main.lock_tables_lost_commit : MDEV-24624 - Timeout +main.lock_view : Modified in 10.3.28 main.log_slow : MDEV-13263 - Wrong result -main.log_tables : Modified in 10.3.26 main.log_tables-big : MDEV-13408 - wrong result main.mdev-504 : MDEV-15171 - warning main.mdev375 : MDEV-10607 - sporadic "can't connect" main.merge : MDEV-10607 - sporadic "can't connect" -main.multi_update_big : Modified in 10.3.26 -main.myisam_icp_debug : Added in 10.3.26 +main.myisam : Modified in 10.3.28 main.mysql_client_test : MDEV-19369 - error: 5888, status: 23, errno: 2 main.mysql_client_test_comp : MDEV-16641 - Error in exec main.mysql_client_test_nonblock : CONC-208 - Error on Power; MDEV-15096 - exec failed -main.mysql_upgrade : Modified in 10.3.26 +main.mysql_upgrade : Modified in 10.3.28 main.mysql_upgrade_noengine : MDEV-14355 - Wrong result main.mysql_upgrade_view : MDEV-23392 - Wrong result -main.mysqlbinlog_row_minimal : Modified in 10.3.26 -main.mysqld--help : Modified in 10.3.26 main.mysqld_option_err : MDEV-21236 - Wrong error; MDEV-21571 - Crash on bootstrap -main.mysqldump : MDEV-14800 - Stack smashing detected; modified in 10.3.26 -main.mysqlhotcopy_myisam : MDEV-10995 - Hang on debug +main.mysqldump : Modified in 10.3.28 +main.mysqldump-system : Added in 10.3.28 main.mysqlslap : MDEV-11801 - timeout main.mysqltest : MDEV-13887 - Wrong result -main.named_pipe : Modified in 10.3.26 main.old-mode : MDEV-19373 - Wrong result main.openssl_6975 : MDEV-17184 - Failures with OpenSSL 1.1.1 -main.order_by : Modified in 10.3.26 +main.order_by : Modified in 10.3.28 main.order_by_optimizer_innodb : MDEV-10683 - Wrong result -main.parser : Modified in 10.3.26 -main.partition : Modified in 10.3.26 +main.parser : Modified in 10.3.28 main.partition_debug_sync : MDEV-15669 - Deadlock found when trying to get lock main.partition_innodb : MDEV-23427 - Server crash main.partition_innodb_plugin : MDEV-12901 - Valgrind warnings main.partition_innodb_semi_consistent : MDEV-19411 - Failed to start mysqld.1 -main.plugin_innodb : Modified in 10.3.26 -main.pool_of_threads : MDEV-18135 - SSL error: key too small; modified in 10.3.26 -main.precedence : Added in 10.3.26 -main.precedence_bugs : Added in 10.3.26 -main.processlist_notembedded : Modified in 10.3.26 +main.pool_of_threads : MDEV-18135 - SSL error: key too small +main.precedence : Modified in 10.3.28 +main.processlist_notembedded : MDEV-23752 - Not explainable command; modified in 
10.3.28 main.ps : MDEV-11017 - Sporadic wrong Prepared_stmt_count main.ps_error : MDEV-24079 - Memory not freed -main.query_cache : MDEV-16180 - Wrong result +main.ps_show_log : Added in 10.3.28 +main.query_cache : MDEV-16180 - Wrong result; modified in 10.3.28 main.query_cache_debug : MDEV-15281 - Query cache is disabled -main.range : Modified in 10.3.26 +main.range : Modified in 10.3.28 main.range_innodb : MDEV-23371 - Server crash main.range_vs_index_merge_innodb : MDEV-15283 - Server has gone away main.select : MDEV-20532 - Floating point differences main.select_jcl6 : MDEV-20532 - Floating point differences main.select_pkeycache : MDEV-20532 - Floating point differences main.set_statement : MDEV-13183 - Wrong result -main.set_statement_notembedded : MDEV-19414 - Wrong result; modified in 10.3.26 +main.set_statement_notembedded : MDEV-19414 - Wrong result main.shm : MDEV-12727 - Mismatch, ERROR 2013 main.show_explain : MDEV-10674 - Wrong result code -main.sp : MDEV-7866 - Mismatch +main.skip_grants : Modified in 10.3.28 +main.sp : MDEV-7866 - Mismatch; modified in 10.3.28 main.sp-security : MDEV-10607 - sporadic "can't connect" +main.sp-ucs2 : Modified in 10.3.28 main.sp_notembedded : MDEV-10607 - internal error main.ssl : MDEV-17184 - Failures with OpenSSL 1.1.1 main.ssl_ca : MDEV-10895 - SSL connection error on Power main.ssl_cipher : MDEV-17184 - Failures with OpenSSL 1.1.1 main.ssl_timeout : MDEV-11244 - Crash +main.stat_tables : Modified in 10.3.28 main.stat_tables_par_innodb : MDEV-14155 - Wrong rounding main.status : MDEV-13255 - Wrong result main.subselect : MDEV-20551 - Valgrind failure -main.subselect4 : Modified in 10.3.26 -main.subselect_innodb : MDEV-10614 - Wrong result; modified in 10.3.26 -main.sum_distinct-big : Modified in 10.3.26 +main.subselect4 : Modified in 10.3.28 +main.subselect_innodb : MDEV-10614 - Wrong result +main.table_value_constr : Modified in 10.3.28 main.tc_heuristic_recover : MDEV-14189 - Wrong result -main.temp_table_symlink : MDEV-24058 - Wrong error code; added in 10.3.26 +main.temp_table_symlink : MDEV-24058 - Wrong error code main.type_blob : MDEV-15195 - Wrong result -main.type_datetime : Modified in 10.3.26 main.type_datetime_hires : MDEV-10687 - Timeout main.type_float : MDEV-20532 - Floating point differences -main.type_newdecimal : MDEV-20532 - Floating point differences; modified in 10.3.26 +main.type_newdecimal : MDEV-20532 - Floating point differences main.type_temporal_innodb : MDEV-24025 - Wrong result -main.udf : Modified in 10.3.26 +main.type_year : Modified in 10.3.28 +main.union : Modified in 10.3.28 +main.user_limits : Modified in 10.3.28 main.userstat : MDEV-12904 - SSL errors -main.view : Modified in 10.3.26 +main.view : Modified in 10.3.28 main.wait_timeout : MDEV-19023 - Lost connection to MySQL server during query -main.win : Modified in 10.3.26 -main.windows_debug : Added in 10.3.26 -main.xa : MDEV-11769 - lock wait timeout +main.xa : MDEV-11769 - lock wait timeout; modified in 10.3.28 #----------------------------------------------------------------------- -archive.archive_bitfield : MDEV-11771 - table is marked as crashed -archive.archive_symlink : MDEV-12170 - unexpected error on rmdir -archive.discover : MDEV-10510 - Table is marked as crashed -archive.mysqlhotcopy_archive : MDEV-10995 - Hang on debug +archive.archive_bitfield : MDEV-11771 - table is marked as crashed +archive.archive_symlink : MDEV-12170 - unexpected error on rmdir +archive.discover : MDEV-10510 - Table is marked as crashed 
#----------------------------------------------------------------------- @@ -174,20 +168,16 @@ archive-test_sql_discovery.discover : MDEV-16817 - Table marked as crashed #----------------------------------------------------------------------- -binlog.binlog_commit_wait : MDEV-10150 - Mismatch -binlog.binlog_killed : MDEV-12925 - Wrong result -binlog.binlog_max_extension : MDEV-19762 - Crash on shutdown -binlog.binlog_mysqlbinlog_row : Modified in 10.3.26 -binlog.binlog_mysqlbinlog_row_frag : Modified in 10.3.26 -binlog.binlog_mysqlbinlog_row_innodb : MDEV-20530 - Binary files differ -binlog.binlog_mysqlbinlog_row_myisam : MDEV-20530 - Binary files differ -binlog.binlog_no_uniqfile_crash : MDEV-24078 - Server crash upon shutdown -binlog.binlog_recover_checksum_error : Added in 10.3.26 -binlog.binlog_show_binlog_event_random_pos : Modified in 10.3.26 -binlog.binlog_stm_mix_innodb_myisam : MDEV-24057 - Wrong result -binlog.binlog_xa_recover : MDEV-8517 - Extra checkpoint -binlog.flashback-largebinlog : MDEV-19764 - Out of memory -binlog.load_data_stm_view : MDEV-16948 - Wrong result +binlog.binlog_commit_wait : MDEV-10150 - Mismatch +binlog.binlog_killed : MDEV-12925 - Wrong result +binlog.binlog_max_extension : MDEV-19762 - Crash on shutdown +binlog.binlog_mysqlbinlog_row_innodb : MDEV-20530 - Binary files differ +binlog.binlog_mysqlbinlog_row_myisam : MDEV-20530 - Binary files differ +binlog.binlog_no_uniqfile_crash : MDEV-24078 - Server crash upon shutdown +binlog.binlog_stm_mix_innodb_myisam : MDEV-24057 - Wrong result +binlog.binlog_xa_recover : MDEV-12908 - Extra checkpoint +binlog.flashback-largebinlog : MDEV-19764 - Out of memory +binlog.load_data_stm_view : MDEV-16948 - Wrong result #----------------------------------------------------------------------- @@ -220,7 +210,6 @@ connect.part_file : MDEV-18135 - SSL error: key too small connect.part_table : MDEV-18135 - SSL error: key too small connect.pivot : MDEV-14803 - Failed to discover table connect.secure_file_priv : MDEV-18135 - SSL error: key too small -connect.updelx : Modified in 10.3.26 connect.vcol : MDEV-12374 - Fails on Windows connect.zip : MDEV-13884 - Wrong result @@ -230,8 +219,7 @@ disks.disks_notembedded : MDEV-21587 - Wrong result #----------------------------------------------------------------------- -encryption.create_or_replace : Modified in 10.3.26 -encryption.create_or_replace_big : Added in 10.3.26 +encryption.create_or_replace : MDEV-24081 - Lock wait timeout exceeded encryption.debug_key_management : MDEV-13841 - Timeout encryption.encrypt_and_grep : MDEV-13765 - Wrong result encryption.innochecksum : MDEV-13644 - Assertion failure @@ -243,10 +231,8 @@ encryption.innodb-first-page-read : MDEV-14356 - Timeout in wait encryption.innodb-force-corrupt : MDEV-17286 - SSL error encryption.innodb-missing-key : MDEV-14728 - SSL error encryption.innodb-page_encryption : MDEV-10641 - mutex problem -encryption.innodb-page_encryption_compression : Modified in 10.3.26 -encryption.innodb-page_encryption_log_encryption : MDEV-17339 - Crash on restart; modified in 10.3.26 +encryption.innodb-page_encryption_log_encryption : MDEV-17339 - Crash on restart encryption.innodb-read-only : MDEV-16563 - Crash on startup -encryption.innodb-redo-badkey : MDEV-12898 - Server hang on startup encryption.innodb-remove-encryption : MDEV-16493 - Timeout in wait condition encryption.innodb-spatial-index : MDEV-13746 - Wrong result encryption.innodb_encrypt_key_rotation_age : MDEV-19763 - Timeout @@ -257,7 +243,6 @@ 
encryption.innodb_encryption : MDEV-14728 - Unable to get ce encryption.innodb_encryption-page-compression : MDEV-12630 - crash or assertion failure encryption.innodb_encryption_discard_import : MDEV-16116 - Wrong result encryption.innodb_encryption_filekeys : MDEV-15673 - Timeout -encryption.innodb_encryption_is : MDEV-12898 - Server hang on startup encryption.innodb_encryption_row_compressed : MDEV-16113 - Crash encryption.innodb_encryption_tables : MDEV-17339 - Crash on restart encryption.innodb_first_page : MDEV-10689 - Crash @@ -265,7 +250,6 @@ encryption.innodb_onlinealter_encryption : MDEV-17287 - SIGABRT on serve encryption.innodb_scrub : MDEV-8139 - scrubbing tests need fixing (fixed in 10.5+) encryption.innodb_scrub_background : MDEV-8139 - scrubbing tests need fixing (fixed in 10.5+) encryption.innodb_scrub_compressed : MDEV-8139 - scrubbing tests need fixing (fixed in 10.5+) -encryption.tempfiles_encrypted : Added in 10.3.26 #----------------------------------------------------------------------- @@ -314,17 +298,9 @@ galera_3nodes.* : Suite is not stable yet #----------------------------------------------------------------------- -gcol.gcol_keys_innodb : Include file modified in 10.3.26 -gcol.gcol_keys_myisam : Include file modified in 10.3.26 -gcol.gcol_partition_innodb : Include file modified in 10.3.26 -gcol.gcol_update : Include file modified in 10.3.26 -gcol.innodb_virtual_basic : MDEV-16950 - Failing assertion -gcol.innodb_virtual_debug : MDEV-23111 - Server crash -gcol.innodb_virtual_debug_purge : Include file modified in 10.3.26 -gcol.innodb_virtual_fk : Modified in 10.3.26 -gcol.innodb_virtual_fk_restart : MDEV-17466 - Assertion failure -gcol.innodb_virtual_index : Modified in 10.3.26 -gcol.innodb_virtual_purge : Include file modified in 10.3.26 +gcol.innodb_virtual_basic : MDEV-16950 - Failing assertion +gcol.innodb_virtual_debug : MDEV-23111 - Server crash +gcol.innodb_virtual_fk_restart : MDEV-17466 - Assertion failure #----------------------------------------------------------------------- @@ -332,18 +308,13 @@ innodb.101_compatibility : MDEV-13891 - Wrong result innodb.alter_copy : MDEV-16181 - Assertion failure innodb.alter_crash : MDEV-16944 - The process cannot access the file innodb.alter_large_dml : MDEV-20148 - Debug sync point wait timed out -innodb.alter_table : Modified in 10.3.26 innodb.binlog_consistent : MDEV-10618 - Server fails to start innodb.blob-crash : MDEV-20481 - Crash during recovery innodb.doublewrite : MDEV-12905 - Server crash -innodb.foreign-keys : Modified in 10.3.26 -innodb.foreign_key : Modified in 10.3.26 innodb.group_commit_crash : MDEV-14191 - InnoDB registration failed innodb.group_commit_crash_no_optimize_thread : MDEV-11770 - Checksum mismatch innodb.ibuf_not_empty : MDEV-19021 - Wrong result -innodb.innodb : Modified in 10.3.26 innodb.innodb-32k-crash : MDEV-20194 - Extra warning -innodb.innodb-64k : Modified in 10.3.26 innodb.innodb-64k-crash : MDEV-13872 - Failure and crash on startup innodb.innodb-alter-debug : MDEV-13182 - InnoDB: adjusting FSP_SPACE_FLAGS innodb.innodb-alter-table : MDEV-10619 - Testcase timeout @@ -352,8 +323,6 @@ innodb.innodb-blob : MDEV-12053 - Client crash innodb.innodb-change-buffer-recovery : MDEV-19115 - Lost connection to MySQL server during query innodb.innodb-fk : MDEV-13832 - Assertion failure on shutdown innodb.innodb-get-fk : MDEV-13276 - Server crash -innodb.innodb-index : Include file modified in 10.3.26 -innodb.innodb-index-debug : Include file modified in 10.3.26 innodb.innodb-index-online 
: MDEV-14809 - Cannot save statistics innodb.innodb-page_compression_default : MDEV-13644 - Assertion failure innodb.innodb-page_compression_lzma : MDEV-14353 - Wrong result @@ -361,13 +330,11 @@ innodb.innodb-page_compression_snappy : MDEV-13644 - Assertion failure innodb.innodb-page_compression_tables : MDEV-13644 - Assertion failure innodb.innodb-page_compression_zip : MDEV-10641 - mutex problem innodb.innodb-table-online : MDEV-13894 - Wrong result +innodb.innodb-ucs2 : MDEV-24505 - Assertion failure innodb.innodb-wl5522 : MDEV-13644 - Assertion failure innodb.innodb-wl5522-1 : MDEV-22945 - Server crash innodb.innodb-wl5522-debug : MDEV-14200 - Wrong errno innodb.innodb_buffer_pool_dump_pct : MDEV-20139 - Timeout in wait_condition.inc -innodb.innodb_buffer_pool_resize : MDEV-16964 - Assertion failure -innodb.innodb_buffer_pool_resize_with_chunks : MDEV-16964 - Assertion failure -innodb.innodb_bug14147491 : MDEV-11808 - Index is corrupt innodb.innodb_bug30423 : MDEV-7311 - Wrong result innodb.innodb_bug47167 : MDEV-20524 - Table 'user' is marked as crashed and should be repaired innodb.innodb_bug48024 : MDEV-14352 - Assertion failure @@ -377,17 +344,13 @@ innodb.innodb_force_recovery_rollback : MDEV-22889 - Wrong result innodb.innodb_information_schema : MDEV-8851 - Wrong result innodb.innodb_max_recordsize_32k : MDEV-14801 - Operation failed innodb.innodb_max_recordsize_64k : MDEV-15203 - Wrong result -innodb.innodb_monitor : MDEV-10939 - Testcase timeout innodb.innodb_mysql : MDEV-19873 - Wrong result innodb.innodb_simulate_comp_failures_small : MDEV-20526 - ASAN use-after-poison innodb.innodb_stats : MDEV-10682 - wrong result -innodb.innodb_stats_drop_locked : Modified in 10.3.26 innodb.innodb_stats_persistent : MDEV-21567 - Wrong result in execution plan innodb.innodb_stats_persistent_debug : MDEV-14801 - Operation failed innodb.innodb_sys_semaphore_waits : MDEV-10331 - Semaphore wait -innodb.innodb_trx_weight : Configuration deleted in 10.3.26 innodb.innodb_zip_innochecksum2 : MDEV-13882 - Warning: difficult to find free blocks -innodb.instant_alter_crash : Modified in 10.3.26 innodb.log_corruption : MDEV-13251 - Wrong result innodb.log_data_file_size : MDEV-14204 - Server failed to start innodb.log_file_name : MDEV-14193 - Exception @@ -397,24 +360,19 @@ innodb.purge_secondary : MDEV-15681 - Wrong result innodb.purge_thread_shutdown : MDEV-13792 - Wrong result innodb.read_only_recovery : MDEV-13886 - Server crash innodb.recovery_shutdown : MDEV-15671 - Checksum mismatch in datafile -innodb.row_format_redundant : MDEV-15192 - Trying to access missing tablespace; modified in 10.3.26 -innodb.stats_persistent : Added in 10.3.26 +innodb.row_format_redundant : MDEV-15192 - Trying to access missing tablespace innodb.table_definition_cache_debug : MDEV-14206 - Extra warning innodb.table_flags : MDEV-13572 - Wrong result; MDEV-19374 - Server failed to start innodb.temp_table_savepoint : MDEV-24077 - Assertion failure innodb.temporary_table : MDEV-13265 - Wrong result -innodb.truncate : Modified in 10.3.26 innodb.undo_truncate : MDEV-17340 - Server hung; MDEV-20840 - Sporadic timeout innodb.undo_truncate_recover : MDEV-17679 - Server has gone away -innodb.update-cascade : Combinations added in 10.3.26 innodb.update_time : MDEV-14804 - Wrong result innodb.xa_recovery : MDEV-15279 - mysqld got exception #----------------------------------------------------------------------- -innodb_fts.basic : Modified in 10.3.26 innodb_fts.fulltext2 : MDEV-24074 - Server crash -innodb_fts.innodb_fts_misc_1 
: Modified in 10.3.26 innodb_fts.innodb_fts_misc_debug : MDEV-14156 - Unexpected warning innodb_fts.innodb_fts_plugin : MDEV-13888 - Errors in server log innodb_fts.innodb_fts_stopword_charset : MDEV-13259 - Table crashed @@ -424,14 +382,12 @@ innodb_fts.sync_ddl : MDEV-21568 - Errno: 2000; MDEV-18654 - innodb_gis.alter_spatial_index : MDEV-13745 - Server crash innodb_gis.gis_split_nan : MDEV-21678 - Cannot get geometry object -innodb_gis.rtree_add_index : Include file modified in 10.3.26 -innodb_gis.rtree_compress : Include file modified in 10.3.26 innodb_gis.rtree_compress2 : MDEV-16269 - Wrong result innodb_gis.rtree_concurrent_srch : MDEV-15284 - Wrong result with embedded -innodb_gis.rtree_purge : MDEV-15275 - Timeout; include file modified in 10.3.26 +innodb_gis.rtree_purge : MDEV-15275 - Timeout innodb_gis.rtree_recovery : MDEV-15274 - Error on check innodb_gis.rtree_split : MDEV-14208 - Too many arguments -innodb_gis.rtree_undo : MDEV-14456 - Timeout in include file; include file modified in 10.3.26 +innodb_gis.rtree_undo : MDEV-14456 - Timeout in include file innodb_gis.types : MDEV-15679 - Table is marked as crashed #----------------------------------------------------------------------- @@ -448,7 +404,6 @@ innodb_zip.wl6501_scale_1 : MDEV-13254 - Timeout, MDEV-14104 - Error 192 #----------------------------------------------------------------------- -maria.create : Modified in 10.3.26 maria.insert_select : MDEV-12757 - Timeout maria.insert_select-7314 : MDEV-16492 - Timeout maria.maria : MDEV-14430 - Extra warning @@ -456,29 +411,25 @@ maria.maria-no-logging : MDEV-20196 - Crash on shutdown or server can't start #----------------------------------------------------------------------- -mariabackup.absolute_ibdata_paths : MDEV-16571 - Wrong result -mariabackup.apply-log-only : MDEV-20135 - Timeout -mariabackup.backup_ssl : MDEV-24073 - Server crash upon shutdown -mariabackup.data_directory : MDEV-15270 - Error on exec -mariabackup.ddl_incremental_encrypted : Added in 10.3.26 -mariabackup.full_backup : MDEV-16571 - Wrong result -mariabackup.huge_lsn : MDEV-18569 - Table doesn't exist -mariabackup.incremental_backup : MDEV-21222 - Memory allocation failure -mariabackup.incremental_ddl_during_backup : Modified in 10.3.26 -mariabackup.incremental_encrypted : MDEV-15667 - timeout -mariabackup.incremental_rocksdb : MDEV-20954 - Cannot access the file -mariabackup.innodb_redo_overwrite : MDEV-24023 - Wrong result; added in 10.3.26 -mariabackup.log_checksum_mismatch : MDEV-16571 - Wrong result -mariabackup.mdev-14447 : MDEV-15201 - Timeout -mariabackup.mlog_index_load : Modified in 10.3.26 -mariabackup.partial_exclude : MDEV-15270 - Error on exec -mariabackup.rpl_slave_info : Added in 10.3.26 -mariabackup.unencrypted_page_compressed : MDEV-18653 - Wrong error; include file modified in 10.3.26 -mariabackup.xb_compressed_encrypted : MDEV-14812 - Segmentation fault -mariabackup.xb_file_key_management : MDEV-16571 - Wrong result -mariabackup.xb_page_compress : MDEV-14810 - status: 1, errno: 11 -mariabackup.xb_partition : MDEV-17584 - Crash upon shutdown -mariabackup.xb_rocksdb : MDEV-17338 - Server hung on shutdown +mariabackup.absolute_ibdata_paths : MDEV-16571 - Wrong result +mariabackup.apply-log-only : MDEV-20135 - Timeout +mariabackup.backup_ssl : MDEV-24073 - Server crash upon shutdown +mariabackup.data_directory : MDEV-15270 - Error on exec +mariabackup.full_backup : MDEV-16571 - Wrong result +mariabackup.huge_lsn : MDEV-18569 - Table doesn't exist +mariabackup.incremental_backup : 
MDEV-21222 - Memory allocation failure +mariabackup.incremental_encrypted : MDEV-15667 - timeout +mariabackup.incremental_rocksdb : MDEV-20954 - Cannot access the file +mariabackup.innodb_redo_overwrite : MDEV-24023 - Wrong result +mariabackup.log_checksum_mismatch : MDEV-16571 - Wrong result +mariabackup.mdev-14447 : MDEV-15201 - Timeout +mariabackup.partial_exclude : MDEV-15270 - Error on exec +mariabackup.unencrypted_page_compressed : MDEV-18653 - Wrong error +mariabackup.xb_compressed_encrypted : MDEV-14812 - Segmentation fault +mariabackup.xb_file_key_management : MDEV-16571 - Wrong result +mariabackup.xb_page_compress : MDEV-14810 - status: 1, errno: 11 +mariabackup.xb_partition : MDEV-17584 - Crash upon shutdown +mariabackup.xb_rocksdb : MDEV-17338 - Server hung on shutdown #----------------------------------------------------------------------- @@ -503,8 +454,6 @@ multi_source.load_data : MDEV-21235 - Slave crash multi_source.mdev-8874 : MDEV-19415 - AddressSanitizer: heap-use-after-free multi_source.mdev-9544 : MDEV-19415 - AddressSanitizer: heap-use-after-free multi_source.multisource : MDEV-10417 - Fails on Mips -multi_source.reset_slave : MDEV-10690 - Wrong result -multi_source.simple : MDEV-4633 - Wrong result multi_source.status_vars : MDEV-4632 - failed while waiting for Slave_received_heartbeats #----------------------------------------------------------------------- @@ -519,7 +468,6 @@ parts.partition_debug_innodb : MDEV-10891 - Can't create UNIX socket; parts.partition_exch_qa_10 : MDEV-11765 - wrong result parts.partition_innodb_status_file : MDEV-12901 - Valgrind parts.partition_special_innodb : MDEV-16942 - Timeout -parts.reorganize : Added in 10.3.26 #----------------------------------------------------------------------- @@ -553,7 +501,7 @@ perfschema_stress.* : MDEV-10996 - Not maintained plugins.feedback_plugin_send : MDEV-7932, MDEV-11118 - Connection problems and such plugins.processlist : MDEV-16574 - Wrong result -plugins.server_audit : MDEV-14295 - Wrong result; modified in 10.3.26 +plugins.server_audit : MDEV-14295 - Wrong result plugins.thread_pool_server_audit : MDEV-14295 - Wrong result #----------------------------------------------------------------------- @@ -607,7 +555,6 @@ rpl.rpl_auto_increment : MDEV-10417 - Fails on Mips rpl.rpl_auto_increment_bug45679 : MDEV-10417 - Fails on Mips rpl.rpl_auto_increment_update_failure : MDEV-10625 - warnings in error log rpl.rpl_binlog_errors : MDEV-12742 - Crash -rpl.rpl_binlog_index : Modified in 10.3.26 rpl.rpl_checksum_cache : MDEV-22510 - Server crash rpl.rpl_colSize : MDEV-16112 - Server crash rpl.rpl_corruption : MDEV-20527 - Slave stopped with wrong error code @@ -618,13 +565,10 @@ rpl.rpl_domain_id_filter_master_crash : MDEV-19043 - Table marked as crashed rpl.rpl_domain_id_filter_restart : MDEV-10684 - Wrong result; MDEV-19043 - Table marked as crashed rpl.rpl_drop_db_fail : MDEV-16898 - Slave fails to start rpl.rpl_extra_col_master_innodb : MDEV-16570 - Extra warning -rpl.rpl_filter_tables_dynamic : Modified in 10.3.26 -rpl.rpl_filter_wild_tables_dynamic : Modified in 10.3.26 rpl.rpl_flushlog_loop : MDEV-21570 - Server crash rpl.rpl_get_lock : MDEV-19368 - mysqltest failed but provided no output rpl.rpl_gtid_basic : MDEV-10681 - server startup problem -rpl.rpl_gtid_crash : MDEV-13643 - Lost connection; modified in 10.3.26 -rpl.rpl_gtid_delete_domain : MDEV-14463 - Timeout; MDEV-23103 - Could not delete gtid domain; modified in 10.3.26 +rpl.rpl_gtid_crash : MDEV-13643 - Lost connection 
rpl.rpl_gtid_errorhandling : MDEV-13261 - Crash rpl.rpl_gtid_mdev9033 : MDEV-10680 - warnings rpl.rpl_gtid_reconnect : MDEV-14497 - Crash @@ -643,12 +587,9 @@ rpl.rpl_mariadb_slave_capability : MDEV-11018 - Extra lines in binlog rpl.rpl_mdev12179 : MDEV-19043 - Table marked as crashed rpl.rpl_mdev6020 : MDEV-23426 - Server crash, ASAN failures; MDEV-15272 - Server crash rpl.rpl_mixed_mixing_engines : MDEV-21266 - Timeout -rpl.rpl_mysql_upgrade : Modified in 10.3.26 rpl.rpl_non_direct_row_mixing_engines : MDEV-16561 - Timeout in master_pos_wait rpl.rpl_parallel : MDEV-10653 - Timeouts -rpl.rpl_parallel2 : MDEV-17390 - Operation cannot be performed; modified in 10.3.26 rpl.rpl_parallel_conflicts : MDEV-15272 - Server crash -rpl.rpl_parallel_mdev6589 : MDEV-12979 - Assertion failure rpl.rpl_parallel_multilevel2 : MDEV-14723 - Timeout rpl.rpl_parallel_optimistic : MDEV-15278 - Failed to sync with master rpl.rpl_parallel_optimistic_nobinlog : MDEV-15278 - Failed to sync with master @@ -670,20 +611,15 @@ rpl.rpl_row_img_eng_noblob : MDEV-13875 - command "diff_files" failed rpl.rpl_row_index_choice : MDEV-15196 - Slave crash rpl.rpl_row_sp001 : MDEV-9329 - Fails on Ubuntu/s390x rpl.rpl_row_until : MDEV-14052 - Master will not send events with checksum -rpl.rpl_semi_sync : MDEV-11220 - Wrong result -rpl.rpl_semi_sync_after_sync : MDEV-14366 - Wrong result -rpl.rpl_semi_sync_after_sync_row : MDEV-21031 - Wrong result; MDEV-14366 - Wrong result rpl.rpl_semi_sync_event_after_sync : MDEV-11806 - warnings -rpl.rpl_semi_sync_uninstall_plugin : MDEV-7140 - Assorted failures +rpl.rpl_semi_sync_uninstall_plugin : MDEV-24561 - Wrong usage of mutex; MDEV-7140 - Assorted failures rpl.rpl_semi_sync_wait_point : MDEV-11807 - timeout in wait condition -rpl.rpl_semisync_ali_issues : MDEV-16272 - Wrong result rpl.rpl_show_slave_hosts : MDEV-10681 - Crash rpl.rpl_skip_replication : MDEV-23372 - Extra warning -rpl.rpl_slave_grp_exec : MDEV-10514 - Deadlock; modified in 10.3.26 rpl.rpl_slave_load_tmpdir_not_exist : MDEV-23372 - Extra warning rpl.rpl_slow_query_log : MDEV-13250 - Test abort rpl.rpl_sp_effects : MDEV-13249 - Crash -rpl.rpl_start_stop_slave : MDEV-13567 - Sync slave timeout; modified in 10.3.26 +rpl.rpl_start_stop_slave : MDEV-13567 - Sync slave timeout rpl.rpl_stm_relay_ign_space : MDEV-14360 - Test assertion rpl.rpl_stm_stop_middle_group : MDEV-13791 - Server crash rpl.rpl_sync : MDEV-10633 - Database page corruption @@ -694,7 +630,6 @@ rpl.rpl_truncate_3innodb : MDEV-19454 - Syntax error rpl.rpl_user_variables : MDEV-20522 - Wrong result rpl.rpl_variables : MDEV-20150 - Server crash rpl.sec_behind_master-5114 : MDEV-13878 - Wrong result -rpl.show_status_stop_slave_race-7126 : Modified in 10.3.26 #----------------------------------------------------------------------- @@ -728,17 +663,12 @@ spider/bg.vp_fixes : MDEV-9329 - Fails on Ubuntu/s390x #----------------------------------------------------------------------- -spider/bugfix.mdev_20100 : Added in 10.3.26 - -#----------------------------------------------------------------------- - spider/handler.* : MDEV-10987, MDEV-10990 - Tests have not been maintained #----------------------------------------------------------------------- sql_sequence.concurrent_create : MDEV-16635 - Server crash sql_sequence.kill : MDEV-23393 - Server crash -sql_sequence.next : Modified in 10.3.26 #----------------------------------------------------------------------- @@ -755,14 +685,9 @@ sys_vars.innodb_buffer_pool_dump_at_shutdown_basic : MDEV-14280 - Unexpected err 
sys_vars.innodb_checksum_algorithm_basic : MDEV-21568 - Errno: 2000 sys_vars.keep_files_on_create_basic : MDEV-10676 - timeout sys_vars.log_slow_admin_statements_func : MDEV-12235 - Server crash -sys_vars.replicate_do_db_basic : Modified in 10.3.26 -sys_vars.rpl_init_slave_func : Modified in 10.3.26 -sys_vars.session_track_system_variables_basic : Modified in 10.3.26 sys_vars.slow_query_log_func : MDEV-14273 - Wrong result sys_vars.thread_cache_size_func : MDEV-11775 - Wrong result sys_vars.wait_timeout_func : MDEV-12896 - Wrong result -sys_vars.wsrep_cluster_address_basic : Modified in 10.3.26 -sys_vars.wsrep_on_basic : Configuration deleted in 10.3.26 #----------------------------------------------------------------------- @@ -794,7 +719,7 @@ tokudb_alter_table.hcad_all_add2 : MDEV-15269 - Timeout #----------------------------------------------------------------------- -tokudb_backup.* : MDEV-11001 - Missing include file +tokudb_backup.* : MDEV-11001 - Missing include file (Won't fix) #----------------------------------------------------------------------- @@ -813,11 +738,11 @@ tokudb_parts.partition_alter4_tokudb : MDEV-12640 - Lost connection #----------------------------------------------------------------------- -tokudb_rpl.* : MDEV-11001 - Missing include file +tokudb_rpl.* : MDEV-11001 - Missing include file (Won't fix) #----------------------------------------------------------------------- -tokudb_sys_vars.* : MDEV-11001 - Missing include file +tokudb_sys_vars.* : MDEV-11001 - Missing include file (Won't fix) #----------------------------------------------------------------------- @@ -835,21 +760,17 @@ unit.mf_iocache : MDEV-20952 - ASAN stack-buffer-overflow vcol.not_supported : MDEV-10639 - Testcase timeout vcol.vcol_keys_innodb : MDEV-10639 - Testcase timeout -vcol.vcol_misc : MDEV-16651 - Wrong error message; modified in 10.3.26 +vcol.vcol_misc : MDEV-16651 - Wrong error message #----------------------------------------------------------------------- -versioning.create : Modified in 10.3.26 -versioning.select : Modified in 10.3.26 versioning.update : MDEV-22475 - Wrong result code -versioning.view : Modified in 10.3.26 #----------------------------------------------------------------------- wsrep.foreign_key : MDEV-14725 - WSREP has not yet prepared node wsrep.mdev_6832 : MDEV-14195 - Check testcase failed wsrep.pool_of_threads : MDEV-17345 - WSREP has not yet prepared node for application use -wsrep.variables : Modified in 10.3.26 #----------------------------------------------------------------------- From ef5adf520760536c7396bdfe884fc509ac065694 Mon Sep 17 00:00:00 2001 From: Sergei Golubchik Date: Thu, 4 Feb 2021 16:06:54 +0100 Subject: [PATCH 143/150] MDEV-24274 ALTER TABLE with CHECK CONSTRAINTS gives "Out of Memory" error partially revert 76063c2a13. Item::clone() is not an all-purpose Item copying machine, it was specifically created for pushdown of predicates into derived tables and views and it does not copy everything. In particular, it does not copy Item_func_regex. Fix the bug differently by preserving the old constraint name. But keep setting automatic_name=true to have it regenerated for cases like ALTER TABLE ... ADD CONSTRAINT. 
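For illustration, a minimal reproduction of MDEV-24274 (this simply mirrors the test case added in mysql-test/main/check_constraint.test below; per the bug report the ALTER previously failed with an "Out of Memory" error, and with this fix the explicitly named constraint is expected to survive the table rebuild):

  create table t1 (id varchar(2), constraint id check (id regexp '[a-z]'));
  alter table t1 force;
  show create table t1;  -- should still list CONSTRAINT `id` CHECK (`id` regexp '[a-z]')
  drop table t1;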
--- mysql-test/main/check_constraint.result | 12 ++++++++ mysql-test/main/check_constraint.test | 8 ++++++ sql/sql_table.cc | 38 +++++++++---------------- sql/unireg.cc | 8 +----- 4 files changed, 34 insertions(+), 32 deletions(-) diff --git a/mysql-test/main/check_constraint.result b/mysql-test/main/check_constraint.result index 3511af84166..f851b99e5c1 100644 --- a/mysql-test/main/check_constraint.result +++ b/mysql-test/main/check_constraint.result @@ -235,3 +235,15 @@ a b insert t1 (b) values (1); ERROR 23000: CONSTRAINT `CONSTRAINT_1` failed for `test`.`t1` drop table t1; +# +# MDEV-24274 ALTER TABLE with CHECK CONSTRAINTS gives "Out of Memory" error +# +create table t1 (id varchar(2), constraint id check (id regexp '[a-z]')); +alter table t1 force; +show create table t1; +Table Create Table +t1 CREATE TABLE `t1` ( + `id` varchar(2) DEFAULT NULL, + CONSTRAINT `id` CHECK (`id` regexp '[a-z]') +) ENGINE=MyISAM DEFAULT CHARSET=latin1 +drop table t1; diff --git a/mysql-test/main/check_constraint.test b/mysql-test/main/check_constraint.test index 93538fd1666..1258a9e3be6 100644 --- a/mysql-test/main/check_constraint.test +++ b/mysql-test/main/check_constraint.test @@ -176,3 +176,11 @@ select * from t1 where a is null; --error ER_CONSTRAINT_FAILED insert t1 (b) values (1); drop table t1; + +--echo # +--echo # MDEV-24274 ALTER TABLE with CHECK CONSTRAINTS gives "Out of Memory" error +--echo # +create table t1 (id varchar(2), constraint id check (id regexp '[a-z]')); +alter table t1 force; +show create table t1; +drop table t1; diff --git a/sql/sql_table.cc b/sql/sql_table.cc index 4ddfcabf6c8..50fe0eb3a6f 100644 --- a/sql/sql_table.cc +++ b/sql/sql_table.cc @@ -76,9 +76,8 @@ static int copy_data_between_tables(THD *, TABLE *,TABLE *, static int mysql_prepare_create_table(THD *, HA_CREATE_INFO *, Alter_info *, uint *, handler *, KEY **, uint *, int); static uint blob_length_by_type(enum_field_types type); -static bool fix_constraints_names(THD *thd, List - *check_constraint_list, - const HA_CREATE_INFO *create_info); +static bool fix_constraints_names(THD *, List *, + const HA_CREATE_INFO *); /** @brief Helper function for explain_filename @@ -4347,8 +4346,6 @@ mysql_prepare_create_table(THD *thd, HA_CREATE_INFO *create_info, const Virtual_column_info *dup_check; while ((dup_check= dup_it++) && dup_check != check) { - if (!dup_check->name.length || dup_check->automatic_name) - continue; if (!lex_string_cmp(system_charset_info, &check->name, &dup_check->name)) { @@ -8682,37 +8679,28 @@ mysql_prepare_alter_table(THD *thd, TABLE *table, } } - // NB: `check` is TABLE resident, we must keep it intact. 
- if (keep) - { - check= check->clone(thd); - if (!check) - { - my_error(ER_OUT_OF_RESOURCES, MYF(0)); - goto err; - } - } - if (share->period.constr_name.streq(check->name.str)) { - if (drop_period) - { - keep= false; - } - else if(!keep) + if (!drop_period && !keep) { my_error(ER_PERIOD_CONSTRAINT_DROP, MYF(0), check->name.str, share->period.name.str); goto err; } - else + keep= keep && !drop_period; + + DBUG_ASSERT(create_info->period_info.constr == NULL || drop_period); + + if (keep) { - DBUG_ASSERT(create_info->period_info.constr == NULL); + Item *expr_copy= check->expr->get_copy(thd); + check= new Virtual_column_info(); + check->name= share->period.constr_name; + check->automatic_name= true; + check->expr= expr_copy; create_info->period_info.constr= check; - create_info->period_info.constr->automatic_name= true; } } - /* see if the constraint depends on *only* on dropped fields */ if (keep && dropped_fields) { diff --git a/sql/unireg.cc b/sql/unireg.cc index 17222efe791..8e432c54b15 100644 --- a/sql/unireg.cc +++ b/sql/unireg.cc @@ -57,13 +57,7 @@ static bool make_empty_rec(THD *, uchar *, uint, List &, uint, */ static uchar *extra2_write_len(uchar *pos, size_t len) { - /* TODO: should be - if (len > 0 && len <= 255) - *pos++= (uchar)len; - ... - because extra2_read_len() uses 0 for 2-byte lengths. - extra2_str_size() must be fixed too. - */ + DBUG_ASSERT(len); if (len <= 255) *pos++= (uchar)len; else From cbbcc8fa2b8aa4dc4b5a5de74beea6e614fc28f1 Mon Sep 17 00:00:00 2001 From: Elena Stepanova Date: Mon, 8 Feb 2021 23:36:06 +0200 Subject: [PATCH 144/150] List of unstable tests for 10.4.18 release Test code modifications and new failures from buildbot registered only for the main suite. The rest was updated partially, based on the status of existing JIRA items --- mysql-test/unstable-tests | 630 +++++++++++++++----------------------- 1 file changed, 252 insertions(+), 378 deletions(-) diff --git a/mysql-test/unstable-tests b/mysql-test/unstable-tests index 30171bfe084..df4a1e74447 100644 --- a/mysql-test/unstable-tests +++ b/mysql-test/unstable-tests @@ -23,190 +23,174 @@ # ############################################################################## # -# Based on bb-10.4-release 80c951ce2875aac521b82323b5b6ebf638593445 -# Sat Oct 31 21:06:49 2020 +0100 : Merge branch '10.3' into 10.4 +# Based on bb-10.4-release d348555cd (MDEV-23328 Server hang due to Galera lock conflict resolution) +# for main suite changes and failures, and +# bb-10.4-release 80c951ce2875aac521b82323b5b6ebf638593445 +# for the rest + -main.alter_table : Modified in 10.4.16 main.alter_table_trans : MDEV-12084 - timeout main.analyze_stmt_slow_query_log : MDEV-12237 - Wrong result -main.aria_icp_debug : Added in 10.4.16 main.auth_named_pipe : MDEV-14724 - System error 2 -main.backup_locks : Modified in 10.4.16 +main.auto_increment_ranges_innodb : Modified in 10.4.18 main.backup_stages : MDEV-23401 - Bad file descriptor main.binary_to_hex : MDEV-20211 - Wrong result -main.blackhole : Modified in 10.4.16 -main.bootstrap_innodb : Added in 10.4.16 +main.check_constraint : Modified in 10.4.18 main.connect : MDEV-17282 - Wrong result main.connect-abstract : MDEV-20162 - Could not execute 'check-testcase' main.connect2 : MDEV-13885 - Server crash -main.count_distinct2 : MDEV-11768 - timeout +main.create : Modified in 10.4.18 main.create_delayed : MDEV-10605 - failed with timeout main.create_drop_event : MDEV-16271 - Wrong result -main.ctype_binary : MDEV-24080 - Data too long for column; include file modified in 10.4.16 
-main.ctype_cp1251 : Include file modified in 10.4.16 +main.cte_nonrecursive : Modified in 10.4.18 +main.cte_nonrecursive_not_embedded : Added in 10.4.18 +main.cte_recursive : Modified in 10.4.18 +main.ctype_binary : MDEV-24080 - Data too long for column main.ctype_cp932_binlog_stm : MDEV-20534 - Wrong result -main.ctype_filename : Modified in 10.4.16 -main.ctype_latin1 : Include file modified in 10.4.16 -main.ctype_ucs : MDEV-17681 - Data too long for column; include file modified in 10.4.16 +main.ctype_ucs : MDEV-17681 - Data too long for column main.ctype_upgrade : MDEV-16945 - Error upon mysql_upgrade main.ctype_utf16 : MDEV-10675: timeout or extra warnings main.ctype_utf16le : MDEV-10675: timeout or extra warnings -main.ctype_utf8 : Modified in 10.4.16 -main.ctype_utf8mb4_innodb : MDEV-17744 - Timeout; MDEV-18567 - ASAN use-after-poison +main.ctype_utf8mb4 : Modified in 10.4.18 +main.ctype_utf8mb4_heap : Include file modified in 10.4.18 +main.ctype_utf8mb4_innodb : MDEV-17744 - Timeout; MDEV-18567 - ASAN use-after-poison; include file modified in 10.4.18 +main.ctype_utf8mb4_myisam : Include file modified in 10.4.18 main.debug_sync : MDEV-10607 - internal error main.delayed : MDEV-20961 - Assertion failure -main.derived_cond_pushdown : MDEV-20532 - Floating point differences -main.derived_opt : MDEV-11768 - timeout +main.derived_cond_pushdown : MDEV-20532 - Floating point differences; modified in 10.4.18 main.dirty_close : MDEV-19368 - mysqltest failed but provided no output main.distinct : MDEV-14194 - Crash main.drop_bad_db_type : MDEV-15676 - Wrong result main.dyncol : MDEV-19455 - Extra warning -main.empty_server_name-8224 : Modified in 10.4.16 -main.errors : Modified in 10.4.16 +main.empty_string_literal : Modified in 10.4.18 main.events_2 : MDEV-13277 - Crash main.events_bugs : MDEV-12892 - Crash main.events_restart : MDEV-12236 - Server shutdown problem main.events_slowlog : MDEV-12821 - Wrong result -main.fast_prefix_index_fetch_innodb : Modified in 10.4.16 main.flush : MDEV-19368 - mysqltest failed but provided no output main.flush_ssl : MDEV-21276 - Aria recovery failure -main.func_gconcat : MDEV-21379 - Valgrind warnings -main.func_json : Modified in 10.4.16 -main.func_math : MDEV-20966 - Wrong error code; modified in 10.4.16 -main.func_test : Modified in 10.4.16 +main.func_gconcat : MDEV-21379 - Valgrind warnings; modified in 10.4.18 +main.func_like : Modified in 10.4.18 +main.func_math : MDEV-20966 - Wrong error code main.gis : MDEV-13411 - wrong result on P8 +main.gis-json : Modified in 10.4.18 main.gis_notembedded : MDEV-21264 - Wrong result with non-default charset -main.grant : Modified in 10.4.16 -main.grant5 : Modified in 10.4.16 +main.group_by : Modified in 10.4.18 main.host_cache_size_functionality : MDEV-10606 - sporadic failure on shutdown -main.implicit_commit : Modified in 10.4.16 main.index_intersect_innodb : MDEV-10643 - failed with timeout main.index_merge_innodb : MDEV-7142 - Plan mismatch -main.information_schema : Modified in 10.4.16 -main.innodb_ext_key : Modified in 10.4.16 +main.information_schema : Modified in 10.4.18 main.innodb_icp : MDEV-20168 - Wrong execution plans -main.innodb_icp_debug : Added in 10.4.16 -main.invisible_field : Modified in 10.4.16 +main.innodb_mrr_cpk : MDEV-24737 - Server crash main.invisible_field_grant_completely : MDEV-22254 - Syscall param write points to uninitialised bytes main.ipv4_and_ipv6 : MDEV-20964 - Wrong result main.ipv6 : MDEV-20964 - Wrong result main.join_cache : MDEV-17743 - Bad address from storage engine 
MyISAM -main.kill : Modified in 10.4.16 +main.kill : MDEV-24801 - Wrong errno on reap; modified in 10.4.18 main.kill-2 : MDEV-13257 - Wrong result main.kill_processlist-6619 : MDEV-10793 - Wrong result -main.limit_rows_examined : Modified in 10.4.16 main.loaddata : MDEV-19368 - mysqltest failed but provided no output main.locale : MDEV-20521 - Missing warning -main.lock_view : Added in 10.4.16 +main.lock_tables_lost_commit : MDEV-24624 - Timeout +main.lock_user : Modified in 10.4.18 +main.lock_view : Modified in 10.4.18 main.log_slow : MDEV-13263 - Wrong result -main.log_tables : Modified in 10.4.16 main.log_tables-big : MDEV-13408 - wrong result main.log_tables_upgrade : MDEV-20962 - Wrong result main.mdev-504 : MDEV-15171 - warning main.mdev375 : MDEV-10607 - sporadic "can't connect" main.merge : MDEV-10607 - sporadic "can't connect" -main.multi_update_big : Modified in 10.4.16 -main.myisam_icp_debug : Added in 10.4.16 -main.myisam_repair : Added in 10.4.16 +main.myisam : Modified in 10.4.18 main.mysql : MDEV-20156 - Wrong result main.mysql_client_test : MDEV-19369 - error: 5888, status: 23, errno: 2; MDEV-19511 - Big endian issue main.mysql_client_test_comp : MDEV-16641 - Error in exec main.mysql_client_test_nonblock : CONC-208 - Error on Power; MDEV-15096 - exec failed main.mysql_cp932 : MDEV-21275 - Wrong result -main.mysql_upgrade : MDEV-20161 - Wrong result; MDEV-20166 - FATAL ERROR: Upgrade failed; modified in 10.4.16 +main.mysql_upgrade : MDEV-20161 - Wrong result; MDEV-20166 - FATAL ERROR: Upgrade failed; modified in 10.4.18 main.mysql_upgrade-6984 : MDEV-22514 - Wrong result main.mysql_upgrade_no_innodb : MDEV-20537 - Wrong result main.mysql_upgrade_noengine : MDEV-14355 - Wrong result main.mysql_upgrade_view : MDEV-20161 - Wrong result; MDEV-23392 - Wrong result main.mysqladmin : MDEV-20535 - Wrong result -main.mysqlbinlog_row_minimal : Modified in 10.4.16 main.mysqlcheck : MDEV-20164 - Wrong result -main.mysqld--help : Modified in 10.4.16 main.mysqld_option_err : MDEV-21236 - Wrong error; MDEV-21571 - Crash on bootstrap -main.mysqldump : MDEV-14800 - Stack smashing detected; modified in 10.4.16 +main.mysqldump : Modified in 10.4.18 main.mysqldump-max : MDEV-21272 - Wrong result -main.mysqlhotcopy_myisam : MDEV-10995 - Hang on debug +main.mysqldump-system : Modified in 10.4.18 main.mysqlshow : MDEV-20965 - Wrong result main.mysqlslap : MDEV-11801 - timeout main.mysqltest : MDEV-13887 - Wrong result -main.named_pipe : Modified in 10.4.16 main.old-mode : MDEV-19373 - Wrong result main.openssl_6975 : MDEV-17184 - Failures with OpenSSL 1.1.1 -main.order_by : Modified in 10.4.16 +main.order_by : Modified in 10.4.18 main.order_by_optimizer_innodb : MDEV-10683 - Wrong result -main.parser : Modified in 10.4.16 -main.partition : Modified in 10.4.16 +main.parser : Modified in 10.4.18 main.partition_debug_sync : MDEV-15669 - Deadlock found when trying to get lock main.partition_innodb : MDEV-20169 - Wrong result; MDEV-23427 - Server crash main.partition_innodb_plugin : MDEV-12901 - Valgrind warnings main.partition_innodb_semi_consistent : MDEV-19411 - Failed to start mysqld.1 main.plugin_auth : MDEV-20957 - Upgrade file was not properly created main.plugin_auth_qa_2 : MDEV-20165 - Wrong result -main.plugin_innodb : Modified in 10.4.16 -main.pool_of_threads : MDEV-18135 - SSL error: key too small; modified in 10.4.16 -main.precedence : Added in 10.4.16 -main.precedence_bugs : Added in 10.4.16 -main.processlist_notembedded : Modified in 10.4.16 +main.pool_of_threads : MDEV-18135 - SSL 
error: key too small +main.precedence : Modified in 10.4.18 +main.processlist_notembedded : MDEV-23752 - Not explainable command; modified in 10.4.18 main.ps : MDEV-11017 - sporadic wrong Prepared_stmt_count main.ps_error : MDEV-24079 - Memory not freed -main.query_cache : MDEV-16180 - Wrong result +main.ps_show_log : Added in 10.4.18 +main.query_cache : MDEV-16180 - Wrong result; modified in 10.4.18 main.query_cache_debug : MDEV-15281 - Query cache is disabled -main.range : Modified in 10.4.16 +main.range : Modified in 10.4.18 main.range_innodb : MDEV-23371 - Server crash +main.range_notembedded : Added in 10.4.18 main.range_vs_index_merge_innodb : MDEV-15283 - Server has gone away -main.rowid_filter : Modified in 10.4.16 main.rowid_filter_innodb : MDEV-20538 - Wrong result -main.rowid_filter_innodb_debug : Added in 10.4.16 -main.rowid_filter_myisam_debug : Added in 10.4.16 main.select : MDEV-20532 - Floating point differences main.select_jcl6 : MDEV-20532 - Floating point differences main.select_pkeycache : MDEV-20532 - Floating point differences +main.set_password : Modified in 10.4.18 main.set_statement : MDEV-13183 - Wrong result -main.set_statement_notembedded : MDEV-19414 - Wrong result; modified in 10.4.16 +main.set_statement_notembedded : MDEV-19414 - Wrong result main.shm : MDEV-12727 - Mismatch, ERROR 2013 main.show_explain : MDEV-10674 - Wrong result code -main.sp : MDEV-7866 - Mismatch; modified in 10.4.16 -main.sp-destruct : Modified in 10.4.16 +main.skip_grants : Modified in 10.4.18 +main.sp : MDEV-7866 - Mismatch; modified in 10.4.18 main.sp-security : MDEV-10607 - sporadic "can't connect" +main.sp-ucs2 : Modified in 10.4.18 main.sp_notembedded : MDEV-10607 - internal error main.ssl : MDEV-17184 - Failures with OpenSSL 1.1.1 main.ssl_7937 : MDEV-20958 - Wrong result main.ssl_ca : MDEV-10895 - SSL connection error on Power main.ssl_cipher : MDEV-17184 - Failures with OpenSSL 1.1.1 main.ssl_timeout : MDEV-11244 - Crash +main.stat_tables : Modified in 10.4.18 main.stat_tables_par_innodb : MDEV-14155 - Wrong rounding main.status : MDEV-13255 - Wrong result main.subselect : MDEV-20551 - Valgrind failure -main.subselect4 : Modified in 10.4.16 -main.subselect_innodb : MDEV-10614 - Wrong result; modified in 10.4.16 -main.sum_distinct-big : Modified in 10.4.16 +main.subselect4 : Modified in 10.4.18 +main.subselect_innodb : MDEV-10614 - Wrong result +main.table_value_constr : Modified in 10.4.18 main.tc_heuristic_recover : MDEV-14189 - Wrong result -main.temp_table_symlink : MDEV-24058 - Wrong error code; added in 10.4.16 -main.type_blob : MDEV-15195 - Wrong result; modified in 10.4.16 -main.type_date : Modified in 10.4.16 -main.type_datetime : Modified in 10.4.16 +main.temp_table_symlink : MDEV-24058 - Wrong error code +main.type_blob : MDEV-15195 - Wrong result main.type_datetime_hires : MDEV-10687 - Timeout -main.type_float : MDEV-20532 - Floating point differences; modified in 10.4.16 -main.type_newdecimal : MDEV-20532 - Floating point differences; modified in 10.4.16 +main.type_float : MDEV-20532 - Floating point differences +main.type_newdecimal : MDEV-20532 - Floating point differences main.type_ranges : MDEV-20532 - Floating point differences main.type_temporal_innodb : MDEV-24025 - Wrong result -main.type_time : Modified in 10.4.16 -main.udf : Modified in 10.4.16 -main.upgrade_MDEV-19650 : Re-enabled in 10.4.16 +main.type_year : Modified in 10.4.18 +main.union : Modified in 10.4.18 +main.user_limits : Modified in 10.4.18 main.userstat : MDEV-12904 - SSL errors -main.view : 
Modified in 10.4.16 +main.view : Modified in 10.4.18 main.wait_timeout : MDEV-19023 - Lost connection to MySQL server during query -main.win : Modified in 10.4.16 -main.windows_debug : Added in 10.4.16 -main.xa : MDEV-11769 - lock wait timeout +main.xa : MDEV-11769 - lock wait timeout; modified in 10.4.18 #----------------------------------------------------------------------- -archive.archive-big : MDEV-20167 - Wrong error code -archive.archive_bitfield : MDEV-11771 - table is marked as crashed -archive.archive_symlink : MDEV-12170 - unexpected error on rmdir -archive.discover : MDEV-10510 - Table is marked as crashed -archive.mysqlhotcopy_archive : MDEV-10995 - Hang on debug +archive.archive-big : MDEV-20167 - Wrong error code +archive.archive_bitfield : MDEV-11771 - table is marked as crashed +archive.archive_symlink : MDEV-12170 - unexpected error on rmdir +archive.discover : MDEV-10510 - Table is marked as crashed #----------------------------------------------------------------------- @@ -214,56 +198,45 @@ archive-test_sql_discovery.discover : MDEV-16817 - Table marked as crashed #----------------------------------------------------------------------- -binlog.binlog_commit_wait : MDEV-10150 - Mismatch -binlog.binlog_innodb : MDEV-22516 - Wrong result -binlog.binlog_ioerr : MDEV-20159 - Assertion failure -binlog.binlog_killed : MDEV-12925 - Wrong result -binlog.binlog_max_extension : MDEV-19762 - Crash on shutdown -binlog.binlog_mysqlbinlog_row : Modified in 10.4.16 -binlog.binlog_mysqlbinlog_row_frag : Modified in 10.4.16 -binlog.binlog_mysqlbinlog_row_innodb : MDEV-20530 - Binary files differ -binlog.binlog_mysqlbinlog_row_myisam : MDEV-20530 - Binary files differ -binlog.binlog_no_uniqfile_crash : MDEV-24078 - Server crash upon shutdown -binlog.binlog_recover_checksum_error : Added in 10.4.16 -binlog.binlog_row_binlog : MDEV-23402 - Wrong result -binlog.binlog_show_binlog_event_random_pos : Modified in 10.4.16 -binlog.binlog_stm_binlog : MDEV-20412 - Wrong result -binlog.binlog_stm_mix_innodb_myisam : MDEV-24057 - Wrong result -binlog.binlog_xa_recover : MDEV-8517 - Extra checkpoint -binlog.flashback-largebinlog : MDEV-19764 - Out of memory -binlog.load_data_stm_view : MDEV-16948 - Wrong result -binlog.show_concurrent_rotate : MDEV-20215 - Wrong result +binlog.binlog_commit_wait : MDEV-10150 - Mismatch +binlog.binlog_innodb : MDEV-22516 - Wrong result +binlog.binlog_killed : MDEV-12925 - Wrong result +binlog.binlog_max_extension : MDEV-19762 - Crash on shutdown +binlog.binlog_mysqlbinlog_row_innodb : MDEV-20530 - Binary files differ +binlog.binlog_mysqlbinlog_row_myisam : MDEV-20530 - Binary files differ +binlog.binlog_no_uniqfile_crash : MDEV-24078 - Server crash upon shutdown +binlog.binlog_row_binlog : MDEV-23402 - Wrong result +binlog.binlog_stm_binlog : MDEV-20412 - Wrong result +binlog.binlog_stm_mix_innodb_myisam : MDEV-24057 - Wrong result +binlog.binlog_xa_recover : MDEV-12908 - Extra checkpoint +binlog.flashback-largebinlog : MDEV-19764 - Out of memory +binlog.load_data_stm_view : MDEV-16948 - Wrong result +binlog.show_concurrent_rotate : MDEV-20215 - Wrong result #----------------------------------------------------------------------- -binlog_encryption.binlog_xa_recover : MDEV-12908 - Extra checkpoint -binlog_encryption.encrypted_master : MDEV-23637 - Assertion failure; MDEV-14201 - Extra warnings -binlog_encryption.encrypted_master_switch_to_unencrypted : MDEV-14190 - Can't init tc log -binlog_encryption.encrypted_slave : MDEV-18135 - SSL error: key too small 
-binlog_encryption.encryption_combo : MDEV-14199 - Table is marked as crashed -binlog_encryption.multisource : MDEV-21289 - Wrong error code -binlog_encryption.rpl_binlog_errors : MDEV-12742 - Crash -binlog_encryption.rpl_checksum : MDEV-16951 - Wrong result -binlog_encryption.rpl_corruption : MDEV-20159 - Assertion failure; MDEV-20953 - Wrong error code -binlog_encryption.rpl_gtid_basic : MDEV-16947 - Server failed to start -binlog_encryption.rpl_incident : MDEV-21569 - mutex: LOCK_global_system_variables unlocking -binlog_encryption.rpl_loadfile : MDEV-16645 - Timeout in include -binlog_encryption.rpl_mixed_binlog_max_cache_size : MDEV-20956 - Incorrect checksum for freed object -binlog_encryption.rpl_parallel : MDEV-10653 - Timeout in include -binlog_encryption.rpl_parallel_ignored_errors : MDEV-22471 - Slave crash -binlog_encryption.rpl_relayrotate : MDEV-15194 - Timeout -binlog_encryption.rpl_semi_sync : MDEV-11673 - Valgrind -binlog_encryption.rpl_skip_replication : MDEV-13571 - Unexpected warning; MDEV-20573 - Wrong result -binlog_encryption.rpl_ssl : MDEV-14507 - Timeouts -binlog_encryption.rpl_stm_relay_ign_space : MDEV-19375 - Test assertion failed -binlog_encryption.rpl_temporal_format_default_to_default : MDEV-21273 - Timeout -binlog_encryption.rpl_temporal_format_mariadb53_to_mysql56 : MDEV-20159 - Assertion failure -binlog_encryption.rpl_typeconv : MDEV-14362 - Lost connection to MySQL server during query - -#----------------------------------------------------------------------- - -compat/oracle.parser : Modified in 10.4.16 -compat/oracle.sp-package : Modified in 10.4.16 +binlog_encryption.binlog_xa_recover : MDEV-12908 - Extra checkpoint +binlog_encryption.encrypted_master : MDEV-23637 - Assertion failure; MDEV-14201 - Extra warnings +binlog_encryption.encrypted_master_switch_to_unencrypted : MDEV-14190 - Can't init tc log +binlog_encryption.encrypted_slave : MDEV-18135 - SSL error: key too small +binlog_encryption.encryption_combo : MDEV-14199 - Table is marked as crashed +binlog_encryption.multisource : MDEV-21289 - Wrong error code +binlog_encryption.rpl_binlog_errors : MDEV-12742 - Crash +binlog_encryption.rpl_checksum : MDEV-16951 - Wrong result +binlog_encryption.rpl_corruption : MDEV-20953 - Wrong error code +binlog_encryption.rpl_gtid_basic : MDEV-16947 - Server failed to start +binlog_encryption.rpl_incident : MDEV-21569 - mutex: LOCK_global_system_variables unlocking +binlog_encryption.rpl_loadfile : MDEV-16645 - Timeout in include +binlog_encryption.rpl_mixed_binlog_max_cache_size : MDEV-20956 - Incorrect checksum for freed object +binlog_encryption.rpl_parallel : MDEV-10653 - Timeout in include +binlog_encryption.rpl_parallel_ignored_errors : MDEV-22471 - Slave crash +binlog_encryption.rpl_relayrotate : MDEV-15194 - Timeout +binlog_encryption.rpl_semi_sync : MDEV-11673 - Valgrind +binlog_encryption.rpl_skip_replication : MDEV-13571 - Unexpected warning; MDEV-20573 - Wrong result +binlog_encryption.rpl_ssl : MDEV-14507 - Timeouts +binlog_encryption.rpl_stm_relay_ign_space : MDEV-19375 - Test assertion failed +binlog_encryption.rpl_temporal_format_default_to_default : MDEV-21273 - Timeout +binlog_encryption.rpl_typeconv : MDEV-14362 - Lost connection to MySQL server during query #----------------------------------------------------------------------- @@ -274,7 +247,6 @@ connect.part_file : MDEV-18135 - SSL error: key too small connect.part_table : MDEV-18135 - SSL error: key too small connect.pivot : MDEV-14803 - Failed to discover table connect.secure_file_priv : 
MDEV-18135 - SSL error: key too small -connect.updelx : Modified in 10.4.16 connect.vcol : MDEV-12374 - Fails on Windows connect.zip : MDEV-13884 - Wrong result @@ -284,9 +256,7 @@ disks.disks_notembedded : MDEV-21587 - Wrong result #----------------------------------------------------------------------- -encryption.corrupted_during_recovery : MDEV-20159 - Assertion failure -encryption.create_or_replace : MDEV-24081 - Lock wait timeout exceeded; modified in 10.4.16 -encryption.create_or_replace_big : Added in 10.4.16 +encryption.create_or_replace : MDEV-24081 - Lock wait timeout exceeded encryption.debug_key_management : MDEV-13841 - Timeout encryption.encrypt_and_grep : MDEV-13765 - Wrong result encryption.innochecksum : MDEV-13644 - Assertion failure @@ -298,11 +268,8 @@ encryption.innodb-first-page-read : MDEV-14356 - Timeout in wait encryption.innodb-force-corrupt : MDEV-17286 - SSL error encryption.innodb-missing-key : MDEV-14728 - SSL error encryption.innodb-page_encryption : MDEV-10641 - mutex problem -encryption.innodb-page_encryption_compression : Modified in 10.4.16 -encryption.innodb-page_encryption_log_encryption : MDEV-17339 - Crash on restart; modified in 10.4.16 +encryption.innodb-page_encryption_log_encryption : MDEV-17339 - Crash on restart encryption.innodb-read-only : MDEV-16563 - Crash on startup -encryption.innodb-redo-badkey : MDEV-12898 - Server hang on startup -encryption.innodb-redo-nokeys : MDEV-20159 - Assertion failure encryption.innodb-remove-encryption : MDEV-16493 - Timeout in wait condition encryption.innodb-spatial-index : MDEV-13746 - Wrong result encryption.innodb_encrypt_key_rotation_age : MDEV-19763 - Timeout @@ -313,7 +280,6 @@ encryption.innodb_encryption : MDEV-14728 - Unable to get ce encryption.innodb_encryption-page-compression : MDEV-12630 - crash or assertion failure encryption.innodb_encryption_discard_import : MDEV-16116 - Wrong result encryption.innodb_encryption_filekeys : MDEV-15673 - Timeout -encryption.innodb_encryption_is : MDEV-12898 - Server hang on startup encryption.innodb_encryption_row_compressed : MDEV-16113 - Crash encryption.innodb_encryption_tables : MDEV-17339 - Crash on restart encryption.innodb_first_page : MDEV-10689 - Crash @@ -321,7 +287,6 @@ encryption.innodb_onlinealter_encryption : MDEV-17287 - SIGABRT on serve encryption.innodb_scrub : MDEV-8139 - scrubbing tests need fixing (fixed in 10.5+) encryption.innodb_scrub_background : MDEV-8139 - scrubbing tests need fixing (fixed in 10.5+) encryption.innodb_scrub_compressed : MDEV-8139 - scrubbing tests need fixing (fixed in 10.5+) -encryption.tempfiles_encrypted : Modified in 10.4.16 #----------------------------------------------------------------------- @@ -338,13 +303,12 @@ engines/rr_trx.* : MDEV-10998 - Not maintained #----------------------------------------------------------------------- -federated.federated_bug_35333 : MDEV-13410 - Wrong result -federated.federated_bug_585688 : MDEV-14805 - Server crash, MDEV-12907 - Valgrind -federated.federated_innodb : MDEV-10617 - Wrong checksum -federated.federated_partition : MDEV-10417 - Fails on Mips -federated.federated_transactions : MDEV-10617 - Wrong checksum -federated.federatedx : MDEV-10617 - Wrong checksum -federated.federatedx_create_handlers : Modified in 10.4.16 +federated.federated_bug_35333 : MDEV-13410 - Wrong result +federated.federated_bug_585688 : MDEV-14805 - Server crash, MDEV-12907 - Valgrind +federated.federated_innodb : MDEV-10617 - Wrong checksum +federated.federated_partition : MDEV-10417 - Fails on 
Mips +federated.federated_transactions : MDEV-10617 - Wrong checksum +federated.federatedx : MDEV-10617 - Wrong checksum #----------------------------------------------------------------------- @@ -371,18 +335,12 @@ galera_3nodes.* : Suite is not stable yet #----------------------------------------------------------------------- -gcol.gcol_keys_innodb : Include file modified in 10.4.16 -gcol.gcol_keys_myisam : Include file modified in 10.4.16 -gcol.gcol_partition_innodb : Include file modified in 10.4.16 -gcol.gcol_update : Include file modified in 10.4.16 -gcol.innodb_virtual_basic : MDEV-16950 - Failing assertion -gcol.innodb_virtual_debug : MDEV-23404 - Server crash -gcol.innodb_virtual_debug_purge : Include file modified in 10.4.16 -gcol.innodb_virtual_fk : MDEV-20640 - Assertion failure; modified in 10.4.16 -gcol.innodb_virtual_fk_restart : MDEV-17466 - Assertion failure -gcol.innodb_virtual_index : Modified in 10.4.16 -gcol.innodb_virtual_purge : MDEV-22952 - Lock wait timeout; include file modified in 10.4.16 -gcol.main_alter_table : MDEV-23403 - Wrong result +gcol.innodb_virtual_basic : MDEV-16950 - Failing assertion +gcol.innodb_virtual_debug : MDEV-23111 - Server crash +gcol.innodb_virtual_fk : MDEV-20640 - Assertion failure +gcol.innodb_virtual_fk_restart : MDEV-17466 - Assertion failure +gcol.innodb_virtual_purge : MDEV-22952 - Lock wait timeout +gcol.main_alter_table : MDEV-23403 - Wrong result #----------------------------------------------------------------------- @@ -390,31 +348,21 @@ innodb.101_compatibility : MDEV-13891 - Wrong result innodb.alter_copy : MDEV-16181 - Assertion failure innodb.alter_crash : MDEV-16944 - The process cannot access the file innodb.alter_large_dml : MDEV-20148 - Debug sync point wait timed out -innodb.alter_table : Modified in 10.4.16 innodb.binlog_consistent : MDEV-10618 - Server fails to start innodb.blob-crash : MDEV-20481 - Crash during recovery -innodb.create-index : MDEV-20159 - Assertion failure -innodb.default_row_format_compatibility : MDEV-20159 - Assertion failure innodb.doublewrite : MDEV-12905 - Server crash -innodb.foreign-keys : Modified in 10.4.16 -innodb.foreign_key : Modified in 10.4.16 innodb.group_commit_crash : MDEV-14191 - InnoDB registration failed innodb.group_commit_crash_no_optimize_thread : MDEV-11770 - Checksum mismatch innodb.ibuf_not_empty : MDEV-19021 - Wrong result -innodb.innodb : Modified in 10.4.16 innodb.innodb-32k-crash : MDEV-20194 - Extra warnings -innodb.innodb-64k : Modified in 10.4.16 innodb.innodb-64k-crash : MDEV-13872 - Failure and crash on startup innodb.innodb-alter-debug : MDEV-13182 - InnoDB: adjusting FSP_SPACE_FLAGS innodb.innodb-alter-table : MDEV-10619 - Testcase timeout innodb.innodb-bigblob : MDEV-18655 - ASAN unknown crash innodb.innodb-blob : MDEV-12053 - Client crash innodb.innodb-change-buffer-recovery : MDEV-19115 - Lost connection to MySQL server during query -innodb.innodb-dict : MDEV-20159 - Assertion failure innodb.innodb-fk : MDEV-13832 - Assertion failure on shutdown innodb.innodb-get-fk : MDEV-13276 - Server crash -innodb.innodb-index : Include file modified in 10.4.16 -innodb.innodb-index-debug : Include file modified in 10.4.16 innodb.innodb-index-online : MDEV-14809 - Cannot save statistics innodb.innodb-page_compression_default : MDEV-13644 - Assertion failure innodb.innodb-page_compression_lzma : MDEV-14353 - Wrong result @@ -422,15 +370,14 @@ innodb.innodb-page_compression_snappy : MDEV-13644 - Assertion failure innodb.innodb-page_compression_tables : MDEV-13644 - 
Assertion failure innodb.innodb-page_compression_zip : MDEV-10641 - mutex problem innodb.innodb-table-online : MDEV-13894 - Wrong result -innodb.innodb-timeout : MDEV-20159 - Assertion failure +innodb.innodb-ucs2 : MDEV-24505 - Assertion failure innodb.innodb-wl5522 : MDEV-13644 - Assertion failure innodb.innodb-wl5522-1 : MDEV-22945 - Server crash innodb.innodb-wl5522-debug : MDEV-14200 - Wrong errno innodb.innodb_buffer_pool_dump_pct : MDEV-20139 - Timeout in wait_condition.inc -innodb.innodb_buffer_pool_resize : MDEV-16964 - Assertion failure +innodb.innodb_buffer_pool_resize : MDEV-23637 - Assertion failure innodb.innodb_buffer_pool_resize_debug : MDEV-22515 - Timeout in wait_condition -innodb.innodb_buffer_pool_resize_with_chunks : MDEV-16964 - Assertion failure -innodb.innodb_bug14147491 : MDEV-11808 - Index is corrupt +innodb.innodb_buffer_pool_resize_with_chunks : MDEV-23637 - Assertion failure innodb.innodb_bug30423 : MDEV-7311 - Wrong result innodb.innodb_bug47167 : MDEV-20524 - Table 'user' is marked as crashed and should be repaired innodb.innodb_bug48024 : MDEV-14352 - Assertion failure @@ -440,50 +387,36 @@ innodb.innodb_force_recovery_rollback : MDEV-22889 - Wrong result innodb.innodb_information_schema : MDEV-8851 - Wrong result innodb.innodb_max_recordsize_32k : MDEV-14801 - Operation failed innodb.innodb_max_recordsize_64k : MDEV-15203 - Wrong result -innodb.innodb_monitor : MDEV-10939 - Testcase timeout innodb.innodb_mysql : MDEV-19873 - Wrong result innodb.innodb_simulate_comp_failures_small : MDEV-20526 - ASAN use-after-poison innodb.innodb_stats : MDEV-10682 - wrong result -innodb.innodb_stats_drop_locked : Modified in 10.4.16 innodb.innodb_stats_persistent : MDEV-21567 - Wrong result in execution plan innodb.innodb_stats_persistent_debug : MDEV-14801 - Operation failed innodb.innodb_sys_semaphore_waits : MDEV-10331 - Semaphore wait -innodb.innodb_trx_weight : Configuration deleted in 10.4.16 innodb.innodb_zip_innochecksum2 : MDEV-13882 - Warning: difficult to find free blocks -innodb.instant_alter_bugs : Modified in 10.4.16 -innodb.instant_alter_crash : Modified in 10.4.16 innodb.instant_alter_extend : MDEV-20963 - Binary files differ -innodb.instant_alter_index_rename : Modified in 10.4.16 -innodb.instant_alter_purge : Modified in 10.4.16 innodb.log_corruption : MDEV-13251 - Wrong result innodb.log_data_file_size : MDEV-14204 - Server failed to start; MDEV-20648 - Assertion failure -innodb.log_file : MDEV-20159 - Assertion failure innodb.log_file_name : MDEV-14193 - Exception innodb.log_file_size : MDEV-15668 - Not found pattern innodb.monitor : MDEV-16179 - Wrong result -innodb.page_id_innochecksum : MDEV-20159 - Assertion failure innodb.purge_secondary : MDEV-15681 - Wrong result innodb.purge_thread_shutdown : MDEV-13792 - Wrong result innodb.read_only_recovery : MDEV-13886 - Server crash innodb.recovery_shutdown : MDEV-15671 - Checksum mismatch in datafile -innodb.row_format_redundant : MDEV-15192 - Trying to access missing tablespace; modified in 10.4.16 -innodb.stats_persistent : Added in 10.4.16 +innodb.row_format_redundant : MDEV-15192 - Trying to access missing tablespace innodb.table_definition_cache_debug : MDEV-14206 - Extra warning -innodb.table_flags : MDEV-13572 - Wrong result; MDEV-19374 - Server failed to start; modified in 10.4.16 +innodb.table_flags : MDEV-13572 - Wrong result; MDEV-19374 - Server failed to start innodb.temp_table_savepoint : MDEV-24077 - Assertion failure innodb.temporary_table : MDEV-13265 - Wrong result -innodb.truncate : 
Modified in 10.4.16 innodb.undo_truncate : MDEV-17340 - Server hung; MDEV-20840 - Sporadic timeout innodb.undo_truncate_recover : MDEV-17679 - Server has gone away; MDEV-19200 - Shutdown fails -innodb.update-cascade : Combinations added in 10.4.16 innodb.update_time : MDEV-14804 - Wrong result innodb.xa_recovery : MDEV-15279 - mysqld got exception #----------------------------------------------------------------------- -innodb_fts.basic : Modified in 10.4.16 innodb_fts.fulltext2 : MDEV-24074 - Server crash -innodb_fts.innodb_fts_misc_1 : Modified in 10.4.16 innodb_fts.innodb_fts_misc_debug : MDEV-14156 - Unexpected warning innodb_fts.innodb_fts_plugin : MDEV-13888 - Errors in server log innodb_fts.innodb_fts_stopword_charset : MDEV-13259 - Table crashed @@ -493,20 +426,18 @@ innodb_fts.sync_ddl : MDEV-21568 - Errno: 2000; MDEV-18654 - innodb_gis.alter_spatial_index : MDEV-13745 - Server crash innodb_gis.gis_split_nan : MDEV-21678 - Cannot get geometry object -innodb_gis.rtree_add_index : Include file modified in 10.4.16 -innodb_gis.rtree_compress : Include file modified in 10.4.16 innodb_gis.rtree_compress2 : MDEV-16269 - Wrong result innodb_gis.rtree_concurrent_srch : MDEV-15284 - Wrong result with embedded -innodb_gis.rtree_purge : MDEV-15275 - Timeout; include file modified in 10.4.16 +innodb_gis.rtree_purge : MDEV-15275 - Timeout innodb_gis.rtree_recovery : MDEV-15274 - Error on check innodb_gis.rtree_split : MDEV-14208 - Too many arguments -innodb_gis.rtree_undo : MDEV-14456 - Timeout in include file; include file modified in 10.4.16 +innodb_gis.rtree_undo : MDEV-14456 - Timeout in include file innodb_gis.types : MDEV-15679 - Table is marked as crashed #----------------------------------------------------------------------- innodb_zip.cmp_per_index : MDEV-14490 - Table is marked as crashed -innodb_zip.create_options : MDEV-21329 - Assertion failure; MDEV-24076 - Assertion failure +innodb_zip.create_options : MDEV-24076 - Assertion failure innodb_zip.index_large_prefix_4k : MDEV-21679 - Row size too large innodb_zip.innochecksum : MDEV-14486 - Server failed to shut down innodb_zip.innochecksum_3 : MDEV-13279 - Extra warnings @@ -518,39 +449,32 @@ innodb_zip.wl6501_scale_1 : MDEV-13254 - Timeout, MDEV-14104 - Error 192 #----------------------------------------------------------------------- -maria.alter : Modified in 10.4.16 -maria.create : Modified in 10.4.16 maria.insert_select : MDEV-12757 - Timeout maria.insert_select-7314 : MDEV-16492 - Timeout maria.maria : MDEV-14430 - Extra warning -maria.maria-no-logging : MDEV-20196 - Crash on shutdown or server can't start; modified in 10.4.16 +maria.maria-no-logging : MDEV-20196 - Crash on shutdown or server can't start #----------------------------------------------------------------------- -mariabackup.absolute_ibdata_paths : MDEV-16571 - Wrong result -mariabackup.apply-log-only : MDEV-20135 - Timeout -mariabackup.backup_ssl : MDEV-24073 - Server crash upon shutdown -mariabackup.create_with_data_directory_during_backup : MDEV-20159 - Assertion failure -mariabackup.data_directory : MDEV-15270 - Error on exec -mariabackup.ddl_incremental_encrypted : Added in 10.4.16 -mariabackup.full_backup : MDEV-16571 - Wrong result -mariabackup.huge_lsn : MDEV-18569 - Table doesn't exist -mariabackup.incremental_backup : MDEV-21222 - Memory allocation failure -mariabackup.incremental_ddl_during_backup : Modified in 10.4.16 -mariabackup.incremental_encrypted : MDEV-15667 - timeout -mariabackup.incremental_rocksdb : MDEV-20954 - Cannot access the file 
-mariabackup.innodb_redo_overwrite : MDEV-24023 - Wrong result; added in 10.4.16 -mariabackup.log_checksum_mismatch : MDEV-16571 - Wrong result -mariabackup.mdev-14447 : MDEV-15201 - Timeout -mariabackup.mlog_index_load : Modified in 10.4.16 -mariabackup.partial_exclude : MDEV-15270 - Error on exec -mariabackup.rpl_slave_info : Added in 10.4.16 -mariabackup.unencrypted_page_compressed : MDEV-18653 - Wrong error; include file modified in 10.4.16 -mariabackup.xb_compressed_encrypted : MDEV-14812 - Segmentation fault -mariabackup.xb_file_key_management : MDEV-16571 - Wrong result -mariabackup.xb_page_compress : MDEV-14810 - status: 1, errno: 11 -mariabackup.xb_partition : MDEV-17584 - Crash upon shutdown -mariabackup.xb_rocksdb : MDEV-17338 - Server hung on shutdown +mariabackup.absolute_ibdata_paths : MDEV-16571 - Wrong result +mariabackup.apply-log-only : MDEV-20135 - Timeout +mariabackup.backup_ssl : MDEV-24073 - Server crash upon shutdown +mariabackup.data_directory : MDEV-15270 - Error on exec +mariabackup.full_backup : MDEV-16571 - Wrong result +mariabackup.huge_lsn : MDEV-18569 - Table doesn't exist +mariabackup.incremental_backup : MDEV-21222 - Memory allocation failure +mariabackup.incremental_encrypted : MDEV-15667 - timeout +mariabackup.incremental_rocksdb : MDEV-20954 - Cannot access the file +mariabackup.innodb_redo_overwrite : MDEV-24023 - Wrong result +mariabackup.log_checksum_mismatch : MDEV-16571 - Wrong result +mariabackup.mdev-14447 : MDEV-15201 - Timeout +mariabackup.partial_exclude : MDEV-15270 - Error on exec +mariabackup.unencrypted_page_compressed : MDEV-18653 - Wrong error +mariabackup.xb_compressed_encrypted : MDEV-14812 - Segmentation fault +mariabackup.xb_file_key_management : MDEV-16571 - Wrong result +mariabackup.xb_page_compress : MDEV-14810 - status: 1, errno: 11 +mariabackup.xb_partition : MDEV-17584 - Crash upon shutdown +mariabackup.xb_rocksdb : MDEV-17338 - Server hung on shutdown #----------------------------------------------------------------------- @@ -575,8 +499,6 @@ multi_source.load_data : MDEV-21235 - Slave crash multi_source.mdev-8874 : MDEV-19415 - AddressSanitizer: heap-use-after-free multi_source.mdev-9544 : MDEV-19415 - AddressSanitizer: heap-use-after-free multi_source.multisource : MDEV-10417 - Fails on Mips -multi_source.reset_slave : MDEV-10690 - Wrong result -multi_source.simple : MDEV-4633 - Wrong result multi_source.status_vars : MDEV-4632 - failed while waiting for Slave_received_heartbeats #----------------------------------------------------------------------- @@ -596,7 +518,6 @@ parts.partition_debug_innodb : MDEV-10891 - Can't create UNIX socket; parts.partition_exch_qa_10 : MDEV-11765 - wrong result parts.partition_innodb_status_file : MDEV-12901 - Valgrind parts.partition_special_innodb : MDEV-16942 - Timeout -parts.reorganize : Added in 10.4.16 #----------------------------------------------------------------------- @@ -635,16 +556,10 @@ perfschema_stress.* : MDEV-10996 - Not maintained #----------------------------------------------------------------------- -period.delete : Modified in 10.4.16 -period.update : Modified in 10.4.16 -period.versioning : MDEV-20159 - Assertion failure - -#----------------------------------------------------------------------- - plugins.feedback_plugin_send : MDEV-7932, MDEV-11118 - Connection problems and such plugins.multiauth : MDEV-20163 - Plugin could not be loaded plugins.processlist : MDEV-16574 - Wrong result -plugins.server_audit : MDEV-14295 - Wrong result; modified in 10.4.16 
+plugins.server_audit : MDEV-14295 - Wrong result plugins.thread_pool_server_audit : MDEV-14295 - Wrong result #----------------------------------------------------------------------- @@ -684,124 +599,105 @@ rocksdb_sys_vars.rocksdb_rate_limiter_bytes_per_sec_basic : MDEV-16639 - Crash #----------------------------------------------------------------------- -roles.acl_load_mutex-5170 : Modified in 10.4.16 roles.create_and_grant_role : MDEV-11772 - wrong result #----------------------------------------------------------------------- -rpl.circular_serverid0 : MDEV-19372 - ASAN heap-use-after-free -rpl.create_or_replace2 : MDEV-19412 - Lost connection to MySQL server -rpl.create_or_replace_mix : MDEV-20523 - Wrong result -rpl.create_or_replace_statement : MDEV-20523 - Wrong result -rpl.create_select : MDEV-14121 - Assertion failure -rpl.last_insert_id : MDEV-10625 - warnings in error log -rpl.rpl_auto_increment : MDEV-10417 - Fails on Mips -rpl.rpl_auto_increment_bug45679 : MDEV-10417 - Fails on Mips -rpl.rpl_auto_increment_update_failure : MDEV-10625 - warnings in error log -rpl.rpl_binlog_errors : MDEV-12742 - Crash -rpl.rpl_binlog_grant : MDEV-21274 - Lost connection at handshake -rpl.rpl_binlog_index : Modified in 10.4.16 -rpl.rpl_cant_read_event_incident : MDEV-20960 - Abort on shutdown -rpl.rpl_checksum_cache : MDEV-22510 - Server crash -rpl.rpl_circular_for_4_hosts : MDEV-20536 - Server crash -rpl.rpl_colSize : MDEV-16112 - Server crash -rpl.rpl_corruption : MDEV-20527 - Slave stopped with wrong error code -rpl.rpl_create_tmp_table_if_not_exists : MDEV-20159 - Assertion failure -rpl.rpl_ctype_latin1 : MDEV-14813 - Wrong result on Mac -rpl.rpl_ddl : MDEV-10417 - Fails on Mips -rpl.rpl_domain_id_filter_io_crash : MDEV-12729 - Timeout in include file, MDEV-13677 - Server crash -rpl.rpl_domain_id_filter_master_crash : MDEV-19043 - Table marked as crashed -rpl.rpl_domain_id_filter_restart : MDEV-10684 - Wrong result; MDEV-19043 - Table marked as crashed -rpl.rpl_drop_db_fail : MDEV-16898 - Slave fails to start -rpl.rpl_extra_col_master_innodb : MDEV-16570 - Extra warning -rpl.rpl_extra_col_master_myisam : MDEV-23372 - Extra warning -rpl.rpl_filter_tables_dynamic : Modified in 10.4.16 -rpl.rpl_filter_wild_tables_dynamic : Modified in 10.4.16 -rpl.rpl_flushlog_loop : MDEV-21570 - Server crash -rpl.rpl_get_lock : MDEV-19368 - mysqltest failed but provided no output -rpl.rpl_gtid_basic : MDEV-10681 - server startup problem -rpl.rpl_gtid_crash : MDEV-13643 - Lost connection; modified in 10.4.16 -rpl.rpl_gtid_delete_domain : MDEV-14463 - Timeout; MDEV-23103 - Could not delete gtid domain; modified in 10.4.16 -rpl.rpl_gtid_errorhandling : MDEV-13261 - Crash -rpl.rpl_gtid_mdev9033 : MDEV-10680 - warnings -rpl.rpl_gtid_reconnect : MDEV-14497 - Crash -rpl.rpl_gtid_startpos : MDEV-20141 - mysqltest failed but provided no output -rpl.rpl_gtid_stop_start : MDEV-10629 - Crash on shutdown, MDEV-12629 - Valgrind warnings -rpl.rpl_gtid_until : MDEV-10625 - warnings in error log -rpl.rpl_ignore_grant : MDEV-20159 - Assertion failure -rpl.rpl_ignore_table_update : MDEV-20159 - Assertion failure -rpl.rpl_innodb_bug30888 : MDEV-10417 - Fails on Mips -rpl.rpl_insert : MDEV-9329 - Fails on Ubuntu/s390x -rpl.rpl_insert_delayed : MDEV-9329 - Fails on Ubuntu/s390x -rpl.rpl_insert_id : MDEV-15197 - Wrong result -rpl.rpl_insert_id_pk : MDEV-16567 - Assertion failure -rpl.rpl_insert_ignore : MDEV-14365 - Lost connection to MySQL server during query -rpl.rpl_invoked_features : MDEV-10417 - Fails on Mips 
-rpl.rpl_ipv4_as_ipv6 : MDEV-20147 - Incorrect checksum for freed object -rpl.rpl_mariadb_slave_capability : MDEV-11018 - Extra lines in binlog -rpl.rpl_mdev12179 : MDEV-19043 - Table marked as crashed -rpl.rpl_mdev6020 : MDEV-23426 - Server crash, ASAN failures; MDEV-15272 - Server crash -rpl.rpl_mixed_mixing_engines : MDEV-21266 - Timeout -rpl.rpl_mysql_upgrade : Modified in 10.4.16 -rpl.rpl_non_direct_row_mixing_engines : MDEV-16561 - Timeout in master_pos_wait -rpl.rpl_old_master : MDEV-22956 - Failing assertion -rpl.rpl_parallel : MDEV-10653 - Timeouts -rpl.rpl_parallel2 : MDEV-17390 - Operation cannot be performed -rpl.rpl_parallel_conflicts : MDEV-15272 - Server crash -rpl.rpl_parallel_mdev6589 : MDEV-12979 - Assertion failure -rpl.rpl_parallel_multilevel : MDEV-20160 - Server crash -rpl.rpl_parallel_multilevel2 : MDEV-14723 - Timeout -rpl.rpl_parallel_optimistic : MDEV-15278 - Failed to sync with master -rpl.rpl_parallel_optimistic_nobinlog : MDEV-15278 - Failed to sync with master -rpl.rpl_parallel_optimistic_until : MDEV-23021 - Query didn't return a result set -rpl.rpl_parallel_retry : MDEV-11119 - Crash; MDEV-17109 - Timeout; modified in 10.4.16 -rpl.rpl_parallel_temptable : MDEV-10356 - Crash; MDEV-19076 - Wrong result -rpl.rpl_partition_innodb : MDEV-10417 - Fails on Mips -rpl.rpl_password_boundaries : MDEV-11534 - Slave IO warnings -rpl.rpl_read_only : MDEV-20159 - Assertion failure -rpl.rpl_rewrt_db : MDEV-24060 - Server did not start -rpl.rpl_row_001 : MDEV-16653 - MTR's internal check fails -rpl.rpl_row_basic_11bugs : MDEV-12171 - Server failed to start -rpl.rpl_row_basic_2myisam : MDEV-13875 - command "diff_files" failed -rpl.rpl_row_corruption : MDEV-21569 - mutex: LOCK_global_system_variables unlocking -rpl.rpl_row_drop_create_temp_table : MDEV-14487 - Wrong result -rpl.rpl_row_end_of_statement_loss : MDEV-21237 - Server crash -rpl.rpl_row_img_blobs : MDEV-13875 - command "diff_files" failed -rpl.rpl_row_img_eng_min : MDEV-13875 - diff_files failed -rpl.rpl_row_img_eng_noblob : MDEV-13875 - command "diff_files" failed -rpl.rpl_row_index_choice : MDEV-15196 - Slave crash -rpl.rpl_row_sp001 : MDEV-9329 - Fails on Ubuntu/s390x -rpl.rpl_row_until : MDEV-14052 - Master will not send events with checksum -rpl.rpl_semi_sync : MDEV-11220 - Wrong result -rpl.rpl_semi_sync_after_sync : MDEV-14366 - Wrong result -rpl.rpl_semi_sync_after_sync_row : MDEV-14366 - Wrong result -rpl.rpl_semi_sync_event_after_sync : MDEV-11806 - warnings -rpl.rpl_semi_sync_skip_repl : MDEV-23371 - Server crash -rpl.rpl_semi_sync_uninstall_plugin : MDEV-7140 - Assorted failures -rpl.rpl_semi_sync_wait_no_slave : MDEV-20159 - Assertion failure -rpl.rpl_semi_sync_wait_point : MDEV-11807 - timeout in wait condition -rpl.rpl_semisync_ali_issues : MDEV-16272 - Wrong result -rpl.rpl_show_slave_hosts : MDEV-10681 - Crash -rpl.rpl_shutdown_wait_slaves : MDEV-22517 - Timeout on sync_with_master -rpl.rpl_skip_replication : MDEV-23372 - Extra warning -rpl.rpl_slave_grp_exec : MDEV-10514 - Deadlock; re-enabled in 10.4.16; modified in 10.4.16 -rpl.rpl_slave_load_in : MDEV-20159 - Assertion failure -rpl.rpl_slave_load_tmpdir_not_exist : MDEV-23372 - Extra warning -rpl.rpl_slow_query_log : MDEV-13250 - Test abort -rpl.rpl_sp_effects : MDEV-13249 - Crash -rpl.rpl_start_stop_slave : MDEV-13567 - Sync slave timeout; modified in 10.4.16 -rpl.rpl_stm_relay_ign_space : MDEV-14360 - Test assertion -rpl.rpl_stm_stop_middle_group : MDEV-13791 - Server crash -rpl.rpl_sync : MDEV-10633 - Database page corruption 
-rpl.rpl_temporary_error2 : MDEV-10634 - Wrong number of retries -rpl.rpl_test_framework : MDEV-19368 - mysqltest failed but provided no output -rpl.rpl_trigger : MDEV-18055 - Wrong result -rpl.rpl_truncate_3innodb : MDEV-19454 - Syntax error -rpl.rpl_upgrade_master_info : MDEV-16567 - Assertion failure -rpl.rpl_user_variables : MDEV-20522 - Wrong result -rpl.rpl_variables : MDEV-20150 - Server crash -rpl.sec_behind_master-5114 : MDEV-13878 - Wrong result -rpl.show_status_stop_slave_race-7126 : Modified in 10.4.16 +rpl.circular_serverid0 : MDEV-19372 - ASAN heap-use-after-free +rpl.create_or_replace2 : MDEV-19412 - Lost connection to MySQL server +rpl.create_or_replace_mix : MDEV-20523 - Wrong result +rpl.create_or_replace_statement : MDEV-20523 - Wrong result +rpl.create_select : MDEV-14121 - Assertion failure +rpl.last_insert_id : MDEV-10625 - warnings in error log +rpl.rpl_auto_increment : MDEV-10417 - Fails on Mips +rpl.rpl_auto_increment_bug45679 : MDEV-10417 - Fails on Mips +rpl.rpl_auto_increment_update_failure : MDEV-10625 - warnings in error log +rpl.rpl_binlog_errors : MDEV-12742 - Crash +rpl.rpl_binlog_grant : MDEV-21274 - Lost connection at handshake +rpl.rpl_cant_read_event_incident : MDEV-20960 - Abort on shutdown +rpl.rpl_checksum_cache : MDEV-22510 - Server crash +rpl.rpl_circular_for_4_hosts : MDEV-20536 - Server crash +rpl.rpl_colSize : MDEV-16112 - Server crash +rpl.rpl_corruption : MDEV-20527 - Slave stopped with wrong error code +rpl.rpl_ctype_latin1 : MDEV-14813 - Wrong result on Mac +rpl.rpl_ddl : MDEV-10417 - Fails on Mips +rpl.rpl_domain_id_filter_io_crash : MDEV-12729 - Timeout in include file, MDEV-13677 - Server crash +rpl.rpl_domain_id_filter_master_crash : MDEV-19043 - Table marked as crashed +rpl.rpl_domain_id_filter_restart : MDEV-10684 - Wrong result; MDEV-19043 - Table marked as crashed +rpl.rpl_drop_db_fail : MDEV-16898 - Slave fails to start +rpl.rpl_extra_col_master_innodb : MDEV-16570 - Extra warning +rpl.rpl_extra_col_master_myisam : MDEV-23372 - Extra warning +rpl.rpl_flushlog_loop : MDEV-21570 - Server crash +rpl.rpl_get_lock : MDEV-19368 - mysqltest failed but provided no output +rpl.rpl_gtid_basic : MDEV-10681 - server startup problem +rpl.rpl_gtid_crash : MDEV-13643 - Lost connection +rpl.rpl_gtid_errorhandling : MDEV-13261 - Crash +rpl.rpl_gtid_mdev9033 : MDEV-10680 - warnings +rpl.rpl_gtid_reconnect : MDEV-14497 - Crash +rpl.rpl_gtid_startpos : MDEV-20141 - mysqltest failed but provided no output +rpl.rpl_gtid_stop_start : MDEV-10629 - Crash on shutdown, MDEV-12629 - Valgrind warnings +rpl.rpl_gtid_until : MDEV-10625 - warnings in error log +rpl.rpl_innodb_bug30888 : MDEV-10417 - Fails on Mips +rpl.rpl_insert : MDEV-9329 - Fails on Ubuntu/s390x +rpl.rpl_insert_delayed : MDEV-9329 - Fails on Ubuntu/s390x +rpl.rpl_insert_id : MDEV-15197 - Wrong result +rpl.rpl_insert_id_pk : MDEV-16567 - Assertion failure +rpl.rpl_insert_ignore : MDEV-14365 - Lost connection to MySQL server during query +rpl.rpl_invoked_features : MDEV-10417 - Fails on Mips +rpl.rpl_ipv4_as_ipv6 : MDEV-20147 - Incorrect checksum for freed object +rpl.rpl_mariadb_slave_capability : MDEV-11018 - Extra lines in binlog +rpl.rpl_mdev12179 : MDEV-19043 - Table marked as crashed +rpl.rpl_mdev6020 : MDEV-23426 - Server crash, ASAN failures; MDEV-15272 - Server crash +rpl.rpl_mixed_mixing_engines : MDEV-21266 - Timeout +rpl.rpl_non_direct_row_mixing_engines : MDEV-16561 - Timeout in master_pos_wait +rpl.rpl_old_master : MDEV-22956 - Failing assertion +rpl.rpl_parallel : MDEV-10653 - 
Timeouts; MDEV-24110 - Slave crash +rpl.rpl_parallel_conflicts : MDEV-15272 - Server crash +rpl.rpl_parallel_multilevel : MDEV-20160 - Server crash +rpl.rpl_parallel_multilevel2 : MDEV-14723 - Timeout +rpl.rpl_parallel_optimistic : MDEV-15278 - Failed to sync with master +rpl.rpl_parallel_optimistic_nobinlog : MDEV-15278 - Failed to sync with master +rpl.rpl_parallel_optimistic_until : MDEV-23021 - Query didn't return a result set +rpl.rpl_parallel_retry : MDEV-11119 - Crash; MDEV-17109 - Timeout +rpl.rpl_parallel_stop_on_con_kill : MDEV-24110 - Slave crash +rpl.rpl_parallel_temptable : MDEV-10356 - Crash; MDEV-19076 - Wrong result +rpl.rpl_partition_innodb : MDEV-10417 - Fails on Mips +rpl.rpl_password_boundaries : MDEV-11534 - Slave IO warnings +rpl.rpl_rewrt_db : MDEV-24060 - Server did not start +rpl.rpl_row_001 : MDEV-16653 - MTR's internal check fails +rpl.rpl_row_basic_11bugs : MDEV-12171 - Server failed to start +rpl.rpl_row_basic_2myisam : MDEV-13875 - command "diff_files" failed +rpl.rpl_row_corruption : MDEV-21569 - mutex: LOCK_global_system_variables unlocking +rpl.rpl_row_drop_create_temp_table : MDEV-14487 - Wrong result +rpl.rpl_row_end_of_statement_loss : MDEV-21237 - Server crash +rpl.rpl_row_img_blobs : MDEV-13875 - command "diff_files" failed +rpl.rpl_row_img_eng_min : MDEV-13875 - diff_files failed +rpl.rpl_row_img_eng_noblob : MDEV-13875 - command "diff_files" failed +rpl.rpl_row_index_choice : MDEV-15196 - Slave crash +rpl.rpl_row_sp001 : MDEV-9329 - Fails on Ubuntu/s390x +rpl.rpl_row_until : MDEV-14052 - Master will not send events with checksum +rpl.rpl_semi_sync_event_after_sync : MDEV-11806 - warnings +rpl.rpl_semi_sync_skip_repl : MDEV-23371 - Server crash +rpl.rpl_semi_sync_uninstall_plugin : MDEV-24561 - Wrong usage of mutex; MDEV-7140 - Assorted failures +rpl.rpl_semi_sync_wait_point : MDEV-11807 - timeout in wait condition +rpl.rpl_show_slave_hosts : MDEV-10681 - Crash +rpl.rpl_shutdown_wait_slaves : MDEV-22517 - Timeout on sync_with_master +rpl.rpl_skip_replication : MDEV-23372 - Extra warning +rpl.rpl_slave_load_tmpdir_not_exist : MDEV-23372 - Extra warning +rpl.rpl_slow_query_log : MDEV-13250 - Test abort +rpl.rpl_sp_effects : MDEV-13249 - Crash +rpl.rpl_start_stop_slave : MDEV-13567 - Sync slave timeout +rpl.rpl_stm_relay_ign_space : MDEV-14360 - Test assertion +rpl.rpl_stm_stop_middle_group : MDEV-13791 - Server crash +rpl.rpl_sync : MDEV-10633 - Database page corruption +rpl.rpl_temporary_error2 : MDEV-10634 - Wrong number of retries +rpl.rpl_test_framework : MDEV-19368 - mysqltest failed but provided no output +rpl.rpl_trigger : MDEV-18055 - Wrong result +rpl.rpl_truncate_3innodb : MDEV-19454 - Syntax error +rpl.rpl_upgrade_master_info : MDEV-16567 - Assertion failure +rpl.rpl_user_variables : MDEV-20522 - Wrong result +rpl.rpl_variables : MDEV-20150 - Server crash +rpl.sec_behind_master-5114 : MDEV-13878 - Wrong result #----------------------------------------------------------------------- @@ -826,19 +722,13 @@ spider.basic_sql : MDEV-11186 - Internal check fails #----------------------------------------------------------------------- -spider/bg.* : MDEV-24059 - Timeout -spider/bg.ha : MDEV-9329 - failures on s390x -spider/bg.ha_part : MDEV-9329 - Fails on Ubuntu/s390x -spider/bg.spider3_fixes : MDEV-12639 - Syntax error -spider/bg.spider_fixes : MDEV-9329 - failures on s390x -spider/bg.vp_fixes : MDEV-9329 - Fails on Ubuntu/s390x - -#----------------------------------------------------------------------- - -spider/bugfix.direct_sql_with_comma_pwd : 
Added in 10.4.16 -spider/bugfix.mdev_20100 : Added in 10.4.16 -spider/bugfix.mdev_22246 : Added in 10.4.16 -spider/bugfix.xa_cmd : Added in 10.4.16 +spider/bg.* : MDEV-24059 - Timeout +spider/bg.ha : MDEV-9329 - failures on s390x +spider/bg.ha_part : MDEV-9329 - Fails on Ubuntu/s390x +spider/bg.spider3_fixes : MDEV-12639 - Syntax error +spider/bg.spider3_fixes_part : MDEV-24809 - Timeout +spider/bg.spider_fixes : MDEV-9329 - failures on s390x +spider/bg.vp_fixes : MDEV-9329 - Fails on Ubuntu/s390x #----------------------------------------------------------------------- @@ -848,7 +738,6 @@ spider/handler.* : MDEV-10987, MDEV-10990 - Tests have not been maintained sql_sequence.concurrent_create : MDEV-16635 - Server crash sql_sequence.kill : MDEV-23393 - Server crash -sql_sequence.next : Modified in 10.4.16 sql_sequence.read_only : MDEV-22956 - Failing assertion #----------------------------------------------------------------------- @@ -865,16 +754,12 @@ sys_vars.autocommit_func2 : MDEV-9329 - Fails on Ubuntu sys_vars.host_cache_size_auto : MDEV-20112 - Wrong result sys_vars.innodb_buffer_pool_dump_at_shutdown_basic : MDEV-14280 - Unexpected error sys_vars.innodb_checksum_algorithm_basic : MDEV-21568 - Errno: 2000 +sys_vars.innodb_flush_method_func : MDEV-24810 - Server failed to restart sys_vars.keep_files_on_create_basic : MDEV-10676 - timeout sys_vars.log_slow_admin_statements_func : MDEV-12235 - Server crash -sys_vars.replicate_do_db_basic : Modified in 10.4.16 -sys_vars.rpl_init_slave_func : Modified in 10.4.16 -sys_vars.session_track_system_variables_basic : Modified in 10.4.16 sys_vars.slow_query_log_func : MDEV-14273 - Wrong result sys_vars.thread_cache_size_func : MDEV-11775 - Wrong result sys_vars.wait_timeout_func : MDEV-12896 - Wrong result -sys_vars.wsrep_cluster_address_basic : Modified in 10.4.16 -sys_vars.wsrep_on_basic : Configuration deleted in 10.4.16 #----------------------------------------------------------------------- @@ -906,7 +791,7 @@ tokudb_alter_table.hcad_all_add2 : MDEV-15269 - Timeout #----------------------------------------------------------------------- -tokudb_backup.* : MDEV-11001 - Missing include file +tokudb_backup.* : MDEV-11001 - Missing include file (Won't fix) #----------------------------------------------------------------------- @@ -925,11 +810,11 @@ tokudb_parts.partition_alter4_tokudb : MDEV-12640 - Lost connection #----------------------------------------------------------------------- -tokudb_rpl.* : MDEV-11001 - Missing include file +tokudb_rpl.* : MDEV-11001 - Missing include file (Won't fix) #----------------------------------------------------------------------- -tokudb_sys_vars.* : MDEV-11001 - Missing include file +tokudb_sys_vars.* : MDEV-11001 - Missing include file (Won't fix) #----------------------------------------------------------------------- @@ -947,28 +832,17 @@ unit.mf_iocache : MDEV-20952 - ASAN stack-buffer-overflow vcol.not_supported : MDEV-10639 - Testcase timeout vcol.vcol_keys_innodb : MDEV-10639 - Testcase timeout -vcol.vcol_misc : MDEV-16651 - Wrong error message; modified in 10.4.16 +vcol.vcol_misc : MDEV-16651 - Wrong error message #----------------------------------------------------------------------- -versioning.create : Modified in 10.4.16 -versioning.select : Modified in 10.4.16 -versioning.sysvars : Modified in 10.4.16 -versioning.update : MDEV-22475 - Wrong result code -versioning.view : Modified in 10.4.16 +versioning.update : MDEV-22475 - Wrong result code 
#----------------------------------------------------------------------- -wsrep.MDEV-22443 : Added in 10.4.16 -wsrep.MDEV-23081 : Added in 10.4.16 -wsrep.MDEV-23092 : Added in 10.4.16 -wsrep.MDEV-23466 : Added in 10.4.16 wsrep.foreign_key : MDEV-14725 - WSREP has not yet prepared node -wsrep.mdev_22681 : Added in 10.4.16 wsrep.mdev_6832 : MDEV-14195 - Check testcase failed wsrep.pool_of_threads : MDEV-17345 - WSREP has not yet prepared node for application use -wsrep.variables : Modified in 10.4.16 -wsrep.variables_debug : Added in 10.4.16 #----------------------------------------------------------------------- From 259a1902a066d01547e5d70ba0e4837d1be62e7b Mon Sep 17 00:00:00 2001 From: Sergei Golubchik Date: Fri, 5 Feb 2021 15:00:38 +0100 Subject: [PATCH 145/150] cleanup: THD::abort_current_cond_wait() * reuse the loop in THD::abort_current_cond_wait, don't duplicate it * find_thread_by_id should return whatever it has found, it's the caller's task not to kill COM_DAEMON (if the caller's a killer) and other minor changes --- sql/mysqld.cc | 22 +--------------------- sql/sql_class.cc | 31 +++++++++++++------------------ sql/sql_class.h | 4 ++-- sql/sql_insert.cc | 18 +----------------- sql/sql_parse.cc | 26 ++++++++++++++------------ sql/sql_show.cc | 7 ++----- sql/wsrep_mysqld.cc | 13 +------------ 7 files changed, 34 insertions(+), 87 deletions(-) diff --git a/sql/mysqld.cc b/sql/mysqld.cc index d4b49047046..f5a53597325 100644 --- a/sql/mysqld.cc +++ b/sql/mysqld.cc @@ -1523,27 +1523,7 @@ static void kill_thread(THD *thd) { if (WSREP(thd)) mysql_mutex_lock(&thd->LOCK_thd_data); mysql_mutex_lock(&thd->LOCK_thd_kill); - if (thd->mysys_var) - { - thd->mysys_var->abort= 1; - mysql_mutex_lock(&thd->mysys_var->mutex); - if (thd->mysys_var->current_cond) - { - for (uint i= 0; i < 2; i++) - { - int ret= mysql_mutex_trylock(thd->mysys_var->current_mutex); - mysql_cond_broadcast(thd->mysys_var->current_cond); - if (!ret) - { - /* Thread has surely got the signal, unlock and abort */ - mysql_mutex_unlock(thd->mysys_var->current_mutex); - break; - } - sleep(1); - } - } - mysql_mutex_unlock(&thd->mysys_var->mutex); - } + thd->abort_current_cond_wait(true); mysql_mutex_unlock(&thd->LOCK_thd_kill); if (WSREP(thd)) mysql_mutex_unlock(&thd->LOCK_thd_data); } diff --git a/sql/sql_class.cc b/sql/sql_class.cc index 5c1ad49c9de..d815dd56647 100644 --- a/sql/sql_class.cc +++ b/sql/sql_class.cc @@ -49,9 +49,6 @@ #include #include #include -#ifdef __WIN__0 -#include -#endif #include #include @@ -70,6 +67,8 @@ #ifdef WITH_WSREP #include "wsrep_thd.h" #include "wsrep_trans_observer.h" +#else +static inline bool wsrep_is_bf_aborted(THD* thd) { return false; } #endif /* WITH_WSREP */ #include "opt_trace.h" @@ -1902,15 +1901,21 @@ void THD::awake_no_mutex(killed_state state_to_set) } /* Interrupt target waiting inside a storage engine. */ - if (IF_WSREP(state_to_set != NOT_KILLED && !wsrep_is_bf_aborted(this), - state_to_set != NOT_KILLED)) + if (state_to_set != NOT_KILLED && !wsrep_is_bf_aborted(this)) ha_kill_query(this, thd_kill_level(this)); - /* Broadcast a condition to kick the target if it is waiting on it. */ + abort_current_cond_wait(false); + DBUG_VOID_RETURN; +} + +/* Broadcast a condition to kick the target if it is waiting on it. 
*/ +void THD::abort_current_cond_wait(bool force) +{ + mysql_mutex_assert_owner(&LOCK_thd_kill); if (mysys_var) { mysql_mutex_lock(&mysys_var->mutex); - if (!system_thread) // Don't abort locks + if (!system_thread || force) // Don't abort locks mysys_var->abort=1; /* @@ -1968,7 +1973,6 @@ void THD::awake_no_mutex(killed_state state_to_set) } mysql_mutex_unlock(&mysys_var->mutex); } - DBUG_VOID_RETURN; } @@ -2022,16 +2026,7 @@ bool THD::notify_shared_lock(MDL_context_owner *ctx_in_use, mysql_mutex_lock(&in_use->LOCK_thd_kill); if (in_use->killed < KILL_CONNECTION) in_use->set_killed_no_mutex(KILL_CONNECTION); - if (in_use->mysys_var) - { - mysql_mutex_lock(&in_use->mysys_var->mutex); - if (in_use->mysys_var->current_cond) - mysql_cond_broadcast(in_use->mysys_var->current_cond); - - /* Abort if about to wait in thr_upgrade_write_delay_lock */ - in_use->mysys_var->abort= 1; - mysql_mutex_unlock(&in_use->mysys_var->mutex); - } + in_use->abort_current_cond_wait(true); mysql_mutex_unlock(&in_use->LOCK_thd_kill); signalled= TRUE; } diff --git a/sql/sql_class.h b/sql/sql_class.h index 77bde414b31..be61a4047ec 100644 --- a/sql/sql_class.h +++ b/sql/sql_class.h @@ -3322,6 +3322,7 @@ public: if (wsrep_on_local) mysql_mutex_unlock(&LOCK_thd_data); } + void abort_current_cond_wait(bool force); /** Disconnect the associated communication endpoint. */ void disconnect(); @@ -4061,8 +4062,7 @@ public: mysql_mutex_lock(&LOCK_thd_kill); int err= killed_errno(); if (err) - my_message(err, killed_err ? killed_err->msg : ER_THD(this, err), - MYF(0)); + my_message(err, killed_err ? killed_err->msg : ER_THD(this, err), MYF(0)); mysql_mutex_unlock(&LOCK_thd_kill); } /* return TRUE if we will abort query if we make a warning now */ diff --git a/sql/sql_insert.cc b/sql/sql_insert.cc index c728203f07a..a3211629180 100644 --- a/sql/sql_insert.cc +++ b/sql/sql_insert.cc @@ -2869,23 +2869,7 @@ void kill_delayed_threads(void) mysql_mutex_lock(&di->thd.LOCK_thd_kill); if (di->thd.killed < KILL_CONNECTION) di->thd.set_killed_no_mutex(KILL_CONNECTION); - if (di->thd.mysys_var) - { - mysql_mutex_lock(&di->thd.mysys_var->mutex); - if (di->thd.mysys_var->current_cond) - { - /* - We need the following test because the main mutex may be locked - in handle_delayed_insert() - */ - if (&di->mutex != di->thd.mysys_var->current_mutex) - mysql_mutex_lock(di->thd.mysys_var->current_mutex); - mysql_cond_broadcast(di->thd.mysys_var->current_cond); - if (&di->mutex != di->thd.mysys_var->current_mutex) - mysql_mutex_unlock(di->thd.mysys_var->current_mutex); - } - mysql_mutex_unlock(&di->thd.mysys_var->mutex); - } + di->thd.abort_current_cond_wait(false); mysql_mutex_unlock(&di->thd.LOCK_thd_kill); } mysql_mutex_unlock(&LOCK_delayed_insert); // For unlink from list diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc index de96f0c8924..7b90bac2a42 100644 --- a/sql/sql_parse.cc +++ b/sql/sql_parse.cc @@ -9079,10 +9079,9 @@ struct find_thread_callback_arg }; -my_bool find_thread_callback(THD *thd, find_thread_callback_arg *arg) +static my_bool find_thread_callback(THD *thd, find_thread_callback_arg *arg) { - if (thd->get_command() != COM_DAEMON && - arg->id == (arg->query_id ? thd->query_id : (longlong) thd->thread_id)) + if (arg->id == (arg->query_id ? 
thd->query_id : (longlong) thd->thread_id)) { mysql_mutex_lock(&thd->LOCK_thd_kill); // Lock from delete arg->thd= thd; @@ -9100,10 +9099,9 @@ THD *find_thread_by_id(longlong id, bool query_id) } #ifdef WITH_WSREP -my_bool find_thread_with_thd_data_lock_callback(THD *thd, find_thread_callback_arg *arg) +static my_bool find_thread_with_thd_data_lock_callback(THD *thd, find_thread_callback_arg *arg) { - if (thd->get_command() != COM_DAEMON && - arg->id == (arg->query_id ? thd->query_id : (longlong) thd->thread_id)) + if (arg->id == (arg->query_id ? thd->query_id : (longlong) thd->thread_id)) { if (WSREP(thd)) mysql_mutex_lock(&thd->LOCK_thd_data); mysql_mutex_lock(&thd->LOCK_thd_kill); // Lock from delete @@ -9137,10 +9135,14 @@ kill_one_thread(THD *thd, longlong id, killed_state kill_signal, killed_type typ DBUG_ENTER("kill_one_thread"); DBUG_PRINT("enter", ("id: %lld signal: %u", id, (uint) kill_signal)); #ifdef WITH_WSREP - if (id && (tmp= find_thread_by_id_with_thd_data_lock(id, type == KILL_TYPE_QUERY))) + tmp= find_thread_by_id_with_thd_data_lock(id, type == KILL_TYPE_QUERY); #else - if (id && (tmp= find_thread_by_id(id, type == KILL_TYPE_QUERY))) + tmp= find_thread_by_id(id, type == KILL_TYPE_QUERY); #endif + if (!tmp) + DBUG_RETURN(error); + + if (tmp->get_command() != COM_DAEMON) { /* If we're SUPER, we can KILL anything, including system-threads. @@ -9194,11 +9196,11 @@ kill_one_thread(THD *thd, longlong id, killed_state kill_signal, killed_type typ else error= (type == KILL_TYPE_QUERY ? ER_KILL_QUERY_DENIED_ERROR : ER_KILL_DENIED_ERROR); -#ifdef WITH_WSREP - if (WSREP(tmp)) mysql_mutex_unlock(&tmp->LOCK_thd_data); -#endif - mysql_mutex_unlock(&tmp->LOCK_thd_kill); } +#ifdef WITH_WSREP + if (WSREP(tmp)) mysql_mutex_unlock(&tmp->LOCK_thd_data); +#endif + mysql_mutex_unlock(&tmp->LOCK_thd_kill); DBUG_PRINT("exit", ("%d", error)); DBUG_RETURN(error); } diff --git a/sql/sql_show.cc b/sql/sql_show.cc index ad2a489c39d..eac4af52ef4 100644 --- a/sql/sql_show.cc +++ b/sql/sql_show.cc @@ -3256,11 +3256,8 @@ int fill_show_explain(THD *thd, TABLE_LIST *table, COND *cond) } DBUG_RETURN(bres); } - else - { - my_error(ER_NO_SUCH_THREAD, MYF(0), (ulong) thread_id); - DBUG_RETURN(1); - } + my_error(ER_NO_SUCH_THREAD, MYF(0), (ulong) thread_id); + DBUG_RETURN(1); } diff --git a/sql/wsrep_mysqld.cc b/sql/wsrep_mysqld.cc index c0f48cca9cd..8e68f7229eb 100644 --- a/sql/wsrep_mysqld.cc +++ b/sql/wsrep_mysqld.cc @@ -2384,18 +2384,7 @@ static void wsrep_close_thread(THD *thd) thd->set_killed(KILL_CONNECTION); MYSQL_CALLBACK(thread_scheduler, post_kill_notification, (thd)); mysql_mutex_lock(&thd->LOCK_thd_kill); - if (thd->mysys_var) - { - thd->mysys_var->abort=1; - mysql_mutex_lock(&thd->mysys_var->mutex); - if (thd->mysys_var->current_cond) - { - mysql_mutex_lock(thd->mysys_var->current_mutex); - mysql_cond_broadcast(thd->mysys_var->current_cond); - mysql_mutex_unlock(thd->mysys_var->current_mutex); - } - mysql_mutex_unlock(&thd->mysys_var->mutex); - } + thd->abort_current_cond_wait(true); mysql_mutex_unlock(&thd->LOCK_thd_kill); } From 9703cffa8cb57e2fe29719f4aae3282bfae82878 Mon Sep 17 00:00:00 2001 From: Sergei Golubchik Date: Fri, 5 Feb 2021 14:59:27 +0100 Subject: [PATCH 146/150] don't take mutexes conditionally --- sql/log_event.cc | 10 ++++------ sql/mysqld.cc | 2 -- sql/slave.cc | 10 ++-------- sql/sql_class.cc | 2 +- sql/sql_class.h | 11 ++--------- sql/sql_parse.cc | 16 ++++------------ sql/sql_repl.cc | 4 ++-- sql/sql_show.h | 1 + 8 files changed, 16 insertions(+), 40 deletions(-) diff --git 
a/sql/log_event.cc b/sql/log_event.cc index 52995cc7e66..89ffebf2659 100644 --- a/sql/log_event.cc +++ b/sql/log_event.cc @@ -9049,16 +9049,14 @@ int Xid_log_event::do_apply_event(rpl_group_info *rgi) res= trans_commit(thd); /* Automatically rolls back on error. */ thd->release_transactional_locks(); + mysql_mutex_lock(&thd->LOCK_thd_data); #ifdef WITH_WSREP - if (WSREP(thd)) mysql_mutex_lock(&thd->LOCK_thd_data); - if ((!res || (WSREP(thd) && thd->wsrep_trx().state() == wsrep::transaction::s_must_replay )) && sub_id) + if (sub_id && (!res || (WSREP(thd) && thd->wsrep_trx().state() == wsrep::transaction::s_must_replay))) #else - if (likely(!res) && sub_id) + if (sub_id && !res) #endif /* WITH_WSREP */ rpl_global_gtid_slave_state->update_state_hash(sub_id, >id, hton, rgi); -#ifdef WITH_WSREP - if (WSREP(thd)) mysql_mutex_unlock(&thd->LOCK_thd_data); -#endif /* WITH_WSREP */ + mysql_mutex_unlock(&thd->LOCK_thd_data); /* Increment the global status commit count variable */ diff --git a/sql/mysqld.cc b/sql/mysqld.cc index f5a53597325..6aecb0cf11e 100644 --- a/sql/mysqld.cc +++ b/sql/mysqld.cc @@ -1521,11 +1521,9 @@ static void end_ssl(); /* common callee of two shutdown phases */ static void kill_thread(THD *thd) { - if (WSREP(thd)) mysql_mutex_lock(&thd->LOCK_thd_data); mysql_mutex_lock(&thd->LOCK_thd_kill); thd->abort_current_cond_wait(true); mysql_mutex_unlock(&thd->LOCK_thd_kill); - if (WSREP(thd)) mysql_mutex_unlock(&thd->LOCK_thd_data); } diff --git a/sql/slave.cc b/sql/slave.cc index 40c09604745..372e46acd1d 100644 --- a/sql/slave.cc +++ b/sql/slave.cc @@ -1069,11 +1069,7 @@ terminate_slave_thread(THD *thd, int error __attribute__((unused)); DBUG_PRINT("loop", ("killing slave thread")); -#ifdef WITH_WSREP - /* awake_no_mutex() requires LOCK_thd_data to be locked if wsrep - is enabled */ - if (WSREP(thd)) mysql_mutex_lock(&thd->LOCK_thd_data); -#endif /* WITH_WSREP */ + mysql_mutex_lock(&thd->LOCK_thd_data); mysql_mutex_lock(&thd->LOCK_thd_kill); #ifndef DONT_USE_THR_ALARM /* @@ -1087,9 +1083,7 @@ terminate_slave_thread(THD *thd, thd->awake_no_mutex(NOT_KILLED); mysql_mutex_unlock(&thd->LOCK_thd_kill); -#ifdef WITH_WSREP - if (WSREP(thd)) mysql_mutex_unlock(&thd->LOCK_thd_data); -#endif /* WITH_WSREP */ + mysql_mutex_unlock(&thd->LOCK_thd_data); /* There is a small chance that slave thread might miss the first diff --git a/sql/sql_class.cc b/sql/sql_class.cc index d815dd56647..dc3903661e1 100644 --- a/sql/sql_class.cc +++ b/sql/sql_class.cc @@ -1868,7 +1868,7 @@ void THD::awake_no_mutex(killed_state state_to_set) DBUG_PRINT("enter", ("this: %p current_thd: %p state: %d", this, current_thd, (int) state_to_set)); THD_CHECK_SENTRY(this); - if (WSREP_NNULL(this)) mysql_mutex_assert_owner(&LOCK_thd_data); + mysql_mutex_assert_owner(&LOCK_thd_data); mysql_mutex_assert_owner(&LOCK_thd_kill); print_aborted_warning(3, "KILLED"); diff --git a/sql/sql_class.h b/sql/sql_class.h index be61a4047ec..39d6ec1027f 100644 --- a/sql/sql_class.h +++ b/sql/sql_class.h @@ -3309,18 +3309,11 @@ public: void awake_no_mutex(killed_state state_to_set); void awake(killed_state state_to_set) { - bool wsrep_on_local= variables.wsrep_on; - /* - mutex locking order (LOCK_thd_data - LOCK_thd_kill)) requires - to grab LOCK_thd_data here - */ - if (wsrep_on_local) - mysql_mutex_lock(&LOCK_thd_data); + mysql_mutex_lock(&LOCK_thd_data); mysql_mutex_lock(&LOCK_thd_kill); awake_no_mutex(state_to_set); mysql_mutex_unlock(&LOCK_thd_kill); - if (wsrep_on_local) - mysql_mutex_unlock(&LOCK_thd_data); + 
mysql_mutex_unlock(&LOCK_thd_data); } void abort_current_cond_wait(bool force); diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc index 7b90bac2a42..dd0e5cfa34e 100644 --- a/sql/sql_parse.cc +++ b/sql/sql_parse.cc @@ -9098,12 +9098,11 @@ THD *find_thread_by_id(longlong id, bool query_id) return arg.thd; } -#ifdef WITH_WSREP static my_bool find_thread_with_thd_data_lock_callback(THD *thd, find_thread_callback_arg *arg) { if (arg->id == (arg->query_id ? thd->query_id : (longlong) thd->thread_id)) { - if (WSREP(thd)) mysql_mutex_lock(&thd->LOCK_thd_data); + mysql_mutex_lock(&thd->LOCK_thd_data); mysql_mutex_lock(&thd->LOCK_thd_kill); // Lock from delete arg->thd= thd; return 1; @@ -9116,7 +9115,6 @@ THD *find_thread_by_id_with_thd_data_lock(longlong id, bool query_id) server_threads.iterate(find_thread_with_thd_data_lock_callback, &arg); return arg.thd; } -#endif /** kill one thread. @@ -9134,11 +9132,7 @@ kill_one_thread(THD *thd, longlong id, killed_state kill_signal, killed_type typ uint error= (type == KILL_TYPE_QUERY ? ER_NO_SUCH_QUERY : ER_NO_SUCH_THREAD); DBUG_ENTER("kill_one_thread"); DBUG_PRINT("enter", ("id: %lld signal: %u", id, (uint) kill_signal)); -#ifdef WITH_WSREP tmp= find_thread_by_id_with_thd_data_lock(id, type == KILL_TYPE_QUERY); -#else - tmp= find_thread_by_id(id, type == KILL_TYPE_QUERY); -#endif if (!tmp) DBUG_RETURN(error); @@ -9197,10 +9191,8 @@ kill_one_thread(THD *thd, longlong id, killed_state kill_signal, killed_type typ error= (type == KILL_TYPE_QUERY ? ER_KILL_QUERY_DENIED_ERROR : ER_KILL_DENIED_ERROR); } -#ifdef WITH_WSREP - if (WSREP(tmp)) mysql_mutex_unlock(&tmp->LOCK_thd_data); -#endif mysql_mutex_unlock(&tmp->LOCK_thd_kill); + mysql_mutex_unlock(&tmp->LOCK_thd_data); DBUG_PRINT("exit", ("%d", error)); DBUG_RETURN(error); } @@ -9246,7 +9238,7 @@ static my_bool kill_threads_callback(THD *thd, kill_threads_callback_arg *arg) return 1; if (!arg->threads_to_kill.push_back(thd, arg->thd->mem_root)) { - if (WSREP(thd)) mysql_mutex_lock(&thd->LOCK_thd_data); + mysql_mutex_lock(&thd->LOCK_thd_data); mysql_mutex_lock(&thd->LOCK_thd_kill); // Lock from delete } } @@ -9290,7 +9282,7 @@ static uint kill_threads_for_user(THD *thd, LEX_USER *user, */ next_ptr= it2++; mysql_mutex_unlock(&ptr->LOCK_thd_kill); - if (WSREP(ptr)) mysql_mutex_unlock(&ptr->LOCK_thd_data); + mysql_mutex_unlock(&ptr->LOCK_thd_data); (*rows)++; } while ((ptr= next_ptr)); } diff --git a/sql/sql_repl.cc b/sql/sql_repl.cc index 622eff2faae..a0d8b4ca6d1 100644 --- a/sql/sql_repl.cc +++ b/sql/sql_repl.cc @@ -3474,7 +3474,7 @@ static my_bool kill_callback(THD *thd, kill_callback_arg *arg) thd->variables.server_id == arg->slave_server_id) { arg->thd= thd; - if (WSREP(thd)) mysql_mutex_lock(&thd->LOCK_thd_data); + mysql_mutex_lock(&thd->LOCK_thd_data); mysql_mutex_lock(&thd->LOCK_thd_kill); // Lock from delete return 1; } @@ -3496,7 +3496,7 @@ void kill_zombie_dump_threads(uint32 slave_server_id) */ arg.thd->awake_no_mutex(KILL_SLAVE_SAME_ID); mysql_mutex_unlock(&arg.thd->LOCK_thd_kill); - if (WSREP(arg.thd)) mysql_mutex_unlock(&arg.thd->LOCK_thd_data); + mysql_mutex_unlock(&arg.thd->LOCK_thd_data); } } diff --git a/sql/sql_show.h b/sql/sql_show.h index c1845d8c1b3..8e807f0c1a5 100644 --- a/sql/sql_show.h +++ b/sql/sql_show.h @@ -144,6 +144,7 @@ const char* get_one_variable(THD *thd, const SHOW_VAR *variable, /* These functions were under INNODB_COMPATIBILITY_HOOKS */ int get_quote_char_for_identifier(THD *thd, const char *name, size_t length); THD *find_thread_by_id(longlong id, bool query_id= false); 
+THD *find_thread_by_id_with_thd_data_lock(longlong id, bool query_id= false); class select_result_explain_buffer; /* From eac8341df4c3c7b98360f4e9498acf393dc055e3 Mon Sep 17 00:00:00 2001 From: Sergei Golubchik Date: Sun, 7 Feb 2021 17:48:58 +0100 Subject: [PATCH 147/150] MDEV-23328 Server hang due to Galera lock conflict resolution adaptation of 29bbcac0ee8 for 10.4 --- .../suite/galera/t/galera_bf_kill_debug.test | 2 +- sql/log_event.cc | 22 +- sql/service_wsrep.cc | 16 +- sql/slave.cc | 2 +- sql/sql_class.cc | 11 +- sql/sql_class.h | 4 +- sql/sql_parse.cc | 4 +- sql/sql_repl.cc | 2 +- sql/wsrep_client_service.cc | 7 - sql/wsrep_server_service.cc | 9 +- storage/innobase/handler/ha_innodb.cc | 208 +++++++++++++----- 11 files changed, 185 insertions(+), 102 deletions(-) diff --git a/mysql-test/suite/galera/t/galera_bf_kill_debug.test b/mysql-test/suite/galera/t/galera_bf_kill_debug.test index b687a5a6a67..c322f283757 100644 --- a/mysql-test/suite/galera/t/galera_bf_kill_debug.test +++ b/mysql-test/suite/galera/t/galera_bf_kill_debug.test @@ -84,7 +84,7 @@ SET DEBUG_SYNC = "now SIGNAL continue_kill"; --reap --connection node_2a ---error 0,1213 +--error 0,1213,2013 select * from t1; --connection node_2 diff --git a/sql/log_event.cc b/sql/log_event.cc index 89ffebf2659..337de3508ed 100644 --- a/sql/log_event.cc +++ b/sql/log_event.cc @@ -8983,8 +8983,20 @@ err: } #endif /* MYSQL_CLIENT */ - #if defined(HAVE_REPLICATION) && !defined(MYSQL_CLIENT) +static bool wsrep_must_replay(THD *thd) +{ +#ifdef WITH_WSREP + mysql_mutex_lock(&thd->LOCK_thd_data); + bool res= WSREP(thd) && thd->wsrep_trx().state() == wsrep::transaction::s_must_replay; + mysql_mutex_unlock(&thd->LOCK_thd_data); + return res; +#else + return false; +#endif +} + + int Xid_log_event::do_apply_event(rpl_group_info *rgi) { bool res; @@ -9049,14 +9061,8 @@ int Xid_log_event::do_apply_event(rpl_group_info *rgi) res= trans_commit(thd); /* Automatically rolls back on error. 
*/ thd->release_transactional_locks(); - mysql_mutex_lock(&thd->LOCK_thd_data); -#ifdef WITH_WSREP - if (sub_id && (!res || (WSREP(thd) && thd->wsrep_trx().state() == wsrep::transaction::s_must_replay))) -#else - if (sub_id && !res) -#endif /* WITH_WSREP */ + if (sub_id && (!res || wsrep_must_replay(thd))) rpl_global_gtid_slave_state->update_state_hash(sub_id, >id, hton, rgi); - mysql_mutex_unlock(&thd->LOCK_thd_data); /* Increment the global status commit count variable */ diff --git a/sql/service_wsrep.cc b/sql/service_wsrep.cc index f0a4cf81c02..80f164855b2 100644 --- a/sql/service_wsrep.cc +++ b/sql/service_wsrep.cc @@ -210,16 +210,8 @@ extern "C" void wsrep_handle_SR_rollback(THD *bf_thd, extern "C" my_bool wsrep_thd_bf_abort(THD *bf_thd, THD *victim_thd, my_bool signal) { - DBUG_EXECUTE_IF("sync.before_wsrep_thd_abort", - { - const char act[]= - "now " - "SIGNAL sync.before_wsrep_thd_abort_reached " - "WAIT_FOR signal.before_wsrep_thd_abort"; - DBUG_ASSERT(!debug_sync_set_action(bf_thd, - STRING_WITH_LEN(act))); - };); - + mysql_mutex_assert_owner(&victim_thd->LOCK_thd_kill); + mysql_mutex_assert_not_owner(&victim_thd->LOCK_thd_data); my_bool ret= wsrep_bf_abort(bf_thd, victim_thd); /* Send awake signal if victim was BF aborted or does not @@ -228,8 +220,6 @@ extern "C" my_bool wsrep_thd_bf_abort(THD *bf_thd, THD *victim_thd, */ if ((ret || !wsrep_on(victim_thd)) && signal) { - mysql_mutex_assert_not_owner(&victim_thd->LOCK_thd_data); - mysql_mutex_assert_not_owner(&victim_thd->LOCK_thd_kill); mysql_mutex_lock(&victim_thd->LOCK_thd_data); if (victim_thd->wsrep_aborter && victim_thd->wsrep_aborter != bf_thd->thread_id) @@ -240,10 +230,8 @@ extern "C" my_bool wsrep_thd_bf_abort(THD *bf_thd, THD *victim_thd, return false; } - mysql_mutex_lock(&victim_thd->LOCK_thd_kill); victim_thd->wsrep_aborter= bf_thd->thread_id; victim_thd->awake_no_mutex(KILL_QUERY); - mysql_mutex_unlock(&victim_thd->LOCK_thd_kill); mysql_mutex_unlock(&victim_thd->LOCK_thd_data); } else { WSREP_DEBUG("wsrep_thd_bf_abort skipped awake"); diff --git a/sql/slave.cc b/sql/slave.cc index 372e46acd1d..31bd9372a14 100644 --- a/sql/slave.cc +++ b/sql/slave.cc @@ -1069,8 +1069,8 @@ terminate_slave_thread(THD *thd, int error __attribute__((unused)); DBUG_PRINT("loop", ("killing slave thread")); - mysql_mutex_lock(&thd->LOCK_thd_data); mysql_mutex_lock(&thd->LOCK_thd_kill); + mysql_mutex_lock(&thd->LOCK_thd_data); #ifndef DONT_USE_THR_ALARM /* Error codes from pthread_kill are: diff --git a/sql/sql_class.cc b/sql/sql_class.cc index dc3903661e1..81718595fec 100644 --- a/sql/sql_class.cc +++ b/sql/sql_class.cc @@ -799,6 +799,7 @@ THD::THD(my_thread_id id, bool is_wsrep_applier) mysql_mutex_init(key_LOCK_wakeup_ready, &LOCK_wakeup_ready, MY_MUTEX_INIT_FAST); mysql_mutex_init(key_LOCK_thd_kill, &LOCK_thd_kill, MY_MUTEX_INIT_FAST); mysql_cond_init(key_COND_wakeup_ready, &COND_wakeup_ready, 0); + mysql_mutex_record_order(&LOCK_thd_kill, &LOCK_thd_data); /* Variables with default values */ proc_info="login"; @@ -5058,11 +5059,13 @@ thd_need_ordering_with(const MYSQL_THD thd, const MYSQL_THD other_thd) #ifdef WITH_WSREP /* wsrep applier, replayer and TOI processing threads are ordered by replication provider, relaxed GAP locking protocol can be used - between high priority wsrep threads + between high priority wsrep threads. + Note that wsrep_thd_is_BF() doesn't take LOCK_thd_data for either thd, + the caller should guarantee that the BF state won't change. + (e.g. 
InnoDB does it by keeping lock_sys.mutex locked) */ - if (WSREP_ON && - wsrep_thd_is_BF(const_cast(thd), false) && - wsrep_thd_is_BF(const_cast(other_thd), true)) + if (WSREP_ON && wsrep_thd_is_BF(thd, false) && + wsrep_thd_is_BF(other_thd, false)) return 0; #endif /* WITH_WSREP */ rgi= thd->rgi_slave; diff --git a/sql/sql_class.h b/sql/sql_class.h index 39d6ec1027f..4eabd3da450 100644 --- a/sql/sql_class.h +++ b/sql/sql_class.h @@ -3309,11 +3309,11 @@ public: void awake_no_mutex(killed_state state_to_set); void awake(killed_state state_to_set) { - mysql_mutex_lock(&LOCK_thd_data); mysql_mutex_lock(&LOCK_thd_kill); + mysql_mutex_lock(&LOCK_thd_data); awake_no_mutex(state_to_set); - mysql_mutex_unlock(&LOCK_thd_kill); mysql_mutex_unlock(&LOCK_thd_data); + mysql_mutex_unlock(&LOCK_thd_kill); } void abort_current_cond_wait(bool force); diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc index dd0e5cfa34e..d71d29bc85a 100644 --- a/sql/sql_parse.cc +++ b/sql/sql_parse.cc @@ -9102,8 +9102,8 @@ static my_bool find_thread_with_thd_data_lock_callback(THD *thd, find_thread_cal { if (arg->id == (arg->query_id ? thd->query_id : (longlong) thd->thread_id)) { - mysql_mutex_lock(&thd->LOCK_thd_data); mysql_mutex_lock(&thd->LOCK_thd_kill); // Lock from delete + mysql_mutex_lock(&thd->LOCK_thd_data); // XXX DELME arg->thd= thd; return 1; } @@ -9238,8 +9238,8 @@ static my_bool kill_threads_callback(THD *thd, kill_threads_callback_arg *arg) return 1; if (!arg->threads_to_kill.push_back(thd, arg->thd->mem_root)) { - mysql_mutex_lock(&thd->LOCK_thd_data); mysql_mutex_lock(&thd->LOCK_thd_kill); // Lock from delete + mysql_mutex_lock(&thd->LOCK_thd_data); } } } diff --git a/sql/sql_repl.cc b/sql/sql_repl.cc index a0d8b4ca6d1..6a6cfb2aa5f 100644 --- a/sql/sql_repl.cc +++ b/sql/sql_repl.cc @@ -3474,8 +3474,8 @@ static my_bool kill_callback(THD *thd, kill_callback_arg *arg) thd->variables.server_id == arg->slave_server_id) { arg->thd= thd; - mysql_mutex_lock(&thd->LOCK_thd_data); mysql_mutex_lock(&thd->LOCK_thd_kill); // Lock from delete + mysql_mutex_lock(&thd->LOCK_thd_data); return 1; } return 0; diff --git a/sql/wsrep_client_service.cc b/sql/wsrep_client_service.cc index 245fc1487ca..89621619a23 100644 --- a/sql/wsrep_client_service.cc +++ b/sql/wsrep_client_service.cc @@ -69,20 +69,13 @@ bool Wsrep_client_service::interrupted( wsrep::unique_lock& lock WSREP_UNUSED) const { DBUG_ASSERT(m_thd == current_thd); - /* Underlying mutex in lock object points to LOCK_thd_data, which - protects m_thd->wsrep_trx(), LOCK_thd_kill protects m_thd->killed. 
- Locking order is: - 1) LOCK_thd_data - 2) LOCK_thd_kill */ mysql_mutex_assert_owner(static_cast(lock.mutex()->native())); - mysql_mutex_lock(&m_thd->LOCK_thd_kill); bool ret= (m_thd->killed != NOT_KILLED); if (ret) { WSREP_DEBUG("wsrep state is interrupted, THD::killed %d trx state %d", m_thd->killed, m_thd->wsrep_trx().state()); } - mysql_mutex_unlock(&m_thd->LOCK_thd_kill); return ret; } diff --git a/sql/wsrep_server_service.cc b/sql/wsrep_server_service.cc index cd432ab3eae..19259a43925 100644 --- a/sql/wsrep_server_service.cc +++ b/sql/wsrep_server_service.cc @@ -40,6 +40,7 @@ static void init_service_thd(THD* thd, char* thread_stack) thd->prior_thr_create_utime= thd->start_utime= microsecond_interval_timer(); thd->set_command(COM_SLEEP); thd->reset_for_next_command(true); + server_threads.insert(thd); // as wsrep_innobase_kill_one_trx() uses find_thread_by_id() } Wsrep_storage_service* @@ -79,6 +80,7 @@ void Wsrep_server_service::release_storage_service( static_cast(storage_service); THD* thd= ss->m_thd; wsrep_reset_threadvars(thd); + server_threads.erase(thd); delete ss; delete thd; } @@ -92,7 +94,8 @@ wsrep_create_streaming_applier(THD *orig_thd, const char *ctx) streaming transaction is BF aborted and streaming applier is created from BF aborter context. */ Wsrep_threadvars saved_threadvars(wsrep_save_threadvars()); - wsrep_reset_threadvars(saved_threadvars.cur_thd); + if (saved_threadvars.cur_thd) + wsrep_reset_threadvars(saved_threadvars.cur_thd); THD *thd= 0; Wsrep_applier_service *ret= 0; if (!wsrep_create_threadvars() && @@ -109,7 +112,8 @@ wsrep_create_streaming_applier(THD *orig_thd, const char *ctx) } /* Restore original thread local storage state before returning. */ wsrep_restore_threadvars(saved_threadvars); - wsrep_store_threadvars(saved_threadvars.cur_thd); + if (saved_threadvars.cur_thd) + wsrep_store_threadvars(saved_threadvars.cur_thd); return ret; } @@ -138,6 +142,7 @@ void Wsrep_server_service::release_high_priority_service(wsrep::high_priority_se THD* thd= hps->m_thd; delete hps; wsrep_store_threadvars(thd); + server_threads.erase(thd); delete thd; wsrep_delete_threadvars(); } diff --git a/storage/innobase/handler/ha_innodb.cc b/storage/innobase/handler/ha_innodb.cc index cc9fa427168..8d55e6901a5 100644 --- a/storage/innobase/handler/ha_innodb.cc +++ b/storage/innobase/handler/ha_innodb.cc @@ -62,6 +62,7 @@ this program; if not, write to the Free Software Foundation, Inc., #include #include +#include /* Include necessary InnoDB headers */ #include "btr0btr.h" @@ -5093,6 +5094,8 @@ static void innobase_kill_query(handlerton*, THD *thd, enum thd_kill_levels) if (lock_t *lock= trx->lock.wait_lock) { trx_mutex_enter(trx); + if (trx->is_wsrep() && wsrep_thd_is_aborting(thd)) + trx->lock.was_chosen_as_deadlock_victim= TRUE; lock_cancel_waiting_and_release(lock); trx_mutex_exit(trx); } @@ -18556,62 +18559,60 @@ static struct st_mysql_storage_engine innobase_storage_engine= #ifdef WITH_WSREP -/** This function is used to kill one transaction. +struct bg_wsrep_kill_trx_arg { + my_thread_id thd_id, bf_thd_id; + trx_id_t trx_id, bf_trx_id; + bool signal; +}; -This transaction was open on this node (not-yet-committed), and a -conflicting writeset from some other node that was being applied -caused a locking conflict. First committed (from other node) -wins, thus open transaction is rolled back. BF stands for -brute-force: any transaction can get aborted by galera any time -it is necessary. 
+/** Kill one transaction from a background manager thread -This conflict can happen only when the replicated writeset (from -other node) is being applied, not when it’s waiting in the queue. -If our local transaction reached its COMMIT and this conflicting -writeset was in the queue, then it should fail the local -certification test instead. +wsrep_innobase_kill_one_trx() is invoked when lock_sys.mutex and trx mutex +are taken, wsrep_thd_bf_abort() cannot be used there as it takes THD mutexes +that must be taken before lock_sys.mutex and trx mutex. That's why +wsrep_innobase_kill_one_trx only posts the killing task to the manager thread +and the actual killing happens asynchronously here. -A brute force abort is only triggered by a locking conflict -between a writeset being applied by an applier thread (slave thread) -and an open transaction on the node, not by a Galera writeset -comparison as in the local certification failure. - -@param[in] bf_thd Brute force (BF) thread -@param[in,out] victim_trx Vimtim trx to be killed -@param[in] signal Should victim be signaled */ -UNIV_INTERN -void -wsrep_innobase_kill_one_trx( - THD* bf_thd, - trx_t *victim_trx, - bool signal) +As no mutexes were held we don't know whether THD or trx pointers are still +valid, so we need to pass thread/trx ids and perform a lookup. +*/ +static void bg_wsrep_kill_trx(void *void_arg) { - ut_ad(bf_thd); - ut_ad(victim_trx); - ut_ad(lock_mutex_own()); - ut_ad(trx_mutex_own(victim_trx)); + bg_wsrep_kill_trx_arg *arg= (bg_wsrep_kill_trx_arg *)void_arg; + THD *thd, *bf_thd; + trx_t *victim_trx; + bool aborting= false; - DBUG_ENTER("wsrep_innobase_kill_one_trx"); + bf_thd= find_thread_by_id_with_thd_data_lock(arg->bf_thd_id); + thd= find_thread_by_id_with_thd_data_lock(arg->thd_id); + + if (!thd || !bf_thd || !(victim_trx= thd_to_trx(thd))) + goto ret0; + + lock_mutex_enter(); + trx_mutex_enter(victim_trx); + if (victim_trx->id != arg->trx_id) + { + /* apparently victim trx was meanwhile rolled back. + tell bf thd not to wait, in case it already started to */ + trx_t *trx= thd_to_trx(bf_thd); + if (lock_t *lock= trx->lock.wait_lock) { + trx_mutex_enter(trx); + lock_cancel_waiting_and_release(lock); + trx_mutex_exit(trx); + } + goto ret1; + } - THD *thd= (THD *) victim_trx->mysql_thd; - ut_ad(thd); - /* Note that bf_trx might not exist here e.g. on MDL conflict - case (test: galera_concurrent_ctas). Similarly, BF thread - could be also acquiring MDL-lock causing victim to be - aborted. However, we have not yet called innobase_trx_init() - for BF transaction (test: galera_many_columns)*/ - trx_t* bf_trx= thd_to_trx(bf_thd); DBUG_ASSERT(wsrep_on(bf_thd)); - wsrep_thd_LOCK(thd); - WSREP_LOG_CONFLICT(bf_thd, thd, TRUE); WSREP_DEBUG("Aborter %s trx_id: " TRX_ID_FMT " thread: %ld " "seqno: %lld client_state: %s client_mode: %s transaction_mode: %s " "query: %s", wsrep_thd_is_BF(bf_thd, false) ? "BF" : "normal", - bf_trx ? 
bf_trx->id : TRX_ID_MAX, + arg->bf_trx_id, thd_get_thread_id(bf_thd), wsrep_thd_trx_seqno(bf_thd), wsrep_thd_client_state_str(bf_thd), @@ -18636,28 +18637,84 @@ wsrep_innobase_kill_one_trx( if (wsrep_thd_set_wsrep_aborter(bf_thd, thd)) { WSREP_DEBUG("innodb kill transaction skipped due to wsrep_aborter set"); - wsrep_thd_UNLOCK(thd); - DBUG_VOID_RETURN; + goto ret1; } - /* Note that we need to release this as it will be acquired - below in wsrep-lib */ - wsrep_thd_UNLOCK(thd); - DEBUG_SYNC(bf_thd, "before_wsrep_thd_abort"); + aborting= true; - if (wsrep_thd_bf_abort(bf_thd, thd, signal)) - { - lock_t* wait_lock = victim_trx->lock.wait_lock; - if (wait_lock) { - DBUG_ASSERT(victim_trx->is_wsrep()); - WSREP_DEBUG("victim has wait flag: %lu", - thd_get_thread_id(thd)); - - WSREP_DEBUG("canceling wait lock"); - victim_trx->lock.was_chosen_as_deadlock_victim= TRUE; - lock_cancel_waiting_and_release(wait_lock); +ret1: + trx_mutex_exit(victim_trx); + lock_mutex_exit(); +ret0: + if (thd) { + wsrep_thd_UNLOCK(thd); + if (aborting) { + DEBUG_SYNC(bf_thd, "before_wsrep_thd_abort"); + wsrep_thd_bf_abort(bf_thd, thd, arg->signal); } + wsrep_thd_kill_UNLOCK(thd); } + if (bf_thd) { + wsrep_thd_UNLOCK(bf_thd); + wsrep_thd_kill_UNLOCK(bf_thd); + } + free(arg); +} + +/** This function is used to kill one transaction. + +This transaction was open on this node (not-yet-committed), and a +conflicting writeset from some other node that was being applied +caused a locking conflict. First committed (from other node) +wins, thus open transaction is rolled back. BF stands for +brute-force: any transaction can get aborted by galera any time +it is necessary. + +This conflict can happen only when the replicated writeset (from +other node) is being applied, not when it’s waiting in the queue. +If our local transaction reached its COMMIT and this conflicting +writeset was in the queue, then it should fail the local +certification test instead. + +A brute force abort is only triggered by a locking conflict +between a writeset being applied by an applier thread (slave thread) +and an open transaction on the node, not by a Galera writeset +comparison as in the local certification failure. + +@param[in] bf_thd Brute force (BF) thread +@param[in,out] victim_trx Vimtim trx to be killed +@param[in] signal Should victim be signaled */ +void +wsrep_innobase_kill_one_trx( + THD* bf_thd, + trx_t *victim_trx, + bool signal) +{ + ut_ad(bf_thd); + ut_ad(victim_trx); + ut_ad(lock_mutex_own()); + ut_ad(trx_mutex_own(victim_trx)); + + DBUG_ENTER("wsrep_innobase_kill_one_trx"); + + DBUG_EXECUTE_IF("sync.before_wsrep_thd_abort", + { + const char act[]= + "now " + "SIGNAL sync.before_wsrep_thd_abort_reached " + "WAIT_FOR signal.before_wsrep_thd_abort"; + DBUG_ASSERT(!debug_sync_set_action(bf_thd, + STRING_WITH_LEN(act))); + };); + + trx_t* bf_trx= thd_to_trx(bf_thd); + bg_wsrep_kill_trx_arg *arg = (bg_wsrep_kill_trx_arg*)malloc(sizeof(*arg)); + arg->thd_id = thd_get_thread_id(victim_trx->mysql_thd); + arg->trx_id = victim_trx->id; + arg->bf_thd_id = thd_get_thread_id(bf_thd); + arg->bf_trx_id = bf_trx ? 
bf_trx->id : TRX_ID_MAX; + arg->signal = signal; + mysql_manager_submit(bg_wsrep_kill_trx, arg); DBUG_VOID_RETURN; } @@ -18693,13 +18750,44 @@ wsrep_abort_transaction( if (victim_trx) { lock_mutex_enter(); trx_mutex_enter(victim_trx); - wsrep_innobase_kill_one_trx(bf_thd, victim_trx, signal); + victim_trx->lock.was_chosen_as_wsrep_victim= true; trx_mutex_exit(victim_trx); lock_mutex_exit(); + + wsrep_thd_kill_LOCK(victim_thd); + wsrep_thd_LOCK(victim_thd); + bool aborting= !wsrep_thd_set_wsrep_aborter(bf_thd, victim_thd); + wsrep_thd_UNLOCK(victim_thd); + if (aborting) { + DEBUG_SYNC(bf_thd, "before_wsrep_thd_abort"); + DBUG_EXECUTE_IF("sync.before_wsrep_thd_abort", + { + const char act[]= + "now " + "SIGNAL sync.before_wsrep_thd_abort_reached " + "WAIT_FOR signal.before_wsrep_thd_abort"; + DBUG_ASSERT(!debug_sync_set_action(bf_thd, + STRING_WITH_LEN(act))); + };); + wsrep_thd_bf_abort(bf_thd, victim_thd, signal); + } + wsrep_thd_kill_UNLOCK(victim_thd); + wsrep_srv_conc_cancel_wait(victim_trx); DBUG_VOID_RETURN; } else { + DBUG_EXECUTE_IF("sync.before_wsrep_thd_abort", + { + const char act[]= + "now " + "SIGNAL sync.before_wsrep_thd_abort_reached " + "WAIT_FOR signal.before_wsrep_thd_abort"; + DBUG_ASSERT(!debug_sync_set_action(bf_thd, + STRING_WITH_LEN(act))); + };); + wsrep_thd_kill_LOCK(victim_thd); wsrep_thd_bf_abort(bf_thd, victim_thd, signal); + wsrep_thd_kill_UNLOCK(victim_thd); } DBUG_VOID_RETURN; From 259b945204eec0dc623fd861c0f83fcb2b3bd763 Mon Sep 17 00:00:00 2001 From: Sergei Golubchik Date: Fri, 12 Feb 2021 15:05:24 +0100 Subject: [PATCH 148/150] remove find_thread_with_thd_data_lock_callback let the caller take the lock if needed --- sql/sql_parse.cc | 27 +++++---------------------- sql/sql_show.h | 1 - storage/innobase/handler/ha_innodb.cc | 6 ++++-- 3 files changed, 9 insertions(+), 25 deletions(-) diff --git a/sql/sql_parse.cc b/sql/sql_parse.cc index d71d29bc85a..9d9831d9209 100644 --- a/sql/sql_parse.cc +++ b/sql/sql_parse.cc @@ -9098,24 +9098,6 @@ THD *find_thread_by_id(longlong id, bool query_id) return arg.thd; } -static my_bool find_thread_with_thd_data_lock_callback(THD *thd, find_thread_callback_arg *arg) -{ - if (arg->id == (arg->query_id ? thd->query_id : (longlong) thd->thread_id)) - { - mysql_mutex_lock(&thd->LOCK_thd_kill); // Lock from delete - mysql_mutex_lock(&thd->LOCK_thd_data); // XXX DELME - arg->thd= thd; - return 1; - } - return 0; -} -THD *find_thread_by_id_with_thd_data_lock(longlong id, bool query_id) -{ - find_thread_callback_arg arg(id, query_id); - server_threads.iterate(find_thread_with_thd_data_lock_callback, &arg); - return arg.thd; -} - /** kill one thread. @@ -9132,7 +9114,7 @@ kill_one_thread(THD *thd, longlong id, killed_state kill_signal, killed_type typ uint error= (type == KILL_TYPE_QUERY ? 
ER_NO_SUCH_QUERY : ER_NO_SUCH_THREAD); DBUG_ENTER("kill_one_thread"); DBUG_PRINT("enter", ("id: %lld signal: %u", id, (uint) kill_signal)); - tmp= find_thread_by_id_with_thd_data_lock(id, type == KILL_TYPE_QUERY); + tmp= find_thread_by_id(id, type == KILL_TYPE_QUERY); if (!tmp) DBUG_RETURN(error); @@ -9159,6 +9141,7 @@ kill_one_thread(THD *thd, longlong id, killed_state kill_signal, killed_type typ faster and do a harder kill than KILL_SYSTEM_THREAD; */ + mysql_mutex_lock(&tmp->LOCK_thd_data); // for various wsrep* checks below #ifdef WITH_WSREP if (((thd->security_ctx->master_access & SUPER_ACL) || thd->security_ctx->user_matches(tmp->security_ctx)) && @@ -9180,8 +9163,8 @@ kill_one_thread(THD *thd, longlong id, killed_state kill_signal, killed_type typ else #endif /* WITH_WSREP */ { - WSREP_DEBUG("kill_one_thread %llu, victim: %llu wsrep_aborter %llu by signal %d", - thd->thread_id, id, tmp->wsrep_aborter, kill_signal); + WSREP_DEBUG("kill_one_thread %llu, victim: %llu wsrep_aborter %llu by signal %d", + thd->thread_id, id, tmp->wsrep_aborter, kill_signal); tmp->awake_no_mutex(kill_signal); WSREP_DEBUG("victim: %llu taken care of", id); error= 0; @@ -9190,9 +9173,9 @@ kill_one_thread(THD *thd, longlong id, killed_state kill_signal, killed_type typ else error= (type == KILL_TYPE_QUERY ? ER_KILL_QUERY_DENIED_ERROR : ER_KILL_DENIED_ERROR); + mysql_mutex_unlock(&tmp->LOCK_thd_data); } mysql_mutex_unlock(&tmp->LOCK_thd_kill); - mysql_mutex_unlock(&tmp->LOCK_thd_data); DBUG_PRINT("exit", ("%d", error)); DBUG_RETURN(error); } diff --git a/sql/sql_show.h b/sql/sql_show.h index 8e807f0c1a5..c1845d8c1b3 100644 --- a/sql/sql_show.h +++ b/sql/sql_show.h @@ -144,7 +144,6 @@ const char* get_one_variable(THD *thd, const SHOW_VAR *variable, /* These functions were under INNODB_COMPATIBILITY_HOOKS */ int get_quote_char_for_identifier(THD *thd, const char *name, size_t length); THD *find_thread_by_id(longlong id, bool query_id= false); -THD *find_thread_by_id_with_thd_data_lock(longlong id, bool query_id= false); class select_result_explain_buffer; /* diff --git a/storage/innobase/handler/ha_innodb.cc b/storage/innobase/handler/ha_innodb.cc index 8d55e6901a5..7ca9e83a368 100644 --- a/storage/innobase/handler/ha_innodb.cc +++ b/storage/innobase/handler/ha_innodb.cc @@ -18583,8 +18583,10 @@ static void bg_wsrep_kill_trx(void *void_arg) trx_t *victim_trx; bool aborting= false; - bf_thd= find_thread_by_id_with_thd_data_lock(arg->bf_thd_id); - thd= find_thread_by_id_with_thd_data_lock(arg->thd_id); + if ((bf_thd= find_thread_by_id(arg->bf_thd_id))) + wsrep_thd_LOCK(bf_thd); + if ((thd= find_thread_by_id(arg->thd_id))) + wsrep_thd_LOCK(thd); if (!thd || !bf_thd || !(victim_trx= thd_to_trx(thd))) goto ret0; From b91e77cff3fb5fbb32ebb061ed342469b434c4e8 Mon Sep 17 00:00:00 2001 From: Sergei Golubchik Date: Fri, 12 Feb 2021 11:29:40 +0100 Subject: [PATCH 149/150] fix a 3-way deadlock in galera_sr.galera-features#56 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit rarely (try --repeat 1000), the following happens: * from wsrep_bf_abort (when a thread is being killed), wsrep-lib starts streaming_rollback that wants to convert_streaming_client_to_applier. wsrep_create_streaming_applier creates a new THD(). All while the other THD is being killed, so under LOCK_thd_kill and LOCK_thd_data. In particular, THD::init() takes LOCK_global_system_variables under LOCK_thd_kill. 
* updating @@wsrep_slave_threads takes LOCK_global_system_variables and LOCK_wsrep_cluster_config (in that order) and invokes wsrep_slave_threads_update() that takes LOCK_wsrep_slave_threads * wsrep_replication_process() takes LOCK_wsrep_slave_threads and invokes wsrep_close_applier(), that does thd->set_killed() which takes LOCK_thd_kill. et voilà. As a fix I copied a workaround from wsrep_cluster_address_update() to wsrep_slave_threads_update(). It seems to be safe: without mutexes a race condition is possible and a concurrent SET might change wsrep_slave_threads, but wsrep_slave_threads_update() always verifies if there's a need to do something, so it will not run twice in this case, it'll be a no-op. --- sql/wsrep_var.cc | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/sql/wsrep_var.cc b/sql/wsrep_var.cc index dea388d30de..239daadc4f6 100644 --- a/sql/wsrep_var.cc +++ b/sql/wsrep_var.cc @@ -674,7 +674,11 @@ static void wsrep_slave_count_change_update () bool wsrep_slave_threads_update (sys_var *self, THD* thd, enum_var_type type) { + mysql_mutex_unlock(&LOCK_wsrep_cluster_config); + mysql_mutex_unlock(&LOCK_global_system_variables); mysql_mutex_lock(&LOCK_wsrep_slave_threads); + mysql_mutex_lock(&LOCK_global_system_variables); + mysql_mutex_lock(&LOCK_wsrep_cluster_config); bool res= false; wsrep_slave_count_change_update(); From 26965387230a9b13fb716344477d108bb87dea98 Mon Sep 17 00:00:00 2001 From: Sergei Golubchik Date: Fri, 12 Feb 2021 17:31:25 +0100 Subject: [PATCH 150/150] updating @@wsrep_cluster_address deadlocks wsrep_cluster_address_update() causes LOCK_wsrep_slave_threads to be locked under LOCK_wsrep_cluster_config, while normally the order should be the opposite. Fix: don't protect @@wsrep_cluster_address value with the LOCK_wsrep_cluster_config, LOCK_global_system_variables is enough. Only protect wsrep reinitialization with the LOCK_wsrep_cluster_config. And make it use a local copy of the global @@wsrep_cluster_address. Also, introduce a helper function that checks whether wsrep_cluster_address is set and also asserts that it can be safely read by the caller. 
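To make the locking reasoning above concrete: a handler that is entered with one mutex already held, but that needs another mutex which must come first in the locking order, releases the held mutex, takes the other one, and only then reacquires the first, so the effective acquisition order no longer closes a deadlock cycle. This is the same juggling that wsrep_slave_threads_update() and wsrep_cluster_address_update() perform in the hunks below. A minimal toy model of the pattern (C++ with std::mutex; the names G, S and the main() driver are invented for illustration, this is not MariaDB code):

    #include <mutex>

    static std::mutex G;   // stands in for LOCK_global_system_variables
    static std::mutex S;   // stands in for LOCK_wsrep_slave_threads

    static void slave_threads_update_toy()
    {
        // Locking S directly here, while still holding G, would recreate
        // the inverted lock order that produces the deadlocks described above.
        G.unlock();   // give up the mutex we were called with,
        S.lock();     // take the other one,
        G.lock();     // and retake G: the effective order is now S before G.

        // ... apply the new value under both locks ...

        S.unlock();   // G stays held; the caller releases it as before.
    }

    int main()
    {
        G.lock();                    // the sys_var framework analogue
        slave_threads_update_toy();
        G.unlock();
        return 0;
    }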
--- sql/mysqld.cc | 9 ++++++--- sql/sys_vars.cc | 5 ++--- sql/wsrep_check_opts.cc | 2 +- sql/wsrep_mysqld.cc | 13 ++++--------- sql/wsrep_mysqld.h | 10 +++++++++- sql/wsrep_thd.cc | 22 ++++++++-------------- sql/wsrep_var.cc | 28 +++++++++++++++------------- 7 files changed, 45 insertions(+), 44 deletions(-) diff --git a/sql/mysqld.cc b/sql/mysqld.cc index 6aecb0cf11e..71298e2ba00 100644 --- a/sql/mysqld.cc +++ b/sql/mysqld.cc @@ -5804,9 +5804,12 @@ int mysqld_main(int argc, char **argv) wsrep_init_startup (false); } - WSREP_DEBUG("Startup creating %ld applier threads running %lu", - wsrep_slave_threads - 1, wsrep_running_applier_threads); - wsrep_create_appliers(wsrep_slave_threads - 1); + if (wsrep_cluster_address_exists()) + { + WSREP_DEBUG("Startup creating %ld applier threads running %lu", + wsrep_slave_threads - 1, wsrep_running_applier_threads); + wsrep_create_appliers(wsrep_slave_threads - 1); + } } } diff --git a/sql/sys_vars.cc b/sql/sys_vars.cc index 5f5f9b5daf8..87ab62c4261 100644 --- a/sql/sys_vars.cc +++ b/sql/sys_vars.cc @@ -5445,13 +5445,12 @@ static Sys_var_charptr Sys_wsrep_cluster_name( ON_CHECK(wsrep_cluster_name_check), ON_UPDATE(wsrep_cluster_name_update)); -static PolyLock_mutex PLock_wsrep_cluster_config(&LOCK_wsrep_cluster_config); static Sys_var_charptr Sys_wsrep_cluster_address ( "wsrep_cluster_address", "Address to initially connect to cluster", PREALLOCATED GLOBAL_VAR(wsrep_cluster_address), CMD_LINE(REQUIRED_ARG), IN_SYSTEM_CHARSET, DEFAULT(""), - &PLock_wsrep_cluster_config, NOT_IN_BINLOG, + NO_MUTEX_GUARD, NOT_IN_BINLOG, ON_CHECK(wsrep_cluster_address_check), ON_UPDATE(wsrep_cluster_address_update)); @@ -5482,7 +5481,7 @@ static Sys_var_ulong Sys_wsrep_slave_threads( "wsrep_slave_threads", "Number of slave appliers to launch", GLOBAL_VAR(wsrep_slave_threads), CMD_LINE(REQUIRED_ARG), VALID_RANGE(1, 512), DEFAULT(1), BLOCK_SIZE(1), - &PLock_wsrep_cluster_config, NOT_IN_BINLOG, + NO_MUTEX_GUARD, NOT_IN_BINLOG, ON_CHECK(0), ON_UPDATE(wsrep_slave_threads_update)); diff --git a/sql/wsrep_check_opts.cc b/sql/wsrep_check_opts.cc index 935bacffffc..e5a0dcb2ede 100644 --- a/sql/wsrep_check_opts.cc +++ b/sql/wsrep_check_opts.cc @@ -63,7 +63,7 @@ int wsrep_check_opts() else { // non-mysqldump SST requires wsrep_cluster_address on startup - if (!wsrep_cluster_address || !wsrep_cluster_address[0]) + if (!wsrep_cluster_address_exists()) { WSREP_ERROR ("%s SST method requires wsrep_cluster_address to be " "configured on startup.", wsrep_sst_method); diff --git a/sql/wsrep_mysqld.cc b/sql/wsrep_mysqld.cc index 8e68f7229eb..fc54c7f6ec4 100644 --- a/sql/wsrep_mysqld.cc +++ b/sql/wsrep_mysqld.cc @@ -878,13 +878,13 @@ void wsrep_init_startup (bool sst_first) if (!strcmp(wsrep_provider, WSREP_NONE)) return; /* Skip replication start if no cluster address */ - if (!wsrep_cluster_address || wsrep_cluster_address[0] == 0) return; + if (!wsrep_cluster_address_exists()) return; /* Read value of wsrep_new_cluster before wsrep_start_replication(), the value is reset to FALSE inside wsrep_start_replication. 
*/ - if (!wsrep_start_replication()) unireg_abort(1); + if (!wsrep_start_replication(wsrep_cluster_address)) unireg_abort(1); wsrep_create_rollbacker(); wsrep_create_appliers(1); @@ -1034,7 +1034,7 @@ void wsrep_shutdown_replication() my_pthread_setspecific_ptr(THR_THD, NULL); } -bool wsrep_start_replication() +bool wsrep_start_replication(const char *wsrep_cluster_address) { int rcode; WSREP_DEBUG("wsrep_start_replication"); @@ -1049,12 +1049,7 @@ bool wsrep_start_replication() return true; } - if (!wsrep_cluster_address || wsrep_cluster_address[0]== 0) - { - // if provider is non-trivial, but no address is specified, wait for address - WSREP_DEBUG("wsrep_start_replication exit due to empty address"); - return true; - } + DBUG_ASSERT(wsrep_cluster_address[0]); bool const bootstrap(TRUE == wsrep_new_cluster); wsrep_new_cluster= FALSE; diff --git a/sql/wsrep_mysqld.h b/sql/wsrep_mysqld.h index 29b1c4cf1f4..16663f7152c 100644 --- a/sql/wsrep_mysqld.h +++ b/sql/wsrep_mysqld.h @@ -203,7 +203,7 @@ extern void wsrep_close_applier_threads(int count); /* new defines */ extern void wsrep_stop_replication(THD *thd); -extern bool wsrep_start_replication(); +extern bool wsrep_start_replication(const char *wsrep_cluster_address); extern void wsrep_shutdown_replication(); extern bool wsrep_must_sync_wait (THD* thd, uint mask= WSREP_SYNC_WAIT_BEFORE_READ); extern bool wsrep_sync_wait (THD* thd, uint mask= WSREP_SYNC_WAIT_BEFORE_READ); @@ -280,6 +280,13 @@ void WSREP_LOG(void (*fun)(const char* fmt, ...), const char* fmt, ...); #define WSREP_PROVIDER_EXISTS \ (wsrep_provider && strncasecmp(wsrep_provider, WSREP_NONE, FN_REFLEN)) +static inline bool wsrep_cluster_address_exists() +{ + if (mysqld_server_started) + mysql_mutex_assert_owner(&LOCK_global_system_variables); + return wsrep_cluster_address && wsrep_cluster_address[0]; +} + #define WSREP_QUERY(thd) (thd->query()) extern my_bool wsrep_ready_get(); @@ -501,6 +508,7 @@ wsrep::key wsrep_prepare_key_for_toi(const char* db, const char* table, #define wsrep_thr_deinit() do {} while(0) #define wsrep_init_globals() do {} while(0) #define wsrep_create_appliers(X) do {} while(0) +#define wsrep_cluster_address_exists() (false) #endif /* WITH_WSREP */ diff --git a/sql/wsrep_thd.cc b/sql/wsrep_thd.cc index 26cfa4c58c4..2d814c62424 100644 --- a/sql/wsrep_thd.cc +++ b/sql/wsrep_thd.cc @@ -125,11 +125,7 @@ bool wsrep_create_appliers(long threads, bool mutex_protected) return false; } - if (!wsrep_cluster_address || wsrep_cluster_address[0]== 0) - { - WSREP_DEBUG("wsrep_create_appliers exit due to empty address"); - return false; - } + DBUG_ASSERT(wsrep_cluster_address[0]); long wsrep_threads=0; @@ -284,16 +280,14 @@ static void wsrep_rollback_process(THD *rollbacker, void wsrep_create_rollbacker() { - if (wsrep_cluster_address && wsrep_cluster_address[0] != 0) - { - Wsrep_thd_args* args(new Wsrep_thd_args(wsrep_rollback_process, - WSREP_ROLLBACKER_THREAD, - pthread_self())); + DBUG_ASSERT(wsrep_cluster_address[0]); + Wsrep_thd_args* args(new Wsrep_thd_args(wsrep_rollback_process, + WSREP_ROLLBACKER_THREAD, + pthread_self())); - /* create rollbacker */ - if (create_wsrep_THD(args, false)) - WSREP_WARN("Can't create thread to manage wsrep rollback"); - } + /* create rollbacker */ + if (create_wsrep_THD(args, false)) + WSREP_WARN("Can't create thread to manage wsrep rollback"); } /* diff --git a/sql/wsrep_var.cc b/sql/wsrep_var.cc index 239daadc4f6..35f14532e85 100644 --- a/sql/wsrep_var.cc +++ b/sql/wsrep_var.cc @@ -554,30 +554,31 @@ bool 
wsrep_cluster_address_update (sys_var *self, THD* thd, enum_var_type type) /* stop replication is heavy operation, and includes closing all client connections. Closing clients may need to get LOCK_global_system_variables at least in MariaDB. - - Note: releasing LOCK_global_system_variables may cause race condition, if - there can be several concurrent clients changing wsrep_provider */ + char *tmp= my_strdup(wsrep_cluster_address, MYF(MY_WME)); WSREP_DEBUG("wsrep_cluster_address_update: %s", wsrep_cluster_address); mysql_mutex_unlock(&LOCK_global_system_variables); + + mysql_mutex_lock(&LOCK_wsrep_cluster_config); wsrep_stop_replication(thd); - if (wsrep_start_replication()) + if (*tmp && wsrep_start_replication(tmp)) { wsrep_create_rollbacker(); WSREP_DEBUG("Cluster address update creating %ld applier threads running %lu", wsrep_slave_threads, wsrep_running_applier_threads); wsrep_create_appliers(wsrep_slave_threads); } - /* locking order to be enforced is: - 1. LOCK_global_system_variables - 2. LOCK_wsrep_cluster_config - => have to juggle mutexes to comply with this - */ - mysql_mutex_unlock(&LOCK_wsrep_cluster_config); + mysql_mutex_lock(&LOCK_global_system_variables); - mysql_mutex_lock(&LOCK_wsrep_cluster_config); + if (strcmp(tmp, wsrep_cluster_address)) + { + my_free((void*)wsrep_cluster_address); + wsrep_cluster_address= tmp; + } + else + my_free(tmp); return false; } @@ -674,11 +675,12 @@ static void wsrep_slave_count_change_update () bool wsrep_slave_threads_update (sys_var *self, THD* thd, enum_var_type type) { - mysql_mutex_unlock(&LOCK_wsrep_cluster_config); + if (!wsrep_cluster_address_exists()) + return false; + mysql_mutex_unlock(&LOCK_global_system_variables); mysql_mutex_lock(&LOCK_wsrep_slave_threads); mysql_mutex_lock(&LOCK_global_system_variables); - mysql_mutex_lock(&LOCK_wsrep_cluster_config); bool res= false; wsrep_slave_count_change_update();
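A usage note on the new wsrep_cluster_address_exists() helper, with a hypothetical caller sketched below (the helper itself is shown in the wsrep_mysqld.h hunk above; the wrapper name here is invented purely for illustration and assumes the server's usual headers): once the server has started, the helper asserts ownership of LOCK_global_system_variables, so it may only be called from contexts that already hold that mutex, e.g. sys_var ON_UPDATE handlers such as wsrep_slave_threads_update() above.

    /* Hypothetical caller, for illustration only -- not a hunk from this patch series. */
    static bool example_wsrep_can_reconfigure()
    {
      /* ON_UPDATE handlers run with LOCK_global_system_variables held,
         which satisfies the debug assertion inside the helper. */
      mysql_mutex_assert_owner(&LOCK_global_system_variables);
      return wsrep_cluster_address_exists();
    }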