Compare commits


51 Commits

Author SHA1 Message Date
SPRINX0\prochazka 100e3fe75f deleted sast workflows 2026-04-08 10:59:29 +02:00
SPRINX0\prochazka af7930cea2 Enhance aggregation functions in SQL queries for improved PostgreSQL compatibility 2026-04-08 10:55:24 +02:00
SPRINX0\prochazka 6b4f6b909c Merge branch 'feature/postgres-optimalization' of https://github.com/dbgate/dbgate into feature/postgres-optimalization 2026-04-08 10:26:35 +02:00
Jan Prochazka 9a6e5cd7cc Update plugins/dbgate-plugin-postgres/src/backend/sql/views.js
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
2026-04-08 10:21:22 +02:00
SPRINX0\prochazka 9f64b6ec7a Merge branch 'master' into feature/postgres-optimalization 2026-04-08 10:20:28 +02:00
Stela Augustinova 77f720e34c Refactor connection handling in extractShellConnection to improve volatile ID management and ensure secure credential handling 2026-04-08 10:20:09 +02:00
Stela Augustinova 168dcb7824 Enhance error handling for connection requests in subprocesses and validate connection ID format 2026-04-08 10:20:09 +02:00
Stela Augustinova 759186a212 Improve error handling for volatile connection responses in subprocess communication 2026-04-08 10:20:09 +02:00
Stela Augustinova 71ed7a76ea Handle errors in volatile connection resolution and remove unused registration function 2026-04-08 10:20:09 +02:00
Stela Augustinova bd939b22c7 Fix volatile connection resolution to prevent multiple resolves 2026-04-08 10:20:09 +02:00
Stela Augustinova c327f77294 Refactor volatile connections handling in connections and runners modules 2026-04-08 10:20:09 +02:00
Stela Augustinova d907d79beb Streamline volatile connections handling and remove unused registration module 2026-04-08 10:20:09 +02:00
Stela Augustinova 93b879927c Implement volatile connections handling in runners and shell modules 2026-04-08 10:20:09 +02:00
Stela Augustinova 0c545d4cf9 Enhance clipboard formatters to skip empty rows, improving data handling in clipboard operations 2026-04-08 10:20:09 +02:00
Stela Augustinova 95c90c1517 Improve clipboard formatters to omit undefined values, enhancing data integrity in exports 2026-04-08 10:20:09 +02:00
CI workflows cb731fa858 chore: auto-update github workflows 2026-04-08 10:20:09 +02:00
Stela Augustinova 9bb3b09ecf SYNC: Add SAST workflow for security scanning using Semgrep 2026-04-08 10:20:09 +02:00
SPRINX0\prochazka 7c8f541d3e deleted sast workflow 2026-04-08 10:18:37 +02:00
Jan Prochazka ce41687382 Merge pull request #1417 from dbgate/feature/auth-error
Implement volatile connections handling in runners and shell modules
2026-04-08 10:14:02 +02:00
Stela Augustinova 4b083dea5c Refactor connection handling in extractShellConnection to improve volatile ID management and ensure secure credential handling 2026-04-07 14:56:29 +02:00
Stela Augustinova c84473c1eb Enhance error handling for connection requests in subprocesses and validate connection ID format 2026-04-07 14:26:58 +02:00
Stela Augustinova 7fc078f3e6 Improve error handling for volatile connection responses in subprocess communication 2026-04-07 14:15:18 +02:00
Stela Augustinova cbbd538248 Handle errors in volatile connection resolution and remove unused registration function 2026-04-07 14:01:13 +02:00
Stela Augustinova 825f6e562b Fix volatile connection resolution to prevent multiple resolves 2026-04-07 13:46:34 +02:00
Stela Augustinova a278afb260 Refactor volatile connections handling in connections and runners modules 2026-04-07 13:42:11 +02:00
Stela Augustinova 2fbeea717c Streamline volatile connections handling and remove unused registration module 2026-04-07 13:26:16 +02:00
Jan Prochazka c7259e4663 Merge pull request #1412 from dbgate/feature/copy-sql
Improve clipboard formatters to omit undefined values, enhancing data…
2026-04-07 13:11:49 +02:00
Stela Augustinova 69a2669342 Implement volatile connections handling in runners and shell modules 2026-04-07 13:06:04 +02:00
CI workflows 42d1ca8fd4 chore: auto-update github workflows 2026-04-07 10:27:40 +00:00
Stela Augustinova 1cf52d8b39 SYNC: Add SAST workflow for security scanning using Semgrep 2026-04-07 10:27:24 +00:00
Jan Prochazka 6e482afab2 v7.1.7-premium-beta.1 2026-04-02 16:39:06 +02:00
SPRINX0\prochazka ddf3295e6d Merge branch 'master' into feature/postgres-optimalization 2026-04-02 16:33:25 +02:00
SPRINX0\prochazka 79e087abd3 Optimize PostgreSQL analysis queries and add support for Info Schema routines 2026-04-02 16:32:36 +02:00
CI workflows a7cf51bdf7 chore: auto-update github workflows 2026-04-02 13:55:33 +00:00
Jan Prochazka dfdb31e2f8 Merge pull request #1413 from dbgate/feature/integration-test-pro
Update test workflow to include directory changes for integration tests
2026-04-02 15:55:14 +02:00
Stela Augustinova 3508ddc3ca Update test workflow to include directory changes for integration tests 2026-04-02 11:02:36 +02:00
Jan Prochazka e6f5295420 Merge pull request #1410 from dbgate/feature/large-fields
Enhance binary size handling in grid cell display
2026-04-01 16:01:23 +02:00
CI workflows 2bb08921c3 chore: auto-update github workflows 2026-04-01 13:55:00 +00:00
Stela Augustinova ee2d0e4c30 Remove unnecessary restart policy for DynamoDB service 2026-04-01 15:54:35 +02:00
Jan Prochazka c43a838572 Merge pull request #1411 from dbgate/feature/unreadable-dropdown
Correct class binding and update style variables in SelectField compo…
2026-04-01 15:53:23 +02:00
CI workflows 17ff6a8013 chore: auto-update github workflows 2026-04-01 13:53:13 +00:00
Stela Augustinova 62ad6a0d08 Remove unnecessary restart policy for MongoDB service 2026-04-01 15:52:48 +02:00
CI workflows 5c049fa867 chore: auto-update github workflows 2026-04-01 13:51:09 +00:00
CI workflows 619f17114a Update pro ref 2026-04-01 13:50:58 +00:00
Stela Augustinova 1c1431014c SYNC: Merge pull request #87 from dbgate/feature/collection-test 2026-04-01 13:50:46 +00:00
Stela Augustinova f68ca1e786 Correct class binding and update style variables in SelectField component 2026-04-01 13:24:34 +02:00
Stela Augustinova 8d16a30064 Fix message formatting for large binary fields in stringifyCellValue function 2026-04-01 10:55:47 +02:00
Stela Augustinova cf601c33c0 Enhance binary size handling in grid cell display 2026-04-01 10:25:40 +02:00
SPRINX0\prochazka 899aec2658 v7.1.5-premium-beta.1 2026-03-20 14:24:11 +01:00
SPRINX0\prochazka 74e47587e2 Merge branch 'master' into feature/postgres-optimalization 2026-03-20 14:23:40 +01:00
SPRINX0\prochazka cb70f3c318 postgres loading optimalization 2026-03-19 12:17:29 +01:00
36 changed files with 1220 additions and 1945 deletions
+1 -1
@@ -47,7 +47,7 @@ jobs:
repository: dbgate/dbgate-pro
token: ${{ secrets.GH_TOKEN }}
path: dbgate-pro
- ref: 5baa88d0ad253537298e911868579bae0835888d
+ ref: 6b5e2ff831db9baedb2a43862daa4247810b15de
- name: Merge dbgate/dbgate-pro
run: |
mkdir ../dbgate-pro
+1 -1
@@ -47,7 +47,7 @@ jobs:
repository: dbgate/dbgate-pro
token: ${{ secrets.GH_TOKEN }}
path: dbgate-pro
- ref: 5baa88d0ad253537298e911868579bae0835888d
+ ref: 6b5e2ff831db9baedb2a43862daa4247810b15de
- name: Merge dbgate/dbgate-pro
run: |
mkdir ../dbgate-pro
+1 -1
@@ -39,7 +39,7 @@ jobs:
repository: dbgate/dbgate-pro
token: ${{ secrets.GH_TOKEN }}
path: dbgate-pro
- ref: 5baa88d0ad253537298e911868579bae0835888d
+ ref: 6b5e2ff831db9baedb2a43862daa4247810b15de
- name: Merge dbgate/dbgate-pro
run: |
mkdir ../dbgate-pro
+1 -1
@@ -44,7 +44,7 @@ jobs:
repository: dbgate/dbgate-pro
token: ${{ secrets.GH_TOKEN }}
path: dbgate-pro
- ref: 5baa88d0ad253537298e911868579bae0835888d
+ ref: 6b5e2ff831db9baedb2a43862daa4247810b15de
- name: Merge dbgate/dbgate-pro
run: |
mkdir ../dbgate-pro
+1 -1
@@ -35,7 +35,7 @@ jobs:
repository: dbgate/dbgate-pro
token: ${{ secrets.GH_TOKEN }}
path: dbgate-pro
- ref: 5baa88d0ad253537298e911868579bae0835888d
+ ref: 6b5e2ff831db9baedb2a43862daa4247810b15de
- name: Merge dbgate/dbgate-pro
run: |
mkdir ../dbgate-pro
+1 -1
@@ -30,7 +30,7 @@ jobs:
repository: dbgate/dbgate-pro
token: ${{ secrets.GH_TOKEN }}
path: dbgate-pro
- ref: 5baa88d0ad253537298e911868579bae0835888d
+ ref: 6b5e2ff831db9baedb2a43862daa4247810b15de
- name: Merge dbgate/dbgate-pro
run: |
mkdir ../dbgate-pro
+34
@@ -23,26 +23,49 @@ jobs:
- uses: actions/checkout@v3
with:
fetch-depth: 1
- name: Checkout dbgate/dbgate-pro
uses: actions/checkout@v2
with:
repository: dbgate/dbgate-pro
token: ${{ secrets.GH_TOKEN }}
path: dbgate-pro
ref: 6b5e2ff831db9baedb2a43862daa4247810b15de
- name: Merge dbgate/dbgate-pro
run: |
mkdir ../dbgate-pro
mv dbgate-pro/* ../dbgate-pro/
cd ..
mkdir dbgate-merged
cd dbgate-pro
cd sync
yarn
node sync.js --nowatch
cd ..
- name: yarn install
run: |
cd ../dbgate-merged
yarn install
- name: Integration tests
run: |
cd ../dbgate-merged
cd integration-tests
yarn test:ci
- name: Filter parser tests
if: always()
run: |
cd ../dbgate-merged
cd packages/filterparser
yarn test:ci
- name: Datalib (perspective) tests
if: always()
run: |
cd ../dbgate-merged
cd packages/datalib
yarn test:ci
- name: Tools tests
if: always()
run: |
cd ../dbgate-merged
cd packages/tools
yarn test:ci
services:
@@ -98,3 +121,14 @@ jobs:
FIREBIRD_USE_LEGACY_AUTH: true
ports:
- '3050:3050'
mongodb:
image: mongo:4.0.12
ports:
- '27017:27017'
volumes:
- mongo-data:/data/db
- mongo-config:/data/configdb
dynamodb:
image: amazon/dynamodb-local
ports:
- '8000:8000'
@@ -0,0 +1,536 @@
const requireEngineDriver = require('dbgate-api/src/utility/requireEngineDriver');
const crypto = require('crypto');
const stream = require('stream');
const { mongoDbEngine, dynamoDbEngine } = require('../engines');
const tableWriter = require('dbgate-api/src/shell/tableWriter');
const tableReader = require('dbgate-api/src/shell/tableReader');
const copyStream = require('dbgate-api/src/shell/copyStream');
function randomCollectionName() {
return 'test_' + crypto.randomBytes(6).toString('hex');
}
const documentEngines = [
{ label: 'MongoDB', engine: mongoDbEngine },
{ label: 'DynamoDB', engine: dynamoDbEngine },
];
async function connectEngine(engine) {
const driver = requireEngineDriver(engine.connection);
const conn = await driver.connect(engine.connection);
return { driver, conn };
}
async function createCollection(driver, conn, collectionName, engine) {
if (engine.connection.engine.startsWith('dynamodb')) {
await driver.operation(conn, {
type: 'createCollection',
collection: {
name: collectionName,
partitionKey: '_id',
partitionKeyType: 'S',
},
});
} else {
await driver.operation(conn, {
type: 'createCollection',
collection: { name: collectionName },
});
}
}
async function dropCollection(driver, conn, collectionName) {
try {
await driver.operation(conn, {
type: 'dropCollection',
collection: collectionName,
});
} catch (e) {
// Ignore errors when dropping (collection may not exist)
}
}
async function insertDocument(driver, conn, collectionName, doc) {
return driver.updateCollection(conn, {
inserts: [{ pureName: collectionName, document: {}, fields: doc }],
updates: [],
deletes: [],
});
}
async function readAll(driver, conn, collectionName) {
return driver.readCollection(conn, { pureName: collectionName, limit: 1000 });
}
async function updateDocument(driver, conn, collectionName, condition, fields) {
return driver.updateCollection(conn, {
inserts: [],
updates: [{ pureName: collectionName, condition, fields }],
deletes: [],
});
}
async function deleteDocument(driver, conn, collectionName, condition) {
return driver.updateCollection(conn, {
inserts: [],
updates: [],
deletes: [{ pureName: collectionName, condition }],
});
}
describe('Collection CRUD', () => {
describe.each(documentEngines.map(e => [e.label, e.engine]))('%s', (label, engine) => {
let driver;
let conn;
let collectionName;
beforeAll(async () => {
const result = await connectEngine(engine);
driver = result.driver;
conn = result.conn;
});
afterAll(async () => {
if (conn) {
await driver.close(conn);
}
});
beforeEach(async () => {
collectionName = randomCollectionName();
await createCollection(driver, conn, collectionName, engine);
});
afterEach(async () => {
await dropCollection(driver, conn, collectionName);
});
// ---- INSERT ----
test('insert a single document', async () => {
const res = await insertDocument(driver, conn, collectionName, {
_id: 'doc1',
name: 'Alice',
age: 30,
});
expect(res.inserted.length).toBe(1);
const all = await readAll(driver, conn, collectionName);
expect(all.rows.length).toBe(1);
expect(all.rows[0].name).toBe('Alice');
expect(all.rows[0].age).toBe(30);
});
test('insert multiple documents', async () => {
await insertDocument(driver, conn, collectionName, { _id: 'a1', name: 'Alice' });
await insertDocument(driver, conn, collectionName, { _id: 'a2', name: 'Bob' });
await insertDocument(driver, conn, collectionName, { _id: 'a3', name: 'Charlie' });
const all = await readAll(driver, conn, collectionName);
expect(all.rows.length).toBe(3);
const names = all.rows.map(r => r.name).sort();
expect(names).toEqual(['Alice', 'Bob', 'Charlie']);
});
test('insert document with nested object', async () => {
await insertDocument(driver, conn, collectionName, {
_id: 'nested1',
name: 'Alice',
address: { city: 'Prague', zip: '11000' },
});
const all = await readAll(driver, conn, collectionName);
expect(all.rows.length).toBe(1);
expect(all.rows[0].address.city).toBe('Prague');
expect(all.rows[0].address.zip).toBe('11000');
});
// ---- READ ----
test('read from empty collection returns no rows', async () => {
const all = await readAll(driver, conn, collectionName);
expect(all.rows.length).toBe(0);
});
test('read with limit', async () => {
await insertDocument(driver, conn, collectionName, { _id: 'l1', name: 'A' });
await insertDocument(driver, conn, collectionName, { _id: 'l2', name: 'B' });
await insertDocument(driver, conn, collectionName, { _id: 'l3', name: 'C' });
const limited = await driver.readCollection(conn, {
pureName: collectionName,
limit: 2,
});
expect(limited.rows.length).toBe(2);
});
test('count documents', async () => {
await insertDocument(driver, conn, collectionName, { _id: 'c1', name: 'A' });
await insertDocument(driver, conn, collectionName, { _id: 'c2', name: 'B' });
const result = await driver.readCollection(conn, {
pureName: collectionName,
countDocuments: true,
});
expect(result.count).toBe(2);
});
test('count documents on empty collection returns zero', async () => {
const result = await driver.readCollection(conn, {
pureName: collectionName,
countDocuments: true,
});
expect(result.count).toBe(0);
});
// ---- UPDATE ----
test('update an existing document', async () => {
await insertDocument(driver, conn, collectionName, { _id: 'u1', name: 'Alice', age: 25 });
const res = await updateDocument(driver, conn, collectionName, { _id: 'u1' }, { name: 'Alice Updated' });
expect(res.errorMessage).toBeUndefined();
const all = await readAll(driver, conn, collectionName);
expect(all.rows.length).toBe(1);
expect(all.rows[0].name).toBe('Alice Updated');
});
test('update does not create new document', async () => {
await insertDocument(driver, conn, collectionName, { _id: 'u2', name: 'Bob' });
await updateDocument(driver, conn, collectionName, { _id: 'nonexistent' }, { name: 'Ghost' });
const all = await readAll(driver, conn, collectionName);
expect(all.rows.length).toBe(1);
expect(all.rows[0].name).toBe('Bob');
});
test('update only specified fields', async () => {
await insertDocument(driver, conn, collectionName, { _id: 'u3', name: 'Carol', age: 40, city: 'London' });
await updateDocument(driver, conn, collectionName, { _id: 'u3' }, { age: 41 });
const all = await readAll(driver, conn, collectionName);
expect(all.rows.length).toBe(1);
expect(all.rows[0].name).toBe('Carol');
expect(all.rows[0].age).toBe(41);
expect(all.rows[0].city).toBe('London');
});
// ---- DELETE ----
test('delete an existing document', async () => {
await insertDocument(driver, conn, collectionName, { _id: 'd1', name: 'Alice' });
await insertDocument(driver, conn, collectionName, { _id: 'd2', name: 'Bob' });
const res = await deleteDocument(driver, conn, collectionName, { _id: 'd1' });
expect(res.errorMessage).toBeUndefined();
const all = await readAll(driver, conn, collectionName);
expect(all.rows.length).toBe(1);
expect(all.rows[0].name).toBe('Bob');
});
test('delete non-existing document does not affect collection', async () => {
await insertDocument(driver, conn, collectionName, { _id: 'dx1', name: 'Alice' });
await deleteDocument(driver, conn, collectionName, { _id: 'nonexistent' });
const all = await readAll(driver, conn, collectionName);
expect(all.rows.length).toBe(1);
expect(all.rows[0].name).toBe('Alice');
});
test('delete all documents leaves empty collection', async () => {
await insertDocument(driver, conn, collectionName, { _id: 'da1', name: 'A' });
await insertDocument(driver, conn, collectionName, { _id: 'da2', name: 'B' });
await deleteDocument(driver, conn, collectionName, { _id: 'da1' });
await deleteDocument(driver, conn, collectionName, { _id: 'da2' });
const all = await readAll(driver, conn, collectionName);
expect(all.rows.length).toBe(0);
});
// ---- EDGE CASES ----
test('insert and read document with empty string field', async () => {
await insertDocument(driver, conn, collectionName, { _id: 'e1', name: '', value: 'test' });
const all = await readAll(driver, conn, collectionName);
expect(all.rows.length).toBe(1);
expect(all.rows[0].name).toBe('');
expect(all.rows[0].value).toBe('test');
});
test('insert and read document with numeric values', async () => {
await insertDocument(driver, conn, collectionName, {
_id: 'n1',
intVal: 42,
floatVal: 3.14,
zero: 0,
negative: -10,
});
const all = await readAll(driver, conn, collectionName);
expect(all.rows.length).toBe(1);
expect(all.rows[0].intVal).toBe(42);
expect(all.rows[0].floatVal).toBeCloseTo(3.14);
expect(all.rows[0].zero).toBe(0);
expect(all.rows[0].negative).toBe(-10);
});
test('insert and read document with boolean values', async () => {
await insertDocument(driver, conn, collectionName, {
_id: 'b1',
active: true,
deleted: false,
});
const all = await readAll(driver, conn, collectionName);
expect(all.rows.length).toBe(1);
expect(all.rows[0].active).toBe(true);
expect(all.rows[0].deleted).toBe(false);
});
test('reading non-existing collection returns error or empty', async () => {
const result = await driver.readCollection(conn, {
pureName: 'nonexistent_collection_' + crypto.randomBytes(4).toString('hex'),
limit: 10,
});
// Depending on the driver, this may return an error or empty rows
if (result.errorMessage) {
expect(typeof result.errorMessage).toBe('string');
} else {
expect(result.rows.length).toBe(0);
}
});
test('replace full document via update with document field', async () => {
await insertDocument(driver, conn, collectionName, { _id: 'r1', name: 'Original', extra: 'data' });
await driver.updateCollection(conn, {
inserts: [],
updates: [
{
pureName: collectionName,
condition: { _id: 'r1' },
document: { _id: 'r1', name: 'Replaced' },
fields: {},
},
],
deletes: [],
});
const all = await readAll(driver, conn, collectionName);
expect(all.rows.length).toBe(1);
expect(all.rows[0].name).toBe('Replaced');
});
test('insert then update then delete lifecycle', async () => {
// Insert
await insertDocument(driver, conn, collectionName, { _id: 'life1', name: 'Lifecycle', status: 'created' });
let all = await readAll(driver, conn, collectionName);
expect(all.rows.length).toBe(1);
expect(all.rows[0].status).toBe('created');
// Update
await updateDocument(driver, conn, collectionName, { _id: 'life1' }, { status: 'updated' });
all = await readAll(driver, conn, collectionName);
expect(all.rows[0].status).toBe('updated');
// Delete
await deleteDocument(driver, conn, collectionName, { _id: 'life1' });
all = await readAll(driver, conn, collectionName);
expect(all.rows.length).toBe(0);
});
});
});
function createDocumentImportStream(documents) {
const pass = new stream.PassThrough({ objectMode: true });
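// first chunk is a stream header; __isDynamicStructure marks the rows as
// free-form documents (no fixed column set) for the consuming writer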
pass.write({ __isStreamHeader: true, __isDynamicStructure: true });
for (const doc of documents) {
pass.write(doc);
}
pass.end();
return pass;
}
function createExportStream() {
const writable = new stream.Writable({ objectMode: true });
writable.resultArray = [];
writable._write = (chunk, encoding, callback) => {
writable.resultArray.push(chunk);
callback();
};
return writable;
}
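// collecting chunks into writable.resultArray lets the tests assert on everything
// the reader emitted, including the leading __isStreamHeader chunk filtered out below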
describe('Collection Import/Export', () => {
describe.each(documentEngines.map(e => [e.label, e.engine]))('%s', (label, engine) => {
let driver;
let conn;
let collectionName;
beforeAll(async () => {
const result = await connectEngine(engine);
driver = result.driver;
conn = result.conn;
});
afterAll(async () => {
if (conn) {
await driver.close(conn);
}
});
beforeEach(async () => {
collectionName = randomCollectionName();
await createCollection(driver, conn, collectionName, engine);
});
afterEach(async () => {
await dropCollection(driver, conn, collectionName);
});
test('import documents via stream', async () => {
const documents = [
{ _id: 'imp1', name: 'Alice', age: 30 },
{ _id: 'imp2', name: 'Bob', age: 25 },
{ _id: 'imp3', name: 'Charlie', age: 35 },
];
const reader = createDocumentImportStream(documents);
const writer = await tableWriter({
systemConnection: conn,
driver,
pureName: collectionName,
createIfNotExists: true,
});
await copyStream(reader, writer);
const all = await readAll(driver, conn, collectionName);
expect(all.rows.length).toBe(3);
const names = all.rows.map(r => r.name).sort();
expect(names).toEqual(['Alice', 'Bob', 'Charlie']);
});
test('export documents via stream', async () => {
await insertDocument(driver, conn, collectionName, { _id: 'exp1', name: 'Alice', city: 'Prague' });
await insertDocument(driver, conn, collectionName, { _id: 'exp2', name: 'Bob', city: 'Vienna' });
await insertDocument(driver, conn, collectionName, { _id: 'exp3', name: 'Charlie', city: 'Berlin' });
const reader = await tableReader({
systemConnection: conn,
driver,
pureName: collectionName,
});
const writer = createExportStream();
await copyStream(reader, writer);
const rows = writer.resultArray.filter(x => !x.__isStreamHeader);
expect(rows.length).toBe(3);
const names = rows.map(r => r.name).sort();
expect(names).toEqual(['Alice', 'Bob', 'Charlie']);
});
test('import then export round-trip', async () => {
const documents = [
{ _id: 'rt1', name: 'Alice', value: 100 },
{ _id: 'rt2', name: 'Bob', value: 200 },
{ _id: 'rt3', name: 'Charlie', value: 300 },
{ _id: 'rt4', name: 'Diana', value: 400 },
];
// Import
const importReader = createDocumentImportStream(documents);
const importWriter = await tableWriter({
systemConnection: conn,
driver,
pureName: collectionName,
createIfNotExists: true,
});
await copyStream(importReader, importWriter);
// Export
const exportReader = await tableReader({
systemConnection: conn,
driver,
pureName: collectionName,
});
const exportWriter = createExportStream();
await copyStream(exportReader, exportWriter);
const rows = exportWriter.resultArray.filter(x => !x.__isStreamHeader);
expect(rows.length).toBe(4);
const sortedRows = rows.sort((a, b) => a._id.localeCompare(b._id));
for (const doc of documents) {
const found = sortedRows.find(r => r._id === doc._id);
expect(found).toBeDefined();
expect(found.name).toBe(doc.name);
expect(found.value).toBe(doc.value);
}
});
test('import documents with nested objects', async () => {
const documents = [
{ _id: 'nest1', name: 'Alice', address: { city: 'Prague', zip: '11000' } },
{ _id: 'nest2', name: 'Bob', address: { city: 'Vienna', zip: '1010' } },
];
const reader = createDocumentImportStream(documents);
const writer = await tableWriter({
systemConnection: conn,
driver,
pureName: collectionName,
createIfNotExists: true,
});
await copyStream(reader, writer);
const all = await readAll(driver, conn, collectionName);
expect(all.rows.length).toBe(2);
const alice = all.rows.find(r => r.name === 'Alice');
expect(alice.address.city).toBe('Prague');
expect(alice.address.zip).toBe('11000');
});
test('import many documents', async () => {
const documents = [];
for (let i = 0; i < 150; i++) {
documents.push({ _id: `many${i}`, name: `Name${i}`, index: i });
}
const reader = createDocumentImportStream(documents);
const writer = await tableWriter({
systemConnection: conn,
driver,
pureName: collectionName,
createIfNotExists: true,
});
await copyStream(reader, writer);
const result = await driver.readCollection(conn, {
pureName: collectionName,
countDocuments: true,
});
expect(result.count).toBe(150);
});
test('export empty collection returns no data rows', async () => {
const reader = await tableReader({
systemConnection: conn,
driver,
pureName: collectionName,
});
const writer = createExportStream();
await copyStream(reader, writer);
const rows = writer.resultArray.filter(x => !x.__isStreamHeader);
expect(rows.length).toBe(0);
});
});
});
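The helper functions above double as a minimal recipe for driving a document database outside Jest. A sketch under the same assumptions the tests make (the ../engines module and the driver API exercised above):

const requireEngineDriver = require('dbgate-api/src/utility/requireEngineDriver');
const { mongoDbEngine } = require('../engines');

async function demo() {
  const driver = requireEngineDriver(mongoDbEngine.connection);
  const conn = await driver.connect(mongoDbEngine.connection);
  await driver.operation(conn, { type: 'createCollection', collection: { name: 'demo' } });
  await driver.updateCollection(conn, {
    inserts: [{ pureName: 'demo', document: {}, fields: { _id: '1', name: 'Alice' } }],
    updates: [],
    deletes: [],
  });
  const all = await driver.readCollection(conn, { pureName: 'demo', limit: 10 });
  console.log(all.rows); // [{ _id: '1', name: 'Alice' }]
  await driver.operation(conn, { type: 'dropCollection', collection: 'demo' });
  await driver.close(conn);
}

demo();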
+17
@@ -123,5 +123,22 @@ services:
retries: 3
start_period: 40s
mongodb:
image: mongo:4.0.12
restart: always
volumes:
- mongo-data:/data/db
- mongo-config:/data/configdb
ports:
- 27017:27017
dynamodb:
image: amazon/dynamodb-local
restart: always
ports:
- 8000:8000
volumes:
firebird-data:
mongo-data:
mongo-config:
+23
@@ -738,6 +738,27 @@ const firebirdEngine = {
skipDropReferences: true,
};
/** @type {import('dbgate-types').TestEngineInfo} */
const mongoDbEngine = {
label: 'MongoDB',
connection: {
engine: 'mongo@dbgate-plugin-mongo',
server: 'localhost',
port: 27017,
},
};
/** @type {import('dbgate-types').TestEngineInfo} */
const dynamoDbEngine = {
label: 'DynamoDB',
connection: {
engine: 'dynamodb@dbgate-plugin-dynamodb',
server: 'localhost',
port: 8000,
authType: 'onpremise',
},
};
const enginesOnCi = [
// all engines, which would be run on GitHub actions
mysqlEngine,
@@ -788,3 +809,5 @@ module.exports.libsqlFileEngine = libsqlFileEngine;
module.exports.libsqlWsEngine = libsqlWsEngine;
module.exports.duckdbEngine = duckdbEngine;
module.exports.firebirdEngine = firebirdEngine;
module.exports.mongoDbEngine = mongoDbEngine;
module.exports.dynamoDbEngine = dynamoDbEngine;
+4 -2
@@ -1,5 +1,6 @@
const requireEngineDriver = require('dbgate-api/src/utility/requireEngineDriver');
const engines = require('./engines');
const { mongoDbEngine, dynamoDbEngine } = require('./engines');
global.DBGATE_PACKAGES = {
'dbgate-tools': require('dbgate-tools'),
'dbgate-sqltree': require('dbgate-sqltree'),
@@ -9,7 +10,7 @@ global.DBGATE_PACKAGES = {
async function connectEngine(engine) {
const { connection } = engine;
const driver = requireEngineDriver(connection);
- for (;;) {
+ for (; ;) {
try {
const conn = await driver.connect(connection);
await driver.getVersion(conn);
@@ -26,7 +27,8 @@ async function connectEngine(engine) {
async function run() {
await new Promise(resolve => setTimeout(resolve, 10000));
- await Promise.all(engines.map(engine => connectEngine(engine)));
+ const documentEngines = [mongoDbEngine, dynamoDbEngine];
+ await Promise.all([...engines, ...documentEngines].map(engine => connectEngine(engine)));
}
run();
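The loop in this script (its body is cut off by the hunk boundary) is a wait-until-ready probe: connect, ask for the server version, retry on failure. A sketch of the presumed shape, with the retry delay as an assumption:

async function waitForEngine(driver, connection) {
  for (;;) {
    try {
      const conn = await driver.connect(connection);
      await driver.getVersion(conn); // throws until the server actually answers
      return conn;
    } catch (err) {
      // assumed: short sleep before the next attempt; the real delay is not shown in the hunks
      await new Promise(resolve => setTimeout(resolve, 1000));
    }
  }
}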
+1 -1
@@ -1,6 +1,6 @@
{
"private": true,
"version": "7.1.6",
"version": "7.1.7-premium-beta.1",
"name": "dbgate-all",
"workspaces": [
"packages/*",
+55 -1
@@ -492,7 +492,61 @@ module.exports = {
return mask && !platformInfo.allowShellConnection ? maskConnection(res) : encryptConnection(res);
}
const res = await this.datastore.get(conid);
- return res || null;
+ if (res) return res;
// In a forked runner-script child process, ask the parent for connections that may be
// volatile (in-memory only, e.g. ask-for-password). We only do this when
// there really is a parent (process.send exists) to avoid an infinite loop
// when the parent's own getCore falls through here.
// The check is intentionally narrow: only runner scripts pass
// --process-display-name script, so connect/session/ssh-forward subprocesses
// are not affected and continue to return null immediately.
if (process.send && processArgs.processDisplayName === 'script') {
const conn = await new Promise(resolve => {
let resolved = false;
const cleanup = () => {
process.removeListener('message', handler);
process.removeListener('disconnect', onDisconnect);
clearTimeout(timeout);
};
const settle = value => {
if (!resolved) {
resolved = true;
cleanup();
resolve(value);
}
};
const handler = message => {
if (message?.msgtype === 'volatile-connection-response' && message.conid === conid) {
settle(message.conn || null);
}
};
const onDisconnect = () => settle(null);
const timeout = setTimeout(() => settle(null), 5000);
// Don't let the timer alone keep the process alive if all other work is done
timeout.unref();
process.on('message', handler);
process.once('disconnect', onDisconnect);
try {
process.send({ msgtype: 'get-volatile-connection', conid });
} catch {
settle(null);
}
});
if (conn) {
volatileConnections[conn._id] = conn; // cache for subsequent calls
return conn;
}
}
return null;
},
get_meta: true,
+21
@@ -196,6 +196,27 @@ module.exports = {
// @ts-ignore
const { msgtype } = message;
if (handleProcessCommunication(message, subprocess)) return;
if (msgtype === 'get-volatile-connection') {
const connections = require('./connections');
// @ts-ignore
const conid = message.conid;
if (!conid || typeof conid !== 'string') return;
const trySend = payload => {
if (!subprocess.connected) return;
try {
subprocess.send(payload);
} catch {
// child disconnected between the check and the send — ignore
}
};
connections.getCore({ conid }).then(conn => {
trySend({ msgtype: 'volatile-connection-response', conid, conn: conn?.unsaved ? conn : null });
}).catch(err => {
logger.error({ ...extractErrorLogData(err), conid }, 'DBGM-00000 Error resolving volatile connection for child process');
trySend({ msgtype: 'volatile-connection-response', conid, conn: null });
});
return;
}
this[`handle_${msgtype}`](runid, message);
});
return _.pick(newOpened, ['runid']);
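Together with the connections.js getCore diff above, this handler completes a timeout-guarded request/response over Node's child-process IPC channel. A self-contained sketch of the same pattern, with the message names taken from the diffs and an in-memory map standing in for the real volatile connection store:

// ipc-roundtrip.js, minimal sketch; run with: node ipc-roundtrip.js
const { fork } = require('child_process');

if (process.send) {
  // child: ask the parent, settle with null on timeout or parent disconnect
  const request = conid =>
    new Promise(resolve => {
      let done = false;
      const settle = value => {
        if (done) return;
        done = true;
        process.removeListener('message', onMessage);
        clearTimeout(timer);
        resolve(value);
      };
      const onMessage = msg => {
        if (msg?.msgtype === 'volatile-connection-response' && msg.conid === conid) settle(msg.conn ?? null);
      };
      const timer = setTimeout(() => settle(null), 5000);
      timer.unref(); // a pending timer alone must not keep the child alive
      process.on('message', onMessage);
      process.once('disconnect', () => settle(null));
      process.send({ msgtype: 'get-volatile-connection', conid });
    });
  request('vlt-demo').then(conn => {
    console.log('child received:', conn);
    process.exit(0);
  });
} else {
  // parent: answer lookups from an in-memory map (stand-in for the real store)
  const store = { 'vlt-demo': { _id: 'vlt-demo', unsaved: true } };
  const child = fork(__filename);
  child.on('message', msg => {
    if (msg?.msgtype === 'get-volatile-connection' && child.connected) {
      child.send({ msgtype: 'volatile-connection-response', conid: msg.conid, conn: store[msg.conid] ?? null });
    }
  });
}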
+1
@@ -7,6 +7,7 @@ async function runScript(func) {
if (processArgs.checkParent) {
childProcessChecker();
}
try {
await func();
process.exit(0);
+19 -1
@@ -12,6 +12,13 @@ import isPlainObject from 'lodash/isPlainObject';
import md5 from 'blueimp-md5';
export const MAX_GRID_TEXT_LENGTH = 1000; // maximum length of text in grid cell, longer text is truncated
export const MAX_GRID_BINARY_SIZE = 10000; // maximum binary size (base64 chars or byte count) before showing 'too large' in grid cell
function formatByteSize(bytes: number): string {
if (bytes >= 1024 * 1024) return `${(bytes / (1024 * 1024)).toFixed(1)} MB`;
if (bytes >= 1024) return `${(bytes / 1024).toFixed(1)} KB`;
return `${bytes} B`;
}
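// e.g. formatByteSize(500) === '500 B', formatByteSize(2048) === '2.0 KB',
// formatByteSize(5 * 1024 * 1024) === '5.0 MB'; the binary branches below estimate
// bytes from base64 length as length * 3 / 4, since base64 packs 3 bytes into 4 chars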
export type EditorDataType =
| 'null'
@@ -329,6 +336,9 @@ export function stringifyCellValue(
return { value: `${tag}("${uuidStr}")`, gridStyle: 'valueCellStyle' };
}
}
if (intent === 'gridCellIntent' && value.$binary.base64.length > MAX_GRID_BINARY_SIZE) {
return { value: `(Field too large, ${formatByteSize(Math.round(value.$binary.base64.length * 3 / 4))})`, gridStyle: 'nullCellStyle' };
}
return {
value: base64ToHex(value.$binary.base64),
gridStyle: 'valueCellStyle',
@@ -417,6 +427,14 @@ export function stringifyCellValue(
}
}
if (value?.type === 'Buffer' && _isArray(value.data)) {
if (intent === 'gridCellIntent') {
return value.data.length > MAX_GRID_BINARY_SIZE
? { value: `(Field too large, ${formatByteSize(value.data.length)})`, gridStyle: 'nullCellStyle' }
: { value: '0x' + arrayToHexString(value.data), gridStyle: 'valueCellStyle' };
}
}
if (_isArray(value)) {
switch (intent) {
case 'gridCellIntent':
@@ -545,7 +563,7 @@ export function shouldOpenMultilineDialog(value) {
}
export function isJsonLikeLongString(value) {
- return _isString(value) && value.length > 100 && value.match(/^\s*\{.*\}\s*$|^\s*\[.*\]\s*$/m);
+ return _isString(value) && value.length > 100 && value.length <= MAX_GRID_BINARY_SIZE && value.match(/^\s*\{.*\}\s*$|^\s*\[.*\]\s*$/m);
}
export function getIconForRedisType(type) {
+5 -7
@@ -24,7 +24,7 @@
{#if isNative}
<select
value={options.find(x => x.value == value) ? value : defaultValue}
class="{selectClass}"
class={selectClass}
{...$$restProps}
on:change={e => {
dispatch('change', e.target['value']);
@@ -47,7 +47,7 @@
{...$$restProps}
items={options ?? []}
value={isMulti
- ? _.compact((value && Array.isArray(value)) ? value.map(item => options?.find(x => x.value == item)) : [])
+ ? _.compact(value && Array.isArray(value) ? value.map(item => options?.find(x => x.value == item)) : [])
: (options?.find(x => x.value == value) ?? null)}
on:select={e => {
if (isMulti) {
@@ -69,7 +69,6 @@
</div>
{/if}
<style>
.select {
--border: var(--theme-input-border);
@@ -78,10 +77,10 @@
--background: var(--theme-input-background);
--borderHoverColor: var(--theme-input-border-hover-color);
--borderFocusColor: var(--theme-input-border-focus-color);
- --listBackground: var(--theme-input-list-background);
+ --listBackground: var(--theme-input-background);
--itemActiveBackground: var(--theme-input-item-active-background);
--itemIsActiveBG: var(--theme-input-item-active-background);
- --itemHoverBG: var(--theme-input-item-hover-background);
+ --itemHoverBG: var(--theme-input-multi-clear-hover);
--itemColor: var(--theme-input-item-foreground);
--listEmptyColor: var(--theme-input-background);
--height: 40px;
@@ -95,9 +94,8 @@
--multiClearHoverFill: var(--theme-input-multi-clear-foreground);
--multiItemActiveBG: var(--theme-input-multi-item-background);
--multiItemActiveColor: var(--theme-input-multi-item-foreground);
- --multiItemBG: var(--theme-input-multi-item-background);
+ --multiItemBG: var(--theme-input-multi-clear-background);
--multiItemDisabledHoverBg: var(--theme-input-multi-item-background);
--multiItemDisabledHoverColor: var(--theme-input-multi-item-foreground);
}
</style>
@@ -6,6 +6,7 @@ import { getConnectionInfo } from '../utility/metadataLoaders';
import { findEngineDriver, findObjectLike } from 'dbgate-tools';
import { findFileFormat } from '../plugins/fileformats';
import { getCurrentConfig, getExtensions } from '../stores';
import { getVolatileRemapping } from '../utility/api';
export function getTargetName(extensions, source, values) {
const key = `targetName_${source}`;
@@ -38,6 +39,30 @@ function extractDriverApiParameters(values, direction, driver) {
export function extractShellConnection(connection, database) {
const config = getCurrentConfig();
// Case 1: connection._id is the original ID and a volatile remap exists.
// Use the volatile ID so the backend child process can look up the credentials.
const volatileId = getVolatileRemapping(connection._id);
if (volatileId !== connection._id) {
return {
_id: volatileId,
engine: connection.engine,
database,
};
}
// Case 2: apiCall.transformApiArgs already remapped the conid before the
// connection was fetched, so connection._id IS already the volatile ID and
// connection.unsaved === true. Falling through to allowShellConnection here
// would embed plaintext credentials in the generated script — always use the
// _id reference instead.
if (connection.unsaved) {
return {
_id: connection._id,
engine: connection.engine,
database,
};
}
return config.allowShellConnection
? {
..._.omitBy(
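In short: prefer an _id reference whenever one can be resolved, and only embed connection details when the config explicitly allows it (the omitBy tail above is truncated by the hunk). A condensed sketch of the branching, with the final fallback as an assumption:

function pickShellConnectionRef(connection, database, { getVolatileRemapping, allowShellConnection }) {
  const volatileId = getVolatileRemapping(connection._id);
  if (volatileId !== connection._id) {
    return { _id: volatileId, engine: connection.engine, database }; // case 1: volatile remap exists
  }
  if (connection.unsaved) {
    return { _id: connection._id, engine: connection.engine, database }; // case 2: already a volatile id
  }
  if (allowShellConnection) {
    return { ...connection, database }; // simplified; the real code filters fields via _.omitBy
  }
  return { _id: connection._id, engine: connection.engine, database }; // assumed default: reference by id
}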
@@ -72,6 +72,8 @@ class Analyser extends DatabaseAnalyser {
...replacements,
$typeAggFunc: this.driver.dialect.stringAgg ? 'string_agg' : 'max',
$typeAggParam: this.driver.dialect.stringAgg ? ", '|'" : '',
$hashColumnAggTail: this.driver.dialect.stringAgg ? ", ',' ORDER BY a.attnum" : '',
$hashConstraintAggTail: this.driver.dialect.stringAgg ? ", ',' ORDER BY con.conname" : '',
$md5Function: this.dialect?.isFipsComplianceOn ? 'LENGTH' : 'MD5',
});
return query;
@@ -83,131 +85,92 @@ class Analyser extends DatabaseAnalyser {
}
async _runAnalysis() {
this.feedback({ analysingMessage: 'DBGM-00241 Loading tables' });
const tables = await this.analyserQuery('tableList', ['tables']);
const useInfoSchema = this.driver.__analyserInternals.useInfoSchemaRoutines;
const routinesQueryName = useInfoSchema ? 'routinesInfoSchema' : 'routines';
const proceduresParametersQueryName = useInfoSchema ? 'proceduresParametersInfoSchema' : 'proceduresParameters';
this.feedback({ analysingMessage: 'DBGM-00242 Loading columns' });
const columns = await this.analyserQuery('columns', ['tables', 'views']);
this.feedback({ analysingMessage: 'DBGM-00243 Loading primary keys' });
const pkColumns = await this.analyserQuery('primaryKeys', ['tables']);
let fkColumns = null;
this.feedback({ analysingMessage: 'DBGM-00244 Loading foreign key constraints' });
// const fk_tableConstraints = await this.analyserQuery('fk_tableConstraints', ['tables']);
this.feedback({ analysingMessage: 'DBGM-00245 Loading foreign key refs' });
const foreignKeys = await this.analyserQuery('foreignKeys', ['tables']);
this.feedback({ analysingMessage: 'DBGM-00246 Loading foreign key columns' });
const fk_keyColumnUsage = await this.analyserQuery('fk_keyColumnUsage', ['tables']);
// const cntKey = x => `${x.constraint_name}|${x.constraint_schema}`;
const fkRows = [];
// const fkConstraintDct = _.keyBy(fk_tableConstraints.rows, cntKey);
for (const fkRef of foreignKeys.rows) {
// const cntBase = fkConstraintDct[cntKey(fkRef)];
// const cntRef = fkConstraintDct[`${fkRef.unique_constraint_name}|${fkRef.unique_constraint_schema}`];
// if (!cntBase || !cntRef) continue;
const baseCols = _.sortBy(
fk_keyColumnUsage.rows.filter(
x =>
x.table_name == fkRef.table_name &&
x.constraint_name == fkRef.constraint_name &&
x.table_schema == fkRef.table_schema
),
'ordinal_position'
);
const refCols = _.sortBy(
fk_keyColumnUsage.rows.filter(
x =>
x.table_name == fkRef.ref_table_name &&
x.constraint_name == fkRef.unique_constraint_name &&
x.table_schema == fkRef.ref_table_schema
),
'ordinal_position'
);
if (baseCols.length != refCols.length) continue;
for (let i = 0; i < baseCols.length; i++) {
const baseCol = baseCols[i];
const refCol = refCols[i];
fkRows.push({
...fkRef,
pure_name: fkRef.table_name,
schema_name: fkRef.table_schema,
ref_table_name: fkRef.ref_table_name,
ref_schema_name: fkRef.ref_table_schema,
column_name: baseCol.column_name,
ref_column_name: refCol.column_name,
update_action: fkRef.update_action,
delete_action: fkRef.delete_action,
});
}
}
fkColumns = { rows: fkRows };
this.feedback({ analysingMessage: 'DBGM-00247 Loading views' });
const views = await this.analyserQuery('views', ['views']);
this.feedback({ analysingMessage: 'DBGM-00248 Loading materialized views' });
const matviews = this.driver.dialect.materializedViews ? await this.analyserQuery('matviews', ['matviews']) : null;
this.feedback({ analysingMessage: 'DBGM-00249 Loading materialized view columns' });
const matviewColumns = this.driver.dialect.materializedViews
? await this.analyserQuery('matviewColumns', ['matviews'])
: null;
this.feedback({ analysingMessage: 'DBGM-00250 Loading routines' });
const routines = await this.analyserQuery('routines', ['procedures', 'functions']);
this.feedback({ analysingMessage: 'DBGM-00251 Loading routine parameters' });
const routineParametersRows = await this.analyserQuery('proceduresParameters');
this.feedback({ analysingMessage: 'DBGM-00252 Loading indexes' });
const indexes = this.driver.__analyserInternals.skipIndexes
? { rows: [] }
: await this.analyserQuery('indexes', ['tables']);
this.feedback({ analysingMessage: 'DBGM-00253 Loading index columns' });
const indexcols = this.driver.__analyserInternals.skipIndexes
? { rows: [] }
: await this.analyserQuery('indexcols', ['tables']);
this.feedback({ analysingMessage: 'DBGM-00254 Loading unique names' });
const uniqueNames = await this.analyserQuery('uniqueNames', ['tables']);
// Run all independent queries in parallel
this.feedback({ analysingMessage: 'DBGM-00241 Loading database structure' });
const [
tables,
views,
columns,
pkColumns,
foreignKeys,
uniqueNames,
routines,
routineParametersRows,
indexes,
indexcols,
matviews,
matviewColumns,
triggers,
] = await Promise.all([
this.analyserQuery('tableList', ['tables']),
this.analyserQuery('views', ['views']),
this.analyserQuery('columns', ['tables', 'views']),
this.analyserQuery('primaryKeys', ['tables']),
this.analyserQuery('foreignKeys', ['tables']),
this.analyserQuery('uniqueNames', ['tables']),
this.analyserQuery(routinesQueryName, ['procedures', 'functions']),
this.analyserQuery(proceduresParametersQueryName),
this.driver.__analyserInternals.skipIndexes
? Promise.resolve({ rows: [] })
: this.analyserQuery('indexes', ['tables']),
this.driver.__analyserInternals.skipIndexes
? Promise.resolve({ rows: [] })
: this.analyserQuery('indexcols', ['tables']),
this.driver.dialect.materializedViews
? this.analyserQuery('matviews', ['matviews'])
: Promise.resolve(null),
this.driver.dialect.materializedViews
? this.analyserQuery('matviewColumns', ['matviews'])
: Promise.resolve(null),
this.analyserQuery('triggers'),
]);
+ // Load geometry/geography columns if the views exist (these are rare, so run after views are loaded)
let geometryColumns = { rows: [] };
- if (views.rows.find(x => x.pure_name == 'geometry_columns' && x.schema_name == 'public')) {
-   this.feedback({ analysingMessage: 'DBGM-00255 Loading geometry columns' });
-   geometryColumns = await this.analyserQuery('geometryColumns', ['tables']);
- }
let geographyColumns = { rows: [] };
- if (views.rows.find(x => x.pure_name == 'geography_columns' && x.schema_name == 'public')) {
-   this.feedback({ analysingMessage: 'DBGM-00256 Loading geography columns' });
-   geographyColumns = await this.analyserQuery('geographyColumns', ['tables']);
+ const hasGeometry = views.rows.find(x => x.pure_name == 'geometry_columns' && x.schema_name == 'public');
+ const hasGeography = views.rows.find(x => x.pure_name == 'geography_columns' && x.schema_name == 'public');
+ if (hasGeometry || hasGeography) {
+   const [geomCols, geogCols] = await Promise.all([
+     hasGeometry
+       ? this.analyserQuery('geometryColumns', ['tables'])
+       : Promise.resolve({ rows: [] }),
+     hasGeography
+       ? this.analyserQuery('geographyColumns', ['tables'])
+       : Promise.resolve({ rows: [] }),
+   ]);
+   geometryColumns = geomCols;
+   geographyColumns = geogCols;
}
- this.feedback({ analysingMessage: 'DBGM-00257 Loading triggers' });
- const triggers = await this.analyserQuery('triggers');
this.feedback({ analysingMessage: 'DBGM-00258 Finalizing DB structure' });
- const columnColumnsMapped = fkColumns.rows.map(x => ({
-   pureName: x.pure_name,
-   schemaName: x.schema_name,
-   constraintSchema: x.constraint_schema,
+ // Pre-build lookup maps for O(1) access instead of O(n) scanning per table/view
+ const columnsByTable = _.groupBy(columns.rows, x => `${x.schema_name}.${x.pure_name}`);
+ const indexcolsByOidAttnum = _.keyBy(indexcols.rows, x => `${x.oid}_${x.attnum}`);
+ const uniqueNameSet = new Set(uniqueNames.rows.map(x => x.constraint_name));
+ const indexesByTable = _.groupBy(indexes.rows, x => `${x.schema_name}.${x.table_name}`);
+ const matviewColumnsByTable = matviewColumns
+   ? _.groupBy(matviewColumns.rows, x => `${x.schema_name}.${x.pure_name}`)
+   : {};
+ const columnColumnsMapped = foreignKeys.rows.map(x => ({
+   pureName: x.table_name,
+   schemaName: x.table_schema,
constraintName: x.constraint_name,
columnName: x.column_name,
refColumnName: x.ref_column_name,
updateAction: x.update_action,
deleteAction: x.delete_action,
refTableName: x.ref_table_name,
- refSchemaName: x.ref_schema_name,
+ refSchemaName: x.ref_table_schema,
}));
+ const fkByTable = _.groupBy(columnColumnsMapped, x => `${x.schemaName}.${x.pureName}`);
const pkColumnsMapped = pkColumns.rows.map(x => ({
pureName: x.pure_name,
schemaName: x.schema_name,
@@ -215,6 +178,7 @@ class Analyser extends DatabaseAnalyser {
constraintName: x.constraint_name,
columnName: x.column_name,
}));
const pkByTable = _.groupBy(pkColumnsMapped, x => `${x.schemaName}.${x.pureName}`);
const procedureParameters = routineParametersRows.rows
.filter(i => i.routine_type == 'PROCEDURE')
@@ -252,6 +216,7 @@ class Analyser extends DatabaseAnalyser {
const res = {
tables: tables.rows.map(table => {
const tableKey = `${table.schema_name}.${table.pure_name}`;
const newTable = {
pureName: table.pure_name,
schemaName: table.schema_name,
@@ -259,20 +224,16 @@ class Analyser extends DatabaseAnalyser {
objectId: `tables:${table.schema_name}.${table.pure_name}`,
contentHash: table.hash_code_columns ? `${table.hash_code_columns}-${table.hash_code_constraints}` : null,
};
const tableIndexes = indexesByTable[tableKey] || [];
return {
...newTable,
- columns: columns.rows
-   .filter(col => col.pure_name == table.pure_name && col.schema_name == table.schema_name)
-   .map(col => getColumnInfo(col, newTable, geometryColumns, geographyColumns)),
- primaryKey: DatabaseAnalyser.extractPrimaryKeys(newTable, pkColumnsMapped),
- foreignKeys: DatabaseAnalyser.extractForeignKeys(newTable, columnColumnsMapped),
- indexes: indexes.rows
-   .filter(
-     x =>
-       x.table_name == table.pure_name &&
-       x.schema_name == table.schema_name &&
-       !uniqueNames.rows.find(y => y.constraint_name == x.index_name)
-   )
+ columns: (columnsByTable[tableKey] || []).map(col =>
+   getColumnInfo(col, newTable, geometryColumns, geographyColumns)
+ ),
+ primaryKey: DatabaseAnalyser.extractPrimaryKeys(newTable, pkByTable[tableKey] || []),
+ foreignKeys: DatabaseAnalyser.extractForeignKeys(newTable, fkByTable[tableKey] || []),
+ indexes: tableIndexes
+   .filter(x => !uniqueNameSet.has(x.index_name))
.map(idx => {
const indOptionSplit = idx.indoption.split(' ');
return {
@@ -281,7 +242,7 @@ class Analyser extends DatabaseAnalyser {
columns: _.compact(
idx.indkey
.split(' ')
- .map(colid => indexcols.rows.find(col => col.oid == idx.oid && col.attnum == colid))
+ .map(colid => indexcolsByOidAttnum[`${idx.oid}_${colid}`])
.filter(col => col != null)
.map((col, colIndex) => ({
columnName: col.column_name,
@@ -290,19 +251,14 @@ class Analyser extends DatabaseAnalyser {
),
};
}),
- uniques: indexes.rows
-   .filter(
-     x =>
-       x.table_name == table.pure_name &&
-       x.schema_name == table.schema_name &&
-       uniqueNames.rows.find(y => y.constraint_name == x.index_name)
-   )
+ uniques: tableIndexes
+   .filter(x => uniqueNameSet.has(x.index_name))
.map(idx => ({
constraintName: idx.index_name,
columns: _.compact(
idx.indkey
.split(' ')
- .map(colid => indexcols.rows.find(col => col.oid == idx.oid && col.attnum == colid))
+ .map(colid => indexcolsByOidAttnum[`${idx.oid}_${colid}`])
.filter(col => col != null)
.map(col => ({
columnName: col.column_name,
@@ -317,9 +273,7 @@ class Analyser extends DatabaseAnalyser {
schemaName: view.schema_name,
contentHash: view.hash_code,
createSql: `CREATE VIEW "${view.schema_name}"."${view.pure_name}"\nAS\n${view.create_sql}`,
- columns: columns.rows
-   .filter(col => col.pure_name == view.pure_name && col.schema_name == view.schema_name)
-   .map(col => getColumnInfo(col)),
+ columns: (columnsByTable[`${view.schema_name}.${view.pure_name}`] || []).map(col => getColumnInfo(col)),
})),
matviews: matviews
? matviews.rows.map(matview => ({
@@ -328,8 +282,7 @@ class Analyser extends DatabaseAnalyser {
schemaName: matview.schema_name,
contentHash: matview.hash_code,
createSql: `CREATE MATERIALIZED VIEW "${matview.schema_name}"."${matview.pure_name}"\nAS\n${matview.definition}`,
- columns: matviewColumns.rows
-   .filter(col => col.pure_name == matview.pure_name && col.schema_name == matview.schema_name)
+ columns: (matviewColumnsByTable[`${matview.schema_name}.${matview.pure_name}`] || [])
.map(col => getColumnInfo(col)),
}))
: undefined,
@@ -396,14 +349,31 @@ class Analyser extends DatabaseAnalyser {
}
async _getFastSnapshot() {
- const viewModificationsQueryData = await this.analyserQuery('viewModifications');
- const matviewModificationsQueryData = this.driver.dialect.materializedViews
-   ? await this.analyserQuery('matviewModifications')
-   : null;
- const routineModificationsQueryData = await this.analyserQuery('routineModifications');
+ const useInfoSchema = this.driver.__analyserInternals.useInfoSchemaRoutines;
+ const routineModificationsQueryName = useInfoSchema ? 'routineModificationsInfoSchema' : 'routineModifications';
+ // Run all modification queries in parallel
+ const [
+   tableModificationsQueryData,
+   viewModificationsQueryData,
+   matviewModificationsQueryData,
+   routineModificationsQueryData,
+ ] = await Promise.all([
+   this.analyserQuery('tableModifications'),
+   this.analyserQuery('viewModifications'),
+   this.driver.dialect.materializedViews
+     ? this.analyserQuery('matviewModifications')
+     : Promise.resolve(null),
+   this.analyserQuery(routineModificationsQueryName),
+ ]);
return {
- tables: null,
+ tables: tableModificationsQueryData.rows.map(x => ({
+   objectId: `tables:${x.schema_name}.${x.pure_name}`,
+   pureName: x.pure_name,
+   schemaName: x.schema_name,
+   contentHash: `${x.hash_code_columns}-${x.hash_code_constraints}`,
+ })),
views: viewModificationsQueryData.rows.map(x => ({
objectId: `views:${x.schema_name}.${x.pure_name}`,
pureName: x.pure_name,
@@ -1,22 +1,38 @@
module.exports = `
select
table_schema as "schema_name",
table_name as "pure_name",
column_name as "column_name",
is_nullable as "is_nullable",
data_type as "data_type",
character_maximum_length as "char_max_length",
numeric_precision as "numeric_precision",
numeric_scale as "numeric_scale",
column_default as "default_value"
from information_schema.columns
where
table_schema !~ '^_timescaledb_'
and (
('tables:' || table_schema || '.' || table_name) =OBJECT_ID_CONDITION
or
('views:' || table_schema || '.' || table_name) =OBJECT_ID_CONDITION
)
and table_schema =SCHEMA_NAME_CONDITION
order by ordinal_position
SELECT
n.nspname AS "schema_name",
c.relname AS "pure_name",
a.attname AS "column_name",
CASE WHEN a.attnotnull THEN 'NO' ELSE 'YES' END AS "is_nullable",
format_type(a.atttypid, NULL) AS "data_type",
CASE
WHEN a.atttypmod > 0 AND t.typname IN ('varchar', 'bpchar', 'char') THEN a.atttypmod - 4
WHEN a.atttypmod > 0 AND t.typname IN ('bit', 'varbit') THEN a.atttypmod
ELSE NULL
END AS "char_max_length",
CASE
WHEN a.atttypmod > 0 AND t.typname = 'numeric' THEN ((a.atttypmod - 4) >> 16) & 65535
ELSE NULL
END AS "numeric_precision",
CASE
WHEN a.atttypmod > 0 AND t.typname = 'numeric' THEN (a.atttypmod - 4) & 65535
ELSE NULL
END AS "numeric_scale",
pg_get_expr(d.adbin, d.adrelid) AS "default_value"
FROM pg_catalog.pg_attribute a
JOIN pg_catalog.pg_class c ON c.oid = a.attrelid
JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
JOIN pg_catalog.pg_type t ON t.oid = a.atttypid
LEFT JOIN pg_catalog.pg_attrdef d ON d.adrelid = a.attrelid AND d.adnum = a.attnum
WHERE a.attnum > 0
AND NOT a.attisdropped
AND c.relkind IN ('r', 'v', 'p', 'f')
AND n.nspname !~ '^_timescaledb_'
AND (
('tables:' || n.nspname || '.' || c.relname) =OBJECT_ID_CONDITION
OR
('views:' || n.nspname || '.' || c.relname) =OBJECT_ID_CONDITION
)
AND n.nspname =SCHEMA_NAME_CONDITION
ORDER BY a.attnum
`;
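The atttypmod arithmetic above follows the standard Postgres encoding: for numeric(precision, scale) the stored value is ((precision << 16) | scale) + 4, and for varchar(n) it is n + 4, which is why the query subtracts 4. A quick check of the decoding in plain JavaScript:

const atttypmod = ((12 << 16) | 4) + 4; // numeric(12,4) stores atttypmod = 786440
console.log(((atttypmod - 4) >> 16) & 65535); // 12, the query's "numeric_precision"
console.log((atttypmod - 4) & 65535); // 4, the query's "numeric_scale"
console.log((255 + 4) - 4); // varchar(255) stores 259; "char_max_length" = 255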
@@ -5,7 +5,8 @@ SELECT
con.conname AS constraint_name,
nsp2.nspname AS ref_table_schema,
rel2.relname AS ref_table_name,
- conpk.conname AS unique_constraint_name,
+ att.attname AS column_name,
+ att2.attname AS ref_column_name,
CASE con.confupdtype
WHEN 'a' THEN 'NO ACTION'
WHEN 'r' THEN 'RESTRICT'
@@ -13,26 +14,26 @@ SELECT
WHEN 'n' THEN 'SET NULL'
WHEN 'd' THEN 'SET DEFAULT'
ELSE con.confupdtype::text
- END AS update_action,
- CASE con.confdeltype
+ END AS update_action,
+ CASE con.confdeltype
WHEN 'a' THEN 'NO ACTION'
WHEN 'r' THEN 'RESTRICT'
WHEN 'c' THEN 'CASCADE'
WHEN 'n' THEN 'SET NULL'
WHEN 'd' THEN 'SET DEFAULT'
ELSE con.confdeltype::text
- END AS delete_action
+ END AS delete_action
FROM pg_constraint con
JOIN pg_class rel ON rel.oid = con.conrelid
JOIN pg_namespace nsp ON nsp.oid = rel.relnamespace
JOIN pg_class rel2 ON rel2.oid = con.confrelid
JOIN pg_namespace nsp2 ON nsp2.oid = rel2.relnamespace
- JOIN pg_constraint conpk
-   ON conpk.conrelid = con.confrelid
-   AND conpk.conkey = con.confkey
-   AND conpk.contype IN ('p','u') -- 'p' = primary key, 'u' = unique constraint
- WHERE con.contype = 'f' AND ('tables:' || nsp.nspname || '.' || rel.relname) =OBJECT_ID_CONDITION AND nsp.nspname =SCHEMA_NAME_CONDITION
+ JOIN LATERAL unnest(con.conkey, con.confkey) WITH ORDINALITY AS cols(attnum, ref_attnum, ordinal_position) ON TRUE
+ JOIN pg_attribute att ON att.attrelid = con.conrelid AND att.attnum = cols.attnum
+ JOIN pg_attribute att2 ON att2.attrelid = con.confrelid AND att2.attnum = cols.ref_attnum
+ WHERE con.contype = 'f'
+   AND ('tables:' || nsp.nspname || '.' || rel.relname) =OBJECT_ID_CONDITION
+   AND nsp.nspname =SCHEMA_NAME_CONDITION
+ ORDER BY con.conname, cols.ordinal_position
;
`;
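The JOIN LATERAL unnest(con.conkey, con.confkey) WITH ORDINALITY is what turns a multi-column foreign key into ordered column pairs: pg_constraint stores the base and referenced column numbers as parallel arrays. A small JavaScript emulation of that pairing (the column numbers are made up for illustration):

const conkey = [2, 5]; // base-table attnums of the FK columns
const confkey = [1, 3]; // referenced-table attnums, in matching order
const pairs = conkey.map((attnum, i) => ({ attnum, ref_attnum: confkey[i], ordinal_position: i + 1 }));
console.log(pairs);
// [ { attnum: 2, ref_attnum: 1, ordinal_position: 1 },
//   { attnum: 5, ref_attnum: 3, ordinal_position: 2 } ]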
@@ -19,15 +19,16 @@ const triggers = require('./triggers');
const listDatabases = require('./listDatabases');
const listVariables = require('./listVariables');
const listProcesses = require('./listProcesses');
const fk_keyColumnUsage = require('./fk_key_column_usage');
const routinesInfoSchema = require('./routinesInfoSchema');
const proceduresParametersInfoSchema = require('./proceduresParametersInfoSchema');
const routineModificationsInfoSchema = require('./routineModificationsInfoSchema');
const tableModifications = require('./tableModifications');
module.exports = {
columns,
tableList,
viewModifications,
primaryKeys,
fk_keyColumnUsage,
foreignKeys,
views,
routines,
@@ -45,4 +46,8 @@ module.exports = {
listDatabases,
listVariables,
listProcesses,
routinesInfoSchema,
proceduresParametersInfoSchema,
routineModificationsInfoSchema,
tableModifications,
};
@@ -1,31 +1,34 @@
module.exports = `
SELECT
proc.specific_schema AS schema_name,
proc.routine_name AS pure_name,
proc.routine_type as routine_type,
args.parameter_name AS parameter_name,
args.parameter_mode,
args.data_type AS data_type,
args.ordinal_position AS parameter_index,
args.parameter_mode AS parameter_mode
FROM
information_schema.routines proc
LEFT JOIN
information_schema.parameters args
ON proc.specific_schema = args.specific_schema
AND proc.specific_name = args.specific_name
WHERE
proc.specific_schema NOT IN ('pg_catalog', 'information_schema') -- Exclude system schemas
AND args.parameter_name IS NOT NULL
AND proc.routine_type IN ('PROCEDURE', 'FUNCTION') -- Filter for procedures
AND proc.specific_schema !~ '^_timescaledb_'
AND proc.specific_schema =SCHEMA_NAME_CONDITION
SELECT
n.nspname AS "schema_name",
p.proname AS "pure_name",
CASE p.prokind WHEN 'p' THEN 'PROCEDURE' ELSE 'FUNCTION' END AS "routine_type",
a.parameter_name AS "parameter_name",
CASE (p.proargmodes::text[])[a.ordinal_position]
WHEN 'o' THEN 'OUT'
WHEN 'b' THEN 'INOUT'
WHEN 'v' THEN 'VARIADIC'
WHEN 't' THEN 'TABLE'
ELSE 'IN'
END AS "parameter_mode",
pg_catalog.format_type(a.parameter_type, NULL) AS "data_type",
a.ordinal_position AS "parameter_index"
FROM pg_catalog.pg_proc p
JOIN pg_catalog.pg_namespace n ON n.oid = p.pronamespace
CROSS JOIN LATERAL unnest(
COALESCE(p.proallargtypes, p.proargtypes::oid[]),
p.proargnames
) WITH ORDINALITY AS a(parameter_type, parameter_name, ordinal_position)
WHERE p.prokind IN ('f', 'p')
AND p.proargnames IS NOT NULL
AND a.parameter_name IS NOT NULL
AND n.nspname !~ '^_timescaledb_'
AND n.nspname NOT IN ('pg_catalog', 'information_schema')
AND n.nspname =SCHEMA_NAME_CONDITION
AND (
- (routine_type = 'PROCEDURE' AND ('procedures:' || proc.specific_schema || '.' || routine_name) =OBJECT_ID_CONDITION)
- OR
- (routine_type = 'FUNCTION' AND ('functions:' || proc.specific_schema || '.' || routine_name) =OBJECT_ID_CONDITION)
+ (p.prokind = 'p' AND ('procedures:' || n.nspname || '.' || p.proname) =OBJECT_ID_CONDITION)
+ OR
+ (p.prokind != 'p' AND ('functions:' || n.nspname || '.' || p.proname) =OBJECT_ID_CONDITION)
)
- ORDER BY
- schema_name,
- args.ordinal_position;
+ ORDER BY n.nspname, a.ordinal_position
`;
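The CASE over (p.proargmodes::text[])[a.ordinal_position] leans on pg_proc semantics: proargmodes is NULL when every argument is a plain IN parameter, indexing a NULL array yields NULL, and NULL matches no WHEN, so the ELSE branch reports 'IN'. The mapping in JavaScript terms:

const parameterMode = m => ({ i: 'IN', o: 'OUT', b: 'INOUT', v: 'VARIADIC', t: 'TABLE' }[m] ?? 'IN');
console.log(parameterMode('b')); // 'INOUT'
console.log(parameterMode(undefined)); // 'IN' (proargmodes is NULL for all-IN routines)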
@@ -0,0 +1,31 @@
module.exports = `
SELECT
proc.specific_schema AS schema_name,
proc.routine_name AS pure_name,
proc.routine_type as routine_type,
args.parameter_name AS parameter_name,
args.parameter_mode,
args.data_type AS data_type,
args.ordinal_position AS parameter_index,
args.parameter_mode AS parameter_mode
FROM
information_schema.routines proc
LEFT JOIN
information_schema.parameters args
ON proc.specific_schema = args.specific_schema
AND proc.specific_name = args.specific_name
WHERE
proc.specific_schema NOT IN ('pg_catalog', 'information_schema')
AND args.parameter_name IS NOT NULL
AND proc.routine_type IN ('PROCEDURE', 'FUNCTION')
AND proc.specific_schema !~ '^_timescaledb_'
AND proc.specific_schema =SCHEMA_NAME_CONDITION
AND (
(routine_type = 'PROCEDURE' AND ('procedures:' || proc.specific_schema || '.' || routine_name) =OBJECT_ID_CONDITION)
OR
(routine_type = 'FUNCTION' AND ('functions:' || proc.specific_schema || '.' || routine_name) =OBJECT_ID_CONDITION)
)
ORDER BY
schema_name,
args.ordinal_position;
`;
@@ -1,10 +1,13 @@
module.exports = `
select
routine_name as "pure_name",
routine_schema as "schema_name",
$md5Function(routine_definition) as "hash_code",
routine_type as "object_type"
from
information_schema.routines where routine_schema !~ '^_timescaledb_'
and routine_type in ('PROCEDURE', 'FUNCTION') and routine_schema =SCHEMA_NAME_CONDITION
SELECT
p.proname AS "pure_name",
n.nspname AS "schema_name",
$md5Function(p.prosrc) AS "hash_code",
CASE p.prokind WHEN 'p' THEN 'PROCEDURE' ELSE 'FUNCTION' END AS "object_type"
FROM pg_catalog.pg_proc p
JOIN pg_catalog.pg_namespace n ON n.oid = p.pronamespace
WHERE p.prokind IN ('f', 'p')
AND n.nspname !~ '^_timescaledb_'
AND n.nspname NOT IN ('pg_catalog', 'information_schema')
AND n.nspname =SCHEMA_NAME_CONDITION
`;
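
Reading p.prosrc directly means the hash reflects the stored routine body without going through the information_schema view layer. Assuming $md5Function expands to md5() on PostgreSQL (an assumption; the macro is resolved elsewhere in the plugin), the change check reduces to:

    SELECT p.proname, md5(p.prosrc) AS hash_code
    FROM pg_catalog.pg_proc p
    JOIN pg_catalog.pg_namespace n ON n.oid = p.pronamespace
    WHERE n.nspname = 'public'            -- hypothetical schema
      AND p.prokind IN ('f', 'p');
    -- Editing a function body changes prosrc, hence the hash, flagging it for re-analysis.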
@@ -0,0 +1,10 @@
module.exports = `
select
routine_name as "pure_name",
routine_schema as "schema_name",
$md5Function(routine_definition) as "hash_code",
routine_type as "object_type"
from
information_schema.routines where routine_schema !~ '^_timescaledb_'
and routine_type in ('PROCEDURE', 'FUNCTION') and routine_schema =SCHEMA_NAME_CONDITION
`;
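
One caveat the pg_catalog variant above avoids: in PostgreSQL, information_schema.routines.routine_definition is NULL for routines not owned by a currently enabled role, and md5(NULL) is NULL, so such routines never get a usable hash. Assuming again that $md5Function expands to md5():

    SELECT routine_name,
           md5(routine_definition) AS hash_code   -- NULL when the body is hidden from the current role
    FROM information_schema.routines
    WHERE routine_schema = 'public';              -- hypothetical schema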
@@ -1,19 +1,23 @@
module.exports = `
select
routine_name as "pure_name",
routine_schema as "schema_name",
max(routine_definition) as "definition",
max($md5Function(routine_definition)) as "hash_code",
routine_type as "object_type",
$typeAggFunc(data_type $typeAggParam) as "data_type",
max(external_language) as "language"
from
information_schema.routines where routine_schema !~ '^_timescaledb_'
and routine_schema =SCHEMA_NAME_CONDITION
and (
(routine_type = 'PROCEDURE' and ('procedures:' || routine_schema || '.' || routine_name) =OBJECT_ID_CONDITION)
or
(routine_type = 'FUNCTION' and ('functions:' || routine_schema || '.' || routine_name) =OBJECT_ID_CONDITION)
)
group by routine_name, routine_schema, routine_type
SELECT
p.proname AS "pure_name",
n.nspname AS "schema_name",
max(p.prosrc) AS "definition",
max($md5Function(p.prosrc)) AS "hash_code",
CASE max(p.prokind) WHEN 'p' THEN 'PROCEDURE' ELSE 'FUNCTION' END AS "object_type",
$typeAggFunc(pg_catalog.format_type(p.prorettype, NULL) $typeAggParam) AS "data_type",
max(l.lanname) AS "language"
FROM pg_catalog.pg_proc p
JOIN pg_catalog.pg_namespace n ON n.oid = p.pronamespace
JOIN pg_catalog.pg_language l ON l.oid = p.prolang
WHERE p.prokind IN ('f', 'p')
AND n.nspname !~ '^_timescaledb_'
AND n.nspname NOT IN ('pg_catalog', 'information_schema')
AND n.nspname =SCHEMA_NAME_CONDITION
AND (
(p.prokind = 'p' AND ('procedures:' || n.nspname || '.' || p.proname) =OBJECT_ID_CONDITION)
OR
(p.prokind != 'p' AND ('functions:' || n.nspname || '.' || p.proname) =OBJECT_ID_CONDITION)
)
GROUP BY p.proname, n.nspname, p.prokind
`;
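
prokind is a single character: 'f' plain function, 'p' procedure, 'a' aggregate, 'w' window function, so prokind IN ('f', 'p') drops aggregates and window functions just as the old routine_type filter did; format_type renders a type OID as its human-readable name. A quick probe against a built-in:

    SELECT p.proname,
           CASE p.prokind WHEN 'p' THEN 'PROCEDURE' ELSE 'FUNCTION' END AS object_type,
           pg_catalog.format_type(p.prorettype, NULL) AS return_type,
           l.lanname AS language
    FROM pg_catalog.pg_proc p
    JOIN pg_catalog.pg_language l ON l.oid = p.prolang
    WHERE p.proname = 'now';
    -- now | FUNCTION | timestamp with time zone | internal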
@@ -0,0 +1,19 @@
module.exports = `
select
routine_name as "pure_name",
routine_schema as "schema_name",
max(routine_definition) as "definition",
max($md5Function(routine_definition)) as "hash_code",
routine_type as "object_type",
$typeAggFunc(data_type $typeAggParam) as "data_type",
max(external_language) as "language"
from
information_schema.routines where routine_schema !~ '^_timescaledb_'
and routine_schema =SCHEMA_NAME_CONDITION
and (
(routine_type = 'PROCEDURE' and ('procedures:' || routine_schema || '.' || routine_name) =OBJECT_ID_CONDITION)
or
(routine_type = 'FUNCTION' and ('functions:' || routine_schema || '.' || routine_name) =OBJECT_ID_CONDITION)
)
group by routine_name, routine_schema, routine_type
`;
@@ -1,10 +1,35 @@
module.exports = `
select infoTables.table_schema as "schema_name", infoTables.table_name as "pure_name",
pg_relation_size('"'||infoTables.table_schema||'"."'||infoTables.table_name||'"') as "size_bytes"
from information_schema.tables infoTables
where infoTables.table_type not like '%VIEW%'
and ('tables:' || infoTables.table_schema || '.' || infoTables.table_name) =OBJECT_ID_CONDITION
and infoTables.table_schema <> 'pg_internal'
and infoTables.table_schema !~ '^_timescaledb_'
and infoTables.table_schema =SCHEMA_NAME_CONDITION
SELECT
n.nspname AS "schema_name",
c.relname AS "pure_name",
pg_relation_size(c.oid) AS "size_bytes",
$md5Function(
COALESCE(
(SELECT $typeAggFunc(
a.attname || ':' || pg_catalog.format_type(a.atttypid, a.atttypmod) || ':' || a.attnotnull::text
$hashColumnAggTail
)
FROM pg_catalog.pg_attribute a
WHERE a.attrelid = c.oid AND a.attnum > 0 AND NOT a.attisdropped),
''
)
) AS "hash_code_columns",
$md5Function(
COALESCE(
(SELECT $typeAggFunc(
con.conname || ':' || con.contype::text
$hashConstraintAggTail
)
FROM pg_catalog.pg_constraint con
WHERE con.conrelid = c.oid),
''
)
) AS "hash_code_constraints"
FROM pg_catalog.pg_class c
JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
WHERE c.relkind IN ('r', 'p', 'f')
AND ('tables:' || n.nspname || '.' || c.relname) =OBJECT_ID_CONDITION
AND n.nspname <> 'pg_internal'
AND n.nspname !~ '^_timescaledb_'
AND n.nspname =SCHEMA_NAME_CONDITION
`;
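
pg_relation_size(c.oid) replaces the old query's hand-built, quoted identifier string, and relkind IN ('r', 'p', 'f') covers ordinary, partitioned, and foreign tables. For the column hash to be stable across runs the aggregate needs a deterministic order, which is presumably what $hashColumnAggTail supplies; a sketch with a plain string_agg as the assumed expansion:

    SELECT c.relname,
           pg_relation_size(c.oid) AS size_bytes,
           md5(string_agg(
             a.attname || ':' || pg_catalog.format_type(a.atttypid, a.atttypmod) || ':' || a.attnotnull::text,
             ',' ORDER BY a.attnum        -- deterministic order keeps the hash stable
           )) AS hash_code_columns
    FROM pg_catalog.pg_class c
    JOIN pg_catalog.pg_attribute a
      ON a.attrelid = c.oid AND a.attnum > 0 AND NOT a.attisdropped
    WHERE c.relname = 'my_table'          -- hypothetical table name
    GROUP BY c.oid, c.relname;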
@@ -0,0 +1,34 @@
module.exports = `
SELECT
n.nspname AS "schema_name",
c.relname AS "pure_name",
$md5Function(
COALESCE(
(SELECT $typeAggFunc(
a.attname || ':' || pg_catalog.format_type(a.atttypid, a.atttypmod) || ':' || a.attnotnull::text
$hashColumnAggTail
)
FROM pg_catalog.pg_attribute a
WHERE a.attrelid = c.oid AND a.attnum > 0 AND NOT a.attisdropped),
''
)
) AS "hash_code_columns",
$md5Function(
COALESCE(
(SELECT $typeAggFunc(
con.conname || ':' || con.contype::text
$hashConstraintAggTail
)
FROM pg_catalog.pg_constraint con
WHERE con.conrelid = c.oid),
''
)
) AS "hash_code_constraints"
FROM pg_catalog.pg_class c
JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
WHERE c.relkind IN ('r', 'p', 'f')
AND n.nspname <> 'pg_internal'
AND n.nspname !~ '^_timescaledb_'
AND n.nspname NOT IN ('pg_catalog', 'information_schema')
AND n.nspname =SCHEMA_NAME_CONDITION
`;
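
The constraint hash concatenates conname with contype, whose single-letter codes come straight from pg_catalog.pg_constraint: 'p' primary key, 'f' foreign key, 'u' unique, 'c' check, 'x' exclusion, 't' constraint trigger. For example:

    SELECT con.conname, con.contype::text AS contype
    FROM pg_catalog.pg_constraint con
    JOIN pg_catalog.pg_class c ON c.oid = con.conrelid
    WHERE c.relname = 'my_table';         -- hypothetical table name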
@@ -1,8 +1,13 @@
module.exports = `
select
table_name as "pure_name",
table_schema as "schema_name",
$md5Function(view_definition) as "hash_code"
from
information_schema.views where table_schema != 'information_schema' and table_schema != 'pg_catalog' and table_schema !~ '^_timescaledb_' and table_schema =SCHEMA_NAME_CONDITION
SELECT
c.relname AS "pure_name",
n.nspname AS "schema_name",
$md5Function(pg_get_viewdef(c.oid, true)) AS "hash_code"
FROM pg_catalog.pg_class c
JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
WHERE c.relkind = 'v'
AND n.nspname != 'information_schema'
AND n.nspname != 'pg_catalog'
AND n.nspname !~ '^_timescaledb_'
AND n.nspname =SCHEMA_NAME_CONDITION
`;
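
pg_get_viewdef(c.oid, true) pretty-prints the view's stored rewrite rule, so the hash tracks what the server actually holds rather than information_schema's rendering of it, and it avoids the owner-only visibility of information_schema.views.view_definition. A quick check (demo_v is a hypothetical view; exact whitespace of the output may vary by server version):

    CREATE VIEW demo_v AS SELECT 1 AS one;
    SELECT pg_get_viewdef('demo_v'::regclass, true);
    --  SELECT 1 AS one;
    SELECT md5(pg_get_viewdef('demo_v'::regclass, true)) AS hash_code;  -- assuming $md5Function -> md5()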
@@ -1,11 +1,20 @@
module.exports = `
select
table_name as "pure_name",
table_schema as "schema_name",
view_definition as "create_sql",
$md5Function(view_definition) as "hash_code"
from
information_schema.views
where table_schema !~ '^_timescaledb_' and table_schema =SCHEMA_NAME_CONDITION
and ('views:' || table_schema || '.' || table_name) =OBJECT_ID_CONDITION
WITH view_defs AS (
SELECT
c.relname AS pure_name,
n.nspname AS schema_name,
pg_get_viewdef(c.oid, true) AS viewdef
FROM pg_catalog.pg_class c
JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
WHERE c.relkind = 'v'
AND n.nspname !~ '^_timescaledb_'
AND n.nspname =SCHEMA_NAME_CONDITION
AND ('views:' || n.nspname || '.' || c.relname) =OBJECT_ID_CONDITION
)
SELECT
pure_name AS "pure_name",
schema_name AS "schema_name",
viewdef AS "create_sql",
$md5Function(viewdef) AS "hash_code"
FROM view_defs
`;
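
The CTE keeps the pg_get_viewdef call in one place so the outer query can feed the same definition to both create_sql and hash_code. One note: PostgreSQL 12+ may inline a side-effect-free CTE, in which case the function could still be evaluated twice; AS MATERIALIZED would pin single evaluation if that mattered. The pattern in miniature:

    WITH src AS (
      SELECT pg_get_viewdef(c.oid, true) AS viewdef
      FROM pg_catalog.pg_class c
      WHERE c.relkind = 'v' AND c.relname = 'demo_v'   -- hypothetical view name
    )
    SELECT viewdef AS create_sql,
           md5(viewdef) AS hash_code                   -- one definition, two uses
    FROM src;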
@@ -418,6 +418,7 @@ const redshiftDriver = {
},
__analyserInternals: {
skipIndexes: true,
useInfoSchemaRoutines: true,
},
engine: 'redshift@dbgate-plugin-postgres',
title: 'Amazon Redshift',
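
The new useInfoSchemaRoutines flag presumably steers the Redshift analyser to the information_schema-based routine queries added above: Redshift descends from PostgreSQL 8.0, while pg_proc.prokind only appeared in PostgreSQL 11, so the pg_catalog variants cannot run there.

    -- Fails on Redshift: pg_proc has no prokind column (added in PostgreSQL 11)
    SELECT p.proname, p.prokind FROM pg_catalog.pg_proc p LIMIT 1;

    -- Portable across both engines, hence the information_schema fallback:
    SELECT routine_name, routine_type FROM information_schema.routines LIMIT 1;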
+1 -1
@@ -7,7 +7,7 @@ checkout-and-merge-pro:
repository: dbgate/dbgate-pro
token: ${{ secrets.GH_TOKEN }}
path: dbgate-pro
ref: 5baa88d0ad253537298e911868579bae0835888d
ref: 6b5e2ff831db9baedb2a43862daa4247810b15de
- name: Merge dbgate/dbgate-pro
run: |
mkdir ../dbgate-pro
+20
@@ -26,30 +26,37 @@ jobs:
with:
fetch-depth: 1
- _include: checkout-and-merge-pro
- name: yarn install
run: |
cd ../dbgate-merged
yarn install
- name: Integration tests
run: |
cd ../dbgate-merged
cd integration-tests
yarn test:ci
- name: Filter parser tests
if: always()
run: |
cd ../dbgate-merged
cd packages/filterparser
yarn test:ci
- name: Datalib (perspective) tests
if: always()
run: |
cd ../dbgate-merged
cd packages/datalib
yarn test:ci
- name: Tools tests
if: always()
run: |
cd ../dbgate-merged
cd packages/tools
yarn test:ci
@@ -138,3 +145,16 @@ jobs:
FIREBIRD_USE_LEGACY_AUTH: true
ports:
- '3050:3050'
mongodb:
image: mongo:4.0.12
ports:
- '27017:27017'
volumes:
- mongo-data:/data/db
- mongo-config:/data/configdb
dynamodb:
image: amazon/dynamodb-local
ports:
- '8000:8000'
+65 -1675
File diff suppressed because it is too large