A fully tested Apache Cassandra CQL query builder with support for the DataStax NodeJS driver, written in the spirit of Knex for CQL 3.x.
npm install cassanknex
CQL was purposefully designed to be SQL-esque to enhance ease of access for those familiar w/ relational databases while Knex is the canonical NodeJS query builder for SQL dialects; however, even given the lexical similarities, the difference between the usage of CQL vs SQL is significant enough that adding CQL as yet another Knex SQL dialect does not make sense. Thus, CassanKnex.
CassanKnex can be used to execute queries against a Cassandra cluster via cassandra-driver
(the official DataStax NodeJS driver) or as a simple CQL statement generator via the following relative instantiations:
Compiled CQL statements can be retrieved at any time via the cql
method.
var cassanKnex = require("cassanknex")(<DRIVER_OPTIONS|undefined>);
var qb = cassanKnex(KEYSPACE).QUERY_COMMAND()
.QUERY_MODIFIER_1()
.
.
.QUERY_MODIFIER_N();
var cql = qb.cql(); // get the cql statement
Where KEYSPACE
is the name of the relevant keyspace and
QUERY_COMMAND
and QUERY_MODIFIER
are among the list of available Query Commands and Query Modifiers.
<DRIVER_OPTIONS>
may be provided to configure the client, and is an object w/ the following optional fields:
connection
: <InitializedDatastaxDriverInstance>
or <DatastaxConnectionArguments>
The client will use an initialized datastax driver instance if provided (either the Cassandra driver or DSE driver will work).
Alternatively, you can provide arguments that will be forwarded to the underlying Cassandra driver instance.
Default is 'undefined'.
debug
: boolean
Toggle debug logs (see debugging). Default is false.
awsKeyspace
: boolean
Toggle when using aws managed keyspaces. Will disable prepared statements on DDL statements. Default is false
Execution of a given query is performed by invoking either the exec
, stream
or eachRow
methods
(which are straight pass throughs to the DataStax driver's execute
, stream
and eachRow
methods, respectively);
batch queries may be executed via the batch
method (again, a pass through to the DataStax driver's own batch
method).
You may provide your own driver or use the included DataStax driver.
var cassanKnex = require("cassanknex")({
connection: {
contactPoints: ["LIST OF CONNECTION POINTS"]
}
});
cassanKnex.on("ready", function (err) {
if (err)
console.error("Error Connecting to Cassandra Cluster", err);
else
console.log("Cassandra Connected");
var qb = cassanKnex(KEYSPACE).QUERY_COMMAND()
.QUERY_MODIFIER_1()
.
.
.QUERY_MODIFIER_N();
// pass through to the underlying DataStax nodejs-driver 'execute' method
qb.exec(function(err, res) {
// do something w/ your query response
});
// OR pass through to the underlying DataStax nodejs-driver 'stream' method
var onReadable = function () {
// Readable is emitted as soon as a row is received and parsed
var row;
while (row = this.read()) {
console.log(row);
// do something w/ the row response
}
}
, onEnd = function () {
// Stream ended, there aren't any more rows
console.log("query finished");
}
, onError = function (err) {
// Something went wrong: err is a response error from Cassandra
console.log("query error", err);
};
// Invoke the stream method
qb.stream({
"readable": onReadable,
"end": onEnd,
"error": onError
});
// OR pass through to the underlying DataStax nodejs-driver 'eachRow' method
var rowCallback = function (n, row) {
// The callback will be invoked per each row as soon as they are received
console.log(row);
// do something w/ the row response
}
, errorCb = function (err) {
// Something went wrong: err is a response error from Cassandra
console.log("query error", err);
};
// Invoke the eachRow method
qb.eachRow(rowCallback, errorCb);
// Invoke the batch method to process multiple requests
cassanKnex().batch([qb, qb], function(err, res) {
// do something w/ your response
});
});
While the package includes the vanilla Cassandra driver (supported by Datastax),
and will use that driver to connect to your cluster if you provide a connection configuration, you may optionally provide your own initialized driver to the cassanknex
constructor.
This allows for using either the DSE driver or a different version of the Cassandra driver, per your application's needs.
e.g., w/ the built in cassandra-driver
:
var cassanKnex = require("cassanknex")({
connection: { // default is 'undefined'
contactPoints: ["10.0.0.2"]
},
exec: { // default is '{}'
prepare: false // default is 'true'
}
});
cassanKnex.on("ready", function (err) {...});
or, using a custom dse-driver
connection:
// create a new dse-driver connection
var dse = require("dse-driver");
var dseClient = new dse.Client({
contactPoints: ["10.0.0.2"],
queryOptions: {
prepare: true
},
socketOptions: {
readTimeout: 0
},
profiles: []
});
// initialize dse-driver connection
dseClient.connect(function (err) {
if (err) {
console.log("Error initializing dse-driver", err);
}
else {
// provide connection to cassanknex constructor
var cassanKnex = require("cassanknex")({
connection: dseClient,
debug: false
});
cassanKnex.on("ready", function (err) {
// ...
});
}
});
var cassanKnex = require("cassanknex")({
connection: { // default is 'undefined'
contactPoints: ["10.0.0.2"]
},
exec: { // default is '{}'
prepare: false // default is 'true'
},
awsKeyspace: false // default is 'false'
});
cassanKnex.on("ready", function (err) {
if (err)
console.error("Error Connecting to Cassandra Cluster", err);
else {
console.log("Cassandra Connected");
var qb = cassanKnex("keyspace").select("id", "foo", "bar", "baz")
.ttl("foo")
.where("id", "=", "1")
.orWhere("id", "in", ["2", "3"])
.orWhere("baz", "=", "bar")
.andWhere("foo", "IN", ["baz", "bar"])
.limit(10)
.from("table")
.exec(function(err, res) {
// executes query :
// 'SELECT "id","foo","bar","baz",ttl("foo") FROM "keyspace"."table"
// WHERE "id" = ? OR "id" in (?, ?)
// OR "baz" = ? AND "foo" IN (?, ?)
// LIMIT 10;'
// with bindings array : [ '1', '2', '3', 'bar', 'baz', 'bar' ]
if (err)
console.error("error", err);
else
console.log("res", res);
});
}
});
To enable debug
mode pass { debug: true }
into the CassanKnex require
statement, e.g.
var cassanKnex = require("cassanknex")({ debug: true });
When debug
is enabled the query object will be logged upon execution,
and you'll receive two informational components provided to ease the act of debugging:
_queryPhases
:
_methodStack
:
_queryPhases
) at each step, when debug == false
the _cql
query statement and accompanying _bindings
array are not created until either qb.cql()
or qb.exec()
are called. So you'll see something akin to the following insert
statement upon invoking either qb.cql()
or qb.exec()
:
var values = {
"id": "foo"
, "bar": "baz"
, "baz": ["foo", "bar"]
};
var qb = cassanknex("cassanKnexy");
qb.insert(values)
.usingTimestamp(250000)
.usingTTL(50000)
.into("columnFamily")
.cql();
// =>
{ _debug: true,
_dialect: 'cql',
_exec: {},
_execPrepare: true,
_keyspace: 'cassanKnexy',
_columnFamily: 'columnFamily',
_methodStack:
[ 'insert',
'usingTimestamp',
'insert',
'usingTTL',
'insert',
'into',
'insert',
'insert' ],
_queryPhases:
[ 'INSERT INTO ("id","bar","baz") VALUES (?, ?, ?);',
'INSERT INTO ("id","bar","baz") VALUES (?, ?, ?) USING TIMESTAMP ?;',
'INSERT INTO ("id","bar","baz") VALUES (?, ?, ?) USING TIMESTAMP ? AND USING TTL ?;',
'INSERT INTO "cassanKnexy"."columnFamily" ("id","bar","baz") VALUES (?, ?, ?) USING TIMESTAMP ? AND USING TTL ?;',
'INSERT INTO "cassanKnexy"."columnFamily" ("id","bar","baz") VALUES (?, ?, ?) USING TIMESTAMP ? AND USING TTL ?;' ],
_cql: 'INSERT INTO "cassanKnexy"."columnFamily" ("id","bar","baz") VALUES (?, ?, ?) USING TIMESTAMP ? AND USING TTL ?;',
_bindings: [ 'foo', 'baz', [ 'foo', 'bar' ], 250000, 50000 ],
_statements:
[ { grouping: 'compiling', type: 'insert', value: [Object] },
{ grouping: 'using', type: 'usingTimestamp', val: 250000 },
{ grouping: 'using', type: 'usingTTL', val: 50000 } ],
... }
While fuller documentation for all methods is in the works, the test files provide thorough examples as to method usage.
All methods take an optional
options
object as the first argument in the call signature; if provided, the options will be passed through to the corresponding cassandra-driver
call.
execute a query and return the response via a callback:
var item = {
foo: "bar",
bar: ["foo", "baz"]
};
var qb = cassanKnex("cassanKnexy")
.insert(item)
.into("columnFamily")
.exec(function(err, result) {
// do something w/ your err/result
});
// w/ options
qb.exec({ prepare: false }, function(err, result) {
// do something w/ your err/result
});
execute a query and invoke a callback as each row is received:
var rowCallback = function (n, row) {
// Readable is emitted as soon as a row is received and parsed
}
, errorCallback = function (err) {
// Something went wrong: err is a response error from Cassandra
};
var qb = cassanKnex("cassanKnexy")
.select()
.from("columnFamily");
// Invoke the eachRow method
qb.eachRow(rowCallback, errorCallback);
execute a query and stream each row as it is received:
var onReadable = function () {
// Readable is emitted as soon as a row is received and parsed
var row;
while (row = this.read()) {
// do something w/ your row
}
}
, onEnd = function () {
// Stream ended, there aren't any more rows
}
, onError = function (err) {
// Something went wrong: err is a response error from Cassandra
};
var qb = cassanKnex("cassanKnexy")
.select()
.from("columnFamily");
// Invoke the stream method
qb.stream({
"readable": onReadable,
"end": onEnd,
"error": onError
});
execute a batch of cassanknex queries in a single batch statement:
var qb1 = cassanKnex("cassanKnexy")
.insert({foo: "is bar"})
.usingTimestamp(250000)
.usingTTL(50000)
.into("columnFamily");
var qb2 = cassanKnex("cassanKnexy")
.insert({bar: "is foo"})
.usingTimestamp(250000)
.usingTTL(50000)
.into("columnFamily");
// w/o options
cassanKnex().batch([qb1, qb2], function(err, res) {
// do something w/ your err/result
});
// w/ options
cassanKnex().batch({prepare: true}, [qb1, qb2], function(err, res) {
// do something w/ your err/result
});
insert - compile an insert query string
var qb = cassanKnex("cassanKnexy")
, values = {
"id": "foo"
, "bar": "baz"
, "baz": ["foo", "bar"]
};
qb.insert(values)
.usingTimestamp(250000)
.usingTTL(50000)
.into("columnFamily");
// => INSERT INTO cassanKnexy.columnFamily (id,bar,baz)
// VALUES (?, ?, ?)
// USING TIMESTAMP ?
// AND USING TTL ?;
select - compile a select OR select as query string
var qb = cassanKnex("cassanKnexy");
qb.select("id", "foo", "bar", "baz")
.ttl("foo")
.where("id", "=", "1")
.orWhere("id", "in", ["2", "3"])
.orWhere("baz", "=", "bar")
.andWhere("foo", "IN", ["baz", "bar"])
.limitPerPartition(10)
.from("columnFamily");
// => SELECT "id","foo","bar","baz",ttl("foo") FROM "cassanKnexy"."columnFamily"
// WHERE "id" = ?
// OR "id" in (?, ?)
// OR "baz" = ?
// AND "foo" IN (?, ?)
// PER PARTITION LIMIT ?;
var qb = cassanKnex("cassanKnexy");
qb.select({id: "foo"})
.ttl({id: "fooTTL"})
.limit(10)
.from("columnFamily");
// => SELECT "id" AS "foo",ttl("id") AS "fooTTL" FROM "cassanKnexy"."columnFamily" LIMIT ?;
update - compile an update query string
var qb = cassanKnex("cassanKnexy");
qb.update("columnFamily")
.set("bar", "foo")
.set("foo", "bar")
.where("foo[bar]", "=", "baz")
.where("id", "in", ["1", "1", "2", "3", "5"]);
// => UPDATE cassanKnexy.columnFamily
// SET bar = ?,foo = ?
// WHERE foo[bar] = ?
// AND id in (?, ?, ?, ?, ?);
set column values using object parameters:
var qb = cassanKnex("cassanKnexy");
qb.update("columnFamily")
.set({
"bar": "baz",
"foo": ["bar", "baz"]
})
.where("foo[bar]", "=", "baz")
.where("id", "in", ["1", "1", "2", "3", "5"]);
// => UPDATE cassanKnexy.columnFamily
// SET bar = ?,foo = ?
// WHERE foo[bar] = ?
// AND id in (?, ?, ?, ?, ?);
var qb = cassanKnex("cassanKnexy");
qb.update("columnFamily")
.add("bar", {"foo": "baz"}) // "bar" is a map
.remove("foo", ["bar"]) // "foo" is a set
.where("id", "=", 1);
// => UPDATE cassanKnexy.columnFamily
// SET "bar" = "bar" + ?,
// "foo" = "foo" - ?;
// WHERE id = ?;
or w/ object notation:
var qb = cassanKnex("cassanKnexy");
qb.update("columnFamily")
.add({
"bar": {"baz": "foo"}, // "bar" is a map
"foo": ["baz"] // "foo" is a set
})
.remove({
"bar": ["foo"], // "bar" is a map
"foo": ["bar"] // "foo" is a set
})
.where("id", "=", 1);
var qb = cassanKnex("cassanKnexy");
qb.update("columnFamily")
.increment("bar", 5) // incr by 5
.increment("baz", 7) // incr by 7
.decrement("foo", 9) // decr by 9
.decrement("bop", 11) // decr by 11
.where("id", "=", 1);
// => UPDATE cassanKnexy.columnFamily
// SET "bar" = "bar" + ?,
// "baz" = "baz" + ?,
// "foo" = "foo" - ?;
// WHERE id = ?;
or w/ object notation:
var qb = cassanKnex("cassanKnexy");
qb.update("columnFamily")
.increment({"bar": 5, "baz": 7})
.decrement({"foo": 9, "bop": 11})
.where("id", "=", 1);
delete - compile a delete query string
var qb = cassanknex("cassanKnexy");
qb.delete()
.from("columnFamily")
.where("foo[bar]", "=", "baz")
.where("id", "in", ["1", "1", "2", "3", "5"]);
// => DELETE FROM cassanKnexy.columnFamily
// WHERE foo[bar] = ?
// AND id in (?, ?, ?, ?, ?);
var qb = cassanknex("cassanKnexy");
qb.delete(["foo", "bar"])
// OR
qb.delete("foo", "bar")
.from("columnFamily")
.where("foo[bar]", "=", "baz")
.where("id", "in", ["1", "1", "2", "3", "5"]);
// => DELETE foo,bar FROM cassanKnexy.columnFamily
// WHERE foo[bar] = ?
// AND id in (?, ?, ?, ?, ?);
var cassanKnex = require("cassanknex")({
connection: {
contactPoints: ["10.0.0.2"]
}
});
cassanKnex.on("ready", function (err) {
if (err)
console.error("Error Connecting to Cassandra Cluster", err);
else {
console.log("Cassandra Connected");
// get the Cassandra Driver
var client = cassanKnex.getClient();
}
});
var cassanKnex = require("cassanknex")();
// get the Cassandra Driver
var driver = cassanKnex.getDriver();
mocha
dev dependency from 6.2.0 to 9.2.2.
cassandra-driver
dependency from v3 to v4.
awsKeyspace
with
for column family statements.minTimeuuidWhere
and maxTimeuuidWhere
, and aggregation QueryModifiers dateOf
, unixTimestampOf
, toDate
, toTimestamp
, toUnixTimestamp
per #48.Date
type for column family compilation.count
, per issue #30.3.1.6
to 3.2.2
.createColumnFamily
primary
annotation, per issue #35.withOptions
, limitPerPartition
, ttl
, add
and remove
, increment
and decrement
.createIndexCustom
.3.1.5
to 3.1.6
.3.1.1
to 3.1.5
.if
(for update
), ifExists
(for update
), and ifNotExists
(for insert
) per PR #28.2.2.2
to 3.1.1
.getDriver
method to allow retrieving the raw DataStax Driver module from cassanknex per issue #25.2.2.1
to 2.2.2
.getClient
method to allow retrieving the Cassandra Driver instance from cassanknex.orderBy
for standard queries.truncate
functionality.batch
execution functionality per the specifications laid out in issue #19.createType
/IfNotExists
and dropType
/IfExists
.frozen
/Set
/Map
/List
.options
support for eachRow
per issue #8.delete
.set
calls; e.g. .set(<Object := {<String>: <Mixed>, ...}>)
.eachRow
method.stream
method.createIndex
.allowFiltering
.
All CICD logic is managed by CircleCI via the configuration in the .circleci/
directory.
This configuration will automatically package and publish a new version to NPM when an appropriate Github Release is created.
So, to publish a new version, simply create a new Github Release whose name matches the current release version (i.e., v1.20.5
).