Install Docker and docker-compose globally on your machine, and open port 8080 to the public.
Create weavedb.config.js
in /grpc-node/node-server
directory.
admin
: an EVM private key for rollup node admin. The admin can add DBs to the node.bundler
: Arweave RSA keys for rollup bundler.rollups
: this can be either empty or filled with pre-defined DB instances. Each key is the database name.
secure
: passed down to the contract initial-state; always use true
in production (default).owner
: the DB contract owner EVM address.tick
: time span in millisecond in which tick query will be periodically executed.contractTxId
: Warp L1 contractTxId.plugins
: add offchain plugins, plugin scripts have to be placed in /grpc-node/node-server/plugins
with the same name.rollup
: a boolean value to enable rollup to Arweave/Warpmodule.exports = {
admin: EVM_PRIVATE_KEY,
bundler: ARWEAVE_RSA_KEYS,
rollups: {},
}
dir
: cache directory, defaults to /grpc-node/node-server/cache
.dbname
: cache database name, cache will be stored in dir/dbname
.sequencerUrl
: the Warp sequencer URL to use with SDK.apiKey
: a Warp gateway API key to use with SDK.arweave
: arweave network configurationweavedb_srcTxId
: WeaveDB contract srcTxIdweavedb_version
: WeaveDB contract versionnostr
: enable WebSocket for Nostr, this turns the node into a Nostr relay.
db
: the database name for Nostr events, there can be only one DB instance to receive Nostr events.snapshot
: config to store snapshots to GCP/S3.
count
: the number of bundles between snapshots (default to 100
).gcs
: Google Cloud Storage configurations (the gcs key in the example below)bucket
: [projectId].appspot.com
keyFilename
: gcs.json
s3
: S3 configurations. With everything included,
module.exports = {
dir: "/home/xyz/cache",
dbname: "mydb",
sequencerUrl: "https://gw.warp.cc/",
apiKey: "xxxxx",
snapshot:{
count: 100,
gcs: { bucket: "xyz.appspot.com", keyFilename: "gcs.json" }
},
admin: "privateky...",
arweave: {
host: "arweave.net",
port: 443,
protocol: "https",
},
weavedb_srcTxId: "Ohr4AU6jRUCLoNSTTqu3bZ8GulKZ0V8gUm-vwrRbmS4",
weavedb_version: "0.37.2",
bundler: {
kty: "RSA",
...
},
nostr: { db: "nostr" },
rollups: {
testdb: {
secure: true,
owner: "0xdef...",
tick: 1000 * 60 * 5,
contractTxId: "abcdef...",
rollup: true,
plugins: { notifications: {} },
},
nostr: {
owner: "0xdef...",
rollup: false,
}
},
}
If contractTxId
is specified and the rollup node is re-initialized without cache, it will auto-recover the rollup DB state from Warp L1 transaction history.
yarn run-rollup
Anyone can access the rollup node stats, which returns the deployed DB information.
const DB = require("weavedb-node-client")

// Connect to the local rollup node; "testdb" is the database key.
const opts = { rpc: "localhost:8080", contractTxId: "testdb" }
const db = new DB(opts)

// "stats" is open to anyone and returns the deployed DB information.
const stats = await db.node({ op: "stats" })
The admin EOA account can manage the rollup node and DBs from anywhere.
// Only the admin EOA can add a DB to the node.
const newDB = {
  app: "http://localhost:3000", // this will be shown on the explorer
  name: "Jots", // this will be shown on the explorer
  rollup: true,
  plugins: { notifications: {} },
  tick: 1000 * 60 * 5,
}
const tx = await db.admin(
  { op: "add_db", key: "testdb2", db: newDB },
  { privateKey: admin.privateKey }
)
You can recover existing db with contractTxId
after starting a new node.
// Recover an existing DB by passing its Warp L1 contractTxId.
const recovered = {
  app: "http://localhost:3000", // this will be shown on the explorer
  name: "Jots", // this will be shown on the explorer
  rollup: true,
  plugins: { notifications: {} },
  tick: 1000 * 60 * 5,
  contractTxId: "Warp_L1_contractTxId",
}
const tx = await db.admin(
  { op: "add_db", key: "testdb2", db: recovered },
  { privateKey: admin.privateKey }
)
// Deploy the L1 Warp contract for "testdb2".
const deployed = await db.admin(
  { op: "deploy_contract", key: "testdb2" },
  { privateKey: admin.privateKey }
)
// you will need the "contractTxId" for regular DB queries
const { contractTxId, srcTxId } = deployed
// The same operation, awaiting only the transaction result.
const deployQuery = { op: "deploy_contract", key: "testdb2" }
const tx = await db.admin(deployQuery, { privateKey: admin.privateKey })
// Remove the DB from the node.
const removeQuery = { op: "remove_db", key: "testdb2" }
const tx = await db.admin(removeQuery, { privateKey: admin.privateKey })
You will need the L1 contractTxId
from the deployment operation to instantiate the DB client.
All L2 transactions will be signed with L1 contractTxId
for L1/L2 verifiability.
const DB = require("weavedb-node-client")

// Instantiate the client with the L1 contractTxId from the deployment.
const db = new DB({ rpc: "localhost:8080", contractTxId })
const db_info = await db.getInfo()
We currently have only one plugin for Jots called notifications
which generates personal notifications from onchain Jots activities. The notification DB will be an offchain WeaveDB instance, which won't be recorded onchain. Not all data should be onchain, and offchain plugins solve this problem. WeaveDB can seamlessly run in multiple environments such as blockchain, offchain (local), browser and centralized cloud.
git clone https://github.com/weavedb/rdk.git
If you are running the rollup node on localhost:8080
, you can view blocks and transactions on our public WeaveDB Scan.
However, the public explorer may not be up-to-date. To run the latest explorer, go to explorer
folder.
cd rdk/explorer
yarn
yarn dev
Now the explorer is running locally at localhost:3000/node/localhost.
You can run Envoy separately on your computer, and run the bare rollup file index.js
without Docker. This way, you don't have to restart docker-compose every time you make changes in development.
yarn envoy
Then you can run the rollup server without Docker.
cd rdk/node/node-server
yarn
node index.js
To test rollup executions in your local environment, you can run arlocal (Arweave local testnet), and redirect WeaveDB SDK / Warp SDK to it.
You don't need to run Envoy for local tests. Envoy is to access the node from web browsers. Also, stop arlocal if you are running test scripts with mocha. The test scripts will start everything with a clean state.
We have Test
helper utility, to make testing easier. Here is some boilerplate for you to start writing tests.
const { expect } = require("chai")
const DB = require("weavedb-node-client")
const SDK = require("weavedb-sdk-node")
const { wait, Test } = require("./lib/utils")

// Integration test for the rollup node: the Test helper boots a local
// Arweave testnet and a rollup server, then the test exercises the full
// admin flow (add_db -> deploy_contract) and verifies that L2 writes are
// rolled up to the L1 Warp contract.
describe("rollup node", function () {
this.timeout(0)
let admin, network, bundler, test
before(async () => {
// testing in insecure mode, never do that in production
test = new Test({ secure: false })
;({ network, bundler, admin } = await test.start())
})
after(async () => {
await test.stop()
// some processes linger, so force exit for now
process.exit()
})
it("should start server", async () => {
// NOTE(review): the Test helper serves on 9090, not the production 8080
const db = new DB({
rpc: "localhost:9090",
contractTxId: "testdb",
arweave: network,
})
// a fresh node reports no deployed DBs
const stats = await db.node({ op: "stats" })
expect(stats).to.eql({ dbs: [] })
// add a DB to node
const tx = await db.admin(
{
op: "add_db",
key: "testdb",
db: {
app: "http://localhost:3000",
name: "Jots",
rollup: true,
owner: admin.address,
},
},
{ privateKey: admin.privateKey },
)
expect(tx.success).to.eql(true)
// fixed waits let node-side async processing settle — timing-sensitive
await wait(2000)
// deploy L1 warp contract (via node)
const { contractTxId, srcTxId } = await db.admin(
{ op: "deploy_contract", key: "testdb" },
{ privateKey: admin.privateKey },
)
expect((await db.node({ op: "stats" })).dbs[0].data.rollup).to.eql(true)
await wait(2000)
// check L1 warp contract info directly with SDK (not via node)
const warp_db = new SDK({
type: 3,
contractTxId,
arweave: network,
})
await warp_db.init()
expect((await warp_db.getInfo()).version).to.eql("0.37.2")
// update the DB (via node)
const db2 = new DB({
rpc: "localhost:9090",
contractTxId,
})
const Bob = { name: "Bob" }
const tx2 = await db2.set(Bob, "ppl", "Bob", {
privateKey: admin.privateKey,
})
expect(tx2.success).to.eql(true)
expect(await db2.get("ppl", "Bob")).to.eql(Bob)
// check rollup
// 5s allows one bundle to reach L1 — presumably enough; may flake if slower
await wait(5000)
expect(
(await warp_db.db.readState()).cachedValue.state.rollup.height,
).to.eql(1)
// check if L1 Warp state is the same as L2 DB state
expect(await warp_db.get("ppl", "Bob")).to.eql(Bob)
})
})
Run the tests.
cd rdk/node/node-server
yarn test