Open erights opened 6 years ago
I have a variant, that started because I wanted to combine sealing and stamping. The primary reason to combine them is so that the structure of the API makes sure the layers are applied correctly (unseal, then verify), so that the party each side of a communication trusts is the component invoked (sender trusts the brand, receiver trusts the unsealer), and so that the use-cases are more clearly and directly supported.
Structure changes:
makeBrand
, to create two facets: Stamp applies a brand, and the Brand facet verifies it. Stamped both Brands and unseal functions with an appropriate brand so that each party's trusted element can verify the untrusted element of the other side.
// Create a stamp/brand facet pair. Stamp marks payloads as carrying the
// brand; Brand verifies that marking. Requires BrandStamp and BoxerBrand
// (defined below) to be in scope.
function makeBrand() {
  const ws = new WeakSet();
  const Stamp = {
    // Mark the payload as carrying this brand. Returns the payload so
    // stamping can be chained inline, e.g. seal(stamp(payload)).
    stamp(payload) {
      ws.add(payload);
      return payload;
    },
    // seal after branding; error unless a real Boxer is provided
    seal(payload, sealFn) {
      BoxerBrand.demand(sealFn);
      Stamp.stamp(payload);
      return sealFn(payload);
    },
  };
  // The ability to test for a brand is the primary visible usage, hence it gets the name
  const Brand = {
    test(specimen) {
      return ws.has(specimen);
    },
    // Throw if the specimen is not of the brand
    demand(specimen) {
      if (Brand.test(specimen)) {
        return specimen;
      }
      // Throw a real Error (never a bare string) so callers get a stack trace.
      throw new Error('Brand check failed');
    },
  };
  // stamp the returned Brand as an authentic Brand
  BrandStamp.stamp(Brand);
  return [Stamp, Brand];
}
// A brand used for legit Brands
const [BrandStamp, BrandBrand] = makeBrand();
// A brand used for legit Boxers
const [BoxerStamp, BoxerBrand] = makeBrand();
// Create a sealer/unsealer pair. The returned unseal function is stamped
// as a legit Boxer so makeBrand's seal() can verify it. Requires
// BoxerStamp and BrandBrand to be in scope.
function makeBoxer() {
  const wm = new WeakMap();
  const result = {
    seal(payload) {
      const box = {};
      wm.set(box, payload);
      return box;
    },
    // Unseal the box if possible. If a Brand is provided, require that it is an
    // authentic Brand and that the unsealed result is of that Brand.
    unseal(box, brand = undefined) {
      const payload = wm.get(box);
      if (payload === undefined) {
        // Throw a real Error (never a bare string).
        throw new Error('Box not sealed with corresponding sealer');
      }
      if (brand) {
        BrandBrand.demand(brand);
        brand.demand(payload);
      }
      return payload;
    },
  };
  BoxerStamp.stamp(result.unseal);
  return result;
}
// per module
const [fromMeStamp, fromMeBrand] = makeBrand(); // plain call: makeBrand is a factory, not a constructor
export const isFromMe = fromMeBrand;
const forMe = makeBoxer(); // was `new Boxer()` — no Boxer constructor exists
export const sealForMe = forMe.seal;
// Alice says
carol(fromMeStamp.seal(payload, bob.sealForMe));
// Bob says
const payloadFromAlice = forMe.unseal(box, alice.isFromMe);
In the first example, to chain methods:
carol(bob.sealForMe(fromMe.stamp(payload)));
The payload must be returned from stamp():
// Factory for a stamp/test pair backed by a private WeakSet.
function Brand() {
  const members = new WeakSet();
  const facet = {
    stamp(payload) {
      members.add(payload);
      return payload; // <-- hand the payload back so stamping chains inline
    },
    test(specimen) {
      return members.has(specimen);
    },
  };
  return facet;
}
Here is Mike's example, converted to sealing and branding:
// Brand factory: stamp marks a payload (and returns it for chaining),
// test checks membership. Membership lives in a closure-held WeakSet.
function Brand() {
  const stamped = new WeakSet();
  const stamp = (payload) => {
    stamped.add(payload);
    return payload;
  };
  const test = (specimen) => stamped.has(specimen);
  return { stamp, test };
}
// Sealer/unsealer pair: seal wraps a payload in an opaque box; unseal
// recovers it (or undefined for a box this pair did not seal).
function Pair() {
  const contents = new WeakMap();
  const seal = (payload) => {
    const box = {};
    contents.set(box, payload);
    return box;
  };
  const unseal = (box) => contents.get(box);
  return { seal, unseal };
}
const alice = (() => {
  // Alice's provenance brand and her private inbox sealer.
  const fromMe = new Brand();
  const forMe = new Pair();
  const isFromMe = fromMe.test;
  const sealForMe = forMe.seal;
  // Stamp the message as genuinely from Alice, seal it for Bob's eyes
  // only, then hand it to Carol for delivery.
  function send() {
    const messageForBob = bob.sealForMe(fromMe.stamp({
      text: 'Have a nice day, Bob! Sincerely, Alice',
    }));
    console.group('Alice is sending');
    carol.convey(bob, messageForBob);
    console.groupEnd();
  }
  return Object.freeze({ isFromMe, sealForMe, send });
})();
const bob = (() => {
  // Bob's provenance brand and his private inbox sealer.
  const fromMe = new Brand();
  const forMe = new Pair();
  const isFromMe = fromMe.test;
  const sealForMe = forMe.seal;
  // Unseal an incoming box; only trust the contents if Alice's brand
  // vouches for their provenance.
  function mailbox(box) {
    const message = forMe.unseal(box);
    if (!alice.isFromMe(message)) {
      console.log('Bob ignored a message of questionable provenance!');
      return;
    }
    console.log(`Bob read from alice: ${message.text}`);
  }
  return Object.freeze({ isFromMe, sealForMe, mailbox });
})();
const carol = (() => {
  // Carol gets the same facets as everyone, though she never uses them here.
  const fromMe = new Brand();
  const forMe = new Pair();
  const isFromMe = fromMe.test;
  const sealForMe = forMe.seal;
  // Carol is evil!
  const evil = true;
  // Deliver Bob's mail, snooping and forging along the way when evil.
  function convey(recipient, message) {
    if (evil) {
      console.log('Carol got ' + message); // message is a surrogate. No leak.
      console.log('Carol unboxed ' + message.text); // key not present
    }
    // Carol delivers Bob's mail.
    recipient.mailbox(message);
    if (evil) {
      // Bob will not open it; alice.isFromMe() will return false.
      const forgery = { text: 'Have an evil day! Sincerely, Alice' };
      recipient.mailbox(forgery);
    }
  }
  return Object.freeze({ isFromMe, sealForMe, convey });
})();
// INIT
alice.send();
// Alice is sending
// Carol got [object Object]
// Carol unboxed undefined
// Bob read from alice: Have a nice day, Bob! Sincerely, Alice
// Bob ignored a message of questionable provenance!
Hi JF, looks good except that our favorite comment got truncated ;)
The below creates a value that can be unsealed by any of n intended recipients in O(1) time.
Is there any way to do such a multiplexing unseal or a multiplexing test in O(1) time with test
and seal
?
// whitelist.js
// Destructure apply from Reflect itself. The original `Reflect.apply`
// here destructured off the function object, yielding
// Function.prototype.apply instead of Reflect.apply.
const { apply } = Reflect;
const { has } = WeakSet.prototype;
// whitelist(...keys)(aKey) is true when aKey is a public key that is among ...keys and
// the call happened in the context of the private key corresponding to aKey
export function whitelist(...keys) {
  const keySet = new WeakSet(keys);
  // Use the captured `has`/`apply` so a later monkey-patch of
  // WeakSet.prototype cannot subvert the check.
  return (k) => frenemies.isPublicKey(k) && k() && apply(has, keySet, [k]);
}
Module m1 creates a whitelist granting privilege to modules [m2, m3, m4]
// m1.js
import { publicKey as m2pk } from './m2';
import { publicKey as m3pk } from './m3';
import { publicKey as m4pk } from './m4';
import { whitelist } from './whitelist';
const myWhitelist = whitelist(m2pk, m3pk, m4pk);
// const, not let: exporting a mutable binding would let this module
// silently rebind `box` under importers' feet.
export const box = frenemies.box('secret', myWhitelist);
Any unbox operations on the exported box run in time independent of the number of modules on the whitelist assuming WeakSet.prototype.has is constant time.
As documented at http://erights.org/javadoc/org/erights/e/elib/sealing/package-summary.html the E sealer/unsealer included one additional feature to enable O(1) lookup, analogous to including an alleged key fingerprint (unforgeable token) with the cyphertext (box). Adapted to this example:
// Sealer/unsealer pair carrying a shared unforgeable fingerprint token,
// analogous to an alleged key fingerprint accompanying cyphertext.
// Relies on an in-scope `def` (defensive freeze) function.
function Pair() {
  const contents = new WeakMap();
  const fingerprint = def({});
  function seal(payload) {
    const box = def({ fingerprint });
    contents.set(box, payload);
    return box;
  }
  function unseal(box) {
    return contents.get(box);
  }
  seal.fingerprint = fingerprint;
  unseal.fingerprint = fingerprint;
  return def({ seal, unseal });
}
This enables us to build something like the KeyKOS capOpener as a combinator of unseal functions:
// Combinator over unseal functions (KeyKOS capOpener style): route a box
// to the registered unsealer whose fingerprint matches the box's alleged
// fingerprint.
function makeCanOpener(unsealers) {
  const byFingerprint = new WeakMap();
  for (const unsealer of unsealers) {
    byFingerprint.set(unsealer.fingerprint, unsealer);
  }
  // Returns undefined when no registered unsealer matches — detectable,
  // non-erroneous failure — instead of throwing an opaque TypeError from
  // calling undefined.
  return function unseal(box) {
    const unsealer = byFingerprint.get(box.fingerprint);
    return unsealer === undefined ? undefined : unsealer(box);
  };
}
Doing this for stamp/test is more difficult because only the payload is passed, and it may be multiply stamped. But O(logN) should not be difficult.
@mikesamuel
The below creates a value that can be unsealed by any of n intended recipients in O(1) time. Is there any way to do such a multiplexing unseal or a multiplexing test in O(1) time with test and seal?
Given the implementation of sealers with WeakMap, it would just need to be added to the WeakMap of each of the Boxers, right (I refuse to say Pair :). That would require some code addition, but once there it would achieve the O(1) you are looking for?
I hate "Pair" too. I hate "Boxer" less but still hate it. Better name suggestions?
Changing the sealer/unsealer to be separate named facets as well (like the stamp/brand) instead of member functions in an outer named thing would eliminate the issue, since you would just have a sealer and unsealer.
I understand the fingerprint, but I don't understand how that helps with the whitelist.
It looks like the can opener allows me to open something sealed by one of many.
But if I want to grant privilege based on a whitelist, I want to seal something so that any one of many can unseal it.
How might that work?
But if I want to grant privilege based on a whitelist, I want to seal something so that any one of many can unseal it.
How might that work?
Send a function from fingerprints to sealed messages.
// Dual of the can opener: given many sealers, produce a function from a
// value to a function from fingerprints to that value sealed for the
// matching recipient. Sealing is deferred until a fingerprint is presented.
function makeCanCloser(sealers) {
  const byFingerprint = new WeakMap();
  sealers.forEach((sealer) => {
    byFingerprint.set(sealer.fingerprint, sealer);
  });
  return (value) => (fingerprint) => byFingerprint.get(fingerprint)(value);
}
// sender usage:
const canCloser = makeCanCloser(sealers);
const msg = canCloser(value);
// recipient usage:
const value = unsealer(msg(unsealer.fingerprint));
Thanks, @andersk
I put together a standalone runnable version.
@andersk , I see from @mikesamuel 's code that your line 6 should end in (value)
rather than (box)
.
Very cool pattern! Thank you both. Postponing the sealing until just before the unsealing is clever and I didn't see it coming.
Another difference: @andersk 's original version throws on non-match, while @mikesamuel 's returns void 0
(i.e., undefined
) rather than a box. If we want detectable failure to be considered a non-erroneous condition to test for, then I think undefined
is better. One can build the throwing behavior cleanly on the other, but not so much vice versa.
@erights Corrected, thanks.
The number of variants people seem to want suggests to me (as, I think, to you) that this would be better positioned as a library than as a language proposal, now that we’ve shown we can implement the needed functionality efficiently without language extensions.
Yes I think so, at least until we gain experience using it as a library.
@andersk said
The number of variants people seem to want suggests to me (as, I think, to you) that this would be better positioned as a library than as a language proposal, now that we’ve shown we can implement the needed functionality efficiently without language extensions.
We've shown that we can implement sealers/unsealers in library code. We haven't shown that we can provide identities for modules in library code.
I think we could pare down the proposal and separate out some library code, but I don't see a way to solve the module identity problem without any language proposal.
The benefit of doing it as a language proposal is that one module can assume that others have them. If it's hard to get a principal's public key or sealer, then it's hard to write code that depends on them having an identity.
https://www.npmjs.com/package/module-keys provides a babel plugin that takes care of this, but if a library maintainer wants to depend on keys, then they have to require all clients use this plugin. That's a much larger pill to swallow.
Using a babel plugin means that you are writing something that is not JavaScript. If it's not even a tentative future variant of JavaScript, that's an additionally large pill to swallow.
@erights said
If we want detectable failure to be considered a non-erroneous condition to test for, then I think undefined is better. One can build the throwing behavior cleanly on the other, but not so much vice versa.
For boxes, I provided a function that takes a second fallback parameter that is returned in case of failure. This allows building a throwing version by passing in a sentinel value for fallback.
Maybe I’m missing something about the rationale. Is it really so hard to write
import Pair from "some-library";
const { seal, unseal } = Pair();
// `export seal;` is not valid ES module syntax; re-export the binding:
export { seal };
in every module that uses this functionality? Or if it’s really about having this in every module, what good does it do to force an identity upon modules that will never unseal anything?
Why should this be tied to the module system? What if a module wants to manage multiple identities, either to avoid a confused deputy problem, or just to maintain compatibility after an internal refactoring? Or what if a class wants to dynamically create separate identities for each instance?
@andersk [Replying inline & out of order]
What if a module wants to manage multiple identities, either to avoid a confused deputy problem, or just to maintain compatibility after an internal refactoring? Or what if a class wants to dynamically create separate identities for each instance?
Nothing prevents creating special purpose identities or at a different granularity.
Is it really so hard to write \<polyfill> in every module that uses this functionality?
Yes. If I want to preserve system-level security properties by creating lists of modules that should have some access, I need a way to identify those modules.
For example, I want to craft a list of modules that the project team has decided warrant access to the abusable authority embodied by Node's child_process
API.
I want to be able to potentially put any module on a whitelist even if that module never needs to seal or unseal anything.
I could use a string module specifier, but those don't survive directory renaming, transpilation or bundling in the way that a sealer or public key function does.
Why should this be tied to the module system?
Modules are coarse grained, but granular enough that few modules need any particular privilege.
A project team can better approximate POLA by withholding most privileges from most modules.
Modules have an identifiable purpose and exist across multiple versions of a product, so reasoning about what privileges a module needs are likely to apply largely unchanged to the next version of the module.
Modules are also a natural unit for human code review to focus on, which enables reasoning about why a module is trustworthy with respect to a particular privilege.
But to do that we need some way to identify modules regardless of whether the module has any interest in its own identity. The interest in identity comes from project level cross-cutting concerns.
I could use a string module specifier, but those don't survive directory renaming, transpilation or bundling in the way that a sealer or public key function does.
The only thing that could really survive that is in-source code, which would require cooperation from the module author. How else could a module be uniquely identified besides its contents (requiring opt-in) or its import specifier?
@ljharb
I'm not sure we're talking about the same thing. Let me answer your question as I naively understand it and you can tell me where I'm missing the point.
The only thing that could really survive that is in-source code, which would require cooperation from the module author.
If in my whitelist I do,
const whitelist = new Set([ './foo.js', './bar.js' ]); // module specifiers.
there's no way for a transpiler or bundler to recognize those strings as module specifiers.
If instead I do
// Named-import syntax requires braces around the binding list.
import { publicKey as keyForFoo } from './foo.js';
import { publicKey as keyForBar } from './bar.js';
const whitelist = new Set([keyForFoo, keyForBar]);
there are clear cues that any transpiler that inlines or bundles modules can use.
None of that requires cooperation from the author of either ('./bar.js', './foo.js') if modules implicitly have identities.
How else could a module be uniquely identified besides its contents (requiring opt-in) or its import specifier?
Via a functional public/private key analogue with a default public export :) @andersk has shown that sealer/unsealer pairs with auto-export of the sealer are also sufficient for identity.
Thanks, your first answer is clear. As for the second, so you’re saying that modules would all have a second “id” interface that this syntax would access? If so, would a module be able to override this, and for what use cases, and if so, how would that assure maintaining the guarantees about module identity?
@ljharb I was imagining that a module would implicitly export its public key via an export binding named "publicKey" unless there is an explicit export binding with the same name.
Any implicit publicKey
export binding would not be part of *
so that import * from ...
would not override it. In this way I think it's similar to the 'default' export binding.
The name "id" would work as well, but short names are more prone to accidental conflict.
You can see the polyfill via the polyfilling babel plugin or in the testcases: before vs after
How about
import * as foo from './foo.js';
import * as bar from './bar.js';
const whitelist = moduleKeys([foo, bar]);
where
// Collect the public keys of just those module namespace objects that
// actually export one; modules without a publicKey are silently skipped.
function moduleKeys(modules) {
  return new Set(
    modules.flatMap((m) => ('publicKey' in m ? [m.publicKey] : []))
  );
}
so that any module can be added to the whitelist without cooperation from its author, and a module need only be modified to export a publicKey
when it is also modified to take advantage of a capability granted to it?
@andersk, are you saying that the module namespace object could serve as a proxy for module identity? I don't know enough about that part of the spec to know whether it'd work. There is a trusted path to it though so impersonation shouldn't be a problem.
@mikesamuel so if a module already had export const publicKey = 3
, would that not be identifiable? If a module had export * from './other'
, would it get the other's publicKey, or its own (you mentioned this)? What if "other" had an explicit publicKey export, would that override its own actual ID?
The committee wasn't willing to risk web compat issues with the named export of "then", I doubt it'd fly for any other IdentifierName, including "publicKey".
In other words, any name that could possibly be an existing named export is likely a nonstarter.
I very much like the idea of adding a symbol on the Module Namespace object that provided the ID. That seems like it would address a lot of issues - it would prevent needing new syntax, it would avoid any possible conflicts with export names, and it would prevent individual modules from being able to break useful properties that emerge when every module has a unique ID.
@mikesamuel No, I’m just still trying to figure out if we can’t get the properties you’re looking for without a language extension. When I suggested adding three lines to every participating module to export its own key, you objected that you wanted to be able to add an unmodified module to a whitelist without cooperation from the module’s author. So I suggested a simple scheme to let you do that: test whether the publicKey
attribute is present before adding that publicKey
to the whitelist set. Nothing magical is going on. If the module is unmodified, it cannot yet be attempting to use the capability that would have been granted to its key, so there’s no harm in dropping it from the whitelist.
@andersk
If the module is unmodified, it cannot yet be attempting to use the capability that would have been granted to its key, so there’s no harm in dropping it from the whitelist.
This seems to assume that a module would have to reference its key to make use of a granted privilege. I do not assume that. For example, the privilege granted might be the privilege to import a sensitive module as explained at https://youtu.be/1Gun2lRb5Gw?t=25m42s using something like the (deferred) import traps.
That was what I meant when I said "I want to be able to potentially put any module on a whitelist even if that module never needs to seal or unseal anything."
@ljharb
In other words, any name that could possibly be an existing named export is likely a nonstarter.
Acknowledged.
I very much like the idea of adding a symbol on the Module Namespace object that provided the ID. That seems like it would address a lot of issues - it would prevent needing new syntax, it would avoid any possible conflicts with export names, and it would prevent individual modules from being able to break useful properties that emerge when every module has a unique ID.
So maybe
import * as foo from './foo.js';
import * as bar from './bar.js';
const whitelist = makeWhitelist([
foo[Symbol.moduleKey],
bar[Symbol.moduleKey],
]);
A module still needs access to its private key or unsealer. How problematic would adding names for those to the environment records be?
Where would the private half of a key reside?
import.meta
seems a natural place, but is problematic because that object seems likely to leak.
import.private
is unambiguous, but is new syntax.
Please do not make it reachable by syntax if we can help it. Please do not use import.meta as we're likely to suppress it for a long time. However, I do not yet have a positive suggestion.
On Thu, Aug 23, 2018 at 9:04 AM Mike Samuel notifications@github.com wrote:
@ljharb Where would the private half of a key reside?
import.meta seems a natural place, but is problematic because that object seems likely to leak.
import.private is unambiguous, but is new syntax.
This seems to assume that a module would have to reference its key to make use of a granted privilege. I do not assume that. For example, the privilege granted might be the privilege to import a sensitive module as explained at https://youtu.be/1Gun2lRb5Gw?t=25m42s using something like the (deferred) import traps.
Import traps have been (temporarily?) removed from the current realms proposal, but from the historical version, I gather that they receive a reference to the referencing module namespace object. Presumably there’s intentionally no way to look up the private key from the module namespace object. So any access control decision would be based on using the public key as an opaque identifier: “if the referencing module’s public key is in the whitelist of public keys allowed to import child_process
, then allow the import” (as opposed to something like “if the referencing module knows how to unseal this token, then allow the import”). But in that case, couldn’t we just skip the keys and use the module namespace object itself as the opaque identifier instead?
(To support both types of use cases, the whitelist would then consist of a set of module namespace objects, each with optional public keys.)
Perhaps import traps were just an example, but this argument would seem to generalize to all situations where the module isn’t referencing its own key.