From e4c0927321487751cedbd7ca53b90d60153a4d04 Mon Sep 17 00:00:00 2001 From: Alan Gutierrez Date: Mon, 26 Jul 2021 22:37:07 -0500 Subject: [PATCH] Thoughts on `async`/`await`. --- compassion.js | 170 +++++++++++++++++++++++++++++--- names.txt | 226 +++++++++++++++++++++++++++++++++++++++++++ package.json | 5 +- test/compassion.t.js | 55 ++++++++--- 4 files changed, 430 insertions(+), 26 deletions(-) create mode 100644 names.txt diff --git a/compassion.js b/compassion.js index 8062c2c..ad5ab83 100644 --- a/compassion.js +++ b/compassion.js @@ -5,6 +5,9 @@ const Reactor = require('reactor') const { Timer, Scheduler } = require('happenstance') const Kibitzer = require('kibitz') const Keyify = require('keyify') +const { Future } = require('perhaps') + +const Vivifier = require('vivifyer') const ua = require('./ua') const discover = require('./discover') @@ -88,8 +91,23 @@ class Conference { // Outbound messages. this._messages = new Queue().shifter().paired + // One-on-one messages. + this._calls = new Map() + + // One-on-many messages. + this._maps = new Map() + + // Countdowns can leak. If the other participants don't actually + // participate in the countdown, then the countdown will not resolve, + // but you'll probably notice the leak because you're waiting on a + // countdown that never resolved. So, it would hang also. + // **TODO** Let the messages swim to exit. Why? We're leaving. The log // is supposed to just truncate anyway. + this._countdowns = { + tracking: new Map(), + awaiting: new Map() + } // Construct our application passing ourselves as the first argument. this.application = consumer @@ -110,21 +128,132 @@ class Conference { // TODO We need to start these loops delayed, yes, but only after we've // arrived. 
- enqueue (body) { + enqueue (body, countdown = null) { const cookie = `${this.id}/${(this._cookie = BigInt(this._cookie) + 1n).toString(16)}` this._messages.queue.push({ - module: 'conference', - method: 'map', - key: cookie, - from: { + module: 'conference', + method: 'map', + countdown: countdown, + key: cookie, + from: { id: this.id, arrived: this._government.arrived.promise[this.id] }, - body: body + body: body }) return cookie } + async call (body, countdown = null) { + const future = new Future + this._calls.set(this.enqueue(body, countdown), future) + return future.promise + } + + async mapped (body, countdown = null) { + const future = new Future + this._maps.set(this.enqueue(body, countdown), { future, type: 'mapped' }) + return future.promise + } + + async arrayed (body, countdown = null) { + const future = new Future + this._maps.set(this.enqueue(body, countdown), { future, type: 'arrayed' }) + return future.promise + } + + // TODO You now have an interface issue. You can see that you can have + // everyone talk to everyone else using `countdown` and any of the + // above functions waiting for them to finish. The above functions would + // take an optional identifier that could be used to indicate that they are + // countdowns, and you can always use a promise to get a unique identifier + // shared by all the participants. + + // But should countdown return anything other than perhaps always `true`? + // What should it otherwise return? The results from the countdown? If so, + // should it be arrayed or mapped? Should we have `mappedCountdown` and + // `arrayedCountdown` (yuck!) or should we make the type or return a flag or + // should we return both arrayed and mapped in an object and have the caller + // destructure the desired result? (If you revisit this, it is the last that + // appeals to you. You can do a real `reduce with + conference.map().arrayed.reduce()`. 
+ + // For now, you've decided that having an `async`/`await` of any sort is + // enough to build applications, so `countdown` will await and return + // nothing or simply `true`. + + // TODO And another interface issue. Do we countdown by virtue of sending a + // message or do we countdown by virtue of invoking a call? That is, is a + // countdown waiting for everyone to simply send a message to the other, or + // is it waiting for a call? + + // At this point there is no difference. We don't have a send that doesn't + // result in a round of paxos for each response, although everyone sending + // one message to be received by all would be more efficient. + + // In the case of Diffuser redistributing a lookup table, we could do a map + // reduce where the entries for a shard are returned, or each participant + // could post their connections and the shard could pluck the relevant ones. + + // But, then you start getting into trying to send a message and get a + // single result. You realize that in this case, it may be the case that the + // one participant who can handle that result, say adding an entry to a + // sharded key-value store, it may have crashed, disappeared and you're + // going to have to wait for a departure to rebalance. If it is the only + // place where that information is stored, you're in for a surprise. This is + // all hinting at creating the half-paxos that has a fail-over. + + // And so, is it worth it to try to save the additional Paxos rounds? + + // And yet another problem with countdown. When can we garbage collect a + // tracked countdown? Do we assume that everyone will reap it. We must, + // otherwise there will have to be a timeout to reap it. Oh, no, wait it + // can't complete until it is invoked by all, so we can insist that you + // create the wait before you throw your countdown into the ring. + + // Ah, but isn't it the case that sending the message is almost never 
You can't just send your entries and when you've seen that + // everyone has sent their entries proceed. + + // So, let's hold off on all these brilliant performance brainstorms for a + // bit. + + // Countdown counts down a call and a response, either a call, or arrayed, + // or mapped, annotated with the given key. If `manyToMany` is true, + // countdown will return true when all participants have made a call with + // the countdown annotation. If it is false, countdown will return when the + // first call with the annotation completes. But, wait, how do we know if a + // countdown is manyToMany or oneToMany? + + // Oh, no. If it is one-to-one, we want to know that an arrival has made an + // initialization call, then we do have a problem with countdown where if + // you do not invoke countdown you will leak. You will await until the + // countdown is completed. This is true. It does need to be documented. + + // Okay, so maybe the countdown is either always many-to-many, or else + + // Okay, my goodness, okay. If there is some response to an event that + // requires a countdown, there is no need to worry about losing track of the + // countdown. You create the countdown promise first. Any counting down will + // be further along in the log. It cannot have already happened. + + // Countdown awaits either a single call and response from any participant + // with the given countdown annotation or a call from all participants with + // the given countdown annotation. It is a way for one participant to await + // the realization of an entry in the event log by another participant that + // may not have received the entry or maybe it already processed the entry. 
+ + // If you want to count a many to many response, you should invoke countdown + // to create a promise prior to + + // + async countdown (key, manyToMany = true) { + const future = new Future + this._countdowns.awaiting.set(this.enqueue(key), { future, manyToMany }) + return future.promise + } + + // async broadcasts (promise) { return await this._cubbyholes.broadcasts.get(promise) } @@ -138,7 +267,6 @@ class Conference { const properties = entry.properties if (entry.body.arrive) { const arrival = entry.body.arrive - console.log(arrival, this.id, entry.body) if (entry.body.promise == '1/0') { await this.application.bootstrap() } else if (arrival.id == this.id) { @@ -178,7 +306,6 @@ class Conference { if (arrival.id == this.id) { this.destructible.durable('enqueue', async () => { for await (const message of this._messages.shifter) { - console.log(message) this._kibitzer.publish(message) } }) @@ -269,10 +396,9 @@ class Conference { } this._kibitzer.paxos.acclimate() } else { - // Bombs on a flush! - - // Paxos body, Islander body, Conference body, user body. - assert(entry.body.body.body) + // Paxos body, Islander body, Conference body, user body. The user + // body can be null, but it should be there. + assert('body' in entry.body.body) // Reminder that if you ever want to do queued instead async then the // queue should be external and a property of the object the conference // operates. 
@@ -284,6 +410,7 @@ class Conference { const request = envelope.request this._broadcasts[envelope.key] = { key: envelope.key, + countdown: envelope.countdown, from: envelope.from, method: envelope.method, body: envelope.body, @@ -309,6 +436,10 @@ class Conference { case 'reduce': const broadcast = this._broadcasts[envelope.key] broadcast.responses[envelope.from] = envelope.body + if (envelope.body != null && this._calls.has(envelope.key)) { + this._calls.get(envelope.key).resolve({ from: envelope.from, body: envelope.body }) + this._calls.delete(envelope.key) + } await this._checkReduced(broadcast) break } @@ -329,7 +460,6 @@ class Conference { return } } - // Feel like all ids should just be hidden from the user, but I dunno, // simplified? Maybe the user is doing some sort of system management // and address properties of paxos are meaningful? @@ -350,6 +480,7 @@ class Conference { // `reduced` postback with both the id and the arrived promise because // the only responses provided are those that are still present in the // government at the time of this postback. + console.log(this._government) await this.application.reduce({ self: { id: this.id, arrived: this._government.arrived.promise[this.id] }, cookie: broadcast.cookie, @@ -360,6 +491,19 @@ class Conference { arrayed: reduced, mapped: broadcast.responses }) + + const call = this._calls.get(broadcast.key) + if (call != null) { + call.resolve(null) + this._calls.delete(broadcast.key) + } + + const map = this._maps.get(broadcast.key) + if (map != null) { + map.future.resolve(map.type == 'mapped' ? 
broadcast.responses : reduced) + this._maps.delete(broadcast.key) + } + delete this._broadcasts[broadcast.key] } } diff --git a/names.txt b/names.txt new file mode 100644 index 0000000..50add32 --- /dev/null +++ b/names.txt @@ -0,0 +1,226 @@ +memento +pastiche +reiterate +switch +conduit +swimlane +linotype +udt +signal +connect-npm +puppy +ec2 +register +prove +addendum +relatable +stencil +inquiry +edify +reactor +r-tree +xy +locate +expandable +sequester +correlate +mvcc +prospect +hash.block +hash.murmur3.32 +indexeddb +hash.stream +subordinate +empathy +programmatic +moxie +hash.zero +packet +admeasure +rescue +blocker +locket +avenue +pointcut +arguable +monotonic +magazine +inlet +delta +transcript +happenstance +proof +staccato +journalist +revise +twiddle +constrain +encode +amalgamate +timezone +turnstile +cadence +delineate +splice +skip +paxos +riffle +kibitz +advance +pair +designate +homogenize +dilute +b-tree +prolific +vizsla +semblance +assessment +interrupt +abend +synonymous +supersede +wafer +hotspot +operation +adhere +eject +isochronous +copacetic +reconfigure +restrictor +mingle +perhaps +wildmap +vestibule +snafu +permeate +hash.fnv +hash.djb +establishment +cliffhanger +loiter +compassion +prolific.queue +prolific.collector +prolific.shuttle +prolific.sender.tcp +prolific.monitor +prolific.tcp +prolific.supervisor +prolific.inherit +prolific.ipc +islander +sympatico +prolific.sender.stream +prolific.pumper +prolific.sender.udp +prolific.stdout.udp +prolific.stdout.tcp +prolific.child.tcp +tzwhere.http +edify.markdown +edify.highlight +mingle.static +misnomer +reinstate +mingle.uptime +prolific.chunk +prolific.consolidator +compassion.colleague +compassion.conduit +prolific.sender.stdio +prolific.main +prolific.logger +querylist +recover +prolific.stdio +prolific.syslog +prolific.udp +mingle.srv +conference +prolific.filter +edify.include +compassion.channel +prolific.sink +prolific.resolver +prolific.file +verbatim +mingle.kubernetes 
+chaperon +indenter +departure +demur +resurrect +colleague +exclusive +procession +fracture +afterburner +interlocutor +edify.pug +nascent +nascent.destructor +nascent.coalesce +nascent.upgrader +nascent.jacket +nascent.rendezvous +edify.ls +wiseguy +ascension +prolific.stringify +destructible +downgrader +extant +assignation +cubbyhole +vivifyer +compassion.counterfeiter +keyify +recuperate +prolific.pipeline +prolific.sprintf +prolific.tee +pushback +prolific.l2met +thereafter +nascent.environment +salvageable +olio +finalist +descendent +prolific.reduce +prolific.evaluator +prolific.aggregate +compassion.conference +prolific.accept +prolific.acceptor +prolific.level +prolific.facility +prolific.sequence +foremost +inlet.prolific +inlet.udp +compassion.canary +diffuser +prolific.extract +prolific.revise +prolific.environment +prolific.require +mingle.http +prolific.test.udp +prolific.test.tcp +hash.8.32 +prolific.http +prolific.influxdb +prospective +prolific.error +prolific.watcher +duplicitous +descendant +reciprocate +satiate +whittle +writeahead +hash.fnv.crypto +comeuppance diff --git a/package.json b/package.json index dd850fc..cd1999c 100644 --- a/package.json +++ b/package.json @@ -29,16 +29,17 @@ "interrupt": "11.0.0-alpha.23", "keyify": "^3.0.0", "kibitz": "^3.0.0-alpha.11", + "perhaps": "0.0.11", "prolific.logger": "^11.0.0", "reactor": "^17.0.0-alpha.10", "staccato": "13.0.0-alpha.10", "transcript": "0.1.5", - "verbatim": "0.1.5" + "verbatim": "0.1.5", + "vivifyer": "3.0.0" }, "devDependencies": { "destructible": "7.0.0-alpha.67", - "perhaps": "0.0.11", "proof": "^9.0.3" }, "main": "compassion", diff --git a/test/compassion.t.js b/test/compassion.t.js index f010c1c..9125b58 100644 --- a/test/compassion.t.js +++ b/test/compassion.t.js @@ -33,7 +33,7 @@ require('proof')(4, async okay => { await snapshot.shift() } - async arrive ({ arrival }) { + async arrive ({ self: { id }, arrival }) { await this._future.promise this._snapshots[arrival.promise] = 
JSON.parse(JSON.stringify(this._values)) return true @@ -46,11 +46,26 @@ require('proof')(4, async okay => { return true } - async map ({ body }) { - await this._future.promise - this.events.push({ method: 'map', body }) - this._values[body.key] = body.value - return true + async map ({ self, government, body }) { + switch (body.method) { + case 'put': { + await this._future.promise + this.events.push({ method: 'map', body }) + this._values[body.key] = body.value + return true + } + break + case 'leader': { + if (self.id == government.majority[0]) { + return true + } + } + break + case 'map': { + return self.arrived + } + break + } } async reduce ({ arrayed }) { @@ -63,7 +78,7 @@ require('proof')(4, async okay => { } set (key, value) { - this.client.enqueue({ key, value }) + this.client.enqueue({ method: 'put', key, value }) } get (key) { @@ -114,13 +129,31 @@ require('proof')(4, async okay => { participants.push(await Participant.create(census.shifter())) census.push([ participants[0].url, participants[1].url ]) - console.log('waiting') - okay(await participants[0].shifter.join(entry => { - console.log(entry) + okay(await participants[1].shifter.join(entry => { return entry.method == 'acclimated' }), { method: 'acclimated' }, 'acclimated') - console.log('waiting') + const leader = await participants[1].kv.client.call({ method: 'leader' }) + okay(leader.from, '1/0', 'call') + + const called = await participants[1].kv.client.call({ method: 'unknown' }) + okay(called, null, 'no answer') + + const mapped = await participants[1].kv.client.mapped({ method: 'map' }) + okay(mapped, { '1/0': '1/0', '2/0': '2/0' }, 'mapped') + + const arrayed = await participants[1].kv.client.arrayed({ method: 'map' }) + okay(arrayed.map(item => item.value), [ '1/0', '2/0' ], 'arrayed') + + /* + const countdown = participants[1].kv.client.countdown('key') + + for (const participant of participants) { + await participant.kv.client.call({ method: 'leader' }, 'key') + } + + await countdown + 
*/ census.push(null) destructible.destroy()