From 675c080029884f0b08fc0ece7101e75dc17aa908 Mon Sep 17 00:00:00 2001
From: Caleb Callaway
Date: Sun, 11 Oct 2020 21:01:48 -0700
Subject: [PATCH] Import Ghost content

---
 content/blog/about/_index.md                  |  15 +++
 ...atal-region-error-detected-run-recovery.md |  20 ++++
 content/blog/borderlands-3-in-proton.md       |  23 ++++
 content/blog/cinnamon-raisin-oatmeal.md       |  13 +++
 content/blog/clear-ice.md                     |  17 +++
 content/blog/cold-brew-at-home-2.md           |  35 ++++++
 content/blog/cold-brew-recipes.md             |  42 +++++++
 .../blog/comparing-confidence-intervals.md    |  79 +++++++++++++
 content/blog/complex-type-syntax.md           |  40 +++++++
 .../confidence-intervals-for-benchmarks.md    |  25 ++++
 content/blog/dissension.md                    |  15 +++
 content/blog/ethereum-mining.md               |  82 +++++++++++++
 content/blog/ghost-1-0.md                     |  42 +++++++
 ...ng-name-errors-with-short-circuit-logic.md |  47 ++++++++
 content/blog/inbox-management.md              |  42 +++++++
 content/blog/just-dont-break-the-law.md       |  23 ++++
 content/blog/managing-the-namespace.md        |  23 ++++
 content/blog/mass-effect-savegame-import.md   |  22 ++++
 content/blog/mothballs.md                     |  18 +++
 content/blog/namespace-collisions.md          |  28 +++++
 ...r_discovery-failed-to-add-default-route.md |  31 +++++
 content/blog/new-monitor.md                   |  17 +++
 content/blog/newt.md                          |  22 ++++
 content/blog/notes-on-agile-scrum.md          |  23 ++++
 .../parallel-computing-use-short-threads.md   |  37 ++++++
 .../passwords-are-kind-of-awesome-actually.md |  17 +++
 content/blog/presentations.md                 |  23 ++++
 content/blog/private-key-protection.md        |  75 ++++++++++++
 content/blog/proton-field-guide.md            | 108 ++++++++++++++++++
 .../blog/running-vulkan-conformance-tests.md  |  22 ++++
 content/blog/sequence-to-coordinates.md       |  29 +++++
 .../blog/smart-pointer-copy-performance.md    |  87 ++++++++++++++
 content/blog/static-types-beneficial.md       |  19 +++
 content/blog/straight-edges-part-2.md         |  19 +++
 content/blog/straight-edges.md                |  21 ++++
 content/blog/sum-types-continued.md           |  39 +++++++
 .../blog/sum-types-structural-vs-nominal.md   |  83 ++++++++++++++
 content/blog/swedish-pancakes.md              |  28 +++++
 content/blog/the-dweb-needs-work.md           |  57 +++++++++
 content/blog/the-great-filter.md              |  17 +++
 content/blog/tortillas-from-scratch.md        |  17 +++
 content/blog/what-are-computers.md            |  21 ++++
 42 files changed, 1463 insertions(+)
 create mode 100644 content/blog/about/_index.md
 create mode 100644 content/blog/bdb0060-panic-fatal-region-error-detected-run-recovery.md
 create mode 100644 content/blog/borderlands-3-in-proton.md
 create mode 100644 content/blog/cinnamon-raisin-oatmeal.md
 create mode 100644 content/blog/clear-ice.md
 create mode 100644 content/blog/cold-brew-at-home-2.md
 create mode 100644 content/blog/cold-brew-recipes.md
 create mode 100644 content/blog/comparing-confidence-intervals.md
 create mode 100644 content/blog/complex-type-syntax.md
 create mode 100644 content/blog/confidence-intervals-for-benchmarks.md
 create mode 100644 content/blog/dissension.md
 create mode 100644 content/blog/ethereum-mining.md
 create mode 100644 content/blog/ghost-1-0.md
 create mode 100644 content/blog/hiding-name-errors-with-short-circuit-logic.md
 create mode 100644 content/blog/inbox-management.md
 create mode 100644 content/blog/just-dont-break-the-law.md
 create mode 100644 content/blog/managing-the-namespace.md
 create mode 100644 content/blog/mass-effect-savegame-import.md
 create mode 100644 content/blog/mothballs.md
 create mode 100644 content/blog/namespace-collisions.md
 create mode 100644 content/blog/ndisc_router_discovery-failed-to-add-default-route.md
 create mode 100644 content/blog/new-monitor.md
 create mode 100644 content/blog/newt.md
 create mode 100644 content/blog/notes-on-agile-scrum.md
 create mode 100644 content/blog/parallel-computing-use-short-threads.md
 create mode 100644 content/blog/passwords-are-kind-of-awesome-actually.md
 create mode 100644 content/blog/presentations.md
 create mode 100644 content/blog/private-key-protection.md
 create mode 100644 content/blog/proton-field-guide.md
 create mode 100644 content/blog/running-vulkan-conformance-tests.md
 create mode 100644 content/blog/sequence-to-coordinates.md
 create mode 100644 content/blog/smart-pointer-copy-performance.md
 create mode 100644 content/blog/static-types-beneficial.md
 create mode 100644 content/blog/straight-edges-part-2.md
 create mode 100644 content/blog/straight-edges.md
 create mode 100644 content/blog/sum-types-continued.md
 create mode 100644 content/blog/sum-types-structural-vs-nominal.md
 create mode 100644 content/blog/swedish-pancakes.md
 create mode 100644 content/blog/the-dweb-needs-work.md
 create mode 100644 content/blog/the-great-filter.md
 create mode 100644 content/blog/tortillas-from-scratch.md
 create mode 100644 content/blog/what-are-computers.md

diff --git a/content/blog/about/_index.md b/content/blog/about/_index.md
new file mode 100644
index 0000000..dffaff5
--- /dev/null
+++ b/content/blog/about/_index.md
@@ -0,0 +1,15 @@
++++
+author = "Caleb Callaway"
+date = 2016-01-29T12:35:25Z
+description = ""
+draft = false
+slug = "about"
+title = "About"
+
++++
+
+
+A place on the Internet to brain-dump about various topics, mostly technical; social media doesn't give me as much control over the data as I'd like.
+
+Much can be inferred about the nature of my interests from the information on [the homepage](/).
+
diff --git a/content/blog/bdb0060-panic-fatal-region-error-detected-run-recovery.md b/content/blog/bdb0060-panic-fatal-region-error-detected-run-recovery.md
new file mode 100644
index 0000000..8d0d642
--- /dev/null
+++ b/content/blog/bdb0060-panic-fatal-region-error-detected-run-recovery.md
@@ -0,0 +1,20 @@
++++
+author = "Caleb Callaway"
+date = 2018-03-03T12:53:35Z
+description = ""
+draft = false
+slug = "bdb0060-panic-fatal-region-error-detected-run-recovery"
+title = "BDB0060 PANIC: fatal region error detected; run recovery"
+
++++
+
+
+Recently I noticed the Github webhook that notified the Brainvitamins website of changes to my [resume](https://github.com/cqcallaw/resume) was bringing the site to its knees. Each time the webhook was triggered, the Apache error log was flooded with the following error:
+
+> BDB0060 PANIC: fatal region error detected; run recovery
+> BDB0060 PANIC: fatal region error detected; run recovery
+> BDB0060 PANIC: fatal region error detected; run recovery
+> [repeat ad infinitum until the server runs out of disk space]
+
+It seems the recent Ghost upgrade somehow corrupted the Apache installation; it was necessary to back up my Apache configuration files and purge the Apache installation (something akin to `sudo apt remove --purge apache2 && sudo apt --purge autoremove`) to resolve the issue. I found very little information about this error online; hopefully this post will help some other lost soul encountering a similar issue.
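+
+For reference, here's a rough sketch of the backup-and-purge sequence I have in mind; the backup path is arbitrary, and your configuration layout may differ:
+
+```
+$ sudo cp -a /etc/apache2 ~/apache2-config-backup   # preserve the existing configuration
+$ sudo apt remove --purge apache2 && sudo apt --purge autoremove
+$ sudo apt install apache2
+$ sudo cp -a ~/apache2-config-backup/. /etc/apache2 # restore the saved configuration
+$ sudo systemctl restart apache2
+```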
+
diff --git a/content/blog/borderlands-3-in-proton.md b/content/blog/borderlands-3-in-proton.md
new file mode 100644
index 0000000..bac837d
--- /dev/null
+++ b/content/blog/borderlands-3-in-proton.md
@@ -0,0 +1,23 @@
++++
+author = "Caleb Callaway"
+date = 2020-03-28T19:05:09Z
+description = ""
+draft = false
+slug = "borderlands-3-in-proton"
+title = "Borderlands 3 in Proton"
+
++++
+
+
+Borderlands 3 recently became available through Steam, and I'm happy to report it plays quite well in Proton once the commonly available Media Foundation work-arounds are installed. My Nvidia GTX 1080 yields a respectable 50 FPS at 2560x1440 with "Badass" quality settings.
+
+Out of the box, I noticed a lot of choppiness in the framerate that disappears after the first few minutes of gameplay, even with the lowest quality settings. This is consistent with shader cache warmup issues, so I configured a dedicated, persistent shader cache with [Steam launch options](https://support.steampowered.com/kb_article.php?ref=1040-JWMT-2947):
+
+```
+__GL_SHADER_DISK_CACHE='1' __GL_SHADER_DISK_CACHE_PATH='/home/caleb/tmp/nvidia/shaders/cache' __GL_SHADER_DISK_CACHE_SKIP_CLEANUP='1' %command%
+```
+
+My GPU doesn't share a power budget with the CPU, so I also configured the [performance CPU frequency governor](https://support.feralinteractive.com/en/mac-linux-games/shadowofthetombraider/faqs/cpu_governor/).
+
+With the tweaks, the game itself is quite playable, though I still see some stutter in the benchmark mode that's not present when the benchmark runs in Windows. Benchmarking data is limited to the average FPS number, which makes quantifying the choppiness difficult. The statistic of interest for choppiness would be the *minimum* FPS, but I haven't found a tool for logging this data. Suggestions?
+
diff --git a/content/blog/cinnamon-raisin-oatmeal.md b/content/blog/cinnamon-raisin-oatmeal.md
new file mode 100644
index 0000000..a50fd04
--- /dev/null
+++ b/content/blog/cinnamon-raisin-oatmeal.md
@@ -0,0 +1,13 @@
++++
+author = "Caleb Callaway"
+date = 2020-01-01T10:14:53Z
+description = ""
+draft = false
+slug = "cinnamon-raisin-oatmeal"
+title = "My Favorite Cinnamon Raisin Oatmeal Method"
+
++++
+
+
+For me, cinnamon raisin oatmeal is the addition of cinnamon, sugar, and raisins to a basic oatmeal recipe. For oatmeal recipes that call for boiling water, I like to boil the water together with cinnamon sugar, so my oatmeal is cooked in what is effectively a light cinnamon simple syrup.
+
diff --git a/content/blog/clear-ice.md b/content/blog/clear-ice.md
new file mode 100644
index 0000000..5c2e25c
--- /dev/null
+++ b/content/blog/clear-ice.md
@@ -0,0 +1,17 @@
++++
+author = "Caleb Callaway"
+date = 2016-05-15T05:01:00Z
+description = ""
+draft = false
+slug = "clear-ice"
+title = "Clear Ice"
+
++++
+
+
+After many weeks of experimentation with a variety of mechanisms for generating clear, pure ice, I impulse-bought the [Ice Chest](http://www.wintersmiths.com/collections/all/products/ice-chest). I haven't achieved the level of perfection seen in the product pictures, but the clarity of the ice is categorically superior to ordinary ice, and I recommend the product.
+
+It's common knowledge that pure ice is beautiful and lasts longer, but one quality that I particularly enjoy is the taste: the directional freezing process has a very pronounced purifying effect, so I doubly recommend a directional freezing solution if your tap water has an unpleasant after-taste.
+
+There's a lot of information about directional freezing on the internet, but verifying the efficacy of the process is quite simple: just fill an insulated vessel such as an insulated lunch box or vacuum flask half-full of water and leave it uncovered in the freezer for 24 hours. Don't fill the vessel completely or the expanding ice may cause deformation.
+
diff --git a/content/blog/cold-brew-at-home-2.md b/content/blog/cold-brew-at-home-2.md
new file mode 100644
index 0000000..2879bb1
--- /dev/null
+++ b/content/blog/cold-brew-at-home-2.md
@@ -0,0 +1,35 @@
++++
+author = "Caleb Callaway"
+date = 2017-01-07T18:17:46Z
+description = ""
+draft = false
+slug = "cold-brew-at-home-2"
+title = "Cold Brew At Home"
+
++++
+
+
+Over the past year, I've experimented extensively with cold brew at home, spending too much on equipment and gadgets. This post is a distillation of my learnings.
+
+# Equipment
+I was extremely dissatisfied with the Bruer device that shows up in a lot of search results; clean-up is easy, but setting the drip rate is fussy and repeatable brew results are almost impossible to achieve. Instead, I recommend the [OXO cold brew tower](https://www.oxo.com/cold-brew-coffee-maker). $50 is not bank-breaking, and the hassle-free clean up is well worth it.
+
+A kitchen scale is a requirement as well; I'm reasonably satisfied with [OXO's 5-pound scale](https://www.oxo.com/products/preparing/measuring/5lb-food-scale-w-pull-out-display#black), but I find myself wanting a higher precision readout when I'm mixing drinks.
+
+If you want to grind your own beans, a good burr grinder is worth investigating. A medium grind setting seems to work well.
+
+# Coffee Selection
+The number of sources, blends, roasts, etc. can be overwhelming; if you don't know what coffee to use, start with a medium roast house blend, then experiment.
+
+# Cold Brew Mocha Recipe
+
+* 1 oz. chocolate syrup (I use and heartily recommend Torani's [Dark Chocolate Sauce](http://shop.torani.com/Dark-Chocolate-Sauce/p/TOR-780001&c=Torani@Sauces))
+* .5 oz. heavy whipping cream
+* 5.5 oz. 2% milk
+* 2 oz. water
+* 3 oz. cold brew concentrate
+
+Blend ingredients together thoroughly in a blender, and serve over ice (I use [ice balls](https://www.brainvitamins.net/blog/clear-ice/)).
+
+The quantities might seem strange, but are designed to sum up to 12 oz. The ratios of milk and water can be tweaked to taste, but I find more than 1/2 an ounce of cream makes the drink too rich, and less than 5 oz of milk makes the drink more watery than I like.
+
diff --git a/content/blog/cold-brew-recipes.md b/content/blog/cold-brew-recipes.md
new file mode 100644
index 0000000..61067a9
--- /dev/null
+++ b/content/blog/cold-brew-recipes.md
@@ -0,0 +1,42 @@
++++
+author = "Caleb Callaway"
+date = 2019-01-09T08:38:20Z
+description = ""
+draft = false
+slug = "cold-brew-recipes"
+title = "Cold Brew Recipes"
+
++++
+
+
+This post builds on the basic information in [my previous cold brew post](https://www.brainvitamins.net/blog/cold-brew-at-home-2/) with more recipes and preparation ideas.
+
+# With Cream and Sugar
+* 2 oz. cold brew concentrate
+* 6 oz. water
+* 1/2 oz. simple syrup
+* 1/2 oz. heavy or whipping cream
+
+For a hot beverage, heat everything except the cream to about 160° F, then add the cream and enjoy. For a cold beverage, mix everything together with ice.
+
+## Extra Creamy
+For an extra creamy cup, substitute unsweetened almond milk for water in the "Cream and Sugar" recipe.
+I don't enjoy the flavor of hot almond milk, so I prefer to put this variant on ice.
+
+# Caffe Latte
+The essential structure of the drink is cold brew concentrate (replacing the espresso shot in a traditional latte), sweetener, flavorings, and frothed milk. These ratios work well for me:
+
+* 2 oz. cold brew concentrate
+* One of the following flavor options:
+  * 1/2 oz. simple syrup with 1/2 teaspoon vanilla or hazelnut extract
+  * -OR- 1/2 oz. flavored syrup (e.g. Torani or Monin)
+* 6 oz. frothed milk
+
+Any milk that can be frothed should work. Cow's milk is a classic; almond milk also works well for cold beverages. If the almond milk contains sweetener, reduce the added sweetener as necessary. I highly recommend the [Breville Milk Cafe](https://www.breville.com/us/en/products/coffee/bmf600.html) for frothing milk; the cheap, hand-held whisks are too messy, and steamer wands are usually attached to bulky espresso machines.
+
+For a hot beverage, heat everything except the milk in the microwave for about 45 seconds; I aim for just over 160° F, measured with a temperature gun. Froth the milk, then pour the frothed milk into the flavored hot coffee concentrate.
+
+For a cold beverage, skip the heating step and add ice at the end.
+
+## Blended Caffe Latte
+With the Milk Cafe, one can froth the cold brew and flavorings together with the milk; the result is a light, coffee-flavored milk froth that can be enjoyed hot or cold. One could probably get a similar effect with a blender.
+
diff --git a/content/blog/comparing-confidence-intervals.md b/content/blog/comparing-confidence-intervals.md
new file mode 100644
index 0000000..81a993b
--- /dev/null
+++ b/content/blog/comparing-confidence-intervals.md
@@ -0,0 +1,79 @@
++++
+author = "Caleb Callaway"
+date = 2020-08-21T06:34:15Z
+description = ""
+draft = false
+slug = "comparing-confidence-intervals"
+title = "Benchmark Confidence Interval Part 2: Comparison"
+
++++
+
+
+Benchmark data generally isn't interesting in isolation; once we have one data set, we usually gather a second set of data to compare against the first. Reporting the second result as a percentage of the first result isn't sufficient if we're rigorous and report results with [confidence intervals](https://www.brainvitamins.net/blog/confidence-intervals-for-benchmarks/); we need a more nuanced approach.
+
+Let's suppose we run a benchmark 5 times and record the results, then fix a performance bug and gather a second set of data to measure the improvement. The best intuition about performance gains is given by scores and confidence intervals that are [normalized](https://en.wikipedia.org/wiki/Normalization_(statistics)) using our baseline geomean score:
+
+|          | Geomean Score | 95% Confidence Interval | Normalized Score | Normalized CI |
+|----------|---------------|-------------------------|------------------|---------------|
+| Baseline | 74.58         | 1.41                    | 100.00%          | 1.88%         |
+| Fix      | 77.76         | 2.92                    | 104.26%          | 3.91%         |
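+
+As a concrete illustration, here's a minimal Python sketch that produces this kind of table; the per-run scores are invented for illustration, and the 95% interval is approximated as 1.96 standard deviations under the normality assumption from part 1:
+
+```python
+import statistics
+from math import prod
+
+# Hypothetical per-run scores from five iterations of each configuration.
+baseline = [73.1, 74.9, 75.6, 73.8, 75.5]
+fix = [75.2, 79.1, 78.9, 76.4, 79.3]
+
+def geomean(xs):
+    return prod(xs) ** (1 / len(xs))
+
+def ci95(xs):
+    # ~95% of normally distributed results fall within 1.96 standard deviations
+    return 1.96 * statistics.stdev(xs)
+
+base_gm = geomean(baseline)  # everything is normalized against this value
+for name, scores in (("Baseline", baseline), ("Fix", fix)):
+    gm, ci = geomean(scores), ci95(scores)
+    print(f"{name}: {gm:.2f} +/- {ci:.2f} ({gm / base_gm:.2%} +/- {ci / base_gm:.2%})")
+```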
+
+All normalization is done using the _same baseline_, even the bug fix confidence interval. One can work out the normalized confidence intervals for a baseline score of `100 +/- 1` and a second score of `2 +/- 1` to see why this must be so.
+
+Now, let's visualize (using a LibreOffice Calc chart with custom [Y error bars](https://help.libreoffice.org/3.3/Chart/Y_Error_Bars)):
+
+![ci-comparison-v1-1](/blog/content/images/2020/08/ci-comparison-v1-1.png)
+
+Woops! The confidence intervals overlap; something's wrong here. We can't be confident our performance optimization will reliably improve the performance of the benchmark unless 95% of our new results fall outside 95% of our old results. Something is dragging down our score and we cannot confidently reject our [null hypothesis](https://en.wikipedia.org/wiki/Null_hypothesis).
+
+The root causes for such negative results are rich and diverse, but for illustrative purposes, let's suppose we missed an edge case in our performance optimization that interacted badly with a power management algorithm. Our intrepid product team has fixed this issue, and now we have:
+
+|          | Geomean Score | 95% Confidence Interval | Normalized Score | Normalized CI |
+|----------|---------------|-------------------------|------------------|---------------|
+| Baseline | 74.58         | 1.41                    | 100.00%          | 1.88%         |
+| 2nd Fix  | 80.18         | 1.63                    | 107.51%          | 2.18%         |
+
+![ci-comparison-v2](/blog/content/images/2020/08/ci-comparison-v2.png)
+
+Much better; we can confidently reject the null hypothesis and assert that our latest fix has indeed improved performance of this benchmark.
+
+_Many thanks to Felix Degrood for his help in developing my understanding of these concepts and tools_
+
diff --git a/content/blog/complex-type-syntax.md b/content/blog/complex-type-syntax.md
new file mode 100644
index 0000000..8625075
--- /dev/null
+++ b/content/blog/complex-type-syntax.md
@@ -0,0 +1,40 @@
++++
+author = "Caleb Callaway"
+date = 2016-04-25T01:23:42Z
+description = ""
+draft = false
+slug = "complex-type-syntax"
+title = "New Complex Type Syntax"
+
++++
+
+
+As part of the on-going build-out of recursive types in newt, complex types have been re-worked such that every complex type is a dictionary of type declarations (previously, record types were a dictionary of _values_, with special logic to generate modified copies of this dictionary). In this new model, type declarations that reference existing types are implemented as type _aliases_. Thus, in the following type declaration, `person.age` is an alias for `int`, `person.name` is an un-aliased record type, and `person.name.first` and `person.name.last` both alias the built-in type `string`.
+
+```
+person {
+    age:int,
+    name {
+        first:string,
+        last:string
+    }
+}
+```
+
+For purposes of assignment and conversion, a type alias is directly equivalent to the type it aliases.
+
+The `struct` keyword is notably absent from the previous record type definition, and there are now commas separating type members. These are not accidents, as the re-worked type declarations allow for arbitrarily nested type definitions, and repeated use of the `struct` and `sum` keywords felt heavy and inelegant. For this (primarily aesthetic) reason, the keywords are omitted from the nested types, and to maintain a uniform, non-astonishing grammar, the keyword is omitted from the top-level complex type declarations as well.
+
+Omission of the keywords requires another mechanism for differentiating sum and product types, however, so members of record types must now be comma-delimited, while sum type variants are delimited by a vertical bar (that is, a pipe). In this new syntax, a linked list of integers might be expressed as follows:
+
+```
+list {
+    end
+    | item {
+        data:int,
+        next:list
+    }
+}
+```
+The new syntax very closely matches the [proposed syntax for map literals](https://github.com/cqcallaw/newt/issues/11), which is a nice isomorphism, but does raise concerns about legibility issues. Time will tell.
+
diff --git a/content/blog/confidence-intervals-for-benchmarks.md b/content/blog/confidence-intervals-for-benchmarks.md
new file mode 100644
index 0000000..c4762fd
--- /dev/null
+++ b/content/blog/confidence-intervals-for-benchmarks.md
@@ -0,0 +1,25 @@
++++
+author = "Caleb Callaway"
+date = 2020-08-19T04:03:10Z
+description = ""
+draft = false
+slug = "confidence-intervals-for-benchmarks"
+title = "Confidence Intervals for Benchmarks"
+
++++
+
+
+When benchmarking, [confidence intervals](https://www.mathsisfun.com/data/confidence-interval.html) are a standard tool that gives us a reliable measure of how much run-to-run variation occurs for a given workload.
+For example, if I run several iterations of the Bioshock benchmark and score each iteration by recording the average FPS, I might report Bioshock’s average (or [geomean](https://medium.com/@JLMC/understanding-three-simple-statistics-for-data-visualizations-2619dbb3677a)) score as `74.74` FPS with a 99% confidence interval of `0.10`. By reporting this result, I'm predicting that 99% of Bioshock scores on this platform configuration will fall between 74.64 and 74.84 FPS.
+
+Unless otherwise noted, confidence intervals assume the data is normally distributed:
+
+[![QuaintTidyCockatiel-size_restricted](/blog/content/images/2020/08/QuaintTidyCockatiel-size_restricted.gif)](https://gfycat.com/quainttidycockatiel)
+
+Each pebble in the demonstration represents one benchmark result. Our normal curve may be thinner and taller (or shorter and wider), but the basic shape is the same; most of the results will cluster around the mean, with a few outliers.
+
+Normally distributed data means our 95% confidence interval will be smaller than our 99% confidence interval; 95% of the results will be clustered more closely around the mean value. If our 99% confidence interval is `[74.64, 74.84]`, our 95% interval might be `+/- 0.06`, or `[74.68, 74.80]`. The 100% confidence interval is always `[-infinity, +infinity]`; we’re 100% confident that every measured result will fall somewhere on the number line.
+
+Computing the averages of averages is not always statistically sound, so it may seem incorrect to take the average FPS from each iteration of a benchmark and average them together. In this case we can confidently say that each average has [equal weight](https://math.stackexchange.com/questions/95909/why-is-an-average-of-an-average-usually-incorrect/95912#95912); if not, we need a different benchmark!
+
+Next: [Comparing Benchmark Results](https://www.brainvitamins.net/blog/comparing-confidence-intervals/)
+
diff --git a/content/blog/dissension.md b/content/blog/dissension.md
new file mode 100644
index 0000000..edd3fa9
--- /dev/null
+++ b/content/blog/dissension.md
@@ -0,0 +1,15 @@
++++
+author = "Caleb Callaway"
+date = 2016-04-08T06:59:54Z
+description = ""
+draft = false
+slug = "dissension"
+title = "Dissension"
+
++++
+
+
+Via BoingBoing, I recently encountered an [interesting read](http://www.theguardian.com/society/2016/apr/07/the-sugar-conspiracy-robert-lustig-john-yudkin) about how the scientific consensus about diet was influenced by decidedly unscientific means. Ironically, the article is published by an organization that is at least roughly speaking a newspaper, while explicitly mentioning that newspapers have a credibility problem. Informed individuals who would say Yudkin was a fraud may well exist; I suppose that readers will believe whatever they believe.
+
+I personally find the narrative of corruptible science believable, which is why I think it dangerous to categorically dismiss dissenters from any scientific consensus as fools. Deniers of anthropogenic climate change may in fact be Yudkins, however small the probability might be.
+
diff --git a/content/blog/ethereum-mining.md b/content/blog/ethereum-mining.md
new file mode 100644
index 0000000..b850604
--- /dev/null
+++ b/content/blog/ethereum-mining.md
@@ -0,0 +1,82 @@
++++
+author = "Caleb Callaway"
+categories = ["cryptocurrency"]
+date = 2020-05-02T20:52:58Z
+description = ""
+draft = false
+slug = "ethereum-mining"
+tags = ["cryptocurrency"]
+title = "Mining Ethereum for Fun"
+
++++
+
+
+# Step 1: Get a Wallet
+
+To store the fruits of your computational labor, you will need an [Ethereum wallet](https://ethereum.org/wallets/). [Metamask](https://metamask.io/index.html) seems to be popular; [Atomic Wallet](https://atomicwallet.io/) is a decent stand-alone option. The wallet's *address* is shared with other agents to receive payment.
+
+Most wallets offer plenty of advice about best practices for securing a wallet; follow it. Best practices usually reduce to the following:
+
+1. Never share your password or your private key
+2. There's no customer service center for crypto; make a backup
+3. Be careful about sharing personally identifying information (more on this below)
+
+# Step 2: Find Some Compute Power
+A high-end GPU is the most common way to start mining. The best bang for one's buck changes as new hardware is released; a [quick search](https://duckduckgo.com/?q=ethereum+gpu+hashrates) should yield current data. Key metrics are *hashes per second* and *power consumption*.
+
+For reference, the hashrate for my Nvidia GTX 1080 in its stock configuration is approximately 20.5 MH (megahashes) per second. As of this writing, 20.5 MH/s is enough to get started, but still squarely in hobbyist territory; more serious mining requires more serious hardware.
+
+# Step 3: Get Software, Join a Pool
+The software required for mining Ether is generally quite mature at this point. [ethminer](https://github.com/ethereum-mining/ethminer) seems to work well, though the latest "stable" release of ethminer has stability issues; I recommend [building from source](https://github.com/ethereum-mining/ethminer/blob/master/docs/BUILD.md).
+
+With software in hand, one must join a mining pool; [ethermine](https://ethermine.org/) is a reasonable starting point. Key metrics for mining pools are *pool fees* and *payout schedules*. Every Ethereum transaction [has a transaction cost](https://ethereum.stackexchange.com/questions/3/what-is-meant-by-the-term-gas), so mining pools usually require a miner to reach a certain reward threshold before a payout is sent.
+
+# Step 4: Testing
+```
+$ ~/src/ethminer/build/ethminer/ethminer -G -P stratum1+ssl://0xf7318Ac0253B14f703D969483fF2908b42b261cc.demo@us1.ethermine.org:5555
+```
+
+* -G selects OpenCL mining. I haven't noticed any difference in hashrate between OpenCL and CUDA, so I opt for openness.
+* -P specifies the pool in which I wish to participate.
+* 0xf7318Ac0253B14f703D969483fF2908b42b261cc is the address of the Ethereum wallet I created for demonstration purposes. Use your own wallet address here (or don't; I won't complain if you mine for me!)
+* "demo" is a unique identifier for this mining platform, useful for differentiating multiple mining platforms.
+
+Mining pools have various payout rules. The ethermine.org minimum payout is 0.5 ETH; if you don't mine enough to hit that minimum in a week's time, your unpaid balance is swept to your wallet. The current status of my mining operation can be seen at https://ethermine.org/miners/0xf7318Ac0253B14f703D969483fF2908b42b261cc/dashboard.
+The mining operation for every wallet address will have a similar dashboard.
+
+The dashboard URL neatly demonstrates an important property of cryptocurrencies; no personal information was required to start mining, but since I've identified myself as the owner of wallet address 0xf7318Ac0253B14f703D969483fF2908b42b261cc by writing this blog post, you know exactly how much Ethereum I've mined. You can also [trace every transaction](https://www.etherchain.org/account/0xf7318Ac0253B14f703D969483fF2908b42b261cc) I make with this wallet. Every cryptocurrency transaction is a matter of public record, so think carefully before associating your personal information with a wallet!
+
+# Step 5: Automation
+
+ethminer occasionally loses its connection to the mining pool and then terminates, so I created a small user systemd service to automatically restart mining:
+
+```bash
+$ mkdir -p ~/.config/systemd/user/
+
+# edit ~/.config/systemd/user/ethminer.service so it contains the following contents:
+$ cat ~/.config/systemd/user/ethminer.service
+[Unit]
+Description=Ethminer Ethereum Miner Daemon
+
+[Service]
+ExecStart=/home/caleb/src/ethminer/build/ethminer/ethminer -G -P stratum1+ssl://0x656d98Fe99fA98D4d38e45173203e8BFc881DD0C.pilgrim@us1.ethermine.org:5555 --cl-local-work 256 --cl-global-work 268435456
+Restart=always
+RestartSec=5s
+
+[Install]
+WantedBy=default.target
+
+$ systemctl --user daemon-reload # reload service definitions
+$ systemctl --user start ethminer.service # start service
+$ journalctl --follow --user # follow service log
+$ systemctl --user stop ethminer.service # stop service
+$ systemctl --user enable ethminer.service # auto-start service when user logs in
+```
+More details about systemd user services can be found [on the Arch wiki](https://wiki.archlinux.org/index.php/Systemd/User).
+
+# Useful Links
+
+* https://ethereum.org/learn/
+* https://unblock.net/cryptocurrency-consensus-algorithms/
+
+_If you enjoyed this intro and would like to support the blog, please feel free to tip me at Ethereum address 0xf7318Ac0253B14f703D969483fF2908b42b261cc_
+
diff --git a/content/blog/ghost-1-0.md b/content/blog/ghost-1-0.md
new file mode 100644
index 0000000..05b9acb
--- /dev/null
+++ b/content/blog/ghost-1-0.md
@@ -0,0 +1,42 @@
++++
+author = "Caleb Callaway"
+date = 2018-01-17T12:32:52Z
+description = ""
+draft = false
+slug = "ghost-1-0"
+title = "Ghost 1.0"
+
++++
+
+
+As you may notice, the blog got an update; specifically, Ghost was migrated from version 0.11 to version 1.20. I've been putting the upgrade off for a while, since I expected the upgrade to be tedious and frustrating. I wasn't wrong.
+
+I'm philosophically opposed to switching to nginx to accommodate a single webapp, but there's no official support for Apache anymore, so I spent many hours puzzling out a valid Apache config (using Apache 2.4):
+
+```
+<Location /blog>
+    ProxyPreserveHost On
+    ProxyPass http://127.0.0.1:2368/blog
+    # For my HTTPS-enabled site, this header was required;
+    # Ghost generated redirect loops if it wasn't set.
+    # The ugly condition appears to be a requirement;
+    # the REQUEST_SCHEME environment variable was null for me
+    <If "%{HTTPS} == 'on'">
+        RequestHeader set X-Forwarded-Proto "https"
+    </If>
+    <Else>
+        RequestHeader set X-Forwarded-Proto "http"
+    </Else>
+    ProxyPassReverse http://127.0.0.1:2368/blog
+</Location>
+```
+
+The rest of the [migration process](https://docs.ghost.org/docs/migrating-to-ghost-1-0-0) mostly "Just Works."
+
+Essential commands:
+
+* (on the server) `sudo netstat -tlnp` to verify Ghost is running. The program name should be "node".
+The TCP port on which Ghost is listening is listed in the Local Address column.
+* (on the server) `ghost stop && sudo nc -l 2368` to get a debug dump of the proxy requests coming from Apache. Replace 2368 with the correct port as needed.
+* (on a client) `curl -v -L https://www.brainvitamins.net/blog/` to get a dump of the information observed by a client.
+
diff --git a/content/blog/hiding-name-errors-with-short-circuit-logic.md b/content/blog/hiding-name-errors-with-short-circuit-logic.md
new file mode 100644
index 0000000..da951d7
--- /dev/null
+++ b/content/blog/hiding-name-errors-with-short-circuit-logic.md
@@ -0,0 +1,47 @@
++++
+author = "Caleb Callaway"
+date = 2017-05-12T09:01:35Z
+description = ""
+draft = false
+slug = "hiding-name-errors-with-short-circuit-logic"
+title = "Hiding name errors with short-circuit logic"
+
++++
+
+
+A simple Python program:
+
+```
+flag1 = False
+flag2 = True
+if False:
+    nested_flag = False
+
+# No NameError here!
+if flag1 and nested_flag:
+    print('Stuff!')
+else:
+    print('No Stuff!')
+
+#Causes a NameError
+if flag2 and nested_flag:
+    print('Stuff Again!')
+else:
+    print('No Stuff Again!')
+```
+
+Sample output:
+```
+Python 3.5.2 (default, Dec 2015, 13:05:11)
+[GCC 4.8.2] on linux
+
+No Stuff!
+Traceback (most recent call last):
+  File "python", line 11, in <module>
+NameError: name 'nested_flag' is not defined
+```
+
+The first conditional does not cause a NameError, presumably because the Python interpreter doesn't evaluate the second operand of the Boolean `AND` operation if the first operand evaluates to False. This is a reasonable optimization, but omitting all processing of the second operand hides the name error until very specific runtime conditions are met. In production code, the state of `flag1` may only be true 1% of the time or depend on user interactions or database reads, so this class of errors can be very difficult to catch, even with a well-designed test plan.
+
+The solution is simple: analyse the code for semantic correctness--including name errors--before the code ever runs. Fail early, fail fast.
+
diff --git a/content/blog/inbox-management.md b/content/blog/inbox-management.md
new file mode 100644
index 0000000..71e53ab
--- /dev/null
+++ b/content/blog/inbox-management.md
@@ -0,0 +1,42 @@
++++
+author = "Caleb Callaway"
+date = 2018-07-15T02:34:13Z
+description = ""
+draft = false
+slug = "inbox-management"
+title = "Inbox Management"
+
++++
+
+
+I may one day think myself uncharitable for saying so, but I'm increasingly convinced that folks working in technology who can't manage their email inboxes are failing as knowledge workers. There's some excuse for people whose brains aren't trained to think about automation all day, but anyone capable of [fizz buzz](http://wiki.c2.com/?FizzBuzzTest) should be able to solve this problem.
+
+My solution is simple:
+
+```
+For every incoming email message:
+1. Read it, and take any required action.
+2. If reading the email was a waste of time, filter it.
+3. When you're done with the email, archive it.
+```
+
+That's it! Not exactly rocket science. Details follow.
+
+# Bootstrapping
+If your email inbox is already a mess, you need to start with a good baseline. Mark any backlog of unread messages as read. It's too late to process it; just let it go. It's okay to keep credit card bill notifications around until they're paid, but everything that doesn't require immediate or urgent action must be archived. Ditch the ads; unsubscribe if possible.
+This is also a good time to define or update your email filtering rules.
+
+# Read It and Take Action
+An unread email is an email that requires your attention. Give it your attention, then make sure it's marked so it doesn't need your attention again.
+
+It's not necessary or appropriate to act on everything, but if you decide action is required, act! It's okay to defer action if you need to collect your thoughts or put a rate limit on your communication, but don't delay too long; 24 hours is usually an acceptable delay, particularly when multiple timezones are involved.
+
+# Filter It
+Flag spam. Use filters. Unsubscribe from mailing lists in which you no longer participate. Don't be a forwarding service. Boost your [signal-to-noise](https://en.wikipedia.org/wiki/Signal-to-noise_ratio) ratio.
+
+Every step of the procedure is required, but things fall apart fastest if this step is skipped. The filter step prevents email from consuming all of your productive time.
+
+# Archive It
+The invitation to last year's team-building event is no longer relevant. The Christmas e-card from a distant acquaintance to which you never replied isn't calling you to action, it's just clutter. If you expect to reference an email later, use folders or tags.
+
+Inbox zero is a worthy goal, but it's often impractical. Focus on keeping every inbox item meaningful and relevant.
+
diff --git a/content/blog/just-dont-break-the-law.md b/content/blog/just-dont-break-the-law.md
new file mode 100644
index 0000000..94cf659
--- /dev/null
+++ b/content/blog/just-dont-break-the-law.md
@@ -0,0 +1,23 @@
++++
+author = "Caleb Callaway"
+categories = ["security"]
+date = 2016-02-26T03:52:28Z
+description = ""
+draft = false
+slug = "just-dont-break-the-law"
+tags = ["security"]
+title = "\"Just don't break the law\""
+
++++
+
+
+One common refrain heard from supporters of increased government surveillance is that the innocent have nothing to hide, or as I recently heard it expressed, "just don't break the law!"
+
+I'm sure supporters of increased government surveillance are motivated by a desire to keep people safe. I want to keep people safe too, which is why I oppose increased government surveillance.
+
+Even the innocent can come under suspicion of guilt. Those who believe they can escape becoming a false positive in a government surveillance system are critically ignorant of the manner in which such systems are built. Becoming a false positive is no joke; the suspicion of guilt can have dire consequences (e.g. [Maher Arar](https://en.wikipedia.org/wiki/Maher_Arar)), particularly for those who don't enjoy the right to [presumption of innocence](https://en.wikipedia.org/wiki/Presumption_of_innocence). Increased government surveillance is dangerous for the falsely accused, those proven guilty by circumstantial evidence, and those who have been maliciously framed.
+
+Moreover, laws that are plainly unjust and immoral should be disobeyed; Rosa Parks' refusal to give up her seat is often given as a canonical example. Increased government surveillance is dangerous for those who break the law because they know it's the right thing to do.
+
+Keep the people safe: restrict government surveillance.
+
diff --git a/content/blog/managing-the-namespace.md b/content/blog/managing-the-namespace.md
new file mode 100644
index 0000000..0bab9a1
--- /dev/null
+++ b/content/blog/managing-the-namespace.md
@@ -0,0 +1,23 @@
++++
+author = "Caleb Callaway"
+date = 2016-01-30T04:20:12Z
+description = ""
+draft = false
+slug = "managing-the-namespace"
+title = "Managing the Namespace"
+
++++
+
+
+While deploying this blog, I found myself thinking deeply about the design of its URI and making it [cool](https://www.w3.org/Provider/Style/URI.html).
+
+Three choices come to mind:
+
+* www.brainvitamins.net (deploy at root)
+* blog.brainvitamins.net (deploy in a virtual host)
+* www.brainvitamins.net/blog (deploy in a path)
+
+The first option puts the blog front and center, and is a very attractive landing page, but leaves the entire server path in the hands of a single application (unless exceptions are made). The second option is fairly common practice, but assigning an entire host name to a single web application--establishing a functional equivalence between a "host" and an "application"--has always felt like a poor abstraction to me.
+
+The third option feels most correct, particularly since there are other [web applications](/tock) running on the server; the third option also provides the most room for growth and adaptation of the brainvitamins.net namespace. Flexibility is a desirable property for me: I don't want to write redirect/rewrite logic later because my blog moved. The lack of an attractive landing page and the reduced discoverability of the blog really bother me, but path deployment seems to best match my intentions for the blog's place in the Universe. We'll see how it goes.
+
diff --git a/content/blog/mass-effect-savegame-import.md b/content/blog/mass-effect-savegame-import.md
new file mode 100644
index 0000000..c1ccf46
--- /dev/null
+++ b/content/blog/mass-effect-savegame-import.md
@@ -0,0 +1,22 @@
++++
+author = "Caleb Callaway"
+date = 2020-08-16T20:05:31Z
+description = ""
+draft = false
+slug = "mass-effect-savegame-import"
+title = "Import Mass Effect Proton Savegame Into Mass Effect 2"
+
++++
+
+
+```
+$ mkdir -p ~/.steam/steamapps/compatdata/24980/pfx/drive_c/users/steamuser/My\ Documents/BioWare/Mass\ Effect\ 2/Save/ME1
+$ cp -vr ~/.steam/steamapps/compatdata/17460/pfx/drive_c/users/steamuser/My\ Documents/BioWare/Mass\ Effect/Save/* ~/.steam/steamapps/compatdata/24980/pfx/drive_c/users/steamuser/My\ Documents/BioWare/Mass\ Effect\ 2/Save/ME1
+```
+
+The Mass Effect save should now be available for import in the Mass Effect 2 New Game interface.
+
+The path prefixes here are the [WINE prefixes](https://linuxconfig.org/using-wine-prefixes) used by Proton. `17460` is the Steam game ID for [Mass Effect](https://store.steampowered.com/app/17460/Mass_Effect/); `24980` is the Steam game ID for [Mass Effect 2](https://store.steampowered.com/app/24980/Mass_Effect_2/).
+
+This quick tip brought to you by information from https://answers.ea.com/t5/Mass-Effect-2/PC-Importing-a-save-file-from-Mass-Effect-1/m-p/5810699#M7696
+
diff --git a/content/blog/mothballs.md b/content/blog/mothballs.md
new file mode 100644
index 0000000..2a60c4f
--- /dev/null
+++ b/content/blog/mothballs.md
@@ -0,0 +1,18 @@
++++
+author = "Caleb Callaway"
+date = 2018-12-27T02:25:05Z
+description = ""
+draft = false
+slug = "mothballs"
+title = "Mothballs"
+
++++
+
+
+Early in the development of [newt](https://github.com/cqcallaw/newt), two central questions formed that I found useful for focusing my work:
+
+1. Can it be done (that is, can the language be implemented)?
+2. Will people use it?
+
+I've done enough work to confidently answer "yes" to the first question. The second question is harder to answer definitively, but the small amount of evidence I have indicates the answer is "no." For this reason, I don't expect newt to get much more of my time or attention.
+
diff --git a/content/blog/namespace-collisions.md b/content/blog/namespace-collisions.md
new file mode 100644
index 0000000..147c850
--- /dev/null
+++ b/content/blog/namespace-collisions.md
@@ -0,0 +1,28 @@
++++
+author = "Caleb Callaway"
+date = 2016-03-25T12:20:23Z
+description = ""
+draft = false
+slug = "namespace-collisions"
+title = "Conflicting Names"
+
++++
+
+
+The 17 lines of code that "[broke the Internet](http://arstechnica.com/information-technology/2016/03/rage-quit-coder-unpublished-17-lines-of-javascript-and-broke-the-internet/)" are probably familiar to most by now, but I believe it's worth noting that the underlying issue already has a robust solution in the form of the Internet's [Domain Name System](https://en.wikipedia.org/wiki/Domain_Name_System).
+
+Put simply, the issue is that the name "kik" was ambiguous; that is, a [naming collision](https://en.wikipedia.org/wiki/Naming_collision) occurred. [Namespaces](https://en.wikipedia.org/wiki/Namespace) are a well-known solution to this problem, and npm supports namespaces in the form of [scopes](https://docs.npmjs.com/misc/scope), but in this case the scope used was the generic name `starters`. Mike Bostock of D3.js fame rightly observed:
+
+Disambiguation by author name is certainly a step in the right direction, but I believe that author names alone are insufficient, for author names _may also collide_: it's quite conceivable for some enterprising and talented developer to use the handle `kik` as their author name on the npm package registry. Furthermore, the namespacing issue is not just an issue for npm, but for all package management systems. I therefore assert that ad-hoc, unmanaged namespaces are unacceptably fragile and collision-prone, and that a centralized, managed namespace authority is the only permanent solution to this class of problems.
+
+The Internet's DNS is the most widely-used namespace authority on the planet, so there's precious little reason to build another one. It's slightly surprising that a package registry that is as Internet-oriented as npm hasn't reused this existing solution to the namespace problem, although I can respect the desire to simplify wherever possible.
+
+In any event, having an established convention of using an Internet domain name as a package namespace and thereby avoiding repetition and wheel-reinvention would have avoided the original issue entirely. The folks at Kik would release their package in the `com.kik` namespace, Koçulu would release his package in some personal domain space (e.g. `me.azer`), and there would be no need to unpublish, change ownership, or break the Internet. The idea isn't original, of course, but rather one of the things I believe [Java gets right](http://docs.oracle.com/javase/tutorial/java/package/namingpkgs.html).
+
+Registering a domain name is not hard, doesn't have to be expensive, and provides a namespace that is very likely to be universally unique and can be re-used in a number of contexts, such as software package namespaces. It also has the nice side-effect of surfacing trademark issues quickly.
+
+(Parenthetically, the linked article presents Koçulu as completely uninterested in negotiation and compromise with the "corporate dicks" at Kik. Perhaps facts have been omitted, but Koçulu appears to have engaged in exactly the same sort of intransigent behavior he finds so offensive, and therefore seems to be incredibly hypocritical.)
+
diff --git a/content/blog/ndisc_router_discovery-failed-to-add-default-route.md b/content/blog/ndisc_router_discovery-failed-to-add-default-route.md
new file mode 100644
index 0000000..3a610e2
--- /dev/null
+++ b/content/blog/ndisc_router_discovery-failed-to-add-default-route.md
@@ -0,0 +1,31 @@
++++
+author = "Caleb Callaway"
+date = 2016-03-14T09:10:55Z
+description = ""
+draft = false
+slug = "ndisc_router_discovery-failed-to-add-default-route"
+title = "ndisc_router_discovery() failed to add default route"
+
++++
+
+
+In Ubuntu 14.04 (and probably in other Debian derivatives), network interfaces with a statically configured IPv6 address and gateway will still accept [Neighborhood Discovery Protocol](https://en.wikipedia.org/wiki/Neighbor_Discovery_Protocol) Router Advertisements and attempt to process them, leading to log spam of the following form:
+
+```
+...
+ICMPv6 RA: ndisc_router_discovery() failed to add default route.
+ICMPv6 RA: ndisc_router_discovery() failed to add default route.
+ICMPv6 RA: ndisc_router_discovery() failed to add default route.
+...
+```
+
+To prevent this, disable acceptance of RAs by adding `accept_ra 0` to the static configuration, as follows:
+
+```
+iface eth0 inet6 static
+    address <address>
+    netmask <netmask>
+    gateway <gateway>
+    accept_ra 0
+```
+
diff --git a/content/blog/new-monitor.md b/content/blog/new-monitor.md
new file mode 100644
index 0000000..f851eb9
--- /dev/null
+++ b/content/blog/new-monitor.md
@@ -0,0 +1,17 @@
++++
+author = "Caleb Callaway"
+date = 2019-12-01T05:46:35Z
+description = ""
+draft = false
+slug = "new-monitor"
+title = "New Monitor"
+
++++
+
+
+[Skyrim SE](https://www.protondb.com/app/489830) looks fantastic at 2560x1440 on the [27 inch IPS ASUS ROG display](https://www.amazon.com/gp/product/B07HZSBW7V/ref=ppx_yo_dt_b_asin_title_o01_s00?ie=UTF8&psc=1) I picked up in a Black Friday sale. Works nicely with a [Flow monitor arm](https://www.hermanmiller.com/products/accessories/technology-support/flo-monitor-arms/) for maximum desktop real estate.
+
+The max refresh rate reported by Ubuntu is 144 Hz. I'm not terribly worried about it because the [Steam overlay's FPS counter](https://ccm.net/faq/40667-how-to-display-the-in-game-fps-counter-on-steam) shows Skyrim capped at 60 FPS, even with [vsync disabled system-wide](https://www.reddit.com/r/linux_gaming/comments/bmlywm/important_tips_for_steamplayprotondxvk_on_nvidia/). Apparently this is a [limitation of the game](https://steamcommunity.com/app/489830/discussions/0/312265589446946685/). Shadow of Mordor cheerfully exceeds 60 FPS but only if the FPS cap and vsync are disabled in-game, so I suspect a modeline reporting issue of some sort. `xrandr --query` reports '59.95 + 144.00' as the refresh rate.
+
+I've always thought it best for the environment and my bank account to use a piece of technology until it was completely broken and unusable, but this purchase has definitely made me reconsider that position. Maybe it's finally time to jump on the raytracing bandwagon.
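+
+For anyone else chasing refresh-rate oddities, here's the `xrandr` incantation I'd reach for to force a specific mode; the output name `DP-0` is an assumption, so substitute whatever `xrandr --query` reports for your connector:
+
+```
+$ xrandr --query                                    # list outputs and their available modes
+$ xrandr --output DP-0 --mode 2560x1440 --rate 144  # force the 144 Hz mode on output DP-0
+```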
+
diff --git a/content/blog/newt.md b/content/blog/newt.md
new file mode 100644
index 0000000..d351456
--- /dev/null
+++ b/content/blog/newt.md
@@ -0,0 +1,22 @@
++++
+author = "Caleb Callaway"
+date = 2017-12-31T05:30:07Z
+description = ""
+draft = false
+slug = "newt"
+title = "newt does I/O"
+
++++
+
+
+newt recently hit the (long-overdue) milestone of basic I/O. Examples:
+
+* https://github.com/cqcallaw/newt/blob/0.1/tests/t12000.nwt
+* https://github.com/cqcallaw/newt/blob/0.1/tests/t12001.nwt
+
+The examples rely on place-holder standard library definitions, specified in https://github.com/cqcallaw/newt/blob/0.1/tests/includes/io.nwt
+
+Engineering the supporting functionality (recursive functions, using blocks, function overloads, import mechanisms, etc.) took quite a bit of effort; memory management also chewed up a lot of time. I felt this cost was acceptable, since I've always intended for newt to be more than a proof-of-concept; I believe important data is lost in the implementation gaps and hand-waving that are frequently employed in POCs.
+
+Happy New Year!
+
diff --git a/content/blog/notes-on-agile-scrum.md b/content/blog/notes-on-agile-scrum.md
new file mode 100644
index 0000000..19fb65e
--- /dev/null
+++ b/content/blog/notes-on-agile-scrum.md
@@ -0,0 +1,23 @@
++++
+author = "Caleb Callaway"
+date = 2017-07-06T03:14:25Z
+description = ""
+draft = false
+slug = "notes-on-agile-scrum"
+title = "Notes on Agile/Scrum"
+
++++
+
+
+Things I've learned or observed recently:
+
+* Some say Scrum is just what good managers do instinctively. This is likely true, but Scrum articulates a minimum viable process; **Scrum doesn't have optional components.** Strong customer advocacy (product owner), strong team and process advocacy (Scrum master), and a motivated, self-organizing team form a stable tripod; if any of those components is lacking, the structure falls over.
+* Observing an organization's adherence to Scrum-like processes (even if the names don't match) strikes me as an excellent method for assessing the quality and health of an organization's management team.
+* Big commitments are the sum of many small commitments; if the team can't make and meet small commitments, it won't be able to keep large ones either.
+* Commitment implies an interrupt-hostile environment.
+* Letting go of interrupts is hard. People won't like it, and will push back.
+* Commitment to completion is a mutual agreement between the team and product owner. The team and product owner must both internalize this concept, or either one will become a source of interrupts.
+* Completion of a story may require input from other teams or results from a long-running simulation, but these waiting-for-input states are not comprehended by story point estimates; this is the difference between story points and units of time. Overhead should be considered when sizing stories, however; if a story requires minimal effort but won't fit in a sprint due to fixed costs, it's worth discussing how to decompose it into smaller units of work.
+* Assume customer requests are under-specified. The urge to shirk on acceptance criteria is strong; successful teams that delight customers will resist this urge.
+* As a first-order approximation, the Product Owner and Scrum Master roles are full-time. Start there, then adapt if needed.
+
diff --git a/content/blog/parallel-computing-use-short-threads.md b/content/blog/parallel-computing-use-short-threads.md
new file mode 100644
index 0000000..c03f698
--- /dev/null
+++ b/content/blog/parallel-computing-use-short-threads.md
@@ -0,0 +1,37 @@
++++
+author = "Caleb Callaway"
+date = 2019-05-22T08:13:23Z
+description = ""
+draft = false
+slug = "parallel-computing-use-short-threads"
+title = "Parallel Computing: Use Short Threads"
+
++++
+
+
+Suppose you have a parallel compute engine capable of executing 64 work items in parallel. Let's also suppose you have work for this compute engine and each individual work item takes 32 seconds to complete.
+
+We can dispatch 2, 5, 18, or 64 such work items to our compute engine and expect the work to complete in roughly 32 seconds, plus some overhead for dispatch and result aggregation. As soon as we dispatch 65 work items, execution time *doubles* from 32 seconds to 64 seconds--more than 1 minute. Not great!
+
+| Work Items (32 second Runtime) | Runtime (64 Compute Lanes) |
+|---------------------------------|----------------------------|
+| 2 | ~32s |
+| 5 | ~32s |
+| 18 | ~32s |
+| 64 | ~32s |
+| 65 | ~64s |
+
+Let's try again, re-engineering our work items so they do less work and only take 4 seconds to complete. The raw number of computations is the same, so we'll have to dispatch 8x more of the work items. The compute engine's dispatcher will be working harder, but the dispatchers in parallel compute engines are designed for this and should hide the overhead well. From our previous example, 2 work items becomes 16 work items, 5 work items becomes 40, 18 work items goes to 144 work items, and 64 work items becomes 512; 65 work items becomes 520.
+
+16 of our shorter work items now run in about 4 seconds, as does 40 work items. 144 work items run in about 12 seconds; 144 work items * 4 seconds per work item / 64 compute lanes, rounded up to the nearest multiple of 4. 512 work items run in about 32 seconds, so we don't get any speedup for the 64 -> 512 case. 520 work items only take about 36 seconds, compared to the 64 seconds for the 32-second work items. That's almost twice as fast.
+
+| Work Items (4 second Runtime) | Runtime (64 Compute Lanes) |
+|--------------------------------|----------------------------|
+| 16 | ~4s |
+| 40 | ~4s |
+| 144 | ~12s |
+| 512 | ~32s |
+| 520 | ~36s |
+
+Not all workloads are reducible by 8x, and we can't reduce work item size indefinitely because dispatch costs will eventually dominate running time (the lower bound is orders of magnitude less than 4 seconds), but as a rule of thumb, shorter work items are better for computing in parallel.
+
diff --git a/content/blog/passwords-are-kind-of-awesome-actually.md b/content/blog/passwords-are-kind-of-awesome-actually.md
new file mode 100644
index 0000000..2705929
--- /dev/null
+++ b/content/blog/passwords-are-kind-of-awesome-actually.md
@@ -0,0 +1,17 @@
++++
+author = "Caleb Callaway"
+date = 2019-09-30T11:53:48Z
+description = ""
+draft = false
+slug = "passwords-are-kind-of-awesome-actually"
+title = "Passwords Are Kind of Awesome, Actually"
+
++++
+
+
+I love passwords. If I sprinkle in a few special characters, passwords can be aspirations, affirmations, meditations, or prayers. A favorite silly movie quote. Dreams for the future. The moment I fell in love. Something noble, pure, or true.
+
+I don't use a password manager. I want the continual reminder that eventually becomes muscle memory.
+IT-mandated password rotations are wonderful; they force me to keep my focus points fresh.
+
+For real security, I enable [multi-factor authentication](https://www.nist.gov/itl/tig/back-basics-multi-factor-authentication)--but make sure it [isn't SMS](https://www.wired.com/2016/06/hey-stop-using-texts-two-factor-authentication/). For fun and profit, I keep typing my own ["correct horse battery staple"](https://www.xkcd.com/936/).
+
diff --git a/content/blog/presentations.md b/content/blog/presentations.md
new file mode 100644
index 0000000..499e1d6
--- /dev/null
+++ b/content/blog/presentations.md
@@ -0,0 +1,23 @@
++++
+author = "Caleb Callaway"
+date = 2016-04-17T01:21:37Z
+description = ""
+draft = false
+slug = "presentations"
+title = "Presentations"
+
++++
+
+
+To fulfil various academic requirements, I have given presentations on [newt](https://github.com/cqcallaw/newt). The first is a short presentation given at a Student Research Competition, and is a quick introduction to the philosophy of the language, with code samples that I believe to be very illustrative of the language's philosophy.
+
+The second presentation is older and somewhat out-of-date, but gives a greater level of detail.
+
diff --git a/content/blog/private-key-protection.md b/content/blog/private-key-protection.md
new file mode 100644
index 0000000..7fd408c
--- /dev/null
+++ b/content/blog/private-key-protection.md
@@ -0,0 +1,75 @@
++++
+author = "Caleb Callaway"
+categories = ["security"]
+date = 2016-02-17T06:13:54Z
+description = ""
+draft = false
+slug = "private-key-protection"
+tags = ["security"]
+title = "Private Key Protection"
+
++++
+
+
+Obtaining access to an unencrypted copy of one's private key completely compromises the security of key-based SSH authentication, so the security of one's private key is foundational to securing this form of authentication.
+
+Private keys are commonly secured with password-based encryption, but for some years now I have used a more physical means of securing access: my private key is stored on an encrypted flash drive which lives on my physical keyring and is detached from the computer when not in use. The private key is [symlinked](https://en.wikipedia.org/wiki/Symbolic_link) from a local folder so that it is not necessary to specify the identity file each time an SSH session is initiated. This configuration has the following advantages:
+
+* Enables transportation of a single private key between multiple workstations or laptops. Untrusted hosts will not be able to decrypt the flash drive without the passphrase.
+* The disk encryption key can be very long and difficult to crack, mitigating the risk of compromise in the event of a stolen or lost USB key, and providing an additional layer of encryption that must be broken to access the private key.
+* Theft of the flash drive is easily discovered, but recovering from theft does require one to store a duplicate flash drive in a secure location.
+
+# Howto
+This guide is written for Ubuntu 14.04 LTS, where encryption of external drives is trivial. The guide likely applies to other Linux distributions as well.
+
+1. Obtain a USB flash drive. The drive should be compact and easily attached to a physical keyring; something like the [LaCie PetiteKey](http://www.lacie.com/products/usb-keys/petitekey/) works well.
+
+2. Generate a secure disk encryption passphrase with [pwgen](http://manpages.ubuntu.com/manpages/trusty/man1/pwgen.1.html). You will need this passphrase for each computer on which you wish to use the private key.
+
+3. Encrypt the USB flash drive. There are [many](https://help.ubuntu.com/community/EncryptedFilesystemsOnRemovableStorage) [tutorials](http://www.makeuseof.com/tag/create-secure-usb-drive-ubuntu-linux-unified-key-setup/) for this. I recommend giving the encrypted volume a meaningful name like `secure-key`.
+
+    It's considered less secure to have the computer remember the encryption passphrase, but I recommend doing so if you directly control the computer. Remembering the encryption passphrase allows you to specify a longer, more secure passphrase than what you could reasonably enter by hand, and makes the process of activating key-based authentication much more streamlined and usable.
+
+4. In a [terminal](https://help.ubuntu.com/community/UsingTheTerminal), create a `.ssh` folder on the flash drive. In Ubuntu, the flash drive's mount point will be `/media/[username]/[volume name]`:
+
+        $ mkdir /media/caleb/secure-key/.ssh
+
+5. Fix the directory permissions of the `.ssh` folder:
+
+        $ chmod -R 0700 /media/caleb/secure-key/.ssh
+
+6. In a terminal, symlink your user's local `.ssh` folder to the `.ssh` folder on the encrypted flash drive:
+
+        $ ln -sf /media/caleb/secure-key/.ssh /home/caleb/.ssh
+
+    If you already have an existing `.ssh` folder with contents you wish to keep, simply rename the folder and move its contents after creating the symlink.
+
+7. Generate an SSH key if you don't already have one:
+
+        $ ssh-keygen -t rsa
+
+    Be sure to specify a strong passphrase when prompted to do so. I do not recommend caching this passphrase.
+
+8. Verify the key exists on your flash drive:
+
+        $ ls -al /media/caleb/secure-key/.ssh
+        -rw------- 1 caleb caleb 751 Dec 4 2011 id_dsa
+        -rw-r--r-- 1 caleb caleb 610 Apr 19 2010 id_dsa.pub
+        -rw------- 1 caleb caleb 1766 Aug 4 2011 id_rsa
+        -rw-r--r-- 1 caleb caleb 410 Oct 22 2010 id_rsa.pub
+
+    If all is well, you now have a physical layer of security for your SSH key.
+
+9. (Optional, but strongly recommended) [Make a backup](http://askubuntu.com/questions/318893/how-do-i-create-a-bit-identical-image-of-a-usb-stick) of the flash drive in case your primary key is lost or stolen.
+
+# Usage
+To disable key-based authentication, simply remove the USB flash drive (safely or otherwise) and attach it to your key chain. To re-enable key-based authentication, re-attach the USB flash drive. You should be prompted for the disk-level encryption passphrase unless you elected to have your computer remember it.
+
+# Environment Tweaks
+Alternative window managers may require some magic to automatically mount the encrypted flash drive. For instance, when using Gnome together with Xmonad, one must add the following line to `/usr/share/gnome-session/sessions/xmonad.session`:
+
+    ...
+    DesktopName=Unity
+
+However, doing so will cause another issue: the battery indicator will no longer be displayed. To get the battery indicator back, remove or disable the line `NotShowIn=Unity;` in the file `/etc/xdg/autostart/indicator-power.desktop`.
+
diff --git a/content/blog/proton-field-guide.md b/content/blog/proton-field-guide.md
new file mode 100644
index 0000000..208eca1
--- /dev/null
+++ b/content/blog/proton-field-guide.md
@@ -0,0 +1,108 @@
++++
+author = "Caleb Callaway"
+date = 2020-10-08T06:28:13Z
+description = ""
+draft = false
+slug = "proton-field-guide"
+title = "A Proton Field Guide"
+
++++
+
+
+[Proton](https://github.com/ValveSoftware/Proton/) is good. Backed by Valve Software, Proton builds on existing F/OSS projects like [Wine](https://www.winehq.org/) and [DXVK](https://github.com/doitsujin/dxvk) to provide Linux support for video games. Performance can be quite good; I've heard Proton outperforms poorly optimized native ports in some cases.
+
+# Initial Setup
+Valve provides [beta support](https://steamcommunity.com/games/221410/announcements/detail/1696055855739350561) for a small selection of titles. Less formal support is available for a larger set of titles through the "Steam Play->Enable Steam Play for all other titles" option in the Settings dialog.
+
+![enable-steam-play](/blog/content/images/2020/10/enable-steam-play.png)
+
+Steam downloads the Windows version of a Proton-enabled app, but still stores files in the familiar `~/.steam/steam/steamapps/common/[appname]` directory. On first launch, Proton provisions a Wine prefix (created in `~/.steam/steam/steamapps/compatdata/[appid]` by default) that holds pertinent state data and a basic Windows filesystem in `$WINEPREFIX/pfx/drive_c`. This filesystem is presented to the app as its root Windows filesystem. Save games are usually stored in the prefix as well, making save game import [complicated](https://www.brainvitamins.net/blog/mass-effect-savegame-import/).
+
+Windows applications frequently have additional dependencies like the [Visual C++ Runtime Redistributables](https://support.microsoft.com/en-us/help/2977003/the-latest-supported-visual-c-downloads), and apps may require specific workarounds to run well in Wine environments. Many of these concerns are managed by the app's install script (usually `~/.steam/steam/steamapps/common/[appname]/installscript.vdf`), but `~/.steam/steam/legacycompat/iscriptevaluator.exe` also runs inside the Wine environment during initial provisioning and appears to manage Proton-specific dependencies and tweaks.
+
+# Provisioning New Apps
+Games don't ship with built-in Proton support, and official Proton releases aren't built for tinkering; community-sourced forks of the Proton project have flourished as a result. [Glorious Eggroll](https://github.com/GloriousEggroll/proton-ge-custom) is one popular fork that contains bleeding-edge app compatibility fixes, quality-of-life improvements, and a few workarounds that Valve doesn't enable for legal reasons.
+
+New apps generally don't work out-of-the-box; a bit of sleuthing is usually required to identify workarounds and missing dependencies. The Proton log is the first place to consult; this log can be [enabled per-app](https://www.reddit.com/r/linux_gaming/comments/9ahd3k/how_do_you_get_steam_to_output_logs_for_proton/e4venek?utm_source=share&utm_medium=web2x&context=3) or by launching Steam with `PROTON_LOG=1` in the environment (just be sure to close all open instances of Steam; the app's environment is derived from the environment in which Steam initially launches). I prefer the CLI, so I use something like this:
+
+```
+$ killall -w steam && PROTON_LOG=1 steam -applaunch [appid]
+```
+
+The Steam app ID can be found in the app's Steam URL. For example, 3DMark's URL is https://store.steampowered.com/app/223850/3DMark/ and its app ID is 223850.
+
+Once an app issue has been root-caused, [protonfixes](https://github.com/simons-public/protonfixes) (available as part of Proton-GE) is a handy framework for applying a fix.
protonfixes relies heavily on [the protontricks wrapper for Winetricks](https://github.com/Matoking/protontricks), so common workarounds are generally available as [Winetricks verbs](https://github.com/Winetricks/Winetricks/blob/master/files/verbs/all.txt). For example, if my fun new game requires the VC++ 2015 redistributable, I would use this in `~/.config/protonfixes/localfixes/[appid].py`:
+
+```
+from protonfixes import util
+
+def main():
+    util.protontricks('vcrun2015')
+```
+
+protonfixes supports more complicated workarounds as well; see [Writing Gamefixes](https://github.com/simons-public/protonfixes/wiki/Writing-Gamefixes) for details.
+
+# Provisioning Non-Steam Apps
+Proton is mature enough to support apps outside of Steam, though setup is a bit more complicated. I prefer the CLI for such cases; Steam's built-in "Add a Non-Steam Game to My Library" tends to obscure important details.
+
+When Proton is launched from the command line, one must provide the `STEAM_COMPAT_DATA_PATH` env var; the `SteamGameId` env var must also be set for Proton logging to function correctly. The game ID will necessarily be fake; pick whatever suits you. For example, to run a hypothetical FunGame's installer:
+
+```
+$ SteamGameId=FunGame STEAM_COMPAT_DATA_PATH=~/Games/FunGame/ ~/.steam/steam/compatibilitytools.d/Proton-5.9-GE-6-ST/proton waitforexitandrun ~/Downloads/fun_game_installer.exe
+```
+
+Then, with the game installed:
+
+```
+$ SteamGameId=FunGame STEAM_COMPAT_DATA_PATH=~/Games/FunGame/ ~/.steam/steam/compatibilitytools.d/Proton-5.9-GE-6-ST/proton waitforexitandrun ~/Games/FunGame/pfx/drive_c/fungame/install/path/fungame.exe
+```
+
+# Sleuthing Tips
+Crafting a compatibility recipe requires some familiarity with Windows workloads and their requirements, but in general:
+
+* Some apps work better with older Proton releases; the version of Proton used can be controlled in the app's Properties dialog
+* Hacking print statements into the proton script is useful for debugging startup issues (make a backup!)
+* Dependency issues often show up in the Proton log (make sure `SteamGameId` is defined)
+* Compatibility reports from [protondb](https://www.protondb.com/) often include useful tips and tricks
+* Take inspiration from officially supported apps where possible:
+    * Use `ps auxw | grep -i [app]` to identify the app PID, then run `strings /proc/[PID]/environ | sort` to view the process environment.
+    * Snoop iscriptevaluator.exe operation. For example, iscriptevaluator.exe consumes a file named `~/steam/debian-installation/legacycompat/evaluatorscript_223850.vdf` during 3DMark's first-run setup; this temporary file describes the procedure for installing VC++ Runtime and DirectX redistributables.
+* Install scripts from [Lutris](https://lutris.net/) contain useful provisioning info
+* Always test the final version of your recipe in a clean prefix
+
+# Example: GOG Galaxy + Wasteland 3
+I opted for the DRM-free version of Wasteland 3 so I could exercise Proton's advanced features:
+
+1. Download and [install](https://github.com/GloriousEggroll/proton-ge-custom#installation) the [Proton-5.9-GE-7-ST](https://github.com/GloriousEggroll/proton-ge-custom/releases/tag/5.9-GE-7-ST) release.
+2. Download https://content-system.gog.com/open_link/download?path=/open/galaxy/client/setup_galaxy_2.0.16.187.exe (as seen in the [Lutris install script](https://lutris.net/games/install/17225/view)).
+3. Create a compatibility recipe in `~/.config/protonfixes/localfixes/gog.py` (derived from various [protonfix gamefixes](https://github.com/simons-public/protonfixes/tree/master/protonfixes/gamefixes)):
+```
+#pylint: disable=C0103
+
+from protonfixes import util
+
+def main():
+    util.protontricks('corefonts')
+    util.protontricks('mfc140')
+    util.protontricks('win10')
+```
+4. Run the GOG Galaxy installer:
+```
+$ SteamGameId='gog' STEAM_COMPAT_DATA_PATH=~/Games/GOG/ ~/.steam/steam/compatibilitytools.d/Proton-5.9-GE-7-ST/proton waitforexitandrun ~/Downloads/setup_galaxy_2.0.16.187.exe
+```
+5. Click through the installer. You can launch GOG Galaxy immediately, or relaunch with:
+```
+$ SteamGameId='gog' STEAM_COMPAT_DATA_PATH=~/Games/GOG/ ~/.steam/steam/compatibilitytools.d/Proton-5.9-GE-7-ST/proton waitforexitandrun ~/Games/GOG/pfx/drive_c/Program\ Files\ \(x86\)/GOG\ Galaxy/GalaxyClient.exe
```
+6. Disable the GOG in-game overlay (Settings->Game features->Overlay) to prevent in-game stutter.
+7. Install Wasteland 3 through the GOG Galaxy interface.
+8. Launch and enjoy. One can also launch the game through the CLI:
+
+```
+$ SteamGameId='gog' STEAM_COMPAT_DATA_PATH=~/Games/GOG/ ~/.steam/steam/compatibilitytools.d/Proton-5.9-GE-7-ST/proton waitforexitandrun ~/Games/GOG/pfx/drive_c/Program\ Files\ \(x86\)/GOG\ Galaxy/Games/Wasteland\ 3/WL3.exe
+```
+
+![Screenshot-from-2020-10-09-10-39-29](/blog/content/images/2020/10/Screenshot-from-2020-10-09-10-39-29.png)
+
+As of this writing, Wasteland 3 will eventually terminate with an error, "eventfd: Too many open files" (observed while launching the game directly). The existing [workaround](https://askubuntu.com/questions/1182021/too-many-open-files) hasn't worked around the issue.
+
diff --git a/content/blog/running-vulkan-conformance-tests.md b/content/blog/running-vulkan-conformance-tests.md
new file mode 100644
index 0000000..d5fe3c1
--- /dev/null
+++ b/content/blog/running-vulkan-conformance-tests.md
@@ -0,0 +1,22 @@
++++
+author = "Caleb Callaway"
+date = 2019-05-06T10:17:16Z
+description = ""
+draft = false
+slug = "running-vulkan-conformance-tests"
+title = "Running Vulkan Conformance Tests on Ubuntu 18.04"
+
++++
+
+
+A random weekend project, as a series of shell commands:
+
+```
+$ sudo apt install cmake libvulkan-dev
+$ cd ~/src && git clone https://github.com/KhronosGroup/VK-GL-CTS.git && cd VK-GL-CTS
+$ mkdir build && cd build
+$ cmake .. -DCMAKE_BUILD_TYPE=Release -DCMAKE_C_FLAGS=-m64 -DCMAKE_CXX_FLAGS=-m64
+$ make -j
+$ ./deqp-vk --deqp-caselist-file=/home/caleb/src/VK-GL-CTS/external/vulkancts/mustpass/master/vk-default.txt --deqp-log-images=disable --deqp-log-shader-sources=disable --deqp-log-flush=disable
+```
+
diff --git a/content/blog/sequence-to-coordinates.md b/content/blog/sequence-to-coordinates.md
new file mode 100644
index 0000000..9b2edd7
--- /dev/null
+++ b/content/blog/sequence-to-coordinates.md
@@ -0,0 +1,29 @@
++++
+author = "Caleb Callaway"
+date = 2019-02-15T10:46:35Z
+description = ""
+draft = false
+slug = "sequence-to-coordinates"
+title = "Sequence to Coordinates"
+
++++
+
+
+I've encountered problems at work and in interviews that can be reduced to the problem of translating an integer value (0, 1, 2, 3...n) into [2D coordinates](https://en.wikipedia.org/wiki/Two-dimensional_space). Iterative approaches usually aren't applicable because one generally gets a single sequence number without a loop context.
For example, it's often necessary to map an OpenCL work item (identified by an [integer ID](https://www.khronos.org/registry/OpenCL/sdk/1.0/docs/man/xhtml/get_global_id.html)) to a region of the image on which the work item will operate.
+
+To solve this, one needs to know the width and height of the coordinate space, and the walk direction. For a horizontal walk, X values increase first: (0,0) (1,0) (2,0) (3,0) etc; for a vertical walk, Y values increase first: (0,0) (0,1) (0,2) (0,3) and so forth.
+
+I solve this as follows: the coordinate that corresponds to the walk dimension is computed by taking the sequence number [modulo](https://en.wikipedia.org/wiki/Modulo_operation) the maximum walk value (that is, the height or width). The other component is the [integer division](https://en.wikipedia.org/wiki/Division_(mathematics)#Of_integers) of the sequence number and the maximum walk value.
+
+More concretely, the X coordinate for a horizontal walk is the integer sequence number modulo the width of the coordinate space, and the Y coordinate is the sequence number integer-divided by the width of the coordinate space. For a vertical walk, the X coordinate is the sequence number integer-divided by the height of the coordinate space; the Y coordinate is the sequence number modulo the height of the coordinate space. For example, in a 4-wide coordinate space, sequence number 6 maps to (6 % 4, 6 // 4) = (2, 1) for a horizontal walk.
+
+Python 3 code:
+
+```
+def horizontal_walk(total_coords, width):
+    # X increases first: X = i mod width, Y = i integer-divided by width
+    return [(i % width, i // width) for i in range(total_coords)]
+
+def vertical_walk(total_coords, height):
+    # Y increases first: X = i integer-divided by height, Y = i mod height
+    return [(i // height, i % height) for i in range(total_coords)]
+```
+
diff --git a/content/blog/smart-pointer-copy-performance.md b/content/blog/smart-pointer-copy-performance.md
new file mode 100644
index 0000000..65150f3
--- /dev/null
+++ b/content/blog/smart-pointer-copy-performance.md
@@ -0,0 +1,87 @@
++++
+author = "Caleb Callaway"
+date = 2019-05-09T02:45:55Z
+description = ""
+draft = false
+slug = "smart-pointer-copy-performance"
+title = "Smart Pointer Copy Performance"
+
++++
+
+
+A casual Google search for [smart pointer](https://docs.microsoft.com/en-us/cpp/cpp/smart-pointers-modern-cpp?view=vs-2019) performance yields [analysis of shared vs. "raw" pointers](http://blog.davidecoppola.com/2016/10/performance-of-raw-pointers-vs-smart-pointers-in-cpp/), which concludes that there is no appreciable performance penalty for using smart pointers. The access, allocation, and deallocation findings seem reasonable; the data supports the use of [std::make_shared](https://en.cppreference.com/w/cpp/memory/shared_ptr/make_shared) for best performance.
+
+I'm not convinced by the copy results, however; the atomic operations required to increment and decrement ref counts on shared pointers are expensive (see https://stackoverflow.com/a/2783981/577298 and https://htor.inf.ethz.ch/publications/img/atomic-bench.pdf).
+
+To support my skepticism, I've written a small benchmark that measures the performance of raw pointers passed to iterative and recursive increment functions. The same tests are then repeated with smart pointers. This benchmark is designed to mimic the existing test and also simulate the deeply nested call stacks in which smart pointers seem to shine because it's difficult to reason about resource lifetime.
+ +Results: + +``` +Raw iterative ops: 8192 +Raw iterative test repetitions: 16384 +Raw iterative results: + Min: 795 ns (0.0970459 ns per op) + Max: 53083 ns (6.47986 ns per op) + Average: 1285.32 ns (0.156899 ns per op) + +Raw recursion ops: 8192 +Raw recursion test repetitions: 16384 +Raw recursive results: + Min: 794 ns (0.0969238 ns per op) + Max: 20638 ns (2.51929 ns per op) + Average: 863.515 ns (0.10541 ns per op) + +SP iterative ops: 8192 +SP iterative test repetitions: 16384 +SP iterative results: + Min: 12301 ns (1.50159 ns per op) + Max: 44715 ns (5.45837 ns per op) + Average: 12627.1 ns (1.54139 ns per op) + +SP recursion ops: 8192 +SP recursion test repetitions: 16384 +SP recursive results: + Min: 27654 ns (3.37573 ns per op) + Max: 110610 ns (13.5022 ns per op) + Average: 29545.5 ns (3.60663 ns per op) +``` + +Compiled with GCC 7.4.0 using the same compiler flags as the existing data (`-O3 -s -Wall -std=c++11`) and tested on Ubuntu 18.04 with an i7-6900K CPU @ 3.20GHz and 64 GB of memory. + +Smart pointers are significantly slower than raw pointers for both tests; disabling compiler optimizations (`-O0`) makes the delta between raw and SP performance even more obvious: + +``` +Raw iterative ops: 8192 +Raw iterative test repetitions: 16384 +Raw iterative results: + Min: 18687 ns (2.28113 ns per op) + Max: 100941 ns (12.3219 ns per op) + Average: 22257 ns (2.71691 ns per op) + +Raw recursion ops: 8192 +Raw recursion test repetitions: 16384 +Raw recursive results: + Min: 29396 ns (3.58838 ns per op) + Max: 115340 ns (14.0796 ns per op) + Average: 31549.7 ns (3.85128 ns per op) + +SP iterative ops: 8192 +SP iterative test repetitions: 16384 +SP iterative results: + Min: 312024 ns (38.0889 ns per op) + Max: 510200 ns (62.2803 ns per op) + Average: 362119 ns (44.2039 ns per op) + +SP recursion ops: 8192 +SP recursion test repetitions: 16384 +SP recursive results: + Min: 410023 ns (50.0516 ns per op) + Max: 576959 ns (70.4296 ns per op) + Average: 414347 ns (50.5795 ns per op) +``` + +[My code is available](https://github.com/cqcallaw/sp-benchmark) + +*Thanks to Rafael Barbalho for inspiring this experiment and to Matthew Lawson for proofreading and feedback* + diff --git a/content/blog/static-types-beneficial.md b/content/blog/static-types-beneficial.md new file mode 100644 index 0000000..3f7843c --- /dev/null +++ b/content/blog/static-types-beneficial.md @@ -0,0 +1,19 @@ ++++ +author = "Caleb Callaway" +date = 2019-02-17T02:23:09Z +description = "" +draft = false +slug = "static-types-beneficial" +title = "Static Type Checking Considered Beneficial" + ++++ + + +Reliance on runtime type checking places the burden of type checking on fallible, stressed, distracted human beings in much the same way that [manual memory management](https://en.wikipedia.org/wiki/Manual_memory_management) does. It's easy to believe that elitism plays a role too ("*real* programmers don't need type checking tools..."). Writing a leak-free program is *possible* without automated leak checks, and it's certainly *possible* to write type-correct code without automated type checks, but the chances of success are extraordinarily low and the cognitive load grows exponentially with the size of the code base. 
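+
+To make the burden concrete, here's a minimal sketch (the function and values are hypothetical): with type hints, a static checker flags the bad call before the program ever runs; without them, the same mistake surfaces only at runtime, deep inside `sum()`:
+
+```
+from typing import List
+
+def average(values: List[float]) -> float:
+    return sum(values) / len(values)
+
+average([1.0, 2.0, 3.0])  # fine
+average("oops")           # a static checker rejects this before the program runs;
+                          # without the annotation, it fails only at runtime
+```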
+
+For these reasons, I'm supremely disappointed by this section of [Python's type hints enhancement](https://www.python.org/dev/peps/pep-0484/#rationale-and-goals):
+
+> It should also be emphasized that **Python will remain a dynamically typed language, and the authors have no desire to ever make type hints mandatory, even by convention.**
+
+I like a lot of things about Python, but this isn't one of them. Many hours of over-confident developer time could have been saved, particularly for novices who might not be savvy enough to seek out tools like [MyPy](http://mypy-lang.org/index.html).
+
diff --git a/content/blog/straight-edges-part-2.md b/content/blog/straight-edges-part-2.md
new file mode 100644
index 0000000..2dfaa4c
--- /dev/null
+++ b/content/blog/straight-edges-part-2.md
@@ -0,0 +1,19 @@
++++
+author = "Caleb Callaway"
+date = 2020-07-25T17:46:02Z
+description = ""
+draft = false
+slug = "straight-edges-part-2"
+title = "Straight Edges Part 2"
+
++++
+
+
+I continue to enjoy [shaving with a straight edge razor](https://www.brainvitamins.net/blog/straight-edges/). A few additional discoveries:
+
+* A sharp razor can shave arm hair without lather or discomfort. If the razor pulls on the hair or fails to cut it, it's time to sharpen the razor.
+* Make sure to use the sharpening tools properly (read the instructions). I tend to use too little water on my waterstone; it should be nicely soaked.
+* Shaving against the grain hasn't triggered ingrowth of hair for me (though my hair isn't very curly).
+* Proper illumination can really help when searching for missed patches.
+* [Proraso After Shave Lotion](https://www.amazon.com/gp/product/B0085UECY2/) is excellent.
+
diff --git a/content/blog/straight-edges.md b/content/blog/straight-edges.md
new file mode 100644
index 0000000..afabfab
--- /dev/null
+++ b/content/blog/straight-edges.md
@@ -0,0 +1,21 @@
++++
+author = "Caleb Callaway"
+date = 2020-05-02T05:06:33Z
+description = ""
+draft = false
+slug = "straight-edges"
+title = "Straight Edges"
+
++++
+
+
+My [shelter-in-place](https://covid19.ca.gov/stay-home-except-for-essential-needs/) impulse buy was a straight razor kit. I'm still learning how to shave comfortably, but the shaves are quite smooth. Now that the initial anxiety about cutting myself has faded, the process is a lot of fun.
+
+The Art of Manliness [guide](https://www.artofmanliness.com/articles/how-to-straight-razor-shave/) is a good intro, to which I'd add the following notes:
+* As long as I'm reasonably careful, cuts aren't nearly as painful as I expected them to be.
+* Make sure to understand the correct angle; 30 degrees is probably closer than is strictly comfortable at first. If you experience a lot of discomfort but don't cut yourself, the angle is probably wrong; the razor is likely tugging at your hair instead of slicing it.
+* Use the lightest touch possible; for me, it helps to think of the razor as just barely kissing the skin.
+* I've read elsewhere that shaving against the grain isn't advisable if you have curly facial hair.
+* Developing left-hand dexterity for hard-to-reach spots is well worth it.
+* You haven't really lived until you've shaved your bikini zone with a straight edge.
+
diff --git a/content/blog/sum-types-continued.md b/content/blog/sum-types-continued.md
new file mode 100644
index 0000000..a7dfd83
--- /dev/null
+++ b/content/blog/sum-types-continued.md
@@ -0,0 +1,39 @@
++++
+author = "Caleb Callaway"
+categories = ["newt"]
+date = 2016-02-01T09:36:28Z
+description = ""
+draft = false
+slug = "sum-types-continued"
+tags = ["newt"]
+title = "Sum Types, continued"
+
++++
+
+
+Regarding [yesterday's post about sum types](https://www.brainvitamins.net/blog/sum-types-structural-vs-nominal/), co-conspirator Lao observed that the verbosity of structural sum types could be overcome by allowing structural sum types to be aliased, e.g.:
+```
+sum number {
+    int
+    | double
+}
+
+t:number = 7
+```
+
+Another issue raised by co-conspirator Lao is the interaction of widening conversions and function overloads. For example:
+```
+# declare an overloaded function
+f:= {
+    ((int|double)) -> unit {}
+    & ((int|string)) -> unit {}
+}
+
+f(7) # invocation is compatible with both overloads
+```
+Using type aliases does not address this ambiguity, since the integer argument can be widened to either type. It appears necessary to declare this particular invocation ambiguous, and require the programmer to explicitly widen the argument to the appropriate type. Attempting to widen to the "widest" conversion seems complicated and error-prone.
+
+It has also been observed that sum types are a collection of _types_, not symbols, and thus distinct from namespaces, which can contain both types and symbols.
+
+Finally, if record types (i.e. structs) without members are permitted, enums could be expressed as an aliased structural sum type. Ideally, the empty struct members would be namespaced such that the surrounding namespace was not polluted.
+
diff --git a/content/blog/sum-types-structural-vs-nominal.md b/content/blog/sum-types-structural-vs-nominal.md
new file mode 100644
index 0000000..1126e04
--- /dev/null
+++ b/content/blog/sum-types-structural-vs-nominal.md
@@ -0,0 +1,83 @@
++++
+author = "Caleb Callaway"
+categories = ["newt"]
+date = 2016-01-31T05:56:54Z
+description = ""
+draft = false
+slug = "sum-types-structural-vs-nominal"
+tags = ["newt"]
+title = "Sum Types: Nominal vs. Structural"
+
++++
+
+
+[newt](https://github.com/cqcallaw/newt/) is strongly influenced by functional programming languages, so it's no accident that many of the language constructs are copied or adapted from functional programming. No existing paradigm is sacrosanct, however, so even the implementation of classic FP constructs should be considered carefully.
+
+One recent and long-running design discussion is centered around the implementation of sum types (i.e. [tagged unions](https://en.wikipedia.org/wiki/Tagged_union)). This type-safe and succinct method for describing a value that is one of several variants is well-aligned with the language design goals. The common implementation of sum types identifies the variants of the type with a _tag_, which is simply a name for one of the variants. This makes the sum type a _nominal_ type, where equivalence is determined by the name.
Rust's [enums](https://doc.rust-lang.org/book/enums.html) are a fine example of this nominal approach:
+
+```
+enum Message {
+    Quit,
+    ChangeColor(i32, i32, i32),
+    Move { x: i32, y: i32 },
+    Write(String),
+}
+```
+
+This snippet defines a Message type that can be a Quit message with no associated data, a ChangeColor message that contains a tuple of integers representing an RGB color value, a Move message with associated Cartesian coordinates, or a Write message with an associated string value. Instances are created with a type constructor as follows:
+
+```
+let x: Message = Message::Move { x: 3, y: 4 };
+```
+
+To use a value of a sum type, the specific variant is [matched by name](https://doc.rust-lang.org/book/match.html#matching-on-enums) before use:
+
+    match msg {
+        Message::Quit => quit(),
+        Message::ChangeColor(r, g, b) => change_color(r, g, b),
+        Message::Move { x: x, y: y } => move_cursor(x, y),
+        Message::Write(s) => println!("{}", s),
+    };
+
+Performing computations with values of a sum type without first performing a `match` decomposition is usually a semantic error. For example, adding two values of type `Message` together is nonsensical, but adding two values of type `Message::Move` could be interpreted as vector addition.
+
+So far the construct is regular and coherent (although I strongly disagree with the use of the keyword `enum`), but the syntax feels a bit heavy for simple cases. For many simple sum types (such as the Message example from Rust), the variants are each of different types, and can be matched by the _structure_ of the type instead of its name. In small examples, the gains in succinctness can be significant, as illustrated by the following hypothetical newt snippets:
+
+```
+# nominal approach
+sum number {
+    discrete:int
+    | continuous:double
+}
+t:= number::discrete(7)
+```
+
+```
+# structural approach
+t:(int|double)= 7
+```
+This succinctness is offset by verbosity when specifying struct members or function return types:
+
+```
+# nominal
+f:= (a:int) -> number { }
+f:= (a:double) -> number { }
+```
+```
+# structural
+f:= (a:int) -> (int|double) { }
+f:= (a:double) -> (int|double) { }
+```
+
+The verbosity increases with each additional structural variant. Type inference solves the double verbosity of function return types and variable types, but does not address the verbosity of multiple function definitions with the same return type.
+
+Even so, the nominal typing still feels verbose. My current thinking is to require nominal typing, but allow type inference of the variant where possible. For example, the `int` variant can unambiguously be inferred as follows:
+
+```
+sum number {
+    discrete:int
+    | continuous:double
+}
+t:number= 7 # t will be of type number::discrete
+```
+
diff --git a/content/blog/swedish-pancakes.md b/content/blog/swedish-pancakes.md
new file mode 100644
index 0000000..3c150da
--- /dev/null
+++ b/content/blog/swedish-pancakes.md
@@ -0,0 +1,28 @@
++++
+author = "Caleb Callaway"
+categories = ["cooking"]
+date = 2020-04-03T22:08:45Z
+description = ""
+draft = false
+slug = "swedish-pancakes"
+tags = ["cooking"]
+title = "Swedish Pancakes"
+
++++
+
+
+I'll spare you the traditional long-winded recipe sales pitch and simply report we've eaten these pancakes with maple syrup almost daily for two months and haven't tired of them.
+
+For 2 large pancakes:
+
+* 1/2 cup milk
+* 2 eggs
+* 1/2 cup flour
+* 1/4 teaspoon salt
+* 1 tablespoon melted butter or vegetable oil
+* 1 teaspoon vanilla extract
+
+Combine the milk and eggs first, then add the rest of the ingredients. Blend until very smooth. The mixture will be very runny; the cooked pancakes are very thin.
+
+Cook on a well-greased griddle on medium heat; don't over-cook. Top with any traditional pancake or crepe toppings; maple syrup or blueberry sauce and whipped cream work well.
+
diff --git a/content/blog/the-dweb-needs-work.md b/content/blog/the-dweb-needs-work.md
new file mode 100644
index 0000000..2400d56
--- /dev/null
+++ b/content/blog/the-dweb-needs-work.md
@@ -0,0 +1,57 @@
++++
+author = "Caleb Callaway"
+categories = ["security", "cryptocurrency", "dweb"]
+date = 2020-09-25T10:48:50Z
+description = ""
+draft = false
+slug = "the-dweb-needs-work"
+tags = ["security", "cryptocurrency", "dweb"]
+title = "The Dweb Needs Work"
+
++++
+
+
+[Balaji Srinivasan](https://balajis.com/) has been evangelizing a decentralized Internet lately, which inspired me to explore [IPFS](https://ipfs.io/) + [ENS](https://app.ens.domains/) as a means of realizing a more resilient and censorship-resistant Internet. Hybrid solutions exist, but I believe decentralization needs work before it's ready for prime time. Updates cost too much, peer relationships are brittle, and authenticity seems largely unmapped.
+
+# The Basics
+This blog post does not replace [the ipfs.io docs](https://docs.ipfs.io/), which are good. Newcomers to IPFS would be particularly well-served by [What is IPFS?](https://docs.ipfs.io/concepts/what-is-ipfs/) and [the CLI quickstart](https://docs.ipfs.io/how-to/command-line-quick-start/).
+
+# The Name of the Thing
+IPFS Content Identifiers (CIDs) are long strings of alphanumeric characters less memorable than a corporate Twitter account, necessitating a map to human-readable names. ENS seems to be [the emerging standard](https://docs.ipfs.io/how-to/websites-on-ipfs/link-a-domain/). Anyone who's used a domain name registrar will have an intuition for how this works; specify a name, pay a fee, wait, and properly configured browsers start resolving your name. If you've got an IPFS node [set up](https://github.com/ipfs-shipyard/ipfs-desktop#install) and the [IPFS companion](https://github.com/ipfs-shipyard/ipfs-companion#ipfs-companion) installed, try accessing http://www.brainvitamins.eth for the Dweb version of my homepage.
+
+Updates to ENS are _expensive_, though. Holy cow. The [public record](https://etherscan.io/address/0xf7318ac0253b14f703d969483ff2908b42b261cc) of my neophyte stumbling starts with a seemingly sufficient [transfer](https://etherscan.io/tx/0x192bce8910abd0ce5ebee5c0fbc86c843180aaa1b41baba6e9129fbf1daee6b3) of funds, quickly followed by [a second transfer](https://etherscan.io/tx/0x0224f10a3f3a839cadb4d227f5f0b3520a3e8f6970a2ac2b2559e5cd00374251), then a [third](https://etherscan.io/tx/0xabd014496180ffc79549b44d805eb6be0f1ee4934c2d0e8074eeb3209fa400c9) when the true cost of transaction fees is made manifest. As I write this, the registration fee converts to slightly north of 10 USD, and the transaction fees for each record update run $2-$3.
The price of ENS updates may seem reasonable until one notes that every website update requires an ENS update, because everything in IPFS is stored in [Merkle DAGs](https://docs.ipfs.io/concepts/merkle-dag/), beautiful structures that might inspire site reliability engineers to write poetry and Dweb bloggers to form anti-poetry unions. In the pure and stateless world of Merkle DAGs, every change generates a new DAG. The size and scope of the change matters not; a single-character typo correction and a new blog post both require a new DAG, and every new DAG means a new root node to which the ENS record must point. The high cost of transactions is [a known issue](https://twitter.com/VitalikButerin/status/1285593115672358912); I don't expect ENS to be widely adopted until this issue is resolved.
+
+If we move our decentralization goalposts, services like [Fleek](https://docs.ipfs.io/how-to/websites-on-ipfs/introducing-fleek/) can help. Fleek hosting workflows depend on centralized services like Github, so it's a hybrid solution that isn't fully censorship-resistant. Hybrid solutions might be an acceptable tradeoff; I'd want a damn good spellchecker if every misuse of "it's" as a possessive pronoun could cost me $5 to fix.
+
+# Good Peers are Hard to Find
+Browsing my Dweb homepage from the local host works. Browsing my Dweb homepage from my LAN-connected laptop works, once the laptop's IPFS service discovers the local peer. Browsing my Dweb homepage from Sweden does not work (so far). The IPFS nodes can't even `ipfs ping` each other.
+
+The failure from Sweden doesn't surprise me; discovery in a peer-to-peer network is a hard problem. Even if one has the technical skills to properly configure [firewall, NAT, and port-forwarding](https://docs.ipfs.io/how-to/nat-configuration/), ISPs often hand out dynamic IP address assignments, which means an update to the [DHT](https://docs.ipfs.io/concepts/dht/) must propagate to all interested nodes every time the utility company cuts power.
+
+IPv6 ought to help but doesn't; ISPs can delegate prefixes dynamically. A static prefix delegation from a service like Hurricane Electric's legitimately awesome [tunnelbroker.net](https://tunnelbroker.net/) solves the problem for those with the relevant skillset, though such a solution is limited to IPv6; IPv4 peers need not apply. Folks who require immediate access to my cleverly captioned cat pictures might also want to [modify their peers list](https://docs.ipfs.io/how-to/modify-bootstrap-list/).
+
+The solution on which I'm converging is a LAN-attached IPFS node with a publicly routable IP address, in which my files are pinned and from which other local hosts fetch data as needed. My uptime is generally lousy, but likely sufficient for aficionados of captioned cat pictures. For those with less technical inclination or stronger availability requirements, pinning services like [Pinata](https://pinata.cloud/) solve the availability problem by [pinning](https://docs.ipfs.io/concepts/persistence/) data on publicly accessible nodes; this is another example of a hybrid solution with centralized components.
+
+# Infosec
+Authenticity, confidentiality, and integrity are traditionally addressed (with varying degrees of success) by [TLS](https://en.wikipedia.org/wiki/Transport_Layer_Security). The Dweb's content-addressing model should address any integrity concerns; content tampering is instantly detectable as a change to the CID (if not, every user of distributed version control will have an extremely bad day).
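+
+This tamper-evidence is easy to sketch with an ordinary hash function (real IPFS CIDs are [multihashes](https://docs.ipfs.io/concepts/content-addressing/) with additional encoding, so this illustrates the principle rather than the actual CID format):
+
+```
+import hashlib
+
+original = b"cleverly captioned cat picture"
+tampered = b"cleverly captioned cat picture!"
+
+# Content addressing derives the name of the data from the data itself,
+# so any change to the content changes the address that names it.
+print(hashlib.sha256(original).hexdigest())
+print(hashlib.sha256(tampered).hexdigest())  # a completely different digest
+```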
+
+[Encrypted transport](https://blog.ipfs.io/2020-08-07-deprecating-secio/) should resolve any confidentiality issues. Authenticity seems to be an area of [active research](https://www.researchgate.net/publication/325819333_IPFS-Blockchain-based_Authenticity_of_Online_Publications); applications of [PGP](https://en.wikipedia.org/wiki/Pretty_Good_Privacy) signing are obvious, though [webs of trust](https://en.wikipedia.org/wiki/Web_of_trust) seem beyond the ken of Joe and Jane User. Something akin to SSH's "do you want to trust this server?" prompt might work. [InstaNotary](https://github.com/rekpero/InstaNotary) could be interesting if a desktop app is made available.
+
+User tracking isn't really a thing, either. As the Dweb search engine [Almonit](http://almonit.eth.link/) says:
+
+```
+How does Almonit search engine protects [sic] your privacy?
+The right question to ask is actually “Can Almonit search engine violate your privacy?”, and the answer is 'No'. The search process is done without any interaction with us or any other third party, hence we have no way to know what you search for.
+```
+
+On the other hand, there is no "right to be forgotten" on the Dweb. That's a good thing for folks fighting content censorship and a less good thing for folks fighting child pornography. The Dweb protects the extra-naughty as much as it protects the extra-nice.
+
+Folks clamoring for an edit button on Twitter will have a bad time on the Dweb as well; all potentially embarrassing political opinions are preserved like ants in amber.
+
+# The Solution Space
+The technical hurdles described above would include solutions if I were clever enough to have solved them. The difficulty of maintaining robust peer-to-peer routes is a particularly thorny problem with [old solutions](https://en.wikipedia.org/wiki/STUN). A hybrid solution may be necessary. I'm most encouraged by the work done to establish Dweb authenticity, though the work I've seen depends on the Ethereum blockchain and is therefore limited by Ethereum gas prices. All eyes on the Ethereum community to get that one under control.
+
+I also wonder if the content distribution mechanisms developed for a centralized web are simply unsuitable for the Dweb; maybe we're better served by new models that "lean in" to the Dweb's stateless eventual propagation. Maybe Martians will remember weblogs as historical curiosities.
+
+_If you enjoyed this post and would like to support the blog, please feel free to tip me at Ethereum address 0xf7318Ac0253B14f703D969483fF2908b42b261cc_
+
diff --git a/content/blog/the-great-filter.md b/content/blog/the-great-filter.md
new file mode 100644
index 0000000..4081274
--- /dev/null
+++ b/content/blog/the-great-filter.md
@@ -0,0 +1,17 @@
++++
+author = "Caleb Callaway"
+date = 2020-08-15T17:39:52Z
+description = ""
+draft = false
+slug = "the-great-filter"
+title = "A Confusion of Languages"
+
++++
+
+
+Given the multitude of stars in the sky and what we've observed about the orbiting planets, why haven't we observed alien life? [The Great Filter](https://www.youtube.com/watch?v=UjtOGPJ0URM) is one possible explanation; perhaps there's some tremendous physical or social hurdle to interstellar travel which no alien life has ever cleared.
+
+When I learned of the Great Filter hypothesis, I immediately wondered if the Tower of Babel story (https://biblegateway.com/passage/?search=Genesis+11%3A1-9&version=NIV) describes an ancient failure to pass the Filter.
A "tower that reaches to the heavens" sounds like interstellar aspirations, particularly when "nothing they plan to do will be impossible for them."
+
+Regardless, confusion of language strikes me as a fine description of how human endeavor and social cohesion are commonly attacked. When language is confused and ambiguous, people are driven apart.
+
diff --git a/content/blog/tortillas-from-scratch.md b/content/blog/tortillas-from-scratch.md
new file mode 100644
index 0000000..d3fc5a4
--- /dev/null
+++ b/content/blog/tortillas-from-scratch.md
@@ -0,0 +1,17 @@
++++
+author = "Caleb Callaway"
+date = 2020-01-13T07:04:59Z
+description = ""
+draft = false
+slug = "tortillas-from-scratch"
+title = "Tortillas from Scratch"
+
++++
+
+
+[No money can buy you a better Coke](https://kottke.org/10/10/andy-warhol-on-coca-cola), but I'm convinced the best corn tortillas are fresh off the griddle at home. Two ingredients ([masa harina](https://recipes.howstuffworks.com/what-is-masa-harina.htm) and water), 1 minute cook time, and I am quite addicted.
+
+Fresh tortillas are delicious plain or spread with butter. Sprinkle cinnamon sugar on top of the butter for a sweet snack; cheese melted and mixed with refried beans makes a heartier filling.
+
+I've heard that day-old tortillas are fed to the pigs in Mexico, and I believe it; the flavor fades fast. Not recommended for leftovers.
+
diff --git a/content/blog/what-are-computers.md b/content/blog/what-are-computers.md
new file mode 100644
index 0000000..7d119fd
--- /dev/null
+++ b/content/blog/what-are-computers.md
@@ -0,0 +1,21 @@
++++
+author = "Caleb Callaway"
+date = 2019-07-11T21:49:34Z
+description = ""
+draft = false
+slug = "what-are-computers"
+title = "What are Computers?"
+
++++
+
+
+I was recently challenged to describe computers in non-technical terms; after some thought, here's what I'd say:
+
+Computers are machines (just like drill presses, steam locomotives, or a toaster oven) built to manifest the rules of some wonderful mathematical models developed in the 1950s, 60s, and 70s. Most computers in use today are _general purpose_ computers, which can perform any conceivable computation, not just 2 + 2 or the square root of pi[^1]. General-purpose computers have the tremendous power to _modify the rules by which they compute_, so a single general-purpose computer is not just a single machine, but an infinity of potential machines.
+
+This infinity of machines ("programmability") enables us to associate a given machine or computational result with any meaning we can imagine. This is precisely what happens when one taps the screen of an iPhone to play a Netflix movie, but this barely scratches the surface of programmability's fractal nature. We describe some machines as hosts to subordinate machine infinities, each given tiny slices of computational time on the host machine and sharing resources in an invisible but exactingly precise dance. Other machines enable the creation and transmission of new subordinate machines ("programs"). Still other machines store computational results so they persist even after the computer has ceased to operate. Each and every one of these infinities is containable in a single physical device.
+
+For me, the power and beauty of computers is in this vast canvas of programmability. We hold in the palm of our hands the ability to craft systems, simulations, even entire imaginary universes, then share our creations with our fellows.
This infinity of infinities is not without its difficulties and perils, but I hold the richness of this experience to be well worth it. + +[^1]: Though there are no guarantees about how long one must wait for a given computation to complete! +