# A story on how we optimized things for performance

*October 6, 2024*
# Introduction

I have always strived to write the most performant code at Pandabase, as we need speed and reliability to handle the large volume of payments we process every day. I've optimized various components, from database queries to caching, and implemented proper rate limits so that everyone can use our API without sacrificing performance or security.

## The Problem

During the development of our API, I ran into a significant performance issue. Our serial management system, written in TypeScript and Node.js, struggled to encrypt and decrypt the thousands of keys we process; processing time was reaching up to 5 seconds per request, and we did not want to compromise on speed.

Of course, several techniques could be employed, such as a job queue or batch processing in the background. That didn't make sense in this context, though, so I decided to try a couple of different approaches.

# Solutions

I came up with a number of ideas to solve this issue during the development of this specific API. Since our stack was limited to Go and Node, here's what I tried.

## The Microservice

The serials API has its own database, and everything was already finished; the only bottleneck was the encryption/decryption processing with `AES-256-CBC`. This led me to another idea: what if I wrote an HTTP server dedicated solely to encrypting and decrypting keys in a more performant language? I wanted to use either Go or Rust for the job, as both are fast enough to handle it.

Since our API mostly follows a microservice-based architecture, this was relatively easy to implement. However, the approach had several caveats and was ultimately abandoned.
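For context, the single-threaded path we were trying to speed up boils down to per-key calls into Node's built-in `crypto` module. Here is a minimal sketch of that baseline; the payload layout (IV prepended to the ciphertext, hex-encoded), the key handling, and the helper names are assumptions for illustration, not our actual schema:

```ts
import { createCipheriv, createDecipheriv, randomBytes } from "node:crypto";

// Illustrative only: in reality the key comes from a secret store, not randomBytes.
const KEY = randomBytes(32);

export function encryptSerial(plaintext: string): string {
  const iv = randomBytes(16);
  const cipher = createCipheriv("aes-256-cbc", KEY, iv);
  const ciphertext = Buffer.concat([cipher.update(plaintext, "utf8"), cipher.final()]);
  // Store the IV alongside the ciphertext so decryption can recover it.
  return Buffer.concat([iv, ciphertext]).toString("hex");
}

export function decryptSerial(payload: string): string {
  const raw = Buffer.from(payload, "hex");
  const decipher = createDecipheriv("aes-256-cbc", KEY, raw.subarray(0, 16));
  return Buffer.concat([decipher.update(raw.subarray(16)), decipher.final()]).toString("utf8");
}

// The slow path: every key in a request is processed one after another on the main thread.
export function decryptBatch(payloads: string[]): string[] {
  return payloads.map(decryptSerial);
}
```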
I did utilize goroutines in the Go implementation. Here's what the Go results looked like:
```
~/Workspace/ed-benchmark via Go v1.23.2
 go run benchmark.go
Processed 1000000 keys in 1.152051375s
```

And the production binary:
```
~/Workspace/ed-benchmark via Go v1.23.2
 ./benchmark
Processed 1000000 keys in 1.118548291s
```

I don't see a significant difference between the two beyond a couple of milliseconds.

## Node.js Worker Threads

Worker threads let us spin up multiple workers, so I thought we could use them to speed things up. I wrote an implementation that takes in batches of keys and decrypts them; the same applies for encryption. This seemed like a feasible solution.
The performance numbers for one million keys were as follows:

```
~/Workspace/ed-benchmark via Node.js v22.9.0
 node benchmark.js
Single Thread: 5.494s
Multi Thread: 3.908s
```

# Things to note

While worker threads do perform better than single-threaded processing on large batches, single-threaded execution excels with the smaller workloads we mostly deal with. I thought using a combination of both would give us the best of both worlds.

From a technical standpoint, this comes down to how the CPU operates. Smaller key sets fit easily into the CPU cache, allowing single-threaded processing to perform better with fewer keys. With a larger number of keys, cache misses become frequent and hurt performance. Thread overhead also plays a vital role.

For larger key sets, worker threads are the best solution, as they allow the work to run in parallel and be distributed across multiple CPU cores. For fewer keys, however, the overhead of managing threads can dominate: the time spent spinning up and coordinating threads may exceed the time spent actually processing the keys.