TL;DR
Create a backup:
pg_dumpall > mybackup.sql

Perform the upgrade:
sudo pg_dropcluster 9.4 main --stop

| const std = @import("std"); | |
| const os = std.os; | |
| const net = std.net; | |
| const mem = std.mem; | |
| const log = std.log.scoped(.server); | |
| const assert = std.debug.assert; | |
| pub const io_mode = .evented; |
| { pkgs ? import <nixpkgs> {} }: | |
| with pkgs; | |
| let | |
| su_exec = pkgs.stdenv.mkDerivation { | |
| name = "su-exec-0.2"; | |
| src = fetchurl { | |
| url = https://github.com/ncopa/su-exec/archive/v0.2.tar.gz; | |
| sha256 = "09ayhm4w7ahvwk6wpjimvgv8lx89qx31znkywqmypkp6rpccnjpc"; |
| # The following example shows a way to use iptables for basic round-robin load balancing, by redirecting | |
| # packets to one of three ports based on a statistics counter. | |
| # | |
| # TCP packets for new sessions arriving on port 9000 will rotate between ports 9001, 9002 and 9003, where | |
| # three identical copies of some application are expected to be listening. | |
| # | |
| # Packets that aren't TCP or that are related to an already-established connection are left untouched, letting | |
| # the standard iptables connection tracking machinery send them to the appropriate port. | |
| # | |
| # For this to work well, connections need to be relatively short. Ideally there would be an extra layer |
| # -*- coding: utf-8 -*- | |
| import asyncio | |
| import re | |
| import asyncio_redis | |
| import tornado.concurrent | |
| import tornado.httpclient | |
| import tornado.web | |
| import tornado.platform.asyncio |
| defmodule MyStream do | |
| def mutate(enum, user_acc, user) do | |
| step = fn val, _acc -> {:suspend, val} end | |
| next = &Enumerable.reduce(enum, &1, step) | |
| &do_mutate([], user_acc, user, next, &1, &2) | |
| end | |
| defp do_mutate(values, user_acc, user, next, {:suspend, acc}, fun) do | |
| {:suspended, acc, &do_mutate(values, user_acc, user, next, &1, fun)} | |
| end |
TL;DR
Create a backup:
pg_dumpall > mybackup.sql

Perform the upgrade:
sudo pg_dropcluster 9.4 main --stop

| defmodule Curried do | |
| defmacro defc({name, _, args}, [do: body]) do | |
| curried_args = Enum.map(Enum.with_index(args), fn({_, index}) -> | |
| Enum.take(args, index + 1) | |
| end) | |
| for a <- curried_args do | |
| if a == Enum.at(curried_args, Enum.count(curried_args) - 1) do | |
| quote do | |
| def unquote(name)(unquote_splicing(a)) do | |
| unquote(body) |
| # defines a generic proxy | |
| defmodule GenProxy do | |
| defmacro __using__(_) do | |
| quote location: :keep do | |
| use GenServer | |
| def handle_call(msg, from={_process, ref}, state) do | |
| case proxy_call(msg, from, state) do | |
| {:forward, server, new_state} -> | |
| :erlang.send(server, {:"$gen_call", {self, ref}, msg}, [:noconnect]) |
(by @andrestaltz)
If you prefer to watch video tutorials with live-coding, then check out this series I recorded with the same contents as in this article: Egghead.io - Introduction to Reactive Programming.
| import asyncio | |
| import tornado.concurrent | |
| import tornado.ioloop | |
| import tornado.web | |
| import tornado.platform.asyncio | |
| import tornado.httpclient | |
| class ReqHandler(tornado.web.RequestHandler): | |
| async def get(self): |