rusty

Jan 4, 2026

I'm a bit rusty, and in a few days I have a technical interview. Time to drag those skeletons into the sun for a proper sunbath.

We are going to code (raw-dogging mode) a few Elixir modules:

  • LRU Cache
  • Top-K frequent (map + heap/PQ)
  • Subarray sum equals K (prefix sums)
  • Merge intervals
  • BFS shortest path (grid)
  • Binary search variants
  • Rate limiter (token bucket, in-memory)

LRU Cache

link: code

To run:

$ elixir raw-dogged-lru.exs
defmodule LRU do
  @moduledoc """
  Immutable (pure) LRU cache.

  This cache is **persistent/immutable**: every `get/2` returns `{value, new_lru}` and every
  `put/3` returns a **new** `LRU.t()`.

  Internally it maintains:
  - `k2id`: key -> node id
  - `nodes`: node id -> node data (`%{k, v, prev, next}`)
  - `head`/`tail`: ids of a doubly-linked list in **MRU -> LRU** order

  Complexity (amortized, on top of Elixir's effectively-constant map operations):
  - `get/2`: O(1)
  - `put/3`: O(1)

  ## Examples

      iex> lru = LRU.new(2) |> LRU.put(:a, 1) |> LRU.put(:b, 2)
      iex> {1, lru} = LRU.get(lru, :a)     # :a becomes MRU
      iex> lru = LRU.put(lru, :c, 3)       # evicts LRU (:b)
      iex> {nil, _} = LRU.get(lru, :b)
      iex> {1, _} = LRU.get(lru, :a)
  """

  alias __MODULE__, as: LRU

  @typedoc "Cache key."
  @type key :: term()

  @typedoc "Cache value."
  @type value :: term()

  @typedoc "Internal node id."
  @type id :: pos_integer()

  @typedoc "Internal doubly-linked-list node."
  @type n :: %{
          k: key(),
          v: value(),
          prev: id() | nil,
          next: id() | nil
        }

  @typedoc """
  LRU cache struct.

  Invariants (expected to hold after any public operation):
  - `map_size(k2id) == map_size(nodes)`
  - If size == 0: `head == nil` and `tail == nil`
  - If size > 0: `head` and `tail` are valid node ids, and list pointers are consistent
  """
  @type t :: %LRU{
          cap: non_neg_integer(),
          k2id: %{optional(key()) => id()},
          nodes: %{optional(id()) => n()},
          head: id() | nil,
          tail: id() | nil,
          next_id: id()
        }

  defstruct cap: 0,
            k2id: %{},
            nodes: %{},
            head: nil,
            tail: nil,
            next_id: 1

  @doc """
  Creates a new cache with the given `capacity`.

  Capacity may be `0` (a cache that never stores anything).

  ## Examples

      iex> lru = LRU.new(0)
      iex> lru.cap
      0

      iex> lru = LRU.new(2) |> LRU.put(:a, 1)
      iex> {1, _} = LRU.get(lru, :a)
  """
  @spec new(non_neg_integer()) :: t()
  def new(capacity) when is_integer(capacity) and capacity >= 0, do: %LRU{cap: capacity}

  @doc """
  Reads `key` from the cache.

  Returns `{value_or_nil, new_lru}`.
  If the key exists, it is promoted to **most recently used**.

  ## Examples

      iex> lru = LRU.new(2) |> LRU.put(:a, 1) |> LRU.put(:b, 2)
      iex> {1, lru} = LRU.get(lru, :a)  # :a becomes MRU
      iex> lru = LRU.put(lru, :c, 3)    # evicts :b (LRU)
      iex> {nil, _} = LRU.get(lru, :b)
  """
  @spec get(t(), key()) :: {value() | nil, t()}
  def get(lru, key) do
    case Map.get(lru.k2id, key) do
      nil ->
        {nil, lru}

      id ->
        v = lru.nodes[id].v
        lru2 = lru |> detach(id) |> attach_head(id)
        {v, lru2}
    end
  end

  @doc """
  Inserts or updates `key` with `val`.

  - New keys are inserted as **MRU**.
  - Existing keys are updated and promoted to **MRU**.
  - If inserting exceeds capacity, the **LRU** entry is evicted.

  If `cap == 0`, this is a no-op.

  ## Examples

      iex> lru = LRU.new(1) |> LRU.put(:a, 1) |> LRU.put(:b, 2)
      iex> {nil, _} = LRU.get(lru, :a)
      iex> {2, _} = LRU.get(lru, :b)

      iex> lru = LRU.new(2) |> LRU.put(:a, 1) |> LRU.put(:a, 99)
      iex> {99, _} = LRU.get(lru, :a)
  """
  @spec put(t(), key(), value()) :: t()
  def put(%{cap: 0} = lru, _k, _v), do: lru

  def put(lru, key, val) do
    case Map.get(lru.k2id, key) do
      nil ->
        {id, lru1} = alloc_node(lru, key, val)
        lru1 |> attach_head(id) |> evict_if_needed()

      id ->
        lru
        |> put_in([Access.key!(:nodes), id, :v], val)
        |> detach(id)
        |> attach_head(id)
    end
  end

  @spec alloc_node(t(), key(), value()) :: {id(), t()}
  defp alloc_node(lru, key, val) do
    id = lru.next_id
    node = %{k: key, v: val, prev: nil, next: nil}

    lru2 =
      lru
      |> Map.update!(:k2id, &Map.put(&1, key, id))
      |> Map.update!(:nodes, &Map.put(&1, id, node))
      |> Map.put(:next_id, id + 1)

    {id, lru2}
  end

  @spec detach(t(), id()) :: t()
  defp detach(lru, id) do
    node = lru.nodes[id]
    prev = node.prev
    nxt = node.next

    lru =
      lru
      |> put_in([Access.key!(:nodes), id, :prev], nil)
      |> put_in([Access.key!(:nodes), id, :next], nil)

    lru =
      if prev do
        put_in(lru, [Access.key!(:nodes), prev, :next], nxt)
      else
        lru
      end

    lru =
      if nxt do
        put_in(lru, [Access.key!(:nodes), nxt, :prev], prev)
      else
        lru
      end

    lru = if lru.head == id, do: %{lru | head: nxt}, else: lru
    lru = if lru.tail == id, do: %{lru | tail: prev}, else: lru
    lru
  end

  @spec attach_head(t(), id()) :: t()
  defp attach_head(%{head: nil} = lru, id) do
    lru
    |> put_in([Access.key!(:nodes), id, :prev], nil)
    |> put_in([Access.key!(:nodes), id, :next], nil)
    |> Map.put(:head, id)
    |> Map.put(:tail, id)
  end

  defp attach_head(lru, id) do
    old = lru.head

    lru
    |> put_in([Access.key!(:nodes), id, :prev], nil)
    |> put_in([Access.key!(:nodes), id, :next], old)
    |> put_in([Access.key!(:nodes), old, :prev], id)
    |> Map.put(:head, id)
  end

  @spec evict_if_needed(t()) :: t()
  defp evict_if_needed(lru) do
    if map_size(lru.k2id) <= lru.cap do
      lru
    else
      tid = lru.tail
      key = lru.nodes[tid].k

      lru
      |> detach(tid)
      |> Map.update!(:k2id, &Map.delete(&1, key))
      |> Map.update!(:nodes, &Map.delete(&1, tid))
    end
  end
end

ExUnit.start()

defmodule LRUTest do
  use ExUnit.Case, async: true

  defp keys(lru), do: lru.k2id |> Map.keys() |> Enum.sort()

  defp assert_integrity(lru) do
    size = map_size(lru.k2id)

    assert map_size(lru.nodes) == size

    Enum.each(lru.k2id, fn {k, id} ->
      node = Map.fetch!(lru.nodes, id)
      assert node.k == k
    end)

    cond do
      size == 0 ->
        assert lru.head == nil
        assert lru.tail == nil

      true ->
        assert is_integer(lru.head)
        assert is_integer(lru.tail)
        assert Map.has_key?(lru.nodes, lru.head)
        assert Map.has_key?(lru.nodes, lru.tail)

        head_node = Map.fetch!(lru.nodes, lru.head)
        tail_node = Map.fetch!(lru.nodes, lru.tail)

        assert head_node.prev == nil
        assert tail_node.next == nil

        {ids, last} =
          Stream.unfold({lru.head, nil}, fn
            {nil, _prev} ->
              nil

            {id, prev} ->
              node = Map.fetch!(lru.nodes, id)
              assert node.prev == prev
              {{id, node}, {node.next, id}}
          end)
          |> Enum.reduce({[], nil}, fn {id, _node}, {acc, _} -> {[id | acc], id} end)

        ids = Enum.reverse(ids)

        assert Enum.uniq(ids) == ids

        assert length(ids) == size

        assert last == lru.tail
    end

    lru
  end

  test "new/1 sets capacity and empty state" do
    lru = LRU.new(3)
    assert lru.cap == 3
    assert_integrity(lru)
  end

  test "cap=0: put is no-op and get always misses" do
    lru = LRU.new(0)
    lru2 = LRU.put(lru, :a, 1)
    assert lru2 == lru

    assert {nil, ^lru} = LRU.get(lru, :a)
    assert_integrity(lru)
  end

  test "get miss returns {nil, same_lru}" do
    lru = LRU.new(2) |> LRU.put(:a, 1)
    assert {nil, ^lru} = LRU.get(lru, :missing)
    assert_integrity(lru)
  end

  test "put inserts and get hits; get promotes to MRU" do
    lru =
      LRU.new(2)
      |> LRU.put(:a, 1)
      |> LRU.put(:b, 2)

    assert_integrity(lru)

    assert {1, lru2} = LRU.get(lru, :a)
    assert_integrity(lru2)

    lru3 = LRU.put(lru2, :c, 3)
    assert_integrity(lru3)

    assert {nil, _} = LRU.get(lru3, :b)
    assert {1, _} = LRU.get(lru3, :a)
    assert {3, _} = LRU.get(lru3, :c)
  end

  test "put updates existing key value and promotes to MRU without changing size" do
    lru =
      LRU.new(2)
      |> LRU.put(:a, 1)
      |> LRU.put(:b, 2)

    assert keys(lru) == [:a, :b]
    assert_integrity(lru)

    lru2 = LRU.put(lru, :a, 99)
    assert keys(lru2) == [:a, :b]
    assert_integrity(lru2)

    assert {99, _} = LRU.get(lru2, :a)
  end

  test "capacity = 1 evicts on second distinct insert" do
    lru =
      LRU.new(1)
      |> LRU.put(:a, 1)
      |> LRU.put(:b, 2)

    assert_integrity(lru)

    assert {nil, _} = LRU.get(lru, :a)
    assert {2, _} = LRU.get(lru, :b)
  end

  test "longer sequence maintains invariants and never exceeds capacity" do
    lru0 = LRU.new(3)

    lru =
      Enum.reduce(1..50, lru0, fn i, lru ->
        lru =
          lru
          |> LRU.put(rem(i, 5), i) # keys: 0..4
          |> (fn lru ->
                {_v, lru} = LRU.get(lru, rem(i + 2, 5))
                lru
              end).()

        assert_integrity(lru)
        assert map_size(lru.k2id) <= lru.cap
        lru
      end)

    assert_integrity(lru)
  end
end
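
Since the cache is a plain value, using it means threading it through your own state. A minimal usage sketch (illustrative only, not part of the module) that memoizes an assumed `expensive` function with a 2-entry cache:

# Illustrative: thread the immutable cache through a reduce.
# `expensive` stands in for any costly pure function.
expensive = fn x -> x * x end

{results, _cache} =
  Enum.reduce([1, 2, 1, 3, 2], {[], LRU.new(2)}, fn x, {acc, cache} ->
    case LRU.get(cache, x) do
      {nil, cache1} ->
        # Miss (note: a stored nil value would also land here).
        val = expensive.(x)
        {[val | acc], LRU.put(cache1, x, val)}

      {val, cache1} ->
        # Hit: the key was promoted to MRU by get/2.
        {[val | acc], cache1}
    end
  end)

Enum.reverse(results)
# => [1, 4, 1, 9, 4]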

Top-K frequent

link: code

To run:

$ elixir raw-dogged-topk.exs
defmodule TopK do
  @moduledoc """
  Utilities for selecting the `k` most frequent **distinct** values from a list.

  ## Approach

  * Build a frequency map in `O(n)`.
  * Keep a min-priority set (size ≤ `k`) using `:gb_sets` keyed by `{count, seq, value}`.
    When the set grows beyond `k`, the smallest `{count, seq, value}` is removed.

  ## Notes on ties

  If multiple values share the same frequency, the function still returns a valid top-`k`,
  but the *relative order among tied items is unspecified*.
  """

  @typedoc "A value from the input list."
  @type value :: term()

  @doc """
  Returns up to `k` values that occur most frequently in `nums`.

  The result is ordered from **most frequent to least frequent**.
  Ties have unspecified order.

  ## Examples

      iex> TopK.top_k_frequent([1, 1, 1, 2, 2, 3], 2)
      [1, 2]

      iex> TopK.top_k_frequent([:a, :b, :a], 10) |> Enum.sort()
      [:a, :b]

      iex> TopK.top_k_frequent([], 3)
      []

      iex> TopK.top_k_frequent([1, 2, 3], 0)
      []
  """
  @spec top_k_frequent([value()], integer()) :: [value()]
  def top_k_frequent(_nums, k) when is_integer(k) and k <= 0, do: []

  def top_k_frequent(nums, k) when is_list(nums) and is_integer(k) do
    freq =
      Enum.reduce(nums, %{}, fn x, acc ->
        Map.update(acc, x, 1, &(&1 + 1))
      end)

    {set, _seq} =
      Enum.reduce(freq, {:gb_sets.empty(), 0}, fn {v, c}, {set, seq} ->
        seq = seq + 1
        set = :gb_sets.add({c, seq, v}, set)

        set =
          if :gb_sets.size(set) > k do
            {_smallest, set2} = :gb_sets.take_smallest(set)
            set2
          else
            set
          end

        {set, seq}
      end)

    extract_desc(set, [])
  end

  @spec extract_desc(term(), [value()]) :: [value()]
  defp extract_desc(set, acc) do
    if :gb_sets.is_empty(set) do
      Enum.reverse(acc)
    else
      {{_c, _seq, v}, set2} = :gb_sets.take_largest(set)
      extract_desc(set2, [v | acc])
    end
  end
end

ExUnit.start()

defmodule TopKTest do
  use ExUnit.Case, async: true

  describe "top_k_frequent/2" do
    test "returns [] when k <= 0" do
      assert TopK.top_k_frequent([1, 2, 3], 0) == []
      assert TopK.top_k_frequent([1, 2, 3], -10) == []
    end

    test "returns [] for empty input" do
      assert TopK.top_k_frequent([], 1) == []
      assert TopK.top_k_frequent([], 10) == []
    end

    test "returns the k most frequent values, ordered by frequency desc" do
      nums = [1, 1, 1, 2, 2, 3]
      assert TopK.top_k_frequent(nums, 2) == [1, 2]
    end

    test "when k exceeds number of distinct values, returns all distinct values" do
      nums = [:a, :b, :a]
      res = TopK.top_k_frequent(nums, 10)

      assert length(res) == 2
      assert MapSet.new(res) == MapSet.new([:a, :b])
    end

    test "works with non-numeric terms" do
      nums = ["x", "y", "x", "z", "x", "y"]
      assert TopK.top_k_frequent(nums, 2) == ["x", "y"]
    end

    test "ties: returns any valid top-k (order among ties unspecified)" do
      nums = [1, 1, 2, 2, 3, 3]
      res = TopK.top_k_frequent(nums, 2)

      assert length(res) == 2
      assert Enum.uniq(res) == res
      assert Enum.all?(res, &(&1 in [1, 2, 3]))

      freq = Enum.frequencies(nums)
      kth = freq |> Map.values() |> Enum.sort(:desc) |> Enum.at(1)
      assert Enum.all?(res, fn v -> Map.fetch!(freq, v) >= kth end)
    end
  end
end
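
For a sanity check (or when the input is small), the heap can be swapped for a plain sort of the frequency map. A minimal O(n log n) sketch using only the standard library, with the same unspecified order among ties:

# Illustrative cross-check, not the heap approach used by TopK above.
top_k_by_sort = fn nums, k ->
  nums
  |> Enum.frequencies()
  |> Enum.sort_by(fn {_v, count} -> count end, :desc)
  |> Enum.take(max(k, 0))
  |> Enum.map(fn {v, _count} -> v end)
end

top_k_by_sort.([1, 1, 1, 2, 2, 3], 2)
# => [1, 2]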

Subarray sum equals K

link: code

To run:

$ elixir raw-dogged-sub.exs
defmodule SubarraySum do
  @moduledoc """
  Counts how many contiguous subarrays sum to a target `k`.

  ## Idea

  Use prefix sums and a frequency map:

  * Let `prefix[i] = nums[0] + ... + nums[i]`.
  * A subarray `(j+1..i)` sums to `k` iff `prefix[i] - prefix[j] = k`,
    i.e. `prefix[j] = prefix[i] - k`.
  * Keep a map `seen` from prefix sum to how many times we've seen it so far.

  Time: `O(n)`
  Space: `O(n)` (in the worst case, all prefix sums are distinct)

  Works with negative numbers and zeros.
  """

  @typedoc "An integer input list element."
  @type num :: integer()

  @doc """
  Returns the number of contiguous subarrays of `nums` whose sum equals `k`.

  ## Examples

      iex> SubarraySum.count([1, 1, 1], 2)
      2

      iex> SubarraySum.count([1, 2, 3], 3)
      2

      iex> SubarraySum.count([1, -1, 0], 0)
      3

      iex> SubarraySum.count([], 0)
      0
  """
  @spec count([num()], integer()) :: non_neg_integer()
  def count(nums, k) when is_list(nums) and is_integer(k) do
    {ans, _prefix, _seen} =
      Enum.reduce(nums, {0, 0, %{0 => 1}}, fn x, {ans, prefix, seen} ->
        prefix = prefix + x
        ans = ans + Map.get(seen, prefix - k, 0)
        seen = Map.update(seen, prefix, 1, &(&1 + 1))
        {ans, prefix, seen}
      end)

    ans
  end
end

ExUnit.start()

defmodule SubarraySumTest do
  use ExUnit.Case, async: true

  describe "count/2" do
    test "classic example" do
      assert SubarraySum.count([1, 1, 1], 2) == 2
    end

    test "multiple matches across different lengths" do
      assert SubarraySum.count([1, 2, 3], 3) == 2
    end

    test "handles negative numbers" do
      assert SubarraySum.count([1, -1, 0], 0) == 3
    end

    test "all zeros: many subarrays match" do
      assert SubarraySum.count([0, 0, 0, 0], 0) == 10
    end

    test "single element cases" do
      assert SubarraySum.count([5], 5) == 1
      assert SubarraySum.count([5], 0) == 0
    end

    test "empty list" do
      assert SubarraySum.count([], 7) == 0
      assert SubarraySum.count([], 0) == 0
    end

    test "property check vs brute force on small random-ish inputs" do
      cases = [
        {[2, -2, 2, -2], 0},
        {[-1, -1, 1], -1},
        {[3, 4, -7, 1, 3, 3, 1, -4], 7},
        {[1, 2, 1, 2, 1], 3},
        {[10, -10, 10], 10}
      ]

      Enum.each(cases, fn {nums, k} ->
        assert SubarraySum.count(nums, k) == brute_count(nums, k)
      end)
    end
  end

  defp brute_count(nums, k) do
    n = length(nums)

    for i <- 0..(n - 1),
        j <- i..(n - 1),
        reduce: 0 do
      acc ->
        sum =
          nums
          |> Enum.slice(i..j)
          |> Enum.sum()

        if sum == k, do: acc + 1, else: acc
    end
  end
end

Merge intervals

link: code

To run:

$ elixir raw-dogged-merge.exs
defmodule Intervals do
  @moduledoc """
  Merges overlapping (or touching) intervals.

  Intervals are represented as `{start, end}` tuples (typically integers).
  The merge rule used here is:

  * Two intervals overlap/touch if `next_start <= prev_end`.
  * When they do, they merge into `{prev_start, max(prev_end, next_end)}`.

  The result is sorted by start (and end) in ascending order.

  ## Complexity

  * Time: `O(n log n)` due to sorting
  * Space: `O(n)` for the output

  ## Assumptions / input validation

  This module assumes each interval is a 2-tuple and that `start <= end`.
  If you may receive reversed intervals like `{5, 2}`, normalize them before calling `merge/1`.
  """

  @typedoc "An interval represented as `{start, end}`."
  @type interval(t) :: {t, t}
  @type interval :: interval(integer())

  @doc """
  Merges a list of intervals.

  Returns a list of merged intervals, sorted ascending.

  Touching intervals are merged:

      iex> Intervals.merge([{1, 2}, {2, 3}])
      [{1, 3}]

  ## Examples

      iex> Intervals.merge([])
      []

      iex> Intervals.merge([{1, 3}, {2, 6}, {8, 10}, {15, 18}])
      [{1, 6}, {8, 10}, {15, 18}]

      iex> Intervals.merge([{5, 7}, {1, 2}, {3, 4}])
      [{1, 2}, {3, 4}, {5, 7}]
  """
  @spec merge([interval(integer())]) :: [interval(integer())]
  def merge([]), do: []

  def merge(intervals) when is_list(intervals) do
    intervals
    |> Enum.sort_by(fn {s, e} -> {s, e} end)
    |> Enum.reduce([], fn {s, e}, acc ->
      case acc do
        [] ->
          [{s, e}]

        [{ps, pe} | rest] ->
          if s <= pe do
            [{ps, max(pe, e)} | rest]
          else
            [{s, e} | acc]
          end
      end
    end)
    |> Enum.reverse()
  end
end

ExUnit.start()

defmodule IntervalsTest do
  use ExUnit.Case, async: true

  describe "merge/1" do
    test "empty input" do
      assert Intervals.merge([]) == []
    end

    test "single interval" do
      assert Intervals.merge([{1, 2}]) == [{1, 2}]
    end

    test "already sorted, overlapping" do
      assert Intervals.merge([{1, 3}, {2, 6}, {8, 10}, {15, 18}]) ==
               [{1, 6}, {8, 10}, {15, 18}]
    end

    test "unsorted input is handled" do
      assert Intervals.merge([{8, 10}, {1, 3}, {2, 6}]) == [{1, 6}, {8, 10}]
    end

    test "non-overlapping intervals remain separate and sorted" do
      assert Intervals.merge([{5, 7}, {1, 2}, {3, 4}]) == [{1, 2}, {3, 4}, {5, 7}]
    end

    test "touching intervals merge (s <= previous_end)" do
      assert Intervals.merge([{1, 2}, {2, 3}]) == [{1, 3}]
      assert Intervals.merge([{1, 1}, {1, 2}]) == [{1, 2}]
    end

    test "contained intervals collapse into the outer one" do
      assert Intervals.merge([{1, 10}, {2, 3}, {4, 8}]) == [{1, 10}]
    end

    test "duplicates collapse" do
      assert Intervals.merge([{1, 2}, {1, 2}, {1, 2}]) == [{1, 2}]
    end

    test "property check vs brute merge on small cases" do
      cases = [
        [{1, 3}, {4, 6}],
        [{1, 4}, {2, 3}],
        [{5, 6}, {1, 2}, {2, 4}],
        [{1, 2}, {3, 5}, {4, 4}],
        [{0, 0}, {0, 1}, {2, 2}]
      ]

      Enum.each(cases, fn intervals ->
        assert Intervals.merge(intervals) == brute_merge(intervals)
      end)
    end
  end

  # A genuinely different strategy from Intervals.merge/1: repeatedly merge any
  # overlapping/touching pair until a fixpoint is reached, then sort.
  defp brute_merge(intervals) do
    case find_overlapping_pair(intervals) do
      nil ->
        Enum.sort(intervals)

      {a, b} ->
        {s1, e1} = a
        {s2, e2} = b
        rest = intervals |> List.delete(a) |> List.delete(b)
        brute_merge([{min(s1, s2), max(e1, e2)} | rest])
    end
  end

  defp find_overlapping_pair(intervals) do
    pairs = for a <- intervals, b <- intervals, a != b, overlap?(a, b), do: {a, b}
    List.first(pairs)
  end

  # Overlap or touch: neither interval starts after the other ends.
  defp overlap?({s1, e1}, {s2, e2}), do: s1 <= e2 and s2 <= e1
end

BFS shortest path

link: code

To run:

$ elixir raw-dogged-bfs.exs
defmodule BFSGrid do
  @moduledoc """
  Shortest path on a 2D grid using BFS (4-directional movement).

  The grid is a list of rows, where each cell is:

  * `0` = open
  * `1` = blocked (wall)

  Coordinates are `{row, col}` with zero-based indices.

  Returns the length (number of steps) of the shortest path from `start` to `goal`,
  or `-1` if no path exists or the input is invalid.

  ## Notes / assumptions

  * Requires a rectangular grid (all rows same length).
  * Movement is 4-neighborhood: up, down, left, right (no diagonals).
  * If `start == goal`, returns `0` (as long as the cell is in-bounds and not blocked).

  ## Complexity

  BFS visits each reachable cell at most once.

  * Time: `O(rows * cols)`
  * Space: `O(rows * cols)` for the queue + visited set
  """

  @typedoc "Grid where 0=open and 1=blocked."
  @type grid :: [[0 | 1]]

  @typedoc "Coordinate in `{row, col}` form (0-based)."
  @type coord :: {integer(), integer()}

  @doc """
  Computes the shortest path length from `{sr, sc}` to `{gr, gc}`.

  Returns `-1` when:

  * grid is empty
  * start/goal is out of bounds
  * start/goal is blocked (`1`)
  * no path exists

  ## Examples

      iex> grid = [
      ...>   [0, 0, 0],
      ...>   [1, 1, 0],
      ...>   [0, 0, 0]
      ...> ]
      iex> BFSGrid.shortest_path(grid, {0, 0}, {2, 2})
      4

      iex> BFSGrid.shortest_path([], {0, 0}, {0, 0})
      -1

      iex> BFSGrid.shortest_path([[1]], {0, 0}, {0, 0})
      -1

      iex> BFSGrid.shortest_path([[0]], {0, 0}, {0, 0})
      0
  """
  @spec shortest_path(grid(), coord(), coord()) :: integer()
  def shortest_path(grid, {sr, sc}, {gr, gc})
      when is_list(grid) and is_integer(sr) and is_integer(sc) and is_integer(gr) and
             is_integer(gc) do
    r = length(grid)
    if r == 0, do: -1, else: do_bfs(grid, {sr, sc}, {gr, gc})
  end

  @spec do_bfs(grid(), coord(), coord()) :: integer()
  defp do_bfs(grid, start, goal) do
    rows = length(grid)
    cols = length(hd(grid))

    in_bounds = fn {r, c} -> r >= 0 and r < rows and c >= 0 and c < cols end
    cell = fn {r, c} -> grid |> Enum.at(r) |> Enum.at(c) end

    cond do
      not in_bounds.(start) or not in_bounds.(goal) ->
        -1

      cell.(start) == 1 or cell.(goal) == 1 ->
        -1

      start == goal ->
        0

      true ->
        q = :queue.in({start, 0}, :queue.new())
        vis = MapSet.new([start])
        bfs_loop(q, vis, in_bounds, cell, goal)
    end
  end

  @spec bfs_loop(:queue.queue(), MapSet.t(), (coord() -> boolean()), (coord() -> 0 | 1), coord()) ::
          integer()
  defp bfs_loop(q, vis, in_bounds, cell, goal) do
    case :queue.out(q) do
      {{:value, {{r, c}, d}}, q2} ->
        dirs = [{1, 0}, {-1, 0}, {0, 1}, {0, -1}]

        {q3, vis3, found} =
          Enum.reduce(dirs, {q2, vis, nil}, fn {dr, dc}, {qq, vv, found} ->
            if found do
              {qq, vv, found}
            else
              nxt = {r + dr, c + dc}

              cond do
                not in_bounds.(nxt) -> {qq, vv, nil}
                MapSet.member?(vv, nxt) -> {qq, vv, nil}
                cell.(nxt) == 1 -> {qq, vv, nil}
                nxt == goal -> {qq, vv, d + 1}
                true -> {:queue.in({nxt, d + 1}, qq), MapSet.put(vv, nxt), nil}
              end
            end
          end)

        if found, do: found, else: bfs_loop(q3, vis3, in_bounds, cell, goal)

      {:empty, _} ->
        -1
    end
  end
end

ExUnit.start()

defmodule BFSGridTest do
  use ExUnit.Case, async: true

  describe "shortest_path/3" do
    test "empty grid returns -1" do
      assert BFSGrid.shortest_path([], {0, 0}, {0, 0}) == -1
    end

    test "start == goal returns 0 when in-bounds and open" do
      assert BFSGrid.shortest_path([[0]], {0, 0}, {0, 0}) == 0
    end

    test "start == goal returns -1 when blocked" do
      assert BFSGrid.shortest_path([[1]], {0, 0}, {0, 0}) == -1
    end

    test "out-of-bounds start or goal returns -1" do
      grid = [
        [0, 0],
        [0, 0]
      ]

      assert BFSGrid.shortest_path(grid, {-1, 0}, {1, 1}) == -1
      assert BFSGrid.shortest_path(grid, {0, 0}, {2, 0}) == -1
    end

    test "blocked start or goal returns -1" do
      grid = [
        [1, 0],
        [0, 0]
      ]

      assert BFSGrid.shortest_path(grid, {0, 0}, {1, 1}) == -1

      grid2 = [
        [0, 0],
        [0, 1]
      ]

      assert BFSGrid.shortest_path(grid2, {0, 0}, {1, 1}) == -1
    end

    test "finds a shortest path in a simple open grid (Manhattan distance)" do
      grid = [
        [0, 0, 0],
        [0, 0, 0],
        [0, 0, 0]
      ]

      assert BFSGrid.shortest_path(grid, {0, 0}, {2, 2}) == 4
      assert BFSGrid.shortest_path(grid, {1, 1}, {2, 0}) == 2
    end

    test "navigates around walls" do
      grid = [
        [0, 0, 0],
        [1, 1, 0],
        [0, 0, 0]
      ]

      assert BFSGrid.shortest_path(grid, {0, 0}, {2, 2}) == 4
    end

    test "returns -1 when no path exists" do
      grid = [
        [0, 1, 0],
        [1, 1, 1],
        [0, 1, 0]
      ]

      assert BFSGrid.shortest_path(grid, {0, 0}, {2, 2}) == -1
    end

    test "handles non-square grids" do
      grid = [
        [0, 0, 1, 0],
        [0, 0, 1, 0]
      ]

      assert BFSGrid.shortest_path(grid, {0, 0}, {1, 3}) == -1
    end

    test "agrees with a brute-force BFS implementation on small cases" do
      cases = [
        {[[0]], {0, 0}, {0, 0}},
        {[[0, 0], [0, 0]], {0, 0}, {1, 1}},
        {[[0, 1], [0, 0]], {0, 0}, {1, 1}},
        {[[0, 1, 0], [0, 1, 0], [0, 0, 0]], {0, 0}, {2, 2}}
      ]

      Enum.each(cases, fn {grid, s, g} ->
        assert BFSGrid.shortest_path(grid, s, g) == brute_bfs(grid, s, g)
      end)
    end
  end

  defp brute_bfs([], _start, _goal), do: -1

  defp brute_bfs(grid, start, goal) do
    rows = length(grid)
    cols = length(hd(grid))

    in_bounds? = fn {r, c} -> r in 0..(rows - 1) and c in 0..(cols - 1) end
    cell = fn {r, c} -> grid |> Enum.at(r) |> Enum.at(c) end

    cond do
      not in_bounds?.(start) or not in_bounds?.(goal) ->
        -1

      cell.(start) == 1 or cell.(goal) == 1 ->
        -1

      start == goal ->
        0

      true ->
        q = :queue.in({start, 0}, :queue.new())
        vis = MapSet.new([start])
        dirs = [{1, 0}, {-1, 0}, {0, 1}, {0, -1}]
        brute_loop(q, vis, dirs, in_bounds?, cell, goal)
    end
  end

  defp brute_loop(q, vis, dirs, in_bounds?, cell, goal) do
    case :queue.out(q) do
      {{:value, {pos, d}}, q2} ->
        if pos == goal do
          d
        else
          {q3, vis3} =
            Enum.reduce(dirs, {q2, vis}, fn {dr, dc}, {qq, vv} ->
              {r, c} = pos
              nxt = {r + dr, c + dc}

              cond do
                not in_bounds?.(nxt) -> {qq, vv}
                MapSet.member?(vv, nxt) -> {qq, vv}
                cell.(nxt) == 1 -> {qq, vv}
                true -> {:queue.in({nxt, d + 1}, qq), MapSet.put(vv, nxt)}
              end
            end)

          brute_loop(q3, vis3, dirs, in_bounds?, cell, goal)
        end

      {:empty, _} ->
        -1
    end
  end
end

Binary search variants

link: code

To run:

$ elixir raw-dogged-bin.exs
defmodule BinSearch do
  @moduledoc """
  Binary search utilities over sorted lists and monotone predicates.

  Provided functions:

  * `lower_bound/2`: first index `i` such that `a[i] >= x`
  * `upper_bound/2`: first index `i` such that `a[i] > x`
  * `first_true/3`: first integer `i` in `[lo, hi)` where a monotone predicate becomes `true`
  * `last_true/3`: last integer `i` in `[lo, hi)` where a monotone predicate is `true`

  ## Conventions

  * Indices are 0-based.
  * Ranges are half-open: `[lo, hi)`.
  * For bounds functions, if no element satisfies the condition, the returned index is `length(a)`.

  ## Requirements

  * `lower_bound/2` and `upper_bound/2` require `a` to be sorted ascending according to Erlang/Elixir term order.
  * `first_true/3` requires `pred` to be **monotone** on `[lo, hi)` in the false-then-true direction:
    there is some threshold `t` such that `pred(i)` is false for `i < t` and true for `i >= t`.
  * `last_true/3` requires the opposite direction: `pred(i)` is true for `i < t` and false for `i >= t`
    (it delegates to `first_true/3` on the negated predicate).
  """

  @typedoc "0-based index into a list."
  @type index :: non_neg_integer()

  @typedoc "A half-open integer range `[lo, hi)`."
  @type lo :: integer()
  @type hi :: integer()

  @doc """
  Returns the first index `i` in `0..length(a)` where `Enum.at(a, i) >= x`.

  If all elements are `< x`, returns `length(a)`.

  ## Examples

      iex> BinSearch.lower_bound([1, 2, 2, 4], 2)
      1

      iex> BinSearch.lower_bound([1, 2, 2, 4], 3)
      3

      iex> BinSearch.lower_bound([], 10)
      0
  """
  @spec lower_bound([term()], term()) :: index()
  def lower_bound(a, x), do: lb(a, x, 0, length(a))

  @spec lb([term()], term(), index(), index()) :: index()
  defp lb(_a, _x, lo, hi) when lo >= hi, do: lo

  defp lb(a, x, lo, hi) do
    mid = lo + div(hi - lo, 2)
    if Enum.at(a, mid) >= x, do: lb(a, x, lo, mid), else: lb(a, x, mid + 1, hi)
  end

  @doc """
  Returns the first index `i` in `0..length(a)` where `Enum.at(a, i) > x`.

  If all elements are `<= x`, returns `length(a)`.

  ## Examples

      iex> BinSearch.upper_bound([1, 2, 2, 4], 2)
      3

      iex> BinSearch.upper_bound([1, 2, 2, 4], 4)
      4

      iex> BinSearch.upper_bound([], 10)
      0
  """
  @spec upper_bound([term()], term()) :: index()
  def upper_bound(a, x), do: ub(a, x, 0, length(a))

  @spec ub([term()], term(), index(), index()) :: index()
  defp ub(_a, _x, lo, hi) when lo >= hi, do: lo

  defp ub(a, x, lo, hi) do
    mid = lo + div(hi - lo, 2)
    if Enum.at(a, mid) > x, do: ub(a, x, lo, mid), else: ub(a, x, mid + 1, hi)
  end

  @doc """
  Given integers `lo` and `hi`, returns the smallest integer `i` in `[lo, hi)`
  such that `pred.(i)` is `true`.

  If `pred` is `false` for all `i` in `[lo, hi)`, returns `hi`.

  ## Requirements

  `pred` must be monotone on `[lo, hi)` (false...false, then true...true).

  ## Examples

      iex> BinSearch.first_true(0, 10, fn i -> i >= 7 end)
      7

      iex> BinSearch.first_true(0, 5, fn _ -> false end)
      5
  """
  @spec first_true(lo(), hi(), (integer() -> as_boolean(term()))) :: integer()
  def first_true(lo, hi, pred) when is_integer(lo) and is_integer(hi) and is_function(pred, 1) do
    if lo >= hi do
      lo
    else
      mid = lo + div(hi - lo, 2)
      if pred.(mid), do: first_true(lo, mid, pred), else: first_true(mid + 1, hi, pred)
    end
  end

  @doc """
  Returns the greatest integer `i` in `[lo, hi)` such that `pred.(i)` is `true`.

  If `pred` is `false` for all `i` in `[lo, hi)`, returns `lo - 1`.

  This is implemented via `first_true/3` by searching for the first index where `pred` becomes false.

  ## Requirements

  `pred` must be monotone on `[lo, hi)` in the true-then-false direction
  (true...true, then false...false).

  ## Examples

      iex> BinSearch.last_true(0, 10, fn i -> i < 7 end)
      6

      iex> BinSearch.last_true(0, 10, fn _ -> false end)
      -1
  """
  @spec last_true(lo(), hi(), (integer() -> as_boolean(term()))) :: integer()
  def last_true(lo, hi, pred) when is_integer(lo) and is_integer(hi) and is_function(pred, 1) do
    i = first_true(lo, hi, fn j -> not pred.(j) end)
    i - 1
  end
end

ExUnit.start()

defmodule BinSearchTest do
  use ExUnit.Case, async: true

  describe "lower_bound/2" do
    test "empty list" do
      assert BinSearch.lower_bound([], 10) == 0
    end

    test "basic positions" do
      a = [1, 2, 2, 4]
      assert BinSearch.lower_bound(a, 0) == 0
      assert BinSearch.lower_bound(a, 1) == 0
      assert BinSearch.lower_bound(a, 2) == 1
      assert BinSearch.lower_bound(a, 3) == 3
      assert BinSearch.lower_bound(a, 4) == 3
      assert BinSearch.lower_bound(a, 5) == 4
    end

    test "works with arbitrary terms (term ordering)" do
      a = [:a, :b, :b, :c]
      assert BinSearch.lower_bound(a, :b) == 1
      assert BinSearch.lower_bound(a, :bb) == 3
    end

    test "agrees with brute force" do
      cases = [
        {[], 1},
        {[1], 0},
        {[1], 1},
        {[1], 2},
        {[1, 1, 1], 1},
        {[1, 2, 2, 4], 2},
        {[1, 2, 2, 4], 3}
      ]

      Enum.each(cases, fn {a, x} ->
        assert BinSearch.lower_bound(a, x) == brute_lower_bound(a, x)
      end)
    end
  end

  describe "upper_bound/2" do
    test "empty list" do
      assert BinSearch.upper_bound([], 10) == 0
    end

    test "basic positions" do
      a = [1, 2, 2, 4]
      assert BinSearch.upper_bound(a, 0) == 0
      assert BinSearch.upper_bound(a, 1) == 1
      assert BinSearch.upper_bound(a, 2) == 3
      assert BinSearch.upper_bound(a, 3) == 3
      assert BinSearch.upper_bound(a, 4) == 4
      assert BinSearch.upper_bound(a, 5) == 4
    end

    test "agrees with brute force" do
      cases = [
        {[], 1},
        {[1], 0},
        {[1], 1},
        {[1], 2},
        {[1, 1, 1], 1},
        {[1, 2, 2, 4], 2},
        {[1, 2, 2, 4], 3}
      ]

      Enum.each(cases, fn {a, x} ->
        assert BinSearch.upper_bound(a, x) == brute_upper_bound(a, x)
      end)
    end
  end

  describe "first_true/3" do
    test "returns threshold when predicate flips false->true" do
      assert BinSearch.first_true(0, 10, fn i -> i >= 7 end) == 7
      assert BinSearch.first_true(-5, 6, fn i -> i >= 0 end) == 0
    end

    test "all false returns hi" do
      assert BinSearch.first_true(0, 5, fn _ -> false end) == 5
    end

    test "all true returns lo" do
      assert BinSearch.first_true(0, 5, fn _ -> true end) == 0
    end

    test "lo >= hi returns lo (empty range)" do
      assert BinSearch.first_true(3, 3, fn _ -> true end) == 3
      assert BinSearch.first_true(5, 2, fn _ -> true end) == 5
    end
  end

  describe "last_true/3" do
    test "returns last index satisfying monotone predicate" do
      assert BinSearch.last_true(0, 10, fn i -> i < 7 end) == 6
      assert BinSearch.last_true(0, 10, fn i -> i <= 0 end) == 0
    end

    test "all false returns lo-1" do
      assert BinSearch.last_true(0, 10, fn _ -> false end) == -1
      assert BinSearch.last_true(5, 10, fn _ -> false end) == 4
    end

    test "all true returns hi-1" do
      assert BinSearch.last_true(0, 10, fn _ -> true end) == 9
      assert BinSearch.last_true(-3, 2, fn _ -> true end) == 1
    end
  end

  defp brute_lower_bound(a, x) do
    a
    |> Enum.with_index()
    |> Enum.find_value(length(a), fn {v, i} -> if v >= x, do: i, else: nil end)
  end

  defp brute_upper_bound(a, x) do
    a
    |> Enum.with_index()
    |> Enum.find_value(length(a), fn {v, i} -> if v > x, do: i, else: nil end)
  end
end
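
first_true/3 is the generic "binary search on the answer" building block. A minimal sketch (illustrative, not part of the module) that computes integer square roots with it; `isqrt` is a hypothetical helper:

# First i with i*i > n, minus one, is the integer square root.
isqrt = fn n when n >= 0 ->
  BinSearch.first_true(0, n + 1, fn i -> i * i > n end) - 1
end

Enum.map([0, 1, 2, 4, 10, 144], isqrt)
# => [0, 1, 1, 2, 3, 12]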

Rate limiter

link: code

To run:

$ elixir raw-dogged-rate.exs
defmodule TokenBucket do
  @moduledoc """
  In-memory token bucket rate limiter.

  The bucket holds up to `cap` tokens. Each `allow/1` attempt:

  * refills tokens based on elapsed time since `last_ms` using `refill_per_sec`
  * caps tokens at `cap`
  * consumes **1.0** token if available and returns `{true, updated_bucket}`
  * otherwise returns `{false, updated_bucket}` (with refilled/capped tokens)

  Uses `System.monotonic_time(:millisecond)` to avoid issues with wall-clock jumps.

  ## Notes

  * Tokens are stored as floats (`0.0`, `1.0`, etc.). This allows fractional refill.
  * Initial tokens are full: `tokens == cap`.
  """

  alias __MODULE__, as: TokenBucket

  @typedoc "Token bucket state."
  @type t :: %TokenBucket{
          cap: float(),
          tokens: float(),
          refill_per_sec: float(),
          last_ms: integer()
        }

  @typedoc "Capacity (tokens) or refill rate (tokens/sec)."
  @type rate :: number()

  defstruct cap: 0.0,
            tokens: 0.0,
            refill_per_sec: 0.0,
            last_ms: nil

  @doc """
  Creates a new bucket with a maximum capacity and refill rate (tokens per second).

  The bucket starts full.

  ## Examples

      iex> b = TokenBucket.new(2, 0)
      iex> {a1, b} = TokenBucket.allow(b)
      iex> {a2, b} = TokenBucket.allow(b)
      iex> {a3, _b} = TokenBucket.allow(b)
      iex> {a1, a2, a3}
      {true, true, false}
  """
  @spec new(rate(), rate()) :: t()
  def new(capacity, refill_per_sec) do
    now = now_ms()

    %TokenBucket{
      cap: capacity * 1.0,
      tokens: capacity * 1.0,
      refill_per_sec: refill_per_sec * 1.0,
      last_ms: now
    }
  end

  @doc """
  Attempts to consume one token.

  Returns `{allowed?, updated_bucket}`.

  * If there is at least 1 token after refilling, it consumes 1 token and returns `true`.
  * Otherwise returns `false` without consuming a token.

  This function always updates `last_ms` to the current monotonic time.
  """
  @spec allow(t()) :: {boolean(), t()}
  def allow(bucket) do
    now = now_ms()
    elapsed = max(now - bucket.last_ms, 0) / 1000.0

    tokens =
      (bucket.tokens + elapsed * bucket.refill_per_sec)
      |> min(bucket.cap)

    if tokens >= 1.0 do
      {true, %{bucket | tokens: tokens - 1.0, last_ms: now}}
    else
      {false, %{bucket | tokens: tokens, last_ms: now}}
    end
  end

  @spec now_ms() :: integer()
  defp now_ms, do: System.monotonic_time(:millisecond)
end

ExUnit.start()

defmodule TokenBucketTest do
  use ExUnit.Case, async: true

  describe "new/2" do
    test "starts full and stores rates as floats" do
      b = TokenBucket.new(5, 2)

      assert b.cap == 5.0
      assert b.tokens == 5.0
      assert b.refill_per_sec == 2.0
      assert is_integer(b.last_ms)
    end
  end

  describe "allow/1" do
    test "consumes one token when available (no refill)" do
      b0 = TokenBucket.new(3, 0)

      {ok, b1} = TokenBucket.allow(b0)
      assert ok == true
      assert b1.tokens == 2.0

      {ok, b2} = TokenBucket.allow(b1)
      assert ok == true
      assert b2.tokens == 1.0

      {ok, b3} = TokenBucket.allow(b2)
      assert ok == true
      assert b3.tokens == 0.0

      {ok, b4} = TokenBucket.allow(b3)
      assert ok == false
      assert b4.tokens == 0.0
    end

    test "denies when < 1 token (no refill)" do
      now = System.monotonic_time(:millisecond)

      b =
        %TokenBucket{
          cap: 10.0,
          tokens: 0.5,
          refill_per_sec: 0.0,
          last_ms: now
        }

      {ok, b2} = TokenBucket.allow(b)
      assert ok == false
      assert b2.tokens == 0.5
    end

    test "refills based on elapsed time and then consumes" do
      now = System.monotonic_time(:millisecond)

      b =
        %TokenBucket{
          cap: 5.0,
          tokens: 0.0,
          refill_per_sec: 2.0,
          last_ms: now - 1_500
        }

      {ok, b2} = TokenBucket.allow(b)
      assert ok == true

      assert_in_delta b2.tokens, 2.0, 0.05
      assert b2.last_ms >= b.last_ms
    end

    test "caps tokens at capacity before consuming" do
      now = System.monotonic_time(:millisecond)

      b =
        %TokenBucket{
          cap: 5.0,
          tokens: 4.9,
          refill_per_sec: 10.0,
          last_ms: now - 1_000
        }

      {ok, b2} = TokenBucket.allow(b)
      assert ok == true

      assert_in_delta b2.tokens, 4.0, 0.05
    end

    test "handles time going backwards by treating elapsed as 0" do
      now = System.monotonic_time(:millisecond)

      b =
        %TokenBucket{
          cap: 2.0,
          tokens: 1.0,
          refill_per_sec: 100.0,
          last_ms: now + 10_000
        }

      {ok, b2} = TokenBucket.allow(b)
      assert ok == true
      assert_in_delta b2.tokens, 0.0, 0.001
    end
  end
end
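
A minimal usage sketch (illustrative only): push a burst of five requests through a bucket with capacity 2 and no refill, threading the updated bucket through a reduce.

# Expect 2 allowed, 3 denied (capacity 2, refill rate 0).
{allowed, denied, _bucket} =
  Enum.reduce(1..5, {0, 0, TokenBucket.new(2, 0)}, fn _i, {ok, no, bucket} ->
    case TokenBucket.allow(bucket) do
      {true, b} -> {ok + 1, no, b}
      {false, b} -> {ok, no + 1, b}
    end
  end)

{allowed, denied}
# => {2, 3}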