rusty
Jan 4, 2026
I'm a bit rusty, and in a few days I have a technical interview. Time to drag those skeletons into the sun for a proper sunbath.
We are going to code (raw-dogging mode) a few elixir modules:
- LRU Cache
- Top-K frequent (map + heap/PQ)
- Subarray sum equals K (prefix sums)
- Merge intervals
- BFS shortest path (grid)
- Binary search variants
- Rate limiter (token bucket, in-memory)
LRU Cache
link: code
To run:
@moduledoc """
Immutable (pure) LRU cache.
This cache is **persistent/immutable**: every `get/2` returns `{value, new_lru}` and every
`put/3` returns a **new** `LRU.t()`.
Internally it maintains:
- `k2id`: key -> node id
- `nodes`: node id -> node data (`%{k, v, prev, next}`)
- `head`/`tail`: ids of a doubly-linked list in **MRU -> LRU** order
Average complexity:
- `get/2`: O(1)
- `put/3`: O(1)
## Examples
iex> lru = LRU.new(2) |> LRU.put(:a, 1) |> LRU.put(:b, 2)
iex> {1, lru} = LRU.get(lru, :a) # :a becomes MRU
iex> lru = LRU.put(lru, :c, 3) # evicts LRU (:b)
iex> {nil, _} = LRU.get(lru, :b)
iex> {1, _} = LRU.get(lru, :a)
"""
alias __MODULE__, as: LRU
@typedoc "Cache key."
@type key :: term()
@typedoc "Cache value."
@type value :: term()
@typedoc "Internal node id."
@type id :: pos_integer()
@typedoc "Internal doubly-linked-list node."
@type n :: %
@typedoc """
LRU cache struct.
Invariants (expected to hold after any public operation):
- `map_size(k2id) == map_size(nodes)`
- If size == 0: `head == nil` and `tail == nil`
- If size > 0: `head` and `tail` are valid node ids, and list pointers are consistent
"""
@type t :: %LRU
defstruct cap: 0,
k2id: %,
nodes: %,
head: nil,
tail: nil,
next_id: 1
@doc """
Creates a new cache with the given `capacity`.
Capacity may be `0` (a cache that never stores anything).
## Examples
iex> lru = LRU.new(0)
iex> lru.cap
0
iex> lru = LRU.new(2) |> LRU.put(:a, 1)
iex> {1, _} = LRU.get(lru, :a)
"""
@spec new(non_neg_integer()) :: t()
%LRU
@doc """
Reads `key` from the cache.
Returns `{value_or_nil, new_lru}`.
If the key exists, it is promoted to **most recently used**.
## Examples
iex> lru = LRU.new(2) |> LRU.put(:a, 1) |> LRU.put(:b, 2)
iex> {1, lru} = LRU.get(lru, :a) # :a becomes MRU
iex> lru = LRU.put(lru, :c, 3) # evicts :b (LRU)
iex> {nil, _} = LRU.get(lru, :b)
"""
@spec get(t(), key()) ::
case Map.get(lru.k2id, key) do
nil ->
id ->
v = lru.nodes[id].v
lru2 = lru |> detach(id) |> attach_head(id)
end
end
@doc """
Inserts or updates `key` with `val`.
- New keys are inserted as **MRU**.
- Existing keys are updated and promoted to **MRU**.
- If inserting exceeds capacity, the **LRU** entry is evicted.
If `cap == 0`, this is a no-op.
## Examples
iex> lru = LRU.new(1) |> LRU.put(:a, 1) |> LRU.put(:b, 2)
iex> {nil, _} = LRU.get(lru, :a)
iex> {2, _} = LRU.get(lru, :b)
iex> lru = LRU.new(2) |> LRU.put(:a, 1) |> LRU.put(:a, 99)
iex> {99, _} = LRU.get(lru, :a)
"""
@spec put(t(), key(), value()) :: t()
lru
case Map.get(lru.k2id, key) do
nil ->
= alloc_node(lru, key, val)
lru1 |> attach_head(id) |> evict_if_needed()
id ->
lru
|> put_in([Access.key!(:nodes), id, :v], val)
|> detach(id)
|> attach_head(id)
end
end
@spec alloc_node(t(), key(), value()) ::
id = lru.next_id
node = %
lru2 =
lru
|> Map.update!(:k2id, &Map.put(&1, key, id))
|> Map.update!(:nodes, &Map.put(&1, id, node))
|> Map.put(:next_id, id + 1)
end
@spec detach(t(), id()) :: t()
node = lru.nodes[id]
prev = node.prev
nxt = node.next
lru =
lru
|> put_in([Access.key!(:nodes), id, :prev], nil)
|> put_in([Access.key!(:nodes), id, :next], nil)
lru =
if prev do
put_in(lru, [Access.key!(:nodes), prev, :next], nxt)
else
lru
end
lru =
if nxt do
put_in(lru, [Access.key!(:nodes), nxt, :prev], prev)
else
lru
end
lru = if lru.head == id, do: %, else: lru
lru = if lru.tail == id, do: %, else: lru
lru
end
@spec attach_head(t(), id()) :: t()
lru
|> put_in([Access.key!(:nodes), id, :prev], nil)
|> put_in([Access.key!(:nodes), id, :next], nil)
|> Map.put(:head, id)
|> Map.put(:tail, id)
end
old = lru.head
lru
|> put_in([Access.key!(:nodes), id, :prev], nil)
|> put_in([Access.key!(:nodes), id, :next], old)
|> put_in([Access.key!(:nodes), old, :prev], id)
|> Map.put(:head, id)
end
@spec evict_if_needed(t()) :: t()
if map_size(lru.k2id) <= lru.cap do
lru
else
tid = lru.tail
key = lru.nodes[tid].k
lru
|> detach(tid)
|> Map.update!(:k2id, &Map.delete(&1, key))
|> Map.update!(:nodes, &Map.delete(&1, tid))
end
end
end
ExUnit.start()
use ExUnit.Case, async: true
lru.k2id |> Map.keys() |> Enum.sort()
size = map_size(lru.k2id)
assert map_size(lru.nodes) == size
Enum.each(lru.k2id, fn ->
node = Map.fetch!(lru.nodes, id)
assert node.k == k
end)
cond do
size == 0 ->
assert lru.head == nil
assert lru.tail == nil
true ->
assert is_integer(lru.head)
assert is_integer(lru.tail)
assert Map.has_key?(lru.nodes, lru.head)
assert Map.has_key?(lru.nodes, lru.tail)
head_node = Map.fetch!(lru.nodes, lru.head)
tail_node = Map.fetch!(lru.nodes, lru.tail)
assert head_node.prev == nil
assert tail_node.next == nil
=
Stream.unfold(, fn
->
nil
->
node = Map.fetch!(lru.nodes, id)
assert node.prev == prev
end)
|> Enum.reduce(, fn , -> end)
ids = Enum.reverse(ids)
assert Enum.uniq(ids) == ids
assert length(ids) == size
assert last == lru.tail
end
lru
end
test do
lru = LRU.new(3)
assert lru.cap == 3
assert_integrity(lru)
end
test do
lru = LRU.new(0)
lru2 = LRU.put(lru, :a, 1)
assert lru2 == lru
assert = LRU.get(lru, :a)
assert_integrity(lru)
end
test do
lru = LRU.new(2) |> LRU.put(:a, 1)
assert = LRU.get(lru, :missing)
assert_integrity(lru)
end
test do
lru =
LRU.new(2)
|> LRU.put(:a, 1)
|> LRU.put(:b, 2)
assert_integrity(lru)
assert = LRU.get(lru, :a)
assert_integrity(lru2)
lru3 = LRU.put(lru2, :c, 3)
assert_integrity(lru3)
assert = LRU.get(lru3, :b)
assert = LRU.get(lru3, :a)
assert = LRU.get(lru3, :c)
end
test do
lru =
LRU.new(2)
|> LRU.put(:a, 1)
|> LRU.put(:b, 2)
assert keys(lru) == [:a, :b]
assert_integrity(lru)
lru2 = LRU.put(lru, :a, 99)
assert keys(lru2) == [:a, :b]
assert_integrity(lru2)
assert = LRU.get(lru2, :a)
end
test do
lru =
LRU.new(1)
|> LRU.put(:a, 1)
|> LRU.put(:b, 2)
assert_integrity(lru)
assert = LRU.get(lru, :a)
assert = LRU.get(lru, :b)
end
test do
lru0 = LRU.new(3)
lru =
Enum.reduce(1..50, lru0, fn i, lru ->
lru =
lru
|> LRU.put(rem(i, 5), i) # keys: 0..4
|> (fn lru ->
= LRU.get(lru, rem(i + 2, 5))
lru
end).()
assert_integrity(lru)
assert map_size(lru.k2id) <= lru.cap
lru
end)
assert_integrity(lru)
end
end
Top-K frequent
link: code
To run:
@moduledoc """
Utilities for selecting the `k` most frequent **distinct** values from a list.
## Approach
* Build a frequency map in `O(n)`.
* Keep a min-priority set (size ≤ `k`) using `:gb_sets` keyed by `{count, seq, value}`.
When the set grows beyond `k`, the smallest `{count, seq, value}` is removed.
## Notes on ties
If multiple values share the same frequency, the function still returns a valid top-`k`,
but the *relative order among tied items is unspecified*.
"""
@typedoc "A value from the input list."
@type value :: term()
@doc """
Returns up to `k` values that occur most frequently in `nums`.
The result is ordered from **most frequent to least frequent**.
Ties have unspecified order.
## Examples
iex> TopK.top_k_frequent([1, 1, 1, 2, 2, 3], 2)
[1, 2]
iex> TopK.top_k_frequent([:a, :b, :a], 10) |> Enum.sort()
[:a, :b]
iex> TopK.top_k_frequent([], 3)
[]
iex> TopK.top_k_frequent([1, 2, 3], 0)
[]
"""
@spec top_k_frequent([value()], integer()) :: [value()]
[]
freq =
Enum.reduce(nums, %, fn x, acc ->
Map.update(acc, x, 1, &(&1 + 1))
end)
=
Enum.reduce(freq, , fn , ->
seq = seq + 1
set = :gb_sets.add(, set)
set =
if :gb_sets.size(set) > k do
= :gb_sets.take_smallest(set)
set2
else
set
end
end)
extract_desc(set, [])
end
@spec extract_desc(term(), [value()]) :: [value()]
if :gb_sets.is_empty(set) do
Enum.reverse(acc)
else
= :gb_sets.take_largest(set)
extract_desc(set2, [v | acc])
end
end
end
ExUnit.start()
use ExUnit.Case, async: true
describe do
test do
assert TopK.top_k_frequent([1, 2, 3], 0) == []
assert TopK.top_k_frequent([1, 2, 3], -10) == []
end
test do
assert TopK.top_k_frequent([], 1) == []
assert TopK.top_k_frequent([], 10) == []
end
test do
nums = [1, 1, 1, 2, 2, 3]
assert TopK.top_k_frequent(nums, 2) == [1, 2]
end
test do
nums = [:a, :b, :a]
res = TopK.top_k_frequent(nums, 10)
assert length(res) == 2
assert MapSet.new(res) == MapSet.new([:a, :b])
end
test do
nums = [, , , , , ]
assert TopK.top_k_frequent(nums, 2) == [, ]
end
test do
nums = [1, 1, 2, 2, 3, 3]
res = TopK.top_k_frequent(nums, 2)
assert length(res) == 2
assert Enum.uniq(res) == res
assert Enum.all?(res, &(&1 in [1, 2, 3]))
freq = Enum.frequencies(nums)
kth = freq |> Map.values() |> Enum.sort(:desc) |> Enum.at(1)
assert Enum.all?(res, fn v -> Map.fetch!(freq, v) >= kth end)
end
end
end
Subarray sum equals K
link: code
To run:
@moduledoc """
Counts how many contiguous subarrays sum to a target `k`.
## Idea
Use prefix sums and a frequency map:
* Let `prefix[i] = nums[0] + ... + nums[i]`.
* A subarray `(j+1..i)` sums to `k` iff `prefix[i] - prefix[j] = k`,
i.e. `prefix[j] = prefix[i] - k`.
* Keep a map `seen` from prefix sum to how many times we've seen it so far.
Time: `O(n)`
Space: `O(n)` (in the worst case, all prefix sums are distinct)
Works with negative numbers and zeros.
"""
@typedoc "An integer input list element."
@type num :: integer()
@doc """
Returns the number of contiguous subarrays of `nums` whose sum equals `k`.
## Examples
iex> SubarraySum.count([1, 1, 1], 2)
2
iex> SubarraySum.count([1, 2, 3], 3)
2
iex> SubarraySum.count([1, -1, 0], 0)
3
iex> SubarraySum.count([], 0)
0
"""
@spec count([num()], integer()) :: non_neg_integer()
=
Enum.reduce(nums, , fn x, ->
prefix = prefix + x
ans = ans + Map.get(seen, prefix - k, 0)
seen = Map.update(seen, prefix, 1, &(&1 + 1))
end)
ans
end
end
ExUnit.start()
use ExUnit.Case, async: true
describe do
test do
assert SubarraySum.count([1, 1, 1], 2) == 2
end
test do
assert SubarraySum.count([1, 2, 3], 3) == 2
end
test do
assert SubarraySum.count([1, -1, 0], 0) == 3
end
test do
assert SubarraySum.count([0, 0, 0, 0], 0) == 10
end
test do
assert SubarraySum.count([5], 5) == 1
assert SubarraySum.count([5], 0) == 0
end
test do
assert SubarraySum.count([], 7) == 0
assert SubarraySum.count([], 0) == 0
end
test do
cases = [
,
,
,
,
]
Enum.each(cases, fn ->
assert SubarraySum.count(nums, k) == brute_count(nums, k)
end)
end
end
# Reference O(n^3) implementation used to cross-check `SubarraySum.count/2`:
# enumerate every (i, j) pair and sum the slice directly.
defp brute_count([], _k), do: 0

defp brute_count(nums, k) do
  n = length(nums)

  for i <- 0..(n - 1),
      j <- i..(n - 1),
      reduce: 0 do
    acc ->
      sum =
        nums
        |> Enum.slice(i..j)
        |> Enum.sum()

      if sum == k, do: acc + 1, else: acc
  end
end
end
Merge intervals
link: code
To run:
@moduledoc """
Merges overlapping (or touching) intervals.
Intervals are represented as `{start, end}` tuples (typically integers).
The merge rule used here is:
* Two intervals overlap/touch if `next_start <= prev_end`.
* When they do, they merge into `{prev_start, max(prev_end, next_end)}`.
The result is sorted by start (and end) in ascending order.
## Complexity
* Time: `O(n log n)` due to sorting
* Space: `O(n)` for the output
## Assumptions / input validation
This module assumes each interval is a 2-tuple and that `start <= end`.
If you may receive reversed intervals like `{5, 2}`, normalize them before calling `merge/1`.
"""
@typedoc "An interval represented as `{start, end}`."
@type interval(t) ::
@type interval :: interval(integer())
@doc """
Merges a list of intervals.
Returns a list of merged intervals, sorted ascending.
Touching intervals are merged:
iex> Intervals.merge([{1, 2}, {2, 3}])
[{1, 3}]
## Examples
iex> Intervals.merge([])
[]
iex> Intervals.merge([{1, 3}, {2, 6}, {8, 10}, {15, 18}])
[{1, 6}, {8, 10}, {15, 18}]
iex> Intervals.merge([{5, 7}, {1, 2}, {3, 4}])
[{1, 2}, {3, 4}, {5, 7}]
"""
@spec merge([interval(integer())]) :: [interval(integer())]
[]
intervals
|> Enum.sort_by(fn -> end)
|> Enum.reduce([], fn , acc ->
case acc do
[] ->
[]
[ | rest] ->
if s <= pe do
[ | rest]
else
[ | acc]
end
end
end)
|> Enum.reverse()
end
end
ExUnit.start()
use ExUnit.Case, async: true
describe do
test do
assert Intervals.merge([]) == []
end
test do
assert Intervals.merge([]) == []
end
test do
assert Intervals.merge([, , , ]) ==
[, , ]
end
test do
assert Intervals.merge([, , ]) == [, ]
end
test do
assert Intervals.merge([, , ]) == [, , ]
end
test do
assert Intervals.merge([, ]) == []
assert Intervals.merge([, ]) == []
end
test do
assert Intervals.merge([, , ]) == []
end
test do
assert Intervals.merge([, , ]) == []
end
test do
cases = [
[, ],
[, ],
[, , ],
[, , ],
[, , ]
]
Enum.each(cases, fn intervals ->
assert Intervals.merge(intervals) == brute_merge(intervals)
end)
end
end
# Independent re-implementation of the merge rule, used to cross-check
# `Intervals.merge/1` (same algorithm, written out separately on purpose).
defp brute_merge([]), do: []

defp brute_merge(intervals) do
  intervals
  |> Enum.sort_by(fn {s, e} -> {s, e} end)
  |> Enum.reduce([], fn {s, e}, acc ->
    case acc do
      [] ->
        [{s, e}]

      [{ps, pe} | rest] ->
        if s <= pe do
          [{ps, max(pe, e)} | rest]
        else
          [{s, e} | acc]
        end
    end
  end)
  |> Enum.reverse()
end
end
BFS shortest path
link: code
To run:
@moduledoc """
Shortest path on a 2D grid using BFS (4-directional movement).
The grid is a list of rows, where each cell is:
* `0` = open
* `1` = blocked (wall)
Coordinates are `{row, col}` with zero-based indices.
Returns the length (number of steps) of the shortest path from `start` to `goal`,
or `-1` if no path exists or the input is invalid.
## Notes / assumptions
* Requires a rectangular grid (all rows same length).
* Movement is 4-neighborhood: up, down, left, right (no diagonals).
* If `start == goal`, returns `0` (as long as the cell is in-bounds and not blocked).
## Complexity
BFS visits each reachable cell at most once.
* Time: `O(rows * cols)`
* Space: `O(rows * cols)` for the queue + visited set
"""
@typedoc "Grid where 0=open and 1=blocked."
@type grid :: [[0 | 1]]
@typedoc "Coordinate in `{row, col}` form (0-based)."
@type coord ::
@doc """
Computes the shortest path length from `{sr, sc}` to `{gr, gc}`.
Returns `-1` when:
* grid is empty
* start/goal is out of bounds
* start/goal is blocked (`1`)
* no path exists
## Examples
iex> grid = [
...> [0, 0, 0],
...> [1, 1, 0],
...> [0, 0, 0]
...> ]
iex> BFSGrid.shortest_path(grid, {0, 0}, {2, 2})
4
iex> BFSGrid.shortest_path([], {0, 0}, {0, 0})
-1
iex> BFSGrid.shortest_path([[1]], {0, 0}, {0, 0})
-1
iex> BFSGrid.shortest_path([[0]], {0, 0}, {0, 0})
0
"""
@spec shortest_path(grid(), coord(), coord()) :: integer()
r = length(grid)
if r == 0, do: -1, else: do_bfs(grid, , )
end
@spec do_bfs(grid(), coord(), coord()) :: integer()
rows = length(grid)
cols = length(hd(grid))
in_bounds = fn -> r >= 0 and r < rows and c >= 0 and c < cols end
cell = fn -> grid |> Enum.at(r) |> Enum.at(c) end
cond do
not in_bounds.(start) or not in_bounds.(goal) ->
-1
cell.(start) == 1 or cell.(goal) == 1 ->
-1
start == goal ->
0
true ->
q = :queue.in(, :queue.new())
vis = MapSet.new([start])
bfs_loop(q, vis, in_bounds, cell, goal)
end
end
@spec bfs_loop(:queue.queue(), MapSet.t(), (coord() -> boolean()), (coord() -> 0 | 1), coord()) ::
integer()
case :queue.out(q) do
->
dirs = [, , , ]
=
Enum.reduce(dirs, , fn , ->
if found do
else
nxt =
cond do
not in_bounds.(nxt) ->
MapSet.member?(vv, nxt) ->
cell.(nxt) == 1 ->
nxt == goal ->
true ->
end
end
end)
if found, do: found, else: bfs_loop(q3, vis3, in_bounds, cell, goal)
->
-1
end
end
end
ExUnit.start()
use ExUnit.Case, async: true
describe do
test do
assert BFSGrid.shortest_path([], , ) == -1
end
test do
assert BFSGrid.shortest_path([[0]], , ) == 0
end
test do
assert BFSGrid.shortest_path([[1]], , ) == -1
end
test do
grid = [
[0, 0],
[0, 0]
]
assert BFSGrid.shortest_path(grid, , ) == -1
assert BFSGrid.shortest_path(grid, , ) == -1
end
test do
grid = [
[1, 0],
[0, 0]
]
assert BFSGrid.shortest_path(grid, , ) == -1
grid2 = [
[0, 0],
[0, 1]
]
assert BFSGrid.shortest_path(grid2, , ) == -1
end
test do
grid = [
[0, 0, 0],
[0, 0, 0],
[0, 0, 0]
]
assert BFSGrid.shortest_path(grid, , ) == 4
assert BFSGrid.shortest_path(grid, , ) == 2
end
test do
grid = [
[0, 0, 0],
[1, 1, 0],
[0, 0, 0]
]
assert BFSGrid.shortest_path(grid, , ) == 4
end
test do
grid = [
[0, 1, 0],
[1, 1, 1],
[0, 1, 0]
]
assert BFSGrid.shortest_path(grid, , ) == -1
end
test do
grid = [
[0, 0, 1, 0],
[0, 0, 1, 0]
]
assert BFSGrid.shortest_path(grid, , ) == -1
end
test do
cases = [
,
,
,
]
Enum.each(cases, fn ->
assert BFSGrid.shortest_path(grid, s, g) == brute_bfs(grid, s, g)
end)
end
end
rows = length(grid)
if rows == 0, do: -1, else: :ok
cols = length(hd(grid))
in_bounds? = fn -> r in 0..(rows - 1) and c in 0..(cols - 1) end
cell = fn -> grid |> Enum.at(r) |> Enum.at(c) end
cond do
rows == 0 ->
-1
not in_bounds?.(start) or not in_bounds?.(goal) ->
-1
cell.(start) == 1 or cell.(goal) == 1 ->
-1
start == goal ->
0
true ->
q = :queue.in(, :queue.new())
vis = MapSet.new([start])
dirs = [, , , ]
brute_loop(q, vis, dirs, in_bounds?, cell, goal)
end
end
case :queue.out(q) do
->
if pos == goal do
d
else
=
Enum.reduce(dirs, , fn , ->
= pos
nxt =
cond do
not in_bounds?.(nxt) ->
MapSet.member?(vv, nxt) ->
cell.(nxt) == 1 ->
true ->
end
end)
brute_loop(q3, vis3, dirs, in_bounds?, cell, goal)
end
->
-1
end
end
end
Binary search variants
link: code
To run:
@moduledoc """
Binary search utilities over sorted lists and monotone predicates.
Provided functions:
* `lower_bound/2`: first index `i` such that `a[i] >= x`
* `upper_bound/2`: first index `i` such that `a[i] > x`
* `first_true/3`: first integer `i` in `[lo, hi)` where a monotone predicate becomes `true`
* `last_true/3`: last integer `i` in `[lo, hi)` where a monotone predicate is `true`
## Conventions
* Indices are 0-based.
* Ranges are half-open: `[lo, hi)`.
* For bounds functions, if no element satisfies the condition, the returned index is `length(a)`.
## Requirements
* `lower_bound/2` and `upper_bound/2` require `a` to be sorted ascending according to Erlang/Elixir term order.
* `first_true/3` and `last_true/3` require `pred` to be **monotone** on `[lo, hi)`:
there exists some threshold `t` such that `pred(i)` is false for `i < t` and true for `i >= t`.
"""
@typedoc "0-based index into a list."
@type index :: non_neg_integer()
@typedoc "A half-open integer range `[lo, hi)`."
@type lo :: integer()
@type hi :: integer()
@doc """
Returns the first index `i` in `0..length(a)` where `Enum.at(a, i) >= x`.
If all elements are `< x`, returns `length(a)`.
## Examples
iex> BinSearch.lower_bound([1, 2, 2, 4], 2)
1
iex> BinSearch.lower_bound([1, 2, 2, 4], 3)
3
iex> BinSearch.lower_bound([], 10)
0
"""
@spec lower_bound([term()], term()) :: index()
lb(a, x, 0, length(a))
@spec lb([term()], term(), index(), index()) :: index()
lo
mid = lo + div(hi - lo, 2)
if Enum.at(a, mid) >= x, do: lb(a, x, lo, mid), else: lb(a, x, mid + 1, hi)
end
@doc """
Returns the first index `i` in `0..length(a)` where `Enum.at(a, i) > x`.
If all elements are `<= x`, returns `length(a)`.
## Examples
iex> BinSearch.upper_bound([1, 2, 2, 4], 2)
3
iex> BinSearch.upper_bound([1, 2, 2, 4], 4)
4
iex> BinSearch.upper_bound([], 10)
0
"""
@spec upper_bound([term()], term()) :: index()
ub(a, x, 0, length(a))
@spec ub([term()], term(), index(), index()) :: index()
lo
mid = lo + div(hi - lo, 2)
if Enum.at(a, mid) > x, do: ub(a, x, lo, mid), else: ub(a, x, mid + 1, hi)
end
@doc """
Given integers `lo` and `hi`, returns the smallest integer `i` in `[lo, hi)`
such that `pred.(i)` is `true`.
If `pred` is `false` for all `i` in `[lo, hi)`, returns `hi`.
## Requirements
`pred` must be monotone on `[lo, hi)` (false...false, then true...true).
## Examples
iex> BinSearch.first_true(0, 10, fn i -> i >= 7 end)
7
iex> BinSearch.first_true(0, 5, fn _ -> false end)
5
"""
@spec first_true(lo(), hi(), (integer() -> as_boolean(term()))) :: integer()
if lo >= hi do
lo
else
mid = lo + div(hi - lo, 2)
if pred.(mid), do: first_true(lo, mid, pred), else: first_true(mid + 1, hi, pred)
end
end
@doc """
Returns the greatest integer `i` in `[lo, hi)` such that `pred.(i)` is `true`.
If `pred` is `false` for all `i` in `[lo, hi)`, returns `lo - 1`.
This is implemented via `first_true/3` by searching for the first index where `pred` becomes false.
## Requirements
`pred` must be monotone on `[lo, hi)`.
## Examples
iex> BinSearch.last_true(0, 10, fn i -> i < 7 end)
6
iex> BinSearch.last_true(0, 10, fn _ -> false end)
-1
"""
@spec last_true(lo(), hi(), (integer() -> as_boolean(term()))) :: integer()
i = first_true(lo, hi, fn j -> not pred.(j) end)
i - 1
end
end
ExUnit.start()
use ExUnit.Case, async: true
describe do
test do
assert BinSearch.lower_bound([], 10) == 0
end
test do
a = [1, 2, 2, 4]
assert BinSearch.lower_bound(a, 0) == 0
assert BinSearch.lower_bound(a, 1) == 0
assert BinSearch.lower_bound(a, 2) == 1
assert BinSearch.lower_bound(a, 3) == 3
assert BinSearch.lower_bound(a, 4) == 3
assert BinSearch.lower_bound(a, 5) == 4
end
test do
a = [:a, :b, :b, :c]
assert BinSearch.lower_bound(a, :b) == 1
assert BinSearch.lower_bound(a, :bb) == 3
end
test do
cases = [
,
,
,
,
,
,
]
Enum.each(cases, fn ->
assert BinSearch.lower_bound(a, x) == brute_lower_bound(a, x)
end)
end
end
describe do
test do
assert BinSearch.upper_bound([], 10) == 0
end
test do
a = [1, 2, 2, 4]
assert BinSearch.upper_bound(a, 0) == 0
assert BinSearch.upper_bound(a, 1) == 1
assert BinSearch.upper_bound(a, 2) == 3
assert BinSearch.upper_bound(a, 3) == 3
assert BinSearch.upper_bound(a, 4) == 4
assert BinSearch.upper_bound(a, 5) == 4
end
test do
cases = [
,
,
,
,
,
,
]
Enum.each(cases, fn ->
assert BinSearch.upper_bound(a, x) == brute_upper_bound(a, x)
end)
end
end
describe do
test do
assert BinSearch.first_true(0, 10, fn i -> i >= 7 end) == 7
assert BinSearch.first_true(-5, 6, fn i -> i >= 0 end) == 0
end
test do
assert BinSearch.first_true(0, 5, fn _ -> false end) == 5
end
test do
assert BinSearch.first_true(0, 5, fn _ -> true end) == 0
end
test do
assert BinSearch.first_true(3, 3, fn _ -> true end) == 3
assert BinSearch.first_true(5, 2, fn _ -> true end) == 5
end
end
describe do
test do
assert BinSearch.last_true(0, 10, fn i -> i < 7 end) == 6
assert BinSearch.last_true(0, 10, fn i -> i <= 0 end) == 0
end
test do
assert BinSearch.last_true(0, 10, fn _ -> false end) == -1
assert BinSearch.last_true(5, 10, fn _ -> false end) == 4
end
test do
assert BinSearch.last_true(0, 10, fn _ -> true end) == 9
assert BinSearch.last_true(-3, 2, fn _ -> true end) == 1
end
end
# Linear-scan reference: first index whose element is >= x, else length(a).
# Used to cross-check `BinSearch.lower_bound/2`.
defp brute_lower_bound(a, x) do
  a
  |> Enum.with_index()
  |> Enum.find_value(length(a), fn {v, i} -> if v >= x, do: i, else: nil end)
end

# Linear-scan reference: first index whose element is > x, else length(a).
# Used to cross-check `BinSearch.upper_bound/2`.
defp brute_upper_bound(a, x) do
  a
  |> Enum.with_index()
  |> Enum.find_value(length(a), fn {v, i} -> if v > x, do: i, else: nil end)
end
end
Rate limiter
link: code
To run:
@moduledoc """
In-memory token bucket rate limiter.
The bucket holds up to `cap` tokens. Each `allow/1` attempt:
* refills tokens based on elapsed time since `last_ms` using `refill_per_sec`
* caps tokens at `cap`
* consumes **1.0** token if available and returns `{true, updated_bucket}`
* otherwise returns `{false, updated_bucket}` (with refilled/capped tokens)
Uses `System.monotonic_time(:millisecond)` to avoid issues with wall-clock jumps.
## Notes
* Tokens are stored as floats (`0.0`, `1.0`, etc.). This allows fractional refill.
* Initial tokens are full: `tokens == cap`.
"""
alias __MODULE__, as: TokenBucket
@typedoc "Token bucket state."
@type t :: %TokenBucket
@typedoc "Capacity (tokens) or refill rate (tokens/sec)."
@type rate :: number()
defstruct cap: 0.0,
tokens: 0.0,
refill_per_sec: 0.0,
last_ms: nil
@doc """
Creates a new bucket with a maximum capacity and refill rate (tokens per second).
The bucket starts full.
## Examples
iex> b = TokenBucket.new(2, 0)
iex> {a1, b} = TokenBucket.allow(b)
iex> {a2, b} = TokenBucket.allow(b)
iex> {a3, _b} = TokenBucket.allow(b)
iex> {a1, a2, a3}
{true, true, false}
"""
@spec new(rate(), rate()) :: t()
now = now_ms()
%TokenBucket
end
@doc """
Attempts to consume one token.
Returns `{allowed?, updated_bucket}`.
* If there is at least 1 token after refilling, it consumes 1 token and returns `true`.
* Otherwise returns `false` without consuming a token.
This function always updates `last_ms` to the current monotonic time.
"""
@spec allow(t()) ::
now = now_ms()
elapsed = max(now - bucket.last_ms, 0) / 1000.0
tokens =
(bucket.tokens + elapsed * bucket.refill_per_sec)
|> min(bucket.cap)
if tokens >= 1.0 do
else
end
end
@spec now_ms() :: integer()
System.monotonic_time(:millisecond)
end
ExUnit.start()
use ExUnit.Case, async: true
describe do
test do
b = TokenBucket.new(5, 2)
assert b.cap == 5.0
assert b.tokens == 5.0
assert b.refill_per_sec == 2.0
assert is_integer(b.last_ms)
end
end
describe do
test do
b0 = TokenBucket.new(3, 0)
= TokenBucket.allow(b0)
assert ok == true
assert b1.tokens == 2.0
= TokenBucket.allow(b1)
assert ok == true
assert b2.tokens == 1.0
= TokenBucket.allow(b2)
assert ok == true
assert b3.tokens == 0.0
= TokenBucket.allow(b3)
assert ok == false
assert b4.tokens == 0.0
end
test do
now = System.monotonic_time(:millisecond)
b =
%TokenBucket
= TokenBucket.allow(b)
assert ok == false
assert b2.tokens == 0.5
end
test do
now = System.monotonic_time(:millisecond)
b =
%TokenBucket
= TokenBucket.allow(b)
assert ok == true
assert_in_delta b2.tokens, 2.0, 0.05
assert b2.last_ms >= b.last_ms
end
test do
now = System.monotonic_time(:millisecond)
b =
%TokenBucket
= TokenBucket.allow(b)
assert ok == true
assert_in_delta b2.tokens, 4.0, 0.05
end
test do
now = System.monotonic_time(:millisecond)
b =
%TokenBucket
= TokenBucket.allow(b)
assert ok == true
assert_in_delta b2.tokens, 0.0, 0.001
end
end
end