diff --git a/404.html b/404.html index 948904f..b9c8786 100644 --- a/404.html +++ b/404.html @@ -14,8 +14,8 @@ - - + +

Page Not Found

We could not find what you were looking for.

Please contact the owner of the site that linked you to the original URL and let them know their link is broken.

diff --git a/algorithms/algorithms-correctness/postcondition-ambiguity/index.html b/algorithms/algorithms-correctness/postcondition-ambiguity/index.html index 0bd5ed1..36b21ea 100644 --- a/algorithms/algorithms-correctness/postcondition-ambiguity/index.html +++ b/algorithms/algorithms-correctness/postcondition-ambiguity/index.html @@ -16,8 +16,8 @@ - - + +

Vague postconditions and proving correctness of algorithms

Introduction

@@ -26,7 +26,7 @@

To implement select sort from the exercises and make it as easy to read as possible, I have implemented a maximum function that returns the index of the biggest element among the first n elements.

For the sake of time and memory complexity, I am also using itertools.islice, which makes a slice, but does not copy the elements into memory like a normal slice does.
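As a small illustration (my own sketch, not part of the original code): a normal slice allocates a new list, whereas itertools.islice only wraps the original iterable and yields the elements lazily.

    import itertools

    arr = list(range(10))

    copied = arr[:5]                 # normal slice: a new list with 5 elements is allocated
    lazy = itertools.islice(arr, 5)  # islice: a lazy iterator, nothing is copied

    print(copied)      # [0, 1, 2, 3, 4]
    print(list(lazy))  # [0, 1, 2, 3, 4], elements are produced only on demand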

There is also a check_loop_invariant function that will be described later.

def compare_by_value(pair):
    index, value = pair
    return value


def maximum(arr, n):
    first_n_elements = itertools.islice(enumerate(arr), n)
    index, value = max(first_n_elements, key=compare_by_value)
    return index


def select_sort(arr, n):
    assert n == len(arr)

    check_loop_invariant(arr, n, n)
    for i in reversed(range(1, n)):
        j = maximum(arr, i + 1)
        arr[i], arr[j] = arr[j], arr[i]

        check_loop_invariant(arr, n, i)

    return arr

Discussed preconditions, loop invariants and postconditions

You can safely replace A with arr or array for list.

Precondition

@@ -66,26 +66,26 @@
  • For each index from the end, I will assign maximum + index. This will ensure that even if the maximum in the original array was the first element, I will always satisfy that 2nd part of the loop invariant.
    def broken_select_sort(arr, n):
        assert n == len(arr)

        if not arr:
            return

        max_value = max(arr)

        check_loop_invariant(arr, n, n)
        for i in reversed(range(n)):
            arr[i] = max_value + i

            check_loop_invariant(arr, n, i)

        return arr
    tip

    There is also an easier way to break this, I leave that as an exercise ;)

    Property-based tests for our sorts

    Since we have talked a lot about proofs at the seminar, I would like to demonstrate them by testing the sorts. In the following text I will cover the implementation of the loop invariant and of both postconditions we have talked about, and then test our sorts using them.

    Loop invariant

    To check the loop invariant I have implemented this function:

    def check_loop_invariant(arr, n, i):
        # A[i + 1 : n] is sorted
        for x, y in zip(itertools.islice(arr, i + 1, n), itertools.islice(arr, i + 2, n)):
            assert x <= y

        # all elements of A[i + 1 : n] are bigger or equal to the other elements
        if i + 1 >= n:
            # in case there are no elements
            return

        # otherwise, since the "tail" is sorted, we can assume that it is enough to
        # check the other elements to the smallest value of the tail
        smallest = arr[i + 1]
        for element in itertools.islice(arr, i + 1):
            assert smallest >= element

    The first part checks whether the “ending” of the array is sorted.

    In the second part I have used a dirty trick: since the “tail” is sorted, its first element is the smallest one, so I compare all of the remaining elements against it. Why is that enough? I leave it as an exercise ;)

    Postcondition(s)

    I have defined both the vague and explicit postconditions:

    def check_vague_postcondition(original_arr, arr):
        if not arr:
            return

        # check ordering
        for x, y in zip(arr, itertools.islice(arr, 1, len(arr))):
            assert x <= y


    def check_postcondition(original_arr, arr):
        if not arr:
            return

        # check ordering
        for x, y in zip(arr, itertools.islice(arr, 1, len(arr))):
            assert x <= y

        # get counts from original list
        original_counts = {}
        for value in original_arr:
            original_counts[value] = 1 + original_counts.get(value, 0)

        # get counts from resulting list
        counts = {}
        for value in arr:
            counts[value] = 1 + counts.get(value, 0)

        # if arr is permutation of original_arr then all counts must be the same
        assert counts == original_counts

    Putting it together

    Now that we have everything implemented, we can move on to the implementation of the tests:

    from hypothesis import given, settings
    from hypothesis.strategies import integers, lists
    import pytest


    @given(lists(integers()))
    @settings(max_examples=1000)
    @pytest.mark.parametrize(
        "postcondition", [check_vague_postcondition, check_postcondition]
    )
    @pytest.mark.parametrize("sorting_function", [select_sort, broken_select_sort])
    def test_select_sort(sorting_function, postcondition, numbers):
        result = sorting_function(numbers[:], len(numbers))
        postcondition(numbers, result)

    Since it might seem a bit scary, I will dissect it piece by piece.

    1. Parameters of test function

      def test_select_sort(sorting_function, postcondition, numbers):

      We are given 3 parameters:

      • sorting_function - as the name suggests, the sorting function we test
      • @@ -95,7 +95,7 @@ This will ensure that even if the maximum in the original array was the first el
      • Body of the test

        result = sorting_function(numbers[:], len(numbers))
        postcondition(numbers, result)

        We pass a copy of the numbers to the sorting function. This ensures that, when checking the stricter postcondition, we can still gather the necessary information even after the list has been sorted in situ, i.e. we can check whether the result really is a permutation of numbers even though the sorting function has modified the list that was passed in.
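        As a small illustration of why the copy matters (a sketch of my own, not part of the test suite):

            numbers = [3, 1, 2]
            result = select_sort(numbers[:], len(numbers))  # the slice creates a shallow copy, which is sorted in place

            print(numbers)  # [3, 1, 2], the original stays untouched, so we can still compare against it
            print(result)   # [1, 2, 3]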

    @@ -103,7 +103,7 @@ This will ensure that even if the maximum in the original array was the first el
    1. 1st parametrize from the bottom

      @pytest.mark.parametrize("sorting_function", [select_sort, broken_select_sort])

      This tells pytest that we want to pass the values from the list to the parameter sorting_function. In other words, this lets us use the same test function for both the correct and the incorrect select sort.
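      Since the two parametrize decorators are combined, pytest collects every pairing of a sorting function with a postcondition, i.e. the four items you can see in the test run below:

          test_select_sort[select_sort-check_vague_postcondition]
          test_select_sort[select_sort-check_postcondition]
          test_select_sort[broken_select_sort-check_vague_postcondition]
          test_select_sort[broken_select_sort-check_postcondition]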

    2. @@ -120,7 +120,7 @@ This means hypothesis is randomly creating lists of integers and passing them to

    Let's run the tests!

    In case you want to experiment locally, you should install pytest and hypothesis from the PyPI.
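    For example (assuming a common Python setup where pip is available):

        $ python3 -m pip install pytest hypothesis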

    % pytest -v test_sort.py
    =================================== test session starts ====================================
    platform linux -- Python 3.6.8, pytest-3.8.2, py-1.7.0, pluggy-0.13.1 -- /usr/bin/python3
    cachedir: .pytest_cache
    rootdir: /home/xfocko/git/xfocko/ib002/postcondition-ambiguity, inifile:
    plugins: hypothesis-5.16.1
    collected 4 items

    test_sort.py::test_select_sort[select_sort-check_vague_postcondition] PASSED [ 25%]
    test_sort.py::test_select_sort[select_sort-check_postcondition] PASSED [ 50%]
    test_sort.py::test_select_sort[broken_select_sort-check_vague_postcondition] PASSED [ 75%]
    test_sort.py::test_select_sort[broken_select_sort-check_postcondition] FAILED [100%]

    ========================================= FAILURES =========================================
    _________________ test_select_sort[broken_select_sort-check_postcondition] _________________

    sorting_function = <function broken_select_sort at 0x7fac179308c8>
    postcondition = <function check_postcondition at 0x7fac1786d1e0>

    @given(lists(integers()))
    > @settings(max_examples=1000)
    @pytest.mark.parametrize(
    "postcondition", [check_vague_postcondition, check_postcondition]
    )
    @pytest.mark.parametrize("sorting_function", [select_sort, broken_select_sort])
    def test_select_sort(sorting_function, postcondition, numbers):

    test_sort.py:132:
    _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
    test_sort.py:139: in test_select_sort
    postcondition(numbers, result)
    _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

    original_arr = [0, 0], arr = [0, 1]

    def check_postcondition(original_arr, arr):
    if not arr:
    return

    # check ordering
    for x, y in zip(arr, itertools.islice(arr, 1, len(arr))):
    assert x <= y

    # get counts from original list
    original_counts = {}
    for value in original_arr:
    original_counts[value] = 1 + original_counts.get(value, 0)

    # get counts from resulting list
    counts = {}
    for value in arr:
    counts[value] = 1 + counts.get(value, 0)

    # if arr is permutation of original_arr then all counts must be the same
    > assert counts == original_counts
    E assert {0: 1, 1: 1} == {0: 2}
    E Differing items:
    E {0: 1} != {0: 2}
    E Left contains more items:
    E {1: 1}
    E Full diff:
    E - {0: 1, 1: 1}
    E + {0: 2}

    test_sort.py:128: AssertionError
    ----------------------------------- Captured stdout call -----------------------------------
    Falsifying example: test_select_sort(
    sorting_function=<function test_sort.broken_select_sort>,
    postcondition=<function test_sort.check_postcondition>,
    numbers=[0, 0],
    )
    ============================ 1 failed, 3 passed in 6.84 seconds ============================

    We can clearly see that our broken select sort has passed the vague postcondition, but the explicit one was not satisfied.

    Summary

    When proving the correctness of an algorithm it is better to be explicit; a vague postcondition lets you “prove” an algorithm correct even though it is not. Being explicit also allows you to test smaller chunks of code better.

    diff --git a/algorithms/category/algorithms-and-correctness/index.html b/algorithms/category/algorithms-and-correctness/index.html index 37b4598..750a0cf 100644 --- a/algorithms/category/algorithms-and-correctness/index.html +++ b/algorithms/category/algorithms-and-correctness/index.html @@ -18,8 +18,8 @@ correctness. - - + +

    Algorithms and Correctness

    Materials related to basic ideas behind algorithms and proofs of their diff --git a/algorithms/category/asymptotic-notation-and-time-complexity/index.html b/algorithms/category/asymptotic-notation-and-time-complexity/index.html index 06a3216..924f91c 100644 --- a/algorithms/category/asymptotic-notation-and-time-complexity/index.html +++ b/algorithms/category/asymptotic-notation-and-time-complexity/index.html @@ -16,8 +16,8 @@ - - + +

    Asymptotic Notation and Time Complexity

    Materials related to asymptotic notation and time complexity. diff --git a/algorithms/category/graphs/index.html b/algorithms/category/graphs/index.html index 081e9a1..4740efc 100644 --- a/algorithms/category/graphs/index.html +++ b/algorithms/category/graphs/index.html @@ -16,8 +16,8 @@ - - + +

    Graphs

    Materials related to basic graph algorithms and graph problems. diff --git a/algorithms/category/recursion/index.html b/algorithms/category/recursion/index.html index 01c7c8e..add734b 100644 --- a/algorithms/category/recursion/index.html +++ b/algorithms/category/recursion/index.html @@ -16,8 +16,8 @@ - - + +

    Recursion

    Materials related to recursive algorithms and their time complexity. diff --git a/algorithms/category/red-black-trees/index.html b/algorithms/category/red-black-trees/index.html index a768522..a77fe9f 100644 --- a/algorithms/category/red-black-trees/index.html +++ b/algorithms/category/red-black-trees/index.html @@ -16,8 +16,8 @@ - - + +

    Red-Black Trees

    Materials related to red-black trees. diff --git a/algorithms/graphs/bfs-tree/index.html b/algorithms/graphs/bfs-tree/index.html index a8cfc0d..7c80cd7 100644 --- a/algorithms/graphs/bfs-tree/index.html +++ b/algorithms/graphs/bfs-tree/index.html @@ -16,8 +16,8 @@ - - + +

    Distance boundaries from BFS tree on undirected graphs

    Introduction

    diff --git a/algorithms/graphs/iterative-and-iterators/index.html b/algorithms/graphs/iterative-and-iterators/index.html index 6893e68..1b06b9e 100644 --- a/algorithms/graphs/iterative-and-iterators/index.html +++ b/algorithms/graphs/iterative-and-iterators/index.html @@ -16,8 +16,8 @@ - - + +

    Iterative algorithms via iterators

    Introduction

    @@ -28,17 +28,17 @@

    On the other hand, we have seen an iterative implementation in the exercises, and I have also prepared two of my own: one that is similar to the recursive implementation without colors from the exercises, and another one that uses features of high-level languages.

    Different implementations

    Recursive DFS implementation from exercises without colors

    function VisitedDFS(u: Vertex, visited: VertexSet) return VertexSet is
       v: Vertex;
    begin
       visited.Union(To_Set(u));

       for v in u.successors loop
          if not Contains(visited, v) then
             visited := VisitedDFS(v, visited);
          end if;
       end loop;

       return visited;
    end VisitedDFS;

    This implementation is correct and does the DFS traversal as it should, however it has one “smallish” downside, and that is the time complexity. The usage of a set raises the time complexity; of course, it is implementation dependent. In the case of either an RB-tree or a hash-table implementation, we get a worst-case look-up in \mathcal{O}(\log n) time for the former, or \mathcal{O}(n) for the latter. Neither is ideal compared to checking a color stored directly on the vertex.
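    For completeness, a rough Python equivalent of the same set-based recursion (my own sketch; it assumes each vertex carries a successors list, which is not spelled out in the pseudocode above):

        def visited_dfs(u, visited):
            # mark the current vertex as visited
            visited.add(u)

            for v in u.successors:
                if v not in visited:
                    visited_dfs(v, visited)

            return visited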

    Iterative DFS from the exercises

    procedure IterDFS(u: Vertex) is
       stack: StateVector;
       i, time: Integer;
       v: Vertex;
    begin
       stack.Append(VertexState(u, 0));
       u.color := Gray;
       time := 1;
       u.d := time;

       while not stack.Is_Empty loop
          u := stack.Last_Element.Vertex;
          i := stack.Last_Element.NextIndex;
          stack.Delete_Last;

          if i < u.successors.Length then
             -- search is not finished, u is pushed back to the stack
             stack.Append(VertexState(u, i + 1));

             v := u.successors.Element(i);
             if v.color = White then
                stack.Append(VertexState(v, 0));
                v.color := Gray;
                time := time + 1;
                v.d := time;
             end if;
          else
             -- u has no other successors, we can finish the search
             time := time + 1;
             u.f := time;
             u.color := Black;
          end if;
       end loop;

    end IterDFS;

    As we can see, there is some ordering in which we search through the successors. The time complexity is OK: the stack holds at most all of the vertices (they have to lie on the current path).

    My iterative with path in stack

    procedure DFS(start: Vertex) is
       path: VertexVector;
       time: Integer;
       hasSuccessor: Bool;
       successor: Vertex;
    begin
       path.Append(start);
       time := 1;

       start.d := time;
       start.color := Gray;

       while not path.Is_Empty loop
          hasSuccessor := false;

          for successor in path.Last_Element.successors loop
             if successor.color = White then
                hasSuccessor := true;

                successor.d := time + 1;
                successor.color := Gray;
                time := time + 1;

                path.Append(successor);

                exit;
             end if;
          end loop;

          if not hasSuccessor then
             path.Last_Element.f := time + 1;
             path.Last_Element.color := Black;

             time := time + 1;
             path.Delete_Last;
          end if;

       end loop;
    end DFS;

    This approach is similar to the iterative solution from the exercises, but it does not keep the index of the next successor, therefore it always iterates through all of them, which raises the time complexity.

    My iterative solution with iterators

    On the other hand, we do not actually have to depend on the representation of the graph. In this case, we just somehow obtain the iterator (which yields all of the successors) and keep it in the stack.

    procedure DFS(start: Vertex) is
       path: StateVector;
       time: Integer;
       current: State;
       nextVertex: Vertex;
    begin
       path.Append(State(start));
       time := 1;

       start.d := time;
       start.color := Gray;

       while not path.Is_Empty loop
          current := path.Last_Element;

          if not Move_Next(current.successors) then
             path.Delete_Last;

             time := time + 1;
             current.vertex.f := time;

             current.vertex.color := Black;
          elsif current.successors.Value.color = White then
             nextVertex := current.successors.Value;

             time := time + 1;
             nextVertex.d := time;

             nextVertex.color := Gray;

             path.Append(State(nextVertex));
          end if;
       end loop;
    end DFS;

    (The way we manipulate the iterators is closest to the C# implementation, apart from the Iterator thing :). In case you tried to implement it in C++, you would more than likely need to change the check, since you would get the first successor right at the beginning.)

    So here we don't keep indices, but iterators. We can also check for the existence of further successors easily: the iterator simply moves past the last successor.

    A closer explanation of the iterator shenanigans follows. In the beginning, whether for start or when pushing a new vertex, we push an iterator that points just before the first successor. At the top of the while-loop we take the element from the top of the stack. Move_Next returns true if there is another element, i.e. a successor in this case. If it returns false, we have nothing more to do and we pop the vertex from the stack (also setting its finishing time and color). If we do have a successor, we check whether it has already been visited. If it has not, we set its discovery time and color accordingly and push it onto the stack.
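    To make the idea concrete, here is a rough Python sketch of the same traversal (my own; it assumes vertices have successors, color, d and f attributes as in the pseudocode, with iter/next playing the role of Move_Next):

        def dfs(start):
            time = 1
            start.d = time
            start.color = "gray"

            # every stack entry is a vertex together with an iterator over its successors
            path = [(start, iter(start.successors))]

            while path:
                vertex, successors = path[-1]
                next_vertex = next(successors, None)

                if next_vertex is None:
                    # the iterator is exhausted: no successors left, finish the vertex
                    path.pop()
                    time += 1
                    vertex.f = time
                    vertex.color = "black"
                elif next_vertex.color == "white":
                    # an unvisited successor: discover it and push it with a fresh iterator
                    time += 1
                    next_vertex.d = time
                    next_vertex.color = "gray"
                    path.append((next_vertex, iter(next_vertex.successors)))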

    diff --git a/algorithms/index.html b/algorithms/index.html index c7577fc..17b50d5 100644 --- a/algorithms/index.html +++ b/algorithms/index.html @@ -14,8 +14,8 @@ - - + +

    Introduction

    In this part you can find “random” additional materials I have written over the course of teaching Algorithms and data structures I.

    It is a varied mix of stuff that may have been produced as a follow-up to some question asked at the seminar, or spontaneously.

    If you have some ideas for posts, please do not hesitate to submit them as issues in the linked GitLab.

    \ No newline at end of file diff --git a/algorithms/rb-trees/applications/index.html b/algorithms/rb-trees/applications/index.html index 1aba1e4..5ca88c7 100644 --- a/algorithms/rb-trees/applications/index.html +++ b/algorithms/rb-trees/applications/index.html @@ -16,8 +16,8 @@ - - + +

    Applications of red-black trees

    Applications

    @@ -43,16 +43,16 @@ we have picked several languages.

    • map

      template <class _Key, class _Tp, class _Compare = less<_Key>,
                class _Allocator = allocator<pair<const _Key, _Tp> > >
      class _LIBCPP_TEMPLATE_VIS map
      {
      public:
          // types:
          typedef _Key key_type;
          typedef _Tp mapped_type;
          typedef pair<const key_type, mapped_type> value_type;

          // …

      private:
          typedef __tree<__value_type, __vc, __allocator_type> __base;
    • set

      template <class _Key, class _Compare = less<_Key>,
                class _Allocator = allocator<_Key> >
      class _LIBCPP_TEMPLATE_VIS set
      {
      public:
          // types:
          typedef _Key key_type;
          typedef key_type value_type;

          // …

      private:
          typedef __tree<value_type, value_compare, allocator_type> __base;

    In both headers we can notice that they declare some private type __base, which is an alias for __tree. That in turn leads us to the __tree header.

    An excerpt:

    /*

    _NodePtr algorithms

    The algorithms taking _NodePtr are red black tree algorithms. Those
    algorithms taking a parameter named __root should assume that __root
    points to a proper red black tree (unless otherwise specified).

    */

    gcc

    For gcc the process is almost the same. For a change, we will not find anything in the map and set headers themselves; the declarations are only in the header files:

    Both of them again refer to a header bits/stl_tree.h; again an excerpt:

    // Red-black tree class, designed for use in implementing STL
    // associative containers (set, multiset, map, and multimap). The
    // insertion and deletion algorithms are based on those in Cormen,
    // Leiserson, and Rivest, Introduction to Algorithms (MIT Press,
    // 1990), except that
    //
    // (1) the header cell is maintained with links not only to the root
    // but also to the leftmost node of the tree, to enable constant
    // time begin(), and to the rightmost node of the tree, to enable
    // linear time performance when used with the generic set algorithms
    // (set_union, etc.)
    //
    // (2) when a node being deleted has two children its successor node
    // is relinked into its place, rather than copied, so that the only
    // iterators invalidated are those referring to the deleted node.

    enum _Rb_tree_color { _S_red = false, _S_black = true };

    struct _Rb_tree_node_base
    {
        typedef _Rb_tree_node_base* _Base_ptr;
        typedef const _Rb_tree_node_base* _Const_Base_ptr;

        _Rb_tree_color _M_color;
        _Base_ptr _M_parent;
        _Base_ptr _M_left;
        _Base_ptr _M_right;

        static _Base_ptr
        _S_minimum(_Base_ptr __x) _GLIBCXX_NOEXCEPT
        {
            while (__x->_M_left != 0) __x = __x->_M_left;
            return __x;
        }

        static _Const_Base_ptr
        _S_minimum(_Const_Base_ptr __x) _GLIBCXX_NOEXCEPT
        {
            while (__x->_M_left != 0) __x = __x->_M_left;
            return __x;
        }

        static _Base_ptr
        _S_maximum(_Base_ptr __x) _GLIBCXX_NOEXCEPT
        {
            while (__x->_M_right != 0) __x = __x->_M_right;
            return __x;
        }

        static _Const_Base_ptr
        _S_maximum(_Const_Base_ptr __x) _GLIBCXX_NOEXCEPT
        {
            while (__x->_M_right != 0) __x = __x->_M_right;
            return __x;
        }

    Here we can already see some code for finding the minimum/maximum in the tree. Among other things, there is also tree.cc, where one can find, for example, a function with the following signature:

    void
    _Rb_tree_insert_and_rebalance(const bool __insert_left,
                                  _Rb_tree_node_base* __x,
                                  _Rb_tree_node_base* __p,
                                  _Rb_tree_node_base& __header) throw ();

    Java

    In Java, the key implementations for us are TreeSet and TreeMap.

    In the implementation of TreeSet you can notice:

    public class TreeSet<E> extends AbstractSet<E>
        implements NavigableSet<E>, Cloneable, java.io.Serializable
    {
        /**
         * The backing map.
         */
        private transient NavigableMap<E,Object> m;

        // Dummy value to associate with an Object in the backing Map
        private static final Object PRESENT = new Object();

    So TreeSet in Java uses a TreeMap under the hood (which can be seen in the default constructor, which calls the constructor taking a NavigableMap<E, Object> and passes it new TreeMap<>()).

    As for TreeMap, right at the beginning of its definition we can see:

    public class TreeMap<K,V>
        extends AbstractMap<K,V>
        implements NavigableMap<K,V>, Cloneable, java.io.Serializable
    {
        /**
         * The comparator used to maintain order in this tree map, or
         * null if it uses the natural ordering of its keys.
         *
         * @serial
         */
        @SuppressWarnings("serial") // Conditionally serializable
        private final Comparator<? super K> comparator;

        private transient Entry<K,V> root;

    So we have “some root” of type Entry<K,V>. Let's try to find the definition of that type…

    // Red-black mechanics

    private static final boolean RED   = false;
    private static final boolean BLACK = true;

    /**
     * Node in the Tree. Doubles as a means to pass key-value pairs back to
     * user (see Map.Entry).
     */

    static final class Entry<K,V> implements Map.Entry<K,V> {
        K key;
        V value;
        Entry<K,V> left;
        Entry<K,V> right;
        Entry<K,V> parent;
        boolean color = BLACK;

    And we have an RB-tree.

    (The implementation comes from the OpenJDK project.)

    C#

    In C# we will focus on the newest release (.NET), which is open-source and supported also on Linux-based operating systems.

    First we will have a look at the dictionary implementation (SortedDictionary).

    public class SortedDictionary<TKey, TValue> : IDictionary<TKey, TValue>, IDictionary, IReadOnlyDictionary<TKey, TValue> where TKey : notnull
    {
        [NonSerialized]
        private KeyCollection? _keys;
        [NonSerialized]
        private ValueCollection? _values;

        private readonly TreeSet<KeyValuePair<TKey, TValue>> _set; // Do not rename (binary serialization)

    At first glance we have a problem, because TreeSet is not the SortedSet we would have expected. If we move to the end of the file, we find out that TreeSet is just a backward-compatible wrapper around SortedSet.

    So let's move on to SortedSet. Right at the beginning we can see:

    // A binary search tree is a red-black tree if it satisfies the following red-black properties:
    // 1. Every node is either red or black
    // 2. Every leaf (nil node) is black
    // 3. If a node is red, the both its children are black
    // 4. Every simple path from a node to a descendant leaf contains the same number of black nodes
    //
    // The basic idea of a red-black tree is to represent 2-3-4 trees as standard BSTs but to add one extra bit of information
    // per node to encode 3-nodes and 4-nodes.
    // 4-nodes will be represented as:   B
    //                                 R   R
    //
    // 3 -node will be represented as:   B     or     B
    //                                 R   B        B   R
    //
    // For a detailed description of the algorithm, take a look at "Algorithms" by Robert Sedgewick.

    internal enum NodeColor : byte
    {
        Black,
        Red
    }

    internal delegate bool TreeWalkPredicate<T>(SortedSet<T>.Node node);

    internal enum TreeRotation : byte
    {
        Left,
        LeftRight,
        Right,
        RightLeft
    }

    The explanation in the comment runs a bit ahead of the exercise dedicated to B-trees ;)

    The relationship between sets and maps

    In every standard-library implementation we could notice that the tree always implements only one of the two types:

    Language | Implementation approach
    C++      | the map stores pairs in a set
    Java     | the set stores elements with a “dummy” value in a map
    C#       | the map stores pairs in a set

    A map requires every key to have exactly one value assigned to it, i.e. the keys are mutually unique. This allows us to organize the keys into a set; however, we run into an unpleasant problem, namely that we have to insert pairs of elements, (key, value), into the set. This approach has a fundamental flaw:

    # let's represent dictionary/map as a set
    set_of_values = set()

    # let's insert few pairs
    set_of_values.add((1, 2))
    set_of_values.add((0, 42))

    # let's set key 1 to value 6
    set_of_values.add((1, 6))

    set_of_values

    And we get:

    {(1, 6), (1, 2), (0, 42)}

    The individual implementations you could see above rely on the following, when:

    • the map stores pairs in a set: the pair is wrapped in a standalone type that compares only the keys
    • diff --git a/algorithms/rb-trees/rules/index.html b/algorithms/rb-trees/rules/index.html index ef5b888..5bb93f3 100644 --- a/algorithms/rb-trees/rules/index.html +++ b/algorithms/rb-trees/rules/index.html @@ -16,8 +16,8 @@ - - + +

      On the rules of the red-black tree

      Introduction

      @@ -88,7 +88,7 @@ different tree, so we keep this note in just as a “hack”.

      This rule might seem like a very important one, but overall it is not. You can safely omit this rule, but you then also need to deal with the consequences.

      Let's refresh our memory with the algorithm of insert fixup:

      WHILE z.p.color == Red
          IF z.p == z.p.p.left
              y = z.p.p.right

              IF y.color == Red
                  z.p.color = Black
                  y.color = Black
                  z.p.p.color = Red
                  z = z.p.p
              ELSE
                  IF z == z.p.right
                      z = z.p
                      Left-Rotate(T, z)
                  z.p.color = Black
                  z.p.p.color = Red
                  Right-Rotate(T, z.p.p)
          ELSE (same as above with “right” and “left” exchanged)

      T.root.color = Black
      tip

      If you have tried to implement any of the more complex data structures, such as red-black trees, etc., in a statically typed language that also checks you for NULL-correctness (e.g. mypy or even C# with nullable reference types), you diff --git a/algorithms/recursion/karel-1/index.html b/algorithms/recursion/karel-1/index.html index 6340b19..aa47b3d 100644 --- a/algorithms/recursion/karel-1/index.html +++ b/algorithms/recursion/karel-1/index.html @@ -16,8 +16,8 @@ - - + +

      Recursion and backtracking with Robot Karel

        @@ -149,7 +149,7 @@ majority of Python installations)
      • skeleton.py - skeleton for your solution, needs to be put in the same directory as karel_tk.py and takes path to the world as a first argument, example usage:

        $ python3 skeleton.py stairs.kw
        • of course, this file can be renamed ;)
        diff --git a/algorithms/recursion/pyramid-slide-down/index.html b/algorithms/recursion/pyramid-slide-down/index.html index 3350853..4c5efec 100644 --- a/algorithms/recursion/pyramid-slide-down/index.html +++ b/algorithms/recursion/pyramid-slide-down/index.html @@ -16,8 +16,8 @@ - - + +

        Introduction to dynamic programming

        In this post we will try to solve one problem in different ways.

        @@ -27,12 +27,12 @@

        We are given a 2D array of integers and we are to find the slide down. The slide down is the maximum sum of consecutive numbers on a path from the top to the bottom.

        Let's have a look at a few examples. Consider the following pyramid:

           3
          7 4
         2 4 6
        8 5 9 3

        This pyramid has the following slide down:

           *3
          *7 4
         2 *4 6
        8 5 *9 3

        And its value is 23.

        We can also have a look at a bigger example:

        75
        95 64
        17 47 82
        18 35 87 10
        20 4 82 47 65
        19 1 23 75 3 34
        88 2 77 73 7 63 67
        99 65 4 28 6 16 70 92
        41 41 26 56 83 40 80 70 33
        41 48 72 33 47 32 37 16 94 29
        53 71 44 65 25 43 91 52 97 51 14
        70 11 33 28 77 73 17 78 39 68 17 57
        91 71 52 38 17 14 91 43 58 50 27 29 48
        63 66 4 68 89 53 67 30 73 16 69 87 40 31
        4 62 98 27 23 9 70 98 73 93 38 53 60 4 23

        Slide down in this case is equal to 1074.

        Solving the problem

        caution

        I will describe the following ways you can approach this problem and implement @@ -41,13 +41,13 @@ them in Javatrue/false based on the expected output of our algorithm. Any other differences will lie only in the solutions of the problem. You can see the main here:

        public static void main(String[] args) {
            System.out.print("Test #1: ");
            System.out.println(longestSlideDown(new int[][] {
                    { 3 },
                    { 7, 4 },
                    { 2, 4, 6 },
                    { 8, 5, 9, 3 }
            }) == 23 ? "passed" : "failed");

            System.out.print("Test #2: ");
            System.out.println(longestSlideDown(new int[][] {
                    { 75 },
                    { 95, 64 },
                    { 17, 47, 82 },
                    { 18, 35, 87, 10 },
                    { 20, 4, 82, 47, 65 },
                    { 19, 1, 23, 75, 3, 34 },
                    { 88, 2, 77, 73, 7, 63, 67 },
                    { 99, 65, 4, 28, 6, 16, 70, 92 },
                    { 41, 41, 26, 56, 83, 40, 80, 70, 33 },
                    { 41, 48, 72, 33, 47, 32, 37, 16, 94, 29 },
                    { 53, 71, 44, 65, 25, 43, 91, 52, 97, 51, 14 },
                    { 70, 11, 33, 28, 77, 73, 17, 78, 39, 68, 17, 57 },
                    { 91, 71, 52, 38, 17, 14, 91, 43, 58, 50, 27, 29, 48 },
                    { 63, 66, 4, 68, 89, 53, 67, 30, 73, 16, 69, 87, 40, 31 },
                    { 4, 62, 98, 27, 23, 9, 70, 98, 73, 93, 38, 53, 60, 4, 23 },
            }) == 1074 ? "passed" : "failed");
        }

        Naïve solution

        Our naïve solution consists of trying out all the possible slides and finding the one with maximum sum.

        public static int longestSlideDown(int[][] pyramid, int row, int col) {
            if (row >= pyramid.length || col < 0 || col >= pyramid[row].length) {
                // BASE: We have gotten out of bounds, there's no reasonable value to
                // return, so we just return the ‹MIN_VALUE› to ensure that it cannot
                // be maximum.
                return Integer.MIN_VALUE;
            }

            if (row == pyramid.length - 1) {
                // BASE: Bottom of the pyramid, we just return the value, there's
                // nowhere to slide anymore.
                return pyramid[row][col];
            }

            // Otherwise we account for the current position and return maximum of the
            // available “slides”.
            return pyramid[row][col] + Math.max(
                    longestSlideDown(pyramid, row + 1, col),
                    longestSlideDown(pyramid, row + 1, col + 1));
        }

        public static int longestSlideDown(int[][] pyramid) {
            // We start the slide in the top cell of the pyramid.
            return longestSlideDown(pyramid, 0, 0);
        }

        As you can see, we have 2 overloads:

        int longestSlideDown(int[][] pyramid);
        int longestSlideDown(int[][] pyramid, int row, int col);

        The first one is used as a public interface to the solution; you just pass in the pyramid itself. The second one is the recursive “algorithm” that finds the slide down.

        @@ -60,12 +60,12 @@ are wondering about the time complexity of the proposed solution and, since it really is a naïve solution, the time complexity is pretty bad. Let's find the worst case scenario.

        Let's start with the first overload:

        public static int longestSlideDown(int[][] pyramid) {
            return longestSlideDown(pyramid, 0, 0);
        }

        There's not much to do here, so we can safely say that the time complexity of this function is bounded by T(n), where T is our second overload. This doesn't tell us anything, so let's move on to the second overload, where we are going to define the T(n) function.

        public static int longestSlideDown(int[][] pyramid, int row, int col) {
            if (row >= pyramid.length || col < 0 || col >= pyramid[row].length) {
                // BASE: We have gotten out of bounds, there's no reasonable value to
                // return, so we just return the ‹MIN_VALUE› to ensure that it cannot
                // be maximum.
                return Integer.MIN_VALUE;
            }

            if (row == pyramid.length - 1) {
                // BASE: Bottom of the pyramid, we just return the value, there's
                // nowhere to slide anymore.
                return pyramid[row][col];
            }

            // Otherwise we account for the current position and return maximum of the
            // available “slides”.
            return pyramid[row][col] + Math.max(
                    longestSlideDown(pyramid, row + 1, col),
                    longestSlideDown(pyramid, row + 1, col + 1));
        }

        Fun fact is that the whole “algorithm” consists of just 2 return statements and nothing else. Let's dissect them!

        First return statement is the base case, so it has a constant time complexity.
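        The second return statement spawns two recursive calls, one for each cell below. A back-of-the-envelope recurrence (my own estimate, consistent with the “exponential solution” remark further below) for a cell that is r rows above the bottom is therefore

        T(r) = c + 2 \, T(r - 1), \qquad T(0) = c,

        which unrolls to T(r) \in \mathcal{O}(2^{r}), i.e. exponential in the height of the pyramid.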

        @@ -104,7 +104,7 @@ approach.

        optimal option at the moment.

        We can try to adjust the naïve solution. The most problematic part is the recursive calls. Let's apply the greedy approach there:

        public static int longestSlideDown(int[][] pyramid, int row, int col) {
            if (row == pyramid.length - 1) {
                // BASE: We're at the bottom
                return pyramid[row][col];
            }

            if (col + 1 >= pyramid[row + 1].length
                    || pyramid[row + 1][col] > pyramid[row + 1][col + 1]) {
                // If we cannot go right or it's not feasible, we continue to the left.
                return pyramid[row][col] + longestSlideDown(pyramid, row + 1, col);
            }

            // Otherwise we just move to the right.
            return pyramid[row][col] + longestSlideDown(pyramid, row + 1, col + 1);
        }

        OK, if we cannot go right, or the right path adds a smaller value to the sum, we simply go left.

        Time complexity

        @@ -115,12 +115,12 @@ the way to the bottom. Therefore we are getting:

        We have managed to convert our exponential solution into a linear one.

        Running the tests

        However, if we run the tests, we notice that the second test failed:

        Test #1: passed
        Test #2: failed

        What's going on? Well, we have improved the time complexity, but greedy algorithms are not the ideal solution to all problems. In this case there may be a solution that is bigger than the one found using the greedy algorithm.

        Imagine the following pyramid:

            1
           2 3
          5 6 7
         8 9 10 11
        99 13 14 15 16

        We start at the top:

        1. Current cell: 1, we can choose from 2 and 3, 3 looks better, so we @@ -141,7 +141,7 @@ least looks like) is the easiest to implement. The whole point is avoiding the unnecessary computations that we have already done.

          In our case, we can use our naïve solution and put a cache on top of it that will make sure we don't do unnecessary calculations.

          // This “structure” is required, since I have decided to use ‹TreeMap› which
          // requires the ordering on the keys. It represents one position in the pyramid.
          record Position(int row, int col) implements Comparable<Position> {
              public int compareTo(Position r) {
                  if (row != r.row) {
                      return Integer.valueOf(row).compareTo(r.row);
                  }

                  if (col != r.col) {
                      return Integer.valueOf(col).compareTo(r.col);
                  }

                  return 0;
              }
          }

          public static int longestSlideDown(
                  int[][] pyramid,
                  TreeMap<Position, Integer> cache,
                  Position position) {
              int row = position.row;
              int col = position.col;

              if (row >= pyramid.length || col < 0 || col >= pyramid[row].length) {
                  // BASE: out of bounds
                  return Integer.MIN_VALUE;
              }

              if (row == pyramid.length - 1) {
                  // BASE: bottom of the pyramid
                  return pyramid[position.row][position.col];
              }

              if (!cache.containsKey(position)) {
                  // We haven't computed the position yet, so we run the same “formula” as
                  // in the naïve version »and« we put calculated slide into the cache.
                  // Next time we want the slide down from given position, it will be just
                  // retrieved from the cache.
                  int slideDown = Math.max(
                          longestSlideDown(pyramid, cache, new Position(row + 1, col)),
                          longestSlideDown(pyramid, cache, new Position(row + 1, col + 1)));
                  cache.put(position, pyramid[row][col] + slideDown);
              }

              return cache.get(position);
          }

          public static int longestSlideDown(int[][] pyramid) {
              // At the beginning we need to create a cache and share it across the calls.
              TreeMap<Position, Integer> cache = new TreeMap<>();
              return longestSlideDown(pyramid, cache, new Position(0, 0));
          }
          +
// This “structure” is required, since I have decided to use ‹TreeMap› which
// requires the ordering on the keys. It represents one position in the pyramid.
record Position(int row, int col) implements Comparable<Position> {
    public int compareTo(Position r) {
        if (row != r.row) {
            return Integer.valueOf(row).compareTo(r.row);
        }

        if (col != r.col) {
            return Integer.valueOf(col).compareTo(r.col);
        }

        return 0;
    }
}

public static int longestSlideDown(
        int[][] pyramid,
        TreeMap<Position, Integer> cache,
        Position position) {
    int row = position.row;
    int col = position.col;

    if (row >= pyramid.length || col < 0 || col >= pyramid[row].length) {
        // BASE: out of bounds
        return Integer.MIN_VALUE;
    }

    if (row == pyramid.length - 1) {
        // BASE: bottom of the pyramid
        return pyramid[position.row][position.col];
    }

    if (!cache.containsKey(position)) {
        // We haven't computed the position yet, so we run the same “formula” as
        // in the naïve version »and« we put the calculated slide into the cache.
        // Next time we want the slide down from the given position, it will just
        // be retrieved from the cache.
        int slideDown = Math.max(
                longestSlideDown(pyramid, cache, new Position(row + 1, col)),
                longestSlideDown(pyramid, cache, new Position(row + 1, col + 1)));
        cache.put(position, pyramid[row][col] + slideDown);
    }

    return cache.get(position);
}

public static int longestSlideDown(int[][] pyramid) {
    // At the beginning we need to create a cache and share it across the calls.
    TreeMap<Position, Integer> cache = new TreeMap<>();
    return longestSlideDown(pyramid, cache, new Position(0, 0));
}

You have probably noticed that record Position has appeared. Since we are caching the already computed values, we need a “reasonable” key. In this case we share the cache only for one run (i.e. pyramid) of the longestSlideDown, so

@@ -251,7 +251,7 @@ DP (unless the cached function has complicated parameters, in that case it might get messy).
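A side note of mine (not from the original article): since Java records generate value-based equals() and hashCode(), a plain HashMap would work as the cache too, and Position would not need to implement Comparable at all. Roughly like this (the class name HashMapMemo is made up for the sketch; the body is the same as in the TreeMap version above):

import java.util.HashMap;
import java.util.Map;

// Records come with value-based equals()/hashCode(), so they can serve as
// HashMap keys without any extra code.
record Position(int row, int col) {}

class HashMapMemo {
    static int longestSlideDown(int[][] pyramid, Map<Position, Integer> cache, Position position) {
        int row = position.row();
        int col = position.col();

        if (row >= pyramid.length || col < 0 || col >= pyramid[row].length) {
            // BASE: out of bounds
            return Integer.MIN_VALUE;
        }

        if (row == pyramid.length - 1) {
            // BASE: bottom of the pyramid
            return pyramid[row][col];
        }

        if (!cache.containsKey(position)) {
            // Same “formula” as in the naïve version, cached on first use.
            int slideDown = Math.max(
                    longestSlideDown(pyramid, cache, new Position(row + 1, col)),
                    longestSlideDown(pyramid, cache, new Position(row + 1, col + 1)));
            cache.put(position, pyramid[row][col] + slideDown);
        }

        return cache.get(position);
    }

    static int longestSlideDown(int[][] pyramid) {
        return longestSlideDown(pyramid, new HashMap<>(), new Position(0, 0));
    }
}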

Bottom-up dynamic programming can be more efficient, but it may be more complicated to implement right from the beginning.

        Let's see how we can implement it:

public static int longestSlideDown(int[][] pyramid) {
    // In the beginning we declare a new array. At this point it is easier to just
    // work with the one dimension, i.e. just allocating the space for the rows.
    int[][] slideDowns = new int[pyramid.length][];

    // Bottom row gets just copied, there's nothing else to do… It's the base
    // case.
    slideDowns[pyramid.length - 1] = Arrays.copyOf(pyramid[pyramid.length - 1],
            pyramid[pyramid.length - 1].length);

    // Then we need to propagate the found slide downs for each of the levels
    // above.
    for (int y = pyramid.length - 2; y >= 0; --y) {
        // We start by copying the values lying in the row we're processing.
        // They get included in the final sum and we need to allocate the space
        // for the precalculated slide downs anyways.
        int[] row = Arrays.copyOf(pyramid[y], pyramid[y].length);

        // At this point we just need to “fetch” the partial results from the “neighbours”.
        for (int x = 0; x < row.length; ++x) {
            // We look under our position; since we expect the rows to get
            // shorter, we can safely assume such a position exists.
            int under = slideDowns[y + 1][x];

            // Then we have a look to the right; such a position doesn't have to
            // exist, e.g. on the right edge, so we validate the index, and if
            // it doesn't exist, we just assign the minimum of the ‹int› which makes
            // sure that it doesn't get picked in the ‹Math.max()› call.
            int toRight = x + 1 < slideDowns[y + 1].length
                    ? slideDowns[y + 1][x + 1]
                    : Integer.MIN_VALUE;

            // Finally we add the best choice at this point.
            row[x] += Math.max(under, toRight);
        }

        // And save the row we've just calculated partial results for to the
        // “table”.
        slideDowns[y] = row;
    }

    // At the end we can find our sought slide down in the top cell.
    return slideDowns[0][0];
}

I've tried to explain the code as much as possible within the comments, since it might be more beneficial to see the explanation right next to the “offending” lines.

As you can see, in this approach we go from the other side³, the bottom of

@@ -304,6 +304,6 @@ successor and represents the way we can enhance the existing implementation.

        one was not correct, thus the quotes

diff --git a/algorithms/time-complexity/extend/index.html b/algorithms/time-complexity/extend/index.html

    Example #1

Let us assume a function that uses the divide & conquer strategy to return the indices at which we can find a specific element in any list.

from typing import Any, List


def recursive_find_in_list(
    values: List[Any], key: Any, lower: int, upper: int
) -> List[int]:
    if lower == upper:
        return [lower] if values[lower] == key else []

    indices = []
    mid = (lower + upper) // 2

    indices.extend(recursive_find_in_list(values, key, lower, mid))
    indices.extend(recursive_find_in_list(values, key, mid + 1, upper))

    return indices


def find_in_list(values: List[Any], key: Any) -> List[int]:
    return recursive_find_in_list(values, key, 0, len(values) - 1)

This implementation works nicely; extend is linear (with respect to the length of the list that is being appended).

    Let us try to dissect the way this function works on some specific input (that will be pushed to the extreme, just in case ;)

find_in_list([1] * 5000, 1). What shall be the result of this? Since we have key = 1 and the list contains only 1s, we should get a list of all indices.
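(A rough estimate of my own, not a quote from the article: with every element equal to the key, a call covering $m$ elements returns all $m$ indices, so the two extend calls at that node copy $m$ items in total. Across one level of the recursion that is about $n$ copies, and there are roughly $\log_2 n$ levels, so the copying alone costs about $\mathcal{O}(n \log n)$ operations — noticeably more than the single linear pass one might expect.)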

    @@ -88,7 +88,7 @@ elements from b.

For the sake of Algorithms and Data Structures I we consider the APPEND operation, i.e. adding an element to the end of the list, to have time complexity $\mathcal{O}(1)$ (amortized; which is out of the scope of IB002).

    If we have a look at the extend implementation in this dynamic array example:

void dynamic_array_extend(struct dynamic_array_t *arr, struct dynamic_array_t *src)
{
    if (arr == NULL || src == NULL)
    {
        return;
    }

    for (size_t i = 0; i < src->count; i++)
    {
        dynamic_array_push_back(arr, dynamic_array_at(src, i));
    }
}

Apart from checking edge cases, we can notice that we run a for-loop over the elements of the other array and add them one-by-one to arr. The time complexity of this operation therefore depends on the size of the src array.

In this specific implementation, you could also resize the memory allocated for the array in one go and copy the whole src array in one go. However, even if you did so, it would still be dependent on the size of the src array, because you still need to copy $\texttt{count}(src) \cdot \texttt{elementSize}(src)$ bytes. For a specific instance of the array, $\texttt{elementSize}(src)$ is fixed, therefore we consider it a constant. That way we are getting $\mathcal{O}(\texttt{count}(src))$ as the time complexity of our extend operation.

diff --git a/assets/js/1535ede8.66ce7202.js b/assets/js/1535ede8.505a0194.js
similarity index 99%
rename from assets/js/1535ede8.66ce7202.js
rename to assets/js/1535ede8.505a0194.js
diff --git a/assets/js/595c7293.e0207e26.js b/assets/js/595c7293.712225b4.js
similarity index 98%
rename from assets/js/595c7293.e0207e26.js
rename to assets/js/595c7293.712225b4.js
:)"}),"\n"]}),"\n",(0,i.jsx)(n.h2,{id:"submitting",children:"Submitting"}),"\n",(0,i.jsx)(n.p,{children:"In case you have any questions, feel free to reach out to me."}),"\n",(0,i.jsx)(n.hr,{})]})}function h(e={}){const{wrapper:n}={...(0,s.a)(),...e.components};return n?(0,i.jsx)(n,{...e,children:(0,i.jsx)(d,{...e})}):d(e)}},73:(e,n,t)=>{t.d(n,{Z:()=>i});const i=t.p+"assets/images/tree-c9e37f87f9095c00fad33ea034485ce6.png"},1151:(e,n,t)=>{t.d(n,{Z:()=>c,a:()=>o});var i=t(7294);const s={},r=i.createContext(s);function o(e){const n=i.useContext(r);return i.useMemo((function(){return"function"==typeof e?e(n):{...n,...e}}),[n,e])}function c(e){let n;return n=e.disableParentContext?"function"==typeof e.components?e.components(s):e.components||s:o(e.components),i.createElement(r.Provider,{value:n},e.children)}}}]); \ No newline at end of file diff --git a/assets/js/5fe5d476.3a9fa5ac.js b/assets/js/5fe5d476.eb20fb28.js similarity index 99% rename from assets/js/5fe5d476.3a9fa5ac.js rename to assets/js/5fe5d476.eb20fb28.js index 41202e7..129cf22 100644 --- a/assets/js/5fe5d476.3a9fa5ac.js +++ b/assets/js/5fe5d476.eb20fb28.js @@ -1 +1 @@ -"use strict";(self.webpackChunkfi=self.webpackChunkfi||[]).push([[2619],{4457:(s,e,n)=>{n.r(e),n.d(e,{assets:()=>c,contentTitle:()=>t,default:()=>o,frontMatter:()=>l,metadata:()=>r,toc:()=>m});var a=n(5893),i=n(1151);const l={id:"pyramid-slide-down",title:"Introduction to dynamic programming",description:"Solving a problem in different ways.\n",tags:["java","recursion","exponential","greedy","dynamic-programming","top-down-dp","bottom-up-dp"],last_updated:{date:new Date("2023-08-17T00:00:00.000Z")}},t=void 0,r={id:"recursion/pyramid-slide-down",title:"Introduction to dynamic programming",description:"Solving a problem in different ways.\n",source:"@site/algorithms/04-recursion/2023-08-17-pyramid-slide-down.md",sourceDirName:"04-recursion",slug:"/recursion/pyramid-slide-down",permalink:"/algorithms/recursion/pyramid-slide-down",draft:!1,unlisted:!1,editUrl:"https://github.com/mfocko/blog/tree/main/algorithms/04-recursion/2023-08-17-pyramid-slide-down.md",tags:[{label:"java",permalink:"/algorithms/tags/java"},{label:"recursion",permalink:"/algorithms/tags/recursion"},{label:"exponential",permalink:"/algorithms/tags/exponential"},{label:"greedy",permalink:"/algorithms/tags/greedy"},{label:"dynamic-programming",permalink:"/algorithms/tags/dynamic-programming"},{label:"top-down-dp",permalink:"/algorithms/tags/top-down-dp"},{label:"bottom-up-dp",permalink:"/algorithms/tags/bottom-up-dp"}],version:"current",lastUpdatedAt:1700847079,formattedLastUpdatedAt:"Nov 24, 2023",frontMatter:{id:"pyramid-slide-down",title:"Introduction to dynamic programming",description:"Solving a problem in different ways.\n",tags:["java","recursion","exponential","greedy","dynamic-programming","top-down-dp","bottom-up-dp"],last_updated:{date:"2023-08-17T00:00:00.000Z"}},sidebar:"autogeneratedBar",previous:{title:"Recursion and backtracking with Robot Karel",permalink:"/algorithms/recursion/karel-1"},next:{title:"Red-Black Trees",permalink:"/algorithms/category/red-black-trees"}},c={},m=[{value:"Problem",id:"problem",level:2},{value:"Solving the problem",id:"solving-the-problem",level:2},{value:"Na\xefve solution",id:"na\xefve-solution",level:2},{value:"Time complexity",id:"time-complexity",level:3},{value:"Greedy solution",id:"greedy-solution",level:2},{value:"Time complexity",id:"time-complexity-1",level:3},{value:"Running the tests",id:"running-the-tests",level:3},{value:"Top-down 
DP",id:"top-down-dp",level:2},{value:"Time complexity",id:"time-complexity-2",level:3},{value:"Memory complexity",id:"memory-complexity",level:3},{value:"Bottom-up DP",id:"bottom-up-dp",level:2},{value:"Time complexity",id:"time-complexity-3",level:3},{value:"Memory complexity",id:"memory-complexity-1",level:3},{value:"Summary",id:"summary",level:2}];function h(s){const e={a:"a",admonition:"admonition",annotation:"annotation",code:"code",em:"em",h2:"h2",h3:"h3",hr:"hr",li:"li",math:"math",mdxAdmonitionTitle:"mdxAdmonitionTitle",mi:"mi",mn:"mn",mo:"mo",mrow:"mrow",mspace:"mspace",mstyle:"mstyle",msub:"msub",msup:"msup",mtable:"mtable",mtd:"mtd",mtext:"mtext",mtr:"mtr",munderover:"munderover",ol:"ol",p:"p",pre:"pre",section:"section",semantics:"semantics",span:"span",strong:"strong",sup:"sup",...(0,i.a)(),...s.components};return(0,a.jsxs)(a.Fragment,{children:[(0,a.jsx)(e.p,{children:"In this post we will try to solve one problem in different ways."}),"\n",(0,a.jsx)(e.h2,{id:"problem",children:"Problem"}),"\n",(0,a.jsxs)(e.p,{children:["The problem we are going to solve is one of ",(0,a.jsx)(e.em,{children:"CodeWars"})," katas and is called\n",(0,a.jsx)(e.a,{href:"https://www.codewars.com/kata/551f23362ff852e2ab000037",children:"Pyramid Slide Down"}),"."]}),"\n",(0,a.jsxs)(e.p,{children:["We are given a 2D array of integers and we are to find the ",(0,a.jsx)(e.em,{children:"slide down"}),".\n",(0,a.jsx)(e.em,{children:"Slide down"})," is a maximum sum of consecutive numbers from the top to the bottom."]}),"\n",(0,a.jsx)(e.p,{children:"Let's have a look at few examples. Consider the following pyramid:"}),"\n",(0,a.jsx)(e.pre,{children:(0,a.jsx)(e.code,{children:" 3\n 7 4\n 2 4 6\n8 5 9 3\n"})}),"\n",(0,a.jsx)(e.p,{children:"This pyramid has following slide down:"}),"\n",(0,a.jsx)(e.pre,{children:(0,a.jsx)(e.code,{children:" *3\n *7 4\n 2 *4 6\n8 5 *9 3\n"})}),"\n",(0,a.jsxs)(e.p,{children:["And its value is ",(0,a.jsx)(e.code,{children:"23"}),"."]}),"\n",(0,a.jsxs)(e.p,{children:["We can also have a look at a ",(0,a.jsx)(e.em,{children:"bigger"})," example:"]}),"\n",(0,a.jsx)(e.pre,{children:(0,a.jsx)(e.code,{children:" 75\n 95 64\n 17 47 82\n 18 35 87 10\n 20 4 82 47 65\n 19 1 23 3 34\n 88 2 77 73 7 63 67\n 99 65 4 28 6 16 70 92\n 41 41 26 56 83 40 80 70 33\n 41 48 72 33 47 32 37 16 94 29\n 53 71 44 65 25 43 91 52 97 51 14\n 70 11 33 28 77 73 17 78 39 68 17 57\n 91 71 52 38 17 14 91 43 58 50 27 29 48\n 63 66 4 68 89 53 67 30 73 16 69 87 40 31\n 4 62 98 27 23 9 70 98 73 93 38 53 60 4 23\n"})}),"\n",(0,a.jsxs)(e.p,{children:["Slide down in this case is equal to ",(0,a.jsx)(e.code,{children:"1074"}),"."]}),"\n",(0,a.jsx)(e.h2,{id:"solving-the-problem",children:"Solving the problem"}),"\n",(0,a.jsx)(e.admonition,{type:"caution",children:(0,a.jsxs)(e.p,{children:["I will describe the following ways you can approach this problem and implement\nthem in ",(0,a.jsx)(e.em,{children:"Java"}),(0,a.jsx)(e.sup,{children:(0,a.jsx)(e.a,{href:"#user-content-fn-1",id:"user-content-fnref-1","data-footnote-ref":!0,"aria-describedby":"footnote-label",children:"1"})}),"."]})}),"\n",(0,a.jsxs)(e.p,{children:["For all of the following solutions I will be using basic ",(0,a.jsx)(e.code,{children:"main"})," function that\nwill output ",(0,a.jsx)(e.code,{children:"true"}),"/",(0,a.jsx)(e.code,{children:"false"})," based on the expected output of our algorithm. Any\nother differences will lie only in the solutions of the problem. 
You can see the\n",(0,a.jsx)(e.code,{children:"main"})," here:"]}),"\n",(0,a.jsx)(e.pre,{children:(0,a.jsx)(e.code,{className:"language-java",children:'public static void main(String[] args) {\n System.out.print("Test #1: ");\n System.out.println(longestSlideDown(new int[][] {\n { 3 },\n { 7, 4 },\n { 2, 4, 6 },\n { 8, 5, 9, 3 }\n }) == 23 ? "passed" : "failed");\n\n System.out.print("Test #2: ");\n System.out.println(longestSlideDown(new int[][] {\n { 75 },\n { 95, 64 },\n { 17, 47, 82 },\n { 18, 35, 87, 10 },\n { 20, 4, 82, 47, 65 },\n { 19, 1, 23, 75, 3, 34 },\n { 88, 2, 77, 73, 7, 63, 67 },\n { 99, 65, 4, 28, 6, 16, 70, 92 },\n { 41, 41, 26, 56, 83, 40, 80, 70, 33 },\n { 41, 48, 72, 33, 47, 32, 37, 16, 94, 29 },\n { 53, 71, 44, 65, 25, 43, 91, 52, 97, 51, 14 },\n { 70, 11, 33, 28, 77, 73, 17, 78, 39, 68, 17, 57 },\n { 91, 71, 52, 38, 17, 14, 91, 43, 58, 50, 27, 29, 48 },\n { 63, 66, 4, 68, 89, 53, 67, 30, 73, 16, 69, 87, 40, 31 },\n { 4, 62, 98, 27, 23, 9, 70, 98, 73, 93, 38, 53, 60, 4, 23 },\n }) == 1074 ? "passed" : "failed");\n}\n'})}),"\n",(0,a.jsx)(e.h2,{id:"na\xefve-solution",children:"Na\xefve solution"}),"\n",(0,a.jsx)(e.p,{children:"Our na\xefve solution consists of trying out all the possible slides and finding\nthe one with maximum sum."}),"\n",(0,a.jsx)(e.pre,{children:(0,a.jsx)(e.code,{className:"language-java",children:"public static int longestSlideDown(int[][] pyramid, int row, int col) {\n if (row >= pyramid.length || col < 0 || col >= pyramid[row].length) {\n // BASE: We have gotten out of bounds, there's no reasonable value to\n // return, so we just return the \u2039MIN_VALUE\u203a to ensure that it cannot\n // be maximum.\n return Integer.MIN_VALUE;\n }\n\n if (row == pyramid.length - 1) {\n // BASE: Bottom of the pyramid, we just return the value, there's\n // nowhere to slide anymore.\n return pyramid[row][col];\n }\n\n // Otherwise we account for the current position and return maximum of the\n // available \u201cslides\u201d.\n return pyramid[row][col] + Math.max(\n longestSlideDown(pyramid, row + 1, col),\n longestSlideDown(pyramid, row + 1, col + 1));\n}\n\npublic static int longestSlideDown(int[][] pyramid) {\n // We start the slide in the top cell of the pyramid.\n return longestSlideDown(pyramid, 0, 0);\n}\n"})}),"\n",(0,a.jsx)(e.p,{children:"As you can see, we have 2 overloads:"}),"\n",(0,a.jsx)(e.pre,{children:(0,a.jsx)(e.code,{className:"language-java",children:"int longestSlideDown(int[][] pyramid);\nint longestSlideDown(int[][] pyramid, int row, int col);\n"})}),"\n",(0,a.jsxs)(e.p,{children:["First one is used as a ",(0,a.jsx)(e.em,{children:"public interface"})," to the solution, you just pass in the\npyramid itself. Second one is the recursive \u201calgorithm\u201d that finds the slide\ndown."]}),"\n",(0,a.jsxs)(e.p,{children:["It is a relatively simple solution\u2026 There's nothing to do at the bottom of the\npyramid, so we just return the value in the ",(0,a.jsx)(e.em,{children:"cell"}),". Otherwise we add it and try\nto slide down the available cells below the current row."]}),"\n",(0,a.jsx)(e.h3,{id:"time-complexity",children:"Time complexity"}),"\n",(0,a.jsx)(e.p,{children:"If you get the source code and run it yourself, it runs rather fine\u2026 I hope you\nare wondering about the time complexity of the proposed solution and, since it\nreally is a na\xefve solution, the time complexity is pretty bad. 
Let's find the\nworst case scenario."}),"\n",(0,a.jsx)(e.p,{children:"Let's start with the first overload:"}),"\n",(0,a.jsx)(e.pre,{children:(0,a.jsx)(e.code,{className:"language-java",children:"public static int longestSlideDown(int[][] pyramid) {\n return longestSlideDown(pyramid, 0, 0);\n}\n"})}),"\n",(0,a.jsxs)(e.p,{children:["There's not much to do here, so we can safely say that the time complexity of\nthis function is bounded by ",(0,a.jsxs)(e.span,{className:"katex",children:[(0,a.jsx)(e.span,{className:"katex-mathml",children:(0,a.jsx)(e.math,{xmlns:"http://www.w3.org/1998/Math/MathML",children:(0,a.jsxs)(e.semantics,{children:[(0,a.jsxs)(e.mrow,{children:[(0,a.jsx)(e.mi,{children:"T"}),(0,a.jsx)(e.mo,{stretchy:"false",children:"("}),(0,a.jsx)(e.mi,{children:"n"}),(0,a.jsx)(e.mo,{stretchy:"false",children:")"})]}),(0,a.jsx)(e.annotation,{encoding:"application/x-tex",children:"T(n)"})]})})}),(0,a.jsx)(e.span,{className:"katex-html","aria-hidden":"true",children:(0,a.jsxs)(e.span,{className:"base",children:[(0,a.jsx)(e.span,{className:"strut",style:{height:"1em",verticalAlign:"-0.25em"}}),(0,a.jsx)(e.span,{className:"mord mathnormal",style:{marginRight:"0.13889em"},children:"T"}),(0,a.jsx)(e.span,{className:"mopen",children:"("}),(0,a.jsx)(e.span,{className:"mord mathnormal",children:"n"}),(0,a.jsx)(e.span,{className:"mclose",children:")"})]})})]}),", where ",(0,a.jsxs)(e.span,{className:"katex",children:[(0,a.jsx)(e.span,{className:"katex-mathml",children:(0,a.jsx)(e.math,{xmlns:"http://www.w3.org/1998/Math/MathML",children:(0,a.jsxs)(e.semantics,{children:[(0,a.jsx)(e.mrow,{children:(0,a.jsx)(e.mi,{children:"T"})}),(0,a.jsx)(e.annotation,{encoding:"application/x-tex",children:"T"})]})})}),(0,a.jsx)(e.span,{className:"katex-html","aria-hidden":"true",children:(0,a.jsxs)(e.span,{className:"base",children:[(0,a.jsx)(e.span,{className:"strut",style:{height:"0.6833em"}}),(0,a.jsx)(e.span,{className:"mord mathnormal",style:{marginRight:"0.13889em"},children:"T"})]})})]})," is our second overload. 
This\ndoesn't tell us anything, so let's move on to the second overload where we are\ngoing to define the ",(0,a.jsxs)(e.span,{className:"katex",children:[(0,a.jsx)(e.span,{className:"katex-mathml",children:(0,a.jsx)(e.math,{xmlns:"http://www.w3.org/1998/Math/MathML",children:(0,a.jsxs)(e.semantics,{children:[(0,a.jsxs)(e.mrow,{children:[(0,a.jsx)(e.mi,{children:"T"}),(0,a.jsx)(e.mo,{stretchy:"false",children:"("}),(0,a.jsx)(e.mi,{children:"n"}),(0,a.jsx)(e.mo,{stretchy:"false",children:")"})]}),(0,a.jsx)(e.annotation,{encoding:"application/x-tex",children:"T(n)"})]})})}),(0,a.jsx)(e.span,{className:"katex-html","aria-hidden":"true",children:(0,a.jsxs)(e.span,{className:"base",children:[(0,a.jsx)(e.span,{className:"strut",style:{height:"1em",verticalAlign:"-0.25em"}}),(0,a.jsx)(e.span,{className:"mord mathnormal",style:{marginRight:"0.13889em"},children:"T"}),(0,a.jsx)(e.span,{className:"mopen",children:"("}),(0,a.jsx)(e.span,{className:"mord mathnormal",children:"n"}),(0,a.jsx)(e.span,{className:"mclose",children:")"})]})})]})," function."]}),"\n",(0,a.jsx)(e.pre,{children:(0,a.jsx)(e.code,{className:"language-java",children:"public static int longestSlideDown(int[][] pyramid, int row, int col) {\n if (row >= pyramid.length || col < 0 || col >= pyramid[row].length) {\n // BASE: We have gotten out of bounds, there's no reasonable value to\n // return, so we just return the \u2039MIN_VALUE\u203a to ensure that it cannot\n // be maximum.\n return Integer.MIN_VALUE;\n }\n\n if (row == pyramid.length - 1) {\n // BASE: Bottom of the pyramid, we just return the value, there's\n // nowhere to slide anymore.\n return pyramid[row][col];\n }\n\n // Otherwise we account for the current position and return maximum of the\n // available \u201cslides\u201d.\n return pyramid[row][col] + Math.max(\n longestSlideDown(pyramid, row + 1, col),\n longestSlideDown(pyramid, row + 1, col + 1));\n}\n"})}),"\n",(0,a.jsxs)(e.p,{children:["Fun fact is that the whole \u201calgorithm\u201d consists of just 2 ",(0,a.jsx)(e.code,{children:"return"})," statements\nand nothing else. Let's dissect them!"]}),"\n",(0,a.jsxs)(e.p,{children:["First ",(0,a.jsx)(e.code,{children:"return"})," statement is the base case, so it has a constant time complexity."]}),"\n",(0,a.jsxs)(e.p,{children:["Second one a bit tricky. We add two numbers together, which we'll consider as\nconstant, but for the right part of the expression we take maximum from the left\nand right paths. OK\u2026 So what happens? We evaluate the ",(0,a.jsx)(e.code,{children:"longestSlideDown"})," while\nchoosing the under and right both. They are separate computations though, so we\nare branching from each call of ",(0,a.jsx)(e.code,{children:"longestSlideDown"}),", unless it's a base case."]}),"\n",(0,a.jsx)(e.p,{children:"What does that mean for us then? 
We basically get"}),"\n",(0,a.jsx)(e.span,{className:"katex-display",children:(0,a.jsxs)(e.span,{className:"katex",children:[(0,a.jsx)(e.span,{className:"katex-mathml",children:(0,a.jsx)(e.math,{xmlns:"http://www.w3.org/1998/Math/MathML",display:"block",children:(0,a.jsxs)(e.semantics,{children:[(0,a.jsxs)(e.mrow,{children:[(0,a.jsx)(e.mi,{children:"T"}),(0,a.jsx)(e.mo,{stretchy:"false",children:"("}),(0,a.jsx)(e.mi,{children:"y"}),(0,a.jsx)(e.mo,{stretchy:"false",children:")"}),(0,a.jsx)(e.mo,{children:"="}),(0,a.jsxs)(e.mrow,{children:[(0,a.jsx)(e.mo,{fence:"true",children:"{"}),(0,a.jsxs)(e.mtable,{rowspacing:"0.36em",columnalign:"left left",columnspacing:"1em",children:[(0,a.jsxs)(e.mtr,{children:[(0,a.jsx)(e.mtd,{children:(0,a.jsx)(e.mstyle,{scriptlevel:"0",displaystyle:"false",children:(0,a.jsx)(e.mn,{children:"1"})})}),(0,a.jsx)(e.mtd,{children:(0,a.jsx)(e.mstyle,{scriptlevel:"0",displaystyle:"false",children:(0,a.jsxs)(e.mrow,{children:[(0,a.jsx)(e.mtext,{children:",\xa0if\xa0"}),(0,a.jsx)(e.mi,{children:"y"}),(0,a.jsx)(e.mo,{children:"="}),(0,a.jsx)(e.mi,{children:"r"}),(0,a.jsx)(e.mi,{children:"o"}),(0,a.jsx)(e.mi,{children:"w"}),(0,a.jsx)(e.mi,{children:"s"})]})})})]}),(0,a.jsxs)(e.mtr,{children:[(0,a.jsx)(e.mtd,{children:(0,a.jsx)(e.mstyle,{scriptlevel:"0",displaystyle:"false",children:(0,a.jsxs)(e.mrow,{children:[(0,a.jsx)(e.mn,{children:"1"}),(0,a.jsx)(e.mo,{children:"+"}),(0,a.jsx)(e.mn,{children:"2"}),(0,a.jsx)(e.mo,{children:"\u22c5"}),(0,a.jsx)(e.mi,{children:"T"}),(0,a.jsx)(e.mo,{stretchy:"false",children:"("}),(0,a.jsx)(e.mi,{children:"y"}),(0,a.jsx)(e.mo,{children:"+"}),(0,a.jsx)(e.mn,{children:"1"}),(0,a.jsx)(e.mo,{stretchy:"false",children:")"})]})})}),(0,a.jsx)(e.mtd,{children:(0,a.jsx)(e.mstyle,{scriptlevel:"0",displaystyle:"false",children:(0,a.jsx)(e.mtext,{children:",\xa0otherwise"})})})]})]})]})]}),(0,a.jsx)(e.annotation,{encoding:"application/x-tex",children:"T(y) =\n\\begin{cases}\n1 & \\text{, if } y = rows \\\\\n1 + 2 \\cdot T(y + 1) & \\text{, otherwise}\n\\end{cases}"})]})})}),(0,a.jsxs)(e.span,{className:"katex-html","aria-hidden":"true",children:[(0,a.jsxs)(e.span,{className:"base",children:[(0,a.jsx)(e.span,{className:"strut",style:{height:"1em",verticalAlign:"-0.25em"}}),(0,a.jsx)(e.span,{className:"mord mathnormal",style:{marginRight:"0.13889em"},children:"T"}),(0,a.jsx)(e.span,{className:"mopen",children:"("}),(0,a.jsx)(e.span,{className:"mord mathnormal",style:{marginRight:"0.03588em"},children:"y"}),(0,a.jsx)(e.span,{className:"mclose",children:")"}),(0,a.jsx)(e.span,{className:"mspace",style:{marginRight:"0.2778em"}}),(0,a.jsx)(e.span,{className:"mrel",children:"="}),(0,a.jsx)(e.span,{className:"mspace",style:{marginRight:"0.2778em"}})]}),(0,a.jsxs)(e.span,{className:"base",children:[(0,a.jsx)(e.span,{className:"strut",style:{height:"3em",verticalAlign:"-1.25em"}}),(0,a.jsxs)(e.span,{className:"minner",children:[(0,a.jsx)(e.span,{className:"mopen delimcenter",style:{top:"0em"},children:(0,a.jsx)(e.span,{className:"delimsizing size4",children:"{"})}),(0,a.jsx)(e.span,{className:"mord",children:(0,a.jsxs)(e.span,{className:"mtable",children:[(0,a.jsx)(e.span,{className:"col-align-l",children:(0,a.jsxs)(e.span,{className:"vlist-t 
vlist-t2",children:[(0,a.jsxs)(e.span,{className:"vlist-r",children:[(0,a.jsxs)(e.span,{className:"vlist",style:{height:"1.69em"},children:[(0,a.jsxs)(e.span,{style:{top:"-3.69em"},children:[(0,a.jsx)(e.span,{className:"pstrut",style:{height:"3.008em"}}),(0,a.jsx)(e.span,{className:"mord",children:(0,a.jsx)(e.span,{className:"mord",children:"1"})})]}),(0,a.jsxs)(e.span,{style:{top:"-2.25em"},children:[(0,a.jsx)(e.span,{className:"pstrut",style:{height:"3.008em"}}),(0,a.jsxs)(e.span,{className:"mord",children:[(0,a.jsx)(e.span,{className:"mord",children:"1"}),(0,a.jsx)(e.span,{className:"mspace",style:{marginRight:"0.2222em"}}),(0,a.jsx)(e.span,{className:"mbin",children:"+"}),(0,a.jsx)(e.span,{className:"mspace",style:{marginRight:"0.2222em"}}),(0,a.jsx)(e.span,{className:"mord",children:"2"}),(0,a.jsx)(e.span,{className:"mspace",style:{marginRight:"0.2222em"}}),(0,a.jsx)(e.span,{className:"mbin",children:"\u22c5"}),(0,a.jsx)(e.span,{className:"mspace",style:{marginRight:"0.2222em"}}),(0,a.jsx)(e.span,{className:"mord mathnormal",style:{marginRight:"0.13889em"},children:"T"}),(0,a.jsx)(e.span,{className:"mopen",children:"("}),(0,a.jsx)(e.span,{className:"mord mathnormal",style:{marginRight:"0.03588em"},children:"y"}),(0,a.jsx)(e.span,{className:"mspace",style:{marginRight:"0.2222em"}}),(0,a.jsx)(e.span,{className:"mbin",children:"+"}),(0,a.jsx)(e.span,{className:"mspace",style:{marginRight:"0.2222em"}}),(0,a.jsx)(e.span,{className:"mord",children:"1"}),(0,a.jsx)(e.span,{className:"mclose",children:")"})]})]})]}),(0,a.jsx)(e.span,{className:"vlist-s",children:"\u200b"})]}),(0,a.jsx)(e.span,{className:"vlist-r",children:(0,a.jsx)(e.span,{className:"vlist",style:{height:"1.19em"},children:(0,a.jsx)(e.span,{})})})]})}),(0,a.jsx)(e.span,{className:"arraycolsep",style:{width:"1em"}}),(0,a.jsx)(e.span,{className:"col-align-l",children:(0,a.jsxs)(e.span,{className:"vlist-t vlist-t2",children:[(0,a.jsxs)(e.span,{className:"vlist-r",children:[(0,a.jsxs)(e.span,{className:"vlist",style:{height:"1.69em"},children:[(0,a.jsxs)(e.span,{style:{top:"-3.69em"},children:[(0,a.jsx)(e.span,{className:"pstrut",style:{height:"3.008em"}}),(0,a.jsxs)(e.span,{className:"mord",children:[(0,a.jsx)(e.span,{className:"mord text",children:(0,a.jsx)(e.span,{className:"mord",children:",\xa0if\xa0"})}),(0,a.jsx)(e.span,{className:"mord mathnormal",style:{marginRight:"0.03588em"},children:"y"}),(0,a.jsx)(e.span,{className:"mspace",style:{marginRight:"0.2778em"}}),(0,a.jsx)(e.span,{className:"mrel",children:"="}),(0,a.jsx)(e.span,{className:"mspace",style:{marginRight:"0.2778em"}}),(0,a.jsx)(e.span,{className:"mord mathnormal",children:"ro"}),(0,a.jsx)(e.span,{className:"mord mathnormal",style:{marginRight:"0.02691em"},children:"w"}),(0,a.jsx)(e.span,{className:"mord mathnormal",children:"s"})]})]}),(0,a.jsxs)(e.span,{style:{top:"-2.25em"},children:[(0,a.jsx)(e.span,{className:"pstrut",style:{height:"3.008em"}}),(0,a.jsx)(e.span,{className:"mord",children:(0,a.jsx)(e.span,{className:"mord text",children:(0,a.jsx)(e.span,{className:"mord",children:",\xa0otherwise"})})})]})]}),(0,a.jsx)(e.span,{className:"vlist-s",children:"\u200b"})]}),(0,a.jsx)(e.span,{className:"vlist-r",children:(0,a.jsx)(e.span,{className:"vlist",style:{height:"1.19em"},children:(0,a.jsx)(e.span,{})})})]})})]})}),(0,a.jsx)(e.span,{className:"mclose nulldelimiter"})]})]})]})]})}),"\n",(0,a.jsx)(e.p,{children:"That looks rather easy to compute, isn't it? 
If you sum it up, you'll get:"}),"\n",(0,a.jsx)(e.span,{className:"katex-display",children:(0,a.jsxs)(e.span,{className:"katex",children:[(0,a.jsx)(e.span,{className:"katex-mathml",children:(0,a.jsx)(e.math,{xmlns:"http://www.w3.org/1998/Math/MathML",display:"block",children:(0,a.jsxs)(e.semantics,{children:[(0,a.jsxs)(e.mrow,{children:[(0,a.jsx)(e.mi,{children:"T"}),(0,a.jsx)(e.mo,{stretchy:"false",children:"("}),(0,a.jsx)(e.mi,{children:"r"}),(0,a.jsx)(e.mi,{children:"o"}),(0,a.jsx)(e.mi,{children:"w"}),(0,a.jsx)(e.mi,{children:"s"}),(0,a.jsx)(e.mo,{stretchy:"false",children:")"}),(0,a.jsx)(e.mo,{children:"\u2208"}),(0,a.jsx)(e.mi,{mathvariant:"script",children:"O"}),(0,a.jsx)(e.mo,{stretchy:"false",children:"("}),(0,a.jsxs)(e.msup,{children:[(0,a.jsx)(e.mn,{children:"2"}),(0,a.jsxs)(e.mrow,{children:[(0,a.jsx)(e.mi,{children:"r"}),(0,a.jsx)(e.mi,{children:"o"}),(0,a.jsx)(e.mi,{children:"w"}),(0,a.jsx)(e.mi,{children:"s"})]})]}),(0,a.jsx)(e.mo,{stretchy:"false",children:")"})]}),(0,a.jsx)(e.annotation,{encoding:"application/x-tex",children:"T(rows) \\in \\mathcal{O}(2^{rows})"})]})})}),(0,a.jsxs)(e.span,{className:"katex-html","aria-hidden":"true",children:[(0,a.jsxs)(e.span,{className:"base",children:[(0,a.jsx)(e.span,{className:"strut",style:{height:"1em",verticalAlign:"-0.25em"}}),(0,a.jsx)(e.span,{className:"mord mathnormal",style:{marginRight:"0.13889em"},children:"T"}),(0,a.jsx)(e.span,{className:"mopen",children:"("}),(0,a.jsx)(e.span,{className:"mord mathnormal",children:"ro"}),(0,a.jsx)(e.span,{className:"mord mathnormal",style:{marginRight:"0.02691em"},children:"w"}),(0,a.jsx)(e.span,{className:"mord mathnormal",children:"s"}),(0,a.jsx)(e.span,{className:"mclose",children:")"}),(0,a.jsx)(e.span,{className:"mspace",style:{marginRight:"0.2778em"}}),(0,a.jsx)(e.span,{className:"mrel",children:"\u2208"}),(0,a.jsx)(e.span,{className:"mspace",style:{marginRight:"0.2778em"}})]}),(0,a.jsxs)(e.span,{className:"base",children:[(0,a.jsx)(e.span,{className:"strut",style:{height:"1em",verticalAlign:"-0.25em"}}),(0,a.jsx)(e.span,{className:"mord mathcal",style:{marginRight:"0.02778em"},children:"O"}),(0,a.jsx)(e.span,{className:"mopen",children:"("}),(0,a.jsxs)(e.span,{className:"mord",children:[(0,a.jsx)(e.span,{className:"mord",children:"2"}),(0,a.jsx)(e.span,{className:"msupsub",children:(0,a.jsx)(e.span,{className:"vlist-t",children:(0,a.jsx)(e.span,{className:"vlist-r",children:(0,a.jsx)(e.span,{className:"vlist",style:{height:"0.7144em"},children:(0,a.jsxs)(e.span,{style:{top:"-3.113em",marginRight:"0.05em"},children:[(0,a.jsx)(e.span,{className:"pstrut",style:{height:"2.7em"}}),(0,a.jsx)(e.span,{className:"sizing reset-size6 size3 mtight",children:(0,a.jsxs)(e.span,{className:"mord mtight",children:[(0,a.jsx)(e.span,{className:"mord mathnormal mtight",children:"ro"}),(0,a.jsx)(e.span,{className:"mord mathnormal mtight",style:{marginRight:"0.02691em"},children:"w"}),(0,a.jsx)(e.span,{className:"mord mathnormal mtight",children:"s"})]})})]})})})})})]}),(0,a.jsx)(e.span,{className:"mclose",children:")"})]})]})]})}),"\n",(0,a.jsx)(e.p,{children:"If you wonder why, I'll try to describe it intuitively:"}),"\n",(0,a.jsxs)(e.ol,{children:["\n",(0,a.jsxs)(e.li,{children:["In each call to ",(0,a.jsx)(e.code,{children:"longestSlideDown"})," we do some work in constant time,\nregardless of being in the base case. 
Those are the ",(0,a.jsx)(e.code,{children:"1"}),"s in both cases."]}),"\n",(0,a.jsxs)(e.li,{children:["If we are not in the base case, we move one row down ",(0,a.jsx)(e.strong,{children:"twice"}),". That's how we\nobtained ",(0,a.jsx)(e.code,{children:"2 *"})," and ",(0,a.jsx)(e.code,{children:"y + 1"})," in the ",(0,a.jsx)(e.em,{children:"otherwise"})," case."]}),"\n",(0,a.jsxs)(e.li,{children:["We move row-by-row, so we move down ",(0,a.jsx)(e.code,{children:"y"}),"-times and each call splits to two\nsubtrees."]}),"\n",(0,a.jsxs)(e.li,{children:["Overall, if we were to represent the calls as a tree, we would get a full\nbinary tree of height ",(0,a.jsx)(e.code,{children:"y"}),", in each node we do some work in constant time,\ntherefore we can just sum the ones."]}),"\n"]}),"\n",(0,a.jsx)(e.admonition,{type:"warning",children:(0,a.jsx)(e.p,{children:"It would've been more complicated to get an exact result. In the equation above\nwe are assuming that the width of the pyramid is bound by the height."})}),"\n",(0,a.jsxs)(e.p,{children:["Hopefully we can agree that this is not the best we can do. ","\ud83d\ude09"]}),"\n",(0,a.jsx)(e.h2,{id:"greedy-solution",children:"Greedy solution"}),"\n",(0,a.jsxs)(e.p,{children:["We will try to optimize it a bit. Let's start with a relatively simple ",(0,a.jsx)(e.em,{children:"greedy"}),"\napproach."]}),"\n",(0,a.jsx)(e.admonition,{title:"Greedy algorithms",type:"info",children:(0,a.jsxs)(e.p,{children:[(0,a.jsx)(e.em,{children:"Greedy algorithms"})," can be described as algorithms that decide the action on the\noptimal option at the moment."]})}),"\n",(0,a.jsx)(e.p,{children:"We can try to adjust the na\xefve solution. The most problematic part are the\nrecursive calls. Let's apply the greedy approach there:"}),"\n",(0,a.jsx)(e.pre,{children:(0,a.jsx)(e.code,{className:"language-java",children:"public static int longestSlideDown(int[][] pyramid, int row, int col) {\n if (row == pyramid.length - 1) {\n // BASE: We're at the bottom\n return pyramid[row][col];\n }\n\n if (col + 1 >= pyramid[row + 1].length\n || pyramid[row + 1][col] > pyramid[row + 1][col + 1]) {\n // If we cannot go right or it's not feasible, we continue to the left.\n return pyramid[row][col] + longestSlideDown(pyramid, row + 1, col);\n }\n\n // Otherwise we just move to the right.\n return pyramid[row][col] + longestSlideDown(pyramid, row + 1, col + 1);\n}\n"})}),"\n",(0,a.jsxs)(e.p,{children:["OK, if we cannot go right ",(0,a.jsx)(e.strong,{children:"or"})," the right path adds smaller value to the sum,\nwe simply go left."]}),"\n",(0,a.jsx)(e.h3,{id:"time-complexity-1",children:"Time complexity"}),"\n",(0,a.jsxs)(e.p,{children:["We have switched from ",(0,a.jsx)(e.em,{children:"adding the maximum"})," to ",(0,a.jsx)(e.em,{children:"following the \u201cbigger\u201d path"}),", so\nwe improved the time complexity tremendously. We just go down the pyramid all\nthe way to the bottom. 
Therefore we are getting:"]}),"\n",(0,a.jsx)(e.span,{className:"katex-display",children:(0,a.jsxs)(e.span,{className:"katex",children:[(0,a.jsx)(e.span,{className:"katex-mathml",children:(0,a.jsx)(e.math,{xmlns:"http://www.w3.org/1998/Math/MathML",display:"block",children:(0,a.jsxs)(e.semantics,{children:[(0,a.jsxs)(e.mrow,{children:[(0,a.jsx)(e.mi,{mathvariant:"script",children:"O"}),(0,a.jsx)(e.mo,{stretchy:"false",children:"("}),(0,a.jsx)(e.mi,{children:"r"}),(0,a.jsx)(e.mi,{children:"o"}),(0,a.jsx)(e.mi,{children:"w"}),(0,a.jsx)(e.mi,{children:"s"}),(0,a.jsx)(e.mo,{stretchy:"false",children:")"})]}),(0,a.jsx)(e.annotation,{encoding:"application/x-tex",children:"\\mathcal{O}(rows)"})]})})}),(0,a.jsx)(e.span,{className:"katex-html","aria-hidden":"true",children:(0,a.jsxs)(e.span,{className:"base",children:[(0,a.jsx)(e.span,{className:"strut",style:{height:"1em",verticalAlign:"-0.25em"}}),(0,a.jsx)(e.span,{className:"mord mathcal",style:{marginRight:"0.02778em"},children:"O"}),(0,a.jsx)(e.span,{className:"mopen",children:"("}),(0,a.jsx)(e.span,{className:"mord mathnormal",children:"ro"}),(0,a.jsx)(e.span,{className:"mord mathnormal",style:{marginRight:"0.02691em"},children:"w"}),(0,a.jsx)(e.span,{className:"mord mathnormal",children:"s"}),(0,a.jsx)(e.span,{className:"mclose",children:")"})]})})]})}),"\n",(0,a.jsx)(e.p,{children:"We have managed to convert our exponential solution into a linear one."}),"\n",(0,a.jsx)(e.h3,{id:"running-the-tests",children:"Running the tests"}),"\n",(0,a.jsx)(e.p,{children:"However, if we run the tests, we notice that the second test failed:"}),"\n",(0,a.jsx)(e.pre,{children:(0,a.jsx)(e.code,{children:"Test #1: passed\nTest #2: failed\n"})}),"\n",(0,a.jsxs)(e.p,{children:["What's going on? Well, we have improved the time complexity, but greedy\nalgorithms are not the ideal solution to ",(0,a.jsx)(e.strong,{children:"all"})," problems. 
In this case there\nmay be a solution that is bigger than the one found using the greedy algorithm."]}),"\n",(0,a.jsx)(e.p,{children:"Imagine the following pyramid:"}),"\n",(0,a.jsx)(e.pre,{children:(0,a.jsx)(e.code,{children:" 1\n 2 3\n 5 6 7\n 8 9 10 11\n99 13 14 15 16\n"})}),"\n",(0,a.jsx)(e.p,{children:"We start at the top:"}),"\n",(0,a.jsxs)(e.ol,{children:["\n",(0,a.jsxs)(e.li,{children:["Current cell: ",(0,a.jsx)(e.code,{children:"1"}),", we can choose from ",(0,a.jsx)(e.code,{children:"2"})," and ",(0,a.jsx)(e.code,{children:"3"}),", ",(0,a.jsx)(e.code,{children:"3"})," looks better, so we\nchoose it."]}),"\n",(0,a.jsxs)(e.li,{children:["Current cell: ",(0,a.jsx)(e.code,{children:"3"}),", we can choose from ",(0,a.jsx)(e.code,{children:"6"})," and ",(0,a.jsx)(e.code,{children:"7"}),", ",(0,a.jsx)(e.code,{children:"7"})," looks better, so we\nchoose it."]}),"\n",(0,a.jsxs)(e.li,{children:["Current cell: ",(0,a.jsx)(e.code,{children:"7"}),", we can choose from ",(0,a.jsx)(e.code,{children:"10"})," and ",(0,a.jsx)(e.code,{children:"11"}),", ",(0,a.jsx)(e.code,{children:"11"})," looks better, so we\nchoose it."]}),"\n",(0,a.jsxs)(e.li,{children:["Current cell: ",(0,a.jsx)(e.code,{children:"11"}),", we can choose from ",(0,a.jsx)(e.code,{children:"15"})," and ",(0,a.jsx)(e.code,{children:"16"}),", ",(0,a.jsx)(e.code,{children:"16"})," looks better, so\nwe choose it."]}),"\n"]}),"\n",(0,a.jsxs)(e.p,{children:["Our final sum is: ",(0,a.jsx)(e.code,{children:"1 + 3 + 7 + 11 + 16 = 38"}),", but in the bottom left cell we\nhave a ",(0,a.jsx)(e.code,{children:"99"})," that is bigger than our whole sum."]}),"\n",(0,a.jsx)(e.admonition,{type:"tip",children:(0,a.jsx)(e.p,{children:"Dijkstra's algorithm is a greedy algorithm too, try to think why it is correct."})}),"\n",(0,a.jsx)(e.h2,{id:"top-down-dp",children:"Top-down DP"}),"\n",(0,a.jsxs)(e.p,{children:[(0,a.jsx)(e.em,{children:"Top-down dynamic programming"})," is probably the most common approach, since (at\nleast looks like) is the easiest to implement. The whole point is avoiding the\nunnecessary computations that we have already done."]}),"\n",(0,a.jsxs)(e.p,{children:["In our case, we can use our na\xefve solution and put a ",(0,a.jsx)(e.em,{children:"cache"})," on top of it that\nwill make sure, we don't do unnecessary calculations."]}),"\n",(0,a.jsx)(e.pre,{children:(0,a.jsx)(e.code,{className:"language-java",children:"// This \u201cstructure\u201d is required, since I have decided to use \u2039TreeMap\u203a which\n// requires the ordering on the keys. 
It represents one position in the pyramid.\nrecord Position(int row, int col) implements Comparable {\n public int compareTo(Position r) {\n if (row != r.row) {\n return Integer.valueOf(row).compareTo(r.row);\n }\n\n if (col != r.col) {\n return Integer.valueOf(col).compareTo(r.col);\n }\n\n return 0;\n }\n}\n\npublic static int longestSlideDown(\n int[][] pyramid,\n TreeMap cache,\n Position position) {\n int row = position.row;\n int col = position.col;\n\n if (row >= pyramid.length || col < 0 || col >= pyramid[row].length) {\n // BASE: out of bounds\n return Integer.MIN_VALUE;\n }\n\n if (row == pyramid.length - 1) {\n // BASE: bottom of the pyramid\n return pyramid[position.row][position.col];\n }\n\n if (!cache.containsKey(position)) {\n // We haven't computed the position yet, so we run the same \u201cformula\u201d as\n // in the na\xefve version \xbband\xab we put calculated slide into the cache.\n // Next time we want the slide down from given position, it will be just\n // retrieved from the cache.\n int slideDown = Math.max(\n longestSlideDown(pyramid, cache, new Position(row + 1, col)),\n longestSlideDown(pyramid, cache, new Position(row + 1, col + 1)));\n cache.put(position, pyramid[row][col] + slideDown);\n }\n\n return cache.get(position);\n}\n\npublic static int longestSlideDown(int[][] pyramid) {\n // At the beginning we need to create a cache and share it across the calls.\n TreeMap cache = new TreeMap<>();\n return longestSlideDown(pyramid, cache, new Position(0, 0));\n}\n"})}),"\n",(0,a.jsxs)(e.p,{children:["You have probably noticed that ",(0,a.jsx)(e.code,{children:"record Position"})," have appeared. Since we are\ncaching the already computed values, we need a \u201creasonable\u201d key. In this case we\nshare the cache only for one ",(0,a.jsx)(e.em,{children:"run"})," (i.e. pyramid) of the ",(0,a.jsx)(e.code,{children:"longestSlideDown"}),", so\nwe can cache just with the indices within the pyramid, i.e. the ",(0,a.jsx)(e.code,{children:"Position"}),"."]}),"\n",(0,a.jsx)(e.admonition,{title:"Record",type:"tip",children:(0,a.jsxs)(e.p,{children:[(0,a.jsx)(e.em,{children:"Record"})," is relatively new addition to the Java language. It is basically an\nimmutable structure with implicitly defined ",(0,a.jsx)(e.code,{children:".equals()"}),", ",(0,a.jsx)(e.code,{children:".hashCode()"}),",\n",(0,a.jsx)(e.code,{children:".toString()"})," and getters for the attributes."]})}),"\n",(0,a.jsxs)(e.p,{children:["Because of the choice of ",(0,a.jsx)(e.code,{children:"TreeMap"}),", we had to additionally define the ordering\non it."]}),"\n",(0,a.jsxs)(e.p,{children:["In the ",(0,a.jsx)(e.code,{children:"longestSlideDown"})," you can notice that the computation which used to be\nat the end of the na\xefve version above, is now wrapped in an ",(0,a.jsx)(e.code,{children:"if"})," statement that\nchecks for the presence of the position in the cache and computes the slide down\njust when it's needed."]}),"\n",(0,a.jsx)(e.h3,{id:"time-complexity-2",children:"Time complexity"}),"\n",(0,a.jsx)(e.p,{children:"If you think that evaluating time complexity for this approach is a bit more\ntricky, you are right. Keeping the cache in mind, it is not the easiest thing\nto do. 
However there are some observations that might help us figure this out:"}),"\n",(0,a.jsxs)(e.ol,{children:["\n",(0,a.jsx)(e.li,{children:"Slide down from each position is calculated only once."}),"\n",(0,a.jsx)(e.li,{children:"Once calculated, we use the result from the cache."}),"\n"]}),"\n",(0,a.jsxs)(e.p,{children:["Knowing this, we still cannot, at least easily, describe the time complexity of\nfinding the best slide down from a specific position, ",(0,a.jsx)(e.strong,{children:"but"})," we can bound it\nfrom above for the ",(0,a.jsx)(e.strong,{children:"whole"})," run from the top. Now the question is how we can do\nthat!"]}),"\n",(0,a.jsxs)(e.p,{children:["Overall we are doing the same things for almost",(0,a.jsx)(e.sup,{children:(0,a.jsx)(e.a,{href:"#user-content-fn-2",id:"user-content-fnref-2","data-footnote-ref":!0,"aria-describedby":"footnote-label",children:"2"})})," all of the positions within\nthe pyramid:"]}),"\n",(0,a.jsxs)(e.ol,{children:["\n",(0,a.jsxs)(e.li,{children:["\n",(0,a.jsx)(e.p,{children:"We calculate and store it (using the partial results stored in cache). This\nis done only once."}),"\n",(0,a.jsxs)(e.p,{children:["For each calculation we take 2 values from the cache and insert one value.\nBecause we have chosen ",(0,a.jsx)(e.code,{children:"TreeMap"}),", these 3 operations have logarithmic time\ncomplexity and therefore this step is equivalent to ",(0,a.jsxs)(e.span,{className:"katex",children:[(0,a.jsx)(e.span,{className:"katex-mathml",children:(0,a.jsx)(e.math,{xmlns:"http://www.w3.org/1998/Math/MathML",children:(0,a.jsxs)(e.semantics,{children:[(0,a.jsxs)(e.mrow,{children:[(0,a.jsx)(e.mn,{children:"3"}),(0,a.jsx)(e.mo,{children:"\u22c5"}),(0,a.jsxs)(e.msub,{children:[(0,a.jsxs)(e.mrow,{children:[(0,a.jsx)(e.mi,{children:"log"}),(0,a.jsx)(e.mo,{children:"\u2061"})]}),(0,a.jsx)(e.mn,{children:"2"})]}),(0,a.jsx)(e.mi,{children:"n"})]}),(0,a.jsx)(e.annotation,{encoding:"application/x-tex",children:"3 \\cdot \\log_2{n}"})]})})}),(0,a.jsxs)(e.span,{className:"katex-html","aria-hidden":"true",children:[(0,a.jsxs)(e.span,{className:"base",children:[(0,a.jsx)(e.span,{className:"strut",style:{height:"0.6444em"}}),(0,a.jsx)(e.span,{className:"mord",children:"3"}),(0,a.jsx)(e.span,{className:"mspace",style:{marginRight:"0.2222em"}}),(0,a.jsx)(e.span,{className:"mbin",children:"\u22c5"}),(0,a.jsx)(e.span,{className:"mspace",style:{marginRight:"0.2222em"}})]}),(0,a.jsxs)(e.span,{className:"base",children:[(0,a.jsx)(e.span,{className:"strut",style:{height:"0.9386em",verticalAlign:"-0.2441em"}}),(0,a.jsxs)(e.span,{className:"mop",children:[(0,a.jsxs)(e.span,{className:"mop",children:["lo",(0,a.jsx)(e.span,{style:{marginRight:"0.01389em"},children:"g"})]}),(0,a.jsx)(e.span,{className:"msupsub",children:(0,a.jsxs)(e.span,{className:"vlist-t vlist-t2",children:[(0,a.jsxs)(e.span,{className:"vlist-r",children:[(0,a.jsx)(e.span,{className:"vlist",style:{height:"0.207em"},children:(0,a.jsxs)(e.span,{style:{top:"-2.4559em",marginRight:"0.05em"},children:[(0,a.jsx)(e.span,{className:"pstrut",style:{height:"2.7em"}}),(0,a.jsx)(e.span,{className:"sizing reset-size6 size3 mtight",children:(0,a.jsx)(e.span,{className:"mord 
mtight",children:"2"})})]})}),(0,a.jsx)(e.span,{className:"vlist-s",children:"\u200b"})]}),(0,a.jsx)(e.span,{className:"vlist-r",children:(0,a.jsx)(e.span,{className:"vlist",style:{height:"0.2441em"},children:(0,a.jsx)(e.span,{})})})]})})]}),(0,a.jsx)(e.span,{className:"mspace",style:{marginRight:"0.1667em"}}),(0,a.jsx)(e.span,{className:"mord",children:(0,a.jsx)(e.span,{className:"mord mathnormal",children:"n"})})]})]})]}),"."]}),"\n",(0,a.jsx)(e.p,{children:"However for the sake of simplicity, we are going to account only for the\ninsertion, the reason is rather simple, if we include the 2 retrievals here,\nit will be interleaved with the next step, therefore it is easier to keep the\nretrievals in the following point."}),"\n",(0,a.jsx)(e.admonition,{type:"caution",children:(0,a.jsx)(e.p,{children:"You might have noticed it's still not that easy, cause we're not having full\ncache right from the beginning, but the sum of those logarithms cannot be\nexpressed in a nice way, so taking the upper bound, i.e. expecting the cache\nto be full at all times, is the best option for nice and readable complexity\nof the whole approach."})}),"\n",(0,a.jsxs)(e.p,{children:["Our final upper bound of this work is therefore ",(0,a.jsxs)(e.span,{className:"katex",children:[(0,a.jsx)(e.span,{className:"katex-mathml",children:(0,a.jsx)(e.math,{xmlns:"http://www.w3.org/1998/Math/MathML",children:(0,a.jsxs)(e.semantics,{children:[(0,a.jsxs)(e.mrow,{children:[(0,a.jsxs)(e.msub,{children:[(0,a.jsxs)(e.mrow,{children:[(0,a.jsx)(e.mi,{children:"log"}),(0,a.jsx)(e.mo,{children:"\u2061"})]}),(0,a.jsx)(e.mn,{children:"2"})]}),(0,a.jsx)(e.mi,{children:"n"})]}),(0,a.jsx)(e.annotation,{encoding:"application/x-tex",children:"\\log_2{n}"})]})})}),(0,a.jsx)(e.span,{className:"katex-html","aria-hidden":"true",children:(0,a.jsxs)(e.span,{className:"base",children:[(0,a.jsx)(e.span,{className:"strut",style:{height:"0.9386em",verticalAlign:"-0.2441em"}}),(0,a.jsxs)(e.span,{className:"mop",children:[(0,a.jsxs)(e.span,{className:"mop",children:["lo",(0,a.jsx)(e.span,{style:{marginRight:"0.01389em"},children:"g"})]}),(0,a.jsx)(e.span,{className:"msupsub",children:(0,a.jsxs)(e.span,{className:"vlist-t vlist-t2",children:[(0,a.jsxs)(e.span,{className:"vlist-r",children:[(0,a.jsx)(e.span,{className:"vlist",style:{height:"0.207em"},children:(0,a.jsxs)(e.span,{style:{top:"-2.4559em",marginRight:"0.05em"},children:[(0,a.jsx)(e.span,{className:"pstrut",style:{height:"2.7em"}}),(0,a.jsx)(e.span,{className:"sizing reset-size6 size3 mtight",children:(0,a.jsx)(e.span,{className:"mord mtight",children:"2"})})]})}),(0,a.jsx)(e.span,{className:"vlist-s",children:"\u200b"})]}),(0,a.jsx)(e.span,{className:"vlist-r",children:(0,a.jsx)(e.span,{className:"vlist",style:{height:"0.2441em"},children:(0,a.jsx)(e.span,{})})})]})})]}),(0,a.jsx)(e.span,{className:"mspace",style:{marginRight:"0.1667em"}}),(0,a.jsx)(e.span,{className:"mord",children:(0,a.jsx)(e.span,{className:"mord mathnormal",children:"n"})})]})})]}),"."]}),"\n"]}),"\n",(0,a.jsxs)(e.li,{children:["\n",(0,a.jsxs)(e.p,{children:["We retrieve it from the cache. 
Same as in first point, but only twice, so we\nget ",(0,a.jsxs)(e.span,{className:"katex",children:[(0,a.jsx)(e.span,{className:"katex-mathml",children:(0,a.jsx)(e.math,{xmlns:"http://www.w3.org/1998/Math/MathML",children:(0,a.jsxs)(e.semantics,{children:[(0,a.jsxs)(e.mrow,{children:[(0,a.jsx)(e.mn,{children:"2"}),(0,a.jsx)(e.mo,{children:"\u22c5"}),(0,a.jsxs)(e.msub,{children:[(0,a.jsxs)(e.mrow,{children:[(0,a.jsx)(e.mi,{children:"log"}),(0,a.jsx)(e.mo,{children:"\u2061"})]}),(0,a.jsx)(e.mn,{children:"2"})]}),(0,a.jsx)(e.mi,{children:"n"})]}),(0,a.jsx)(e.annotation,{encoding:"application/x-tex",children:"2 \\cdot \\log_2{n}"})]})})}),(0,a.jsxs)(e.span,{className:"katex-html","aria-hidden":"true",children:[(0,a.jsxs)(e.span,{className:"base",children:[(0,a.jsx)(e.span,{className:"strut",style:{height:"0.6444em"}}),(0,a.jsx)(e.span,{className:"mord",children:"2"}),(0,a.jsx)(e.span,{className:"mspace",style:{marginRight:"0.2222em"}}),(0,a.jsx)(e.span,{className:"mbin",children:"\u22c5"}),(0,a.jsx)(e.span,{className:"mspace",style:{marginRight:"0.2222em"}})]}),(0,a.jsxs)(e.span,{className:"base",children:[(0,a.jsx)(e.span,{className:"strut",style:{height:"0.9386em",verticalAlign:"-0.2441em"}}),(0,a.jsxs)(e.span,{className:"mop",children:[(0,a.jsxs)(e.span,{className:"mop",children:["lo",(0,a.jsx)(e.span,{style:{marginRight:"0.01389em"},children:"g"})]}),(0,a.jsx)(e.span,{className:"msupsub",children:(0,a.jsxs)(e.span,{className:"vlist-t vlist-t2",children:[(0,a.jsxs)(e.span,{className:"vlist-r",children:[(0,a.jsx)(e.span,{className:"vlist",style:{height:"0.207em"},children:(0,a.jsxs)(e.span,{style:{top:"-2.4559em",marginRight:"0.05em"},children:[(0,a.jsx)(e.span,{className:"pstrut",style:{height:"2.7em"}}),(0,a.jsx)(e.span,{className:"sizing reset-size6 size3 mtight",children:(0,a.jsx)(e.span,{className:"mord mtight",children:"2"})})]})}),(0,a.jsx)(e.span,{className:"vlist-s",children:"\u200b"})]}),(0,a.jsx)(e.span,{className:"vlist-r",children:(0,a.jsx)(e.span,{className:"vlist",style:{height:"0.2441em"},children:(0,a.jsx)(e.span,{})})})]})})]}),(0,a.jsx)(e.span,{className:"mspace",style:{marginRight:"0.1667em"}}),(0,a.jsx)(e.span,{className:"mord",children:(0,a.jsx)(e.span,{className:"mord mathnormal",children:"n"})})]})]})]}),"."]}),"\n",(0,a.jsx)(e.admonition,{type:"caution",children:(0,a.jsxs)(e.p,{children:["It's done twice because of the ",(0,a.jsx)(e.code,{children:".containsKey()"})," in the ",(0,a.jsx)(e.code,{children:"if"})," condition."]})}),"\n"]}),"\n"]}),"\n",(0,a.jsx)(e.p,{children:"Okay, we have evaluated work done for each of the cells in the pyramid and now\nwe need to put it together."}),"\n",(0,a.jsx)(e.p,{children:"Let's split the time complexity of our solution into two operands:"}),"\n",(0,a.jsx)(e.span,{className:"katex-display",children:(0,a.jsxs)(e.span,{className:"katex",children:[(0,a.jsx)(e.span,{className:"katex-mathml",children:(0,a.jsx)(e.math,{xmlns:"http://www.w3.org/1998/Math/MathML",display:"block",children:(0,a.jsxs)(e.semantics,{children:[(0,a.jsxs)(e.mrow,{children:[(0,a.jsx)(e.mi,{mathvariant:"script",children:"O"}),(0,a.jsx)(e.mo,{stretchy:"false",children:"("}),(0,a.jsx)(e.mi,{children:"r"}),(0,a.jsx)(e.mo,{children:"+"}),(0,a.jsx)(e.mi,{children:"s"}),(0,a.jsx)(e.mo,{stretchy:"false",children:")"})]}),(0,a.jsx)(e.annotation,{encoding:"application/x-tex",children:"\\mathcal{O}(r + 
s)"})]})})}),(0,a.jsxs)(e.span,{className:"katex-html","aria-hidden":"true",children:[(0,a.jsxs)(e.span,{className:"base",children:[(0,a.jsx)(e.span,{className:"strut",style:{height:"1em",verticalAlign:"-0.25em"}}),(0,a.jsx)(e.span,{className:"mord mathcal",style:{marginRight:"0.02778em"},children:"O"}),(0,a.jsx)(e.span,{className:"mopen",children:"("}),(0,a.jsx)(e.span,{className:"mord mathnormal",style:{marginRight:"0.02778em"},children:"r"}),(0,a.jsx)(e.span,{className:"mspace",style:{marginRight:"0.2222em"}}),(0,a.jsx)(e.span,{className:"mbin",children:"+"}),(0,a.jsx)(e.span,{className:"mspace",style:{marginRight:"0.2222em"}})]}),(0,a.jsxs)(e.span,{className:"base",children:[(0,a.jsx)(e.span,{className:"strut",style:{height:"1em",verticalAlign:"-0.25em"}}),(0,a.jsx)(e.span,{className:"mord mathnormal",children:"s"}),(0,a.jsx)(e.span,{className:"mclose",children:")"})]})]})]})}),"\n",(0,a.jsxs)(e.p,{children:[(0,a.jsxs)(e.span,{className:"katex",children:[(0,a.jsx)(e.span,{className:"katex-mathml",children:(0,a.jsx)(e.math,{xmlns:"http://www.w3.org/1998/Math/MathML",children:(0,a.jsxs)(e.semantics,{children:[(0,a.jsx)(e.mrow,{children:(0,a.jsx)(e.mi,{children:"r"})}),(0,a.jsx)(e.annotation,{encoding:"application/x-tex",children:"r"})]})})}),(0,a.jsx)(e.span,{className:"katex-html","aria-hidden":"true",children:(0,a.jsxs)(e.span,{className:"base",children:[(0,a.jsx)(e.span,{className:"strut",style:{height:"0.4306em"}}),(0,a.jsx)(e.span,{className:"mord mathnormal",style:{marginRight:"0.02778em"},children:"r"})]})})]})," will represent the ",(0,a.jsx)(e.em,{children:"actual"})," calculation of the cells and ",(0,a.jsxs)(e.span,{className:"katex",children:[(0,a.jsx)(e.span,{className:"katex-mathml",children:(0,a.jsx)(e.math,{xmlns:"http://www.w3.org/1998/Math/MathML",children:(0,a.jsxs)(e.semantics,{children:[(0,a.jsx)(e.mrow,{children:(0,a.jsx)(e.mi,{children:"s"})}),(0,a.jsx)(e.annotation,{encoding:"application/x-tex",children:"s"})]})})}),(0,a.jsx)(e.span,{className:"katex-html","aria-hidden":"true",children:(0,a.jsxs)(e.span,{className:"base",children:[(0,a.jsx)(e.span,{className:"strut",style:{height:"0.4306em"}}),(0,a.jsx)(e.span,{className:"mord mathnormal",children:"s"})]})})]})," will represent\nthe additional retrievals on top of the calculation."]}),"\n",(0,a.jsxs)(e.p,{children:["We calculate the values only ",(0,a.jsx)(e.strong,{children:"once"}),", therefore we can safely agree on:"]}),"\n",(0,a.jsx)(e.span,{className:"katex-display",children:(0,a.jsxs)(e.span,{className:"katex",children:[(0,a.jsx)(e.span,{className:"katex-mathml",children:(0,a.jsx)(e.math,{xmlns:"http://www.w3.org/1998/Math/MathML",display:"block",children:(0,a.jsxs)(e.semantics,{children:[(0,a.jsx)(e.mtable,{rowspacing:"0.25em",columnalign:"right left",columnspacing:"0em",children:(0,a.jsxs)(e.mtr,{children:[(0,a.jsx)(e.mtd,{children:(0,a.jsx)(e.mstyle,{scriptlevel:"0",displaystyle:"true",children:(0,a.jsx)(e.mi,{children:"r"})})}),(0,a.jsx)(e.mtd,{children:(0,a.jsx)(e.mstyle,{scriptlevel:"0",displaystyle:"true",children:(0,a.jsxs)(e.mrow,{children:[(0,a.jsx)(e.mrow,{}),(0,a.jsx)(e.mo,{children:"="}),(0,a.jsx)(e.mi,{children:"n"}),(0,a.jsx)(e.mo,{children:"\u22c5"}),(0,a.jsx)(e.mi,{children:"log"}),(0,a.jsx)(e.mo,{children:"\u2061"}),(0,a.jsx)(e.mi,{children:"n"})]})})})]})}),(0,a.jsx)(e.annotation,{encoding:"application/x-tex",children:"\\begin{align*}\nr &= n \\cdot \\log{n} 
\\\\\n\\end{align*}"})]})})}),(0,a.jsx)(e.span,{className:"katex-html","aria-hidden":"true",children:(0,a.jsxs)(e.span,{className:"base",children:[(0,a.jsx)(e.span,{className:"strut",style:{height:"1.5em",verticalAlign:"-0.5em"}}),(0,a.jsx)(e.span,{className:"mord",children:(0,a.jsxs)(e.span,{className:"mtable",children:[(0,a.jsx)(e.span,{className:"col-align-r",children:(0,a.jsxs)(e.span,{className:"vlist-t vlist-t2",children:[(0,a.jsxs)(e.span,{className:"vlist-r",children:[(0,a.jsx)(e.span,{className:"vlist",style:{height:"1em"},children:(0,a.jsxs)(e.span,{style:{top:"-3.16em"},children:[(0,a.jsx)(e.span,{className:"pstrut",style:{height:"3em"}}),(0,a.jsx)(e.span,{className:"mord",children:(0,a.jsx)(e.span,{className:"mord mathnormal",style:{marginRight:"0.02778em"},children:"r"})})]})}),(0,a.jsx)(e.span,{className:"vlist-s",children:"\u200b"})]}),(0,a.jsx)(e.span,{className:"vlist-r",children:(0,a.jsx)(e.span,{className:"vlist",style:{height:"0.5em"},children:(0,a.jsx)(e.span,{})})})]})}),(0,a.jsx)(e.span,{className:"col-align-l",children:(0,a.jsxs)(e.span,{className:"vlist-t vlist-t2",children:[(0,a.jsxs)(e.span,{className:"vlist-r",children:[(0,a.jsx)(e.span,{className:"vlist",style:{height:"1em"},children:(0,a.jsxs)(e.span,{style:{top:"-3.16em"},children:[(0,a.jsx)(e.span,{className:"pstrut",style:{height:"3em"}}),(0,a.jsxs)(e.span,{className:"mord",children:[(0,a.jsx)(e.span,{className:"mord"}),(0,a.jsx)(e.span,{className:"mspace",style:{marginRight:"0.2778em"}}),(0,a.jsx)(e.span,{className:"mrel",children:"="}),(0,a.jsx)(e.span,{className:"mspace",style:{marginRight:"0.2778em"}}),(0,a.jsx)(e.span,{className:"mord mathnormal",children:"n"}),(0,a.jsx)(e.span,{className:"mspace",style:{marginRight:"0.2222em"}}),(0,a.jsx)(e.span,{className:"mbin",children:"\u22c5"}),(0,a.jsx)(e.span,{className:"mspace",style:{marginRight:"0.2222em"}}),(0,a.jsxs)(e.span,{className:"mop",children:["lo",(0,a.jsx)(e.span,{style:{marginRight:"0.01389em"},children:"g"})]}),(0,a.jsx)(e.span,{className:"mspace",style:{marginRight:"0.1667em"}}),(0,a.jsx)(e.span,{className:"mord",children:(0,a.jsx)(e.span,{className:"mord mathnormal",children:"n"})})]})]})}),(0,a.jsx)(e.span,{className:"vlist-s",children:"\u200b"})]}),(0,a.jsx)(e.span,{className:"vlist-r",children:(0,a.jsx)(e.span,{className:"vlist",style:{height:"0.5em"},children:(0,a.jsx)(e.span,{})})})]})})]})})]})})]})}),"\n",(0,a.jsxs)(e.p,{children:["What about the ",(0,a.jsxs)(e.span,{className:"katex",children:[(0,a.jsx)(e.span,{className:"katex-mathml",children:(0,a.jsx)(e.math,{xmlns:"http://www.w3.org/1998/Math/MathML",children:(0,a.jsxs)(e.semantics,{children:[(0,a.jsx)(e.mrow,{children:(0,a.jsx)(e.mi,{children:"s"})}),(0,a.jsx)(e.annotation,{encoding:"application/x-tex",children:"s"})]})})}),(0,a.jsx)(e.span,{className:"katex-html","aria-hidden":"true",children:(0,a.jsxs)(e.span,{className:"base",children:[(0,a.jsx)(e.span,{className:"strut",style:{height:"0.4306em"}}),(0,a.jsx)(e.span,{className:"mord mathnormal",children:"s"})]})})]})," though? 
Key observation here is the fact that we have 2\nlookups on the tree in each of them ",(0,a.jsx)(e.strong,{children:"and"})," we do it twice, cause each cell has\nat most 2 parents:"]}),"\n",(0,a.jsx)(e.span,{className:"katex-display",children:(0,a.jsxs)(e.span,{className:"katex",children:[(0,a.jsx)(e.span,{className:"katex-mathml",children:(0,a.jsx)(e.math,{xmlns:"http://www.w3.org/1998/Math/MathML",display:"block",children:(0,a.jsxs)(e.semantics,{children:[(0,a.jsxs)(e.mtable,{rowspacing:"0.25em",columnalign:"right left",columnspacing:"0em",children:[(0,a.jsxs)(e.mtr,{children:[(0,a.jsx)(e.mtd,{children:(0,a.jsx)(e.mstyle,{scriptlevel:"0",displaystyle:"true",children:(0,a.jsx)(e.mi,{children:"s"})})}),(0,a.jsx)(e.mtd,{children:(0,a.jsx)(e.mstyle,{scriptlevel:"0",displaystyle:"true",children:(0,a.jsxs)(e.mrow,{children:[(0,a.jsx)(e.mrow,{}),(0,a.jsx)(e.mo,{children:"="}),(0,a.jsx)(e.mi,{children:"n"}),(0,a.jsx)(e.mo,{children:"\u22c5"}),(0,a.jsx)(e.mn,{children:"2"}),(0,a.jsx)(e.mo,{children:"\u22c5"}),(0,a.jsxs)(e.mrow,{children:[(0,a.jsx)(e.mo,{fence:"true",children:"("}),(0,a.jsx)(e.mn,{children:"2"}),(0,a.jsx)(e.mo,{children:"\u22c5"}),(0,a.jsx)(e.mi,{children:"log"}),(0,a.jsx)(e.mo,{children:"\u2061"}),(0,a.jsx)(e.mi,{children:"n"}),(0,a.jsx)(e.mo,{fence:"true",children:")"})]})]})})})]}),(0,a.jsxs)(e.mtr,{children:[(0,a.jsx)(e.mtd,{children:(0,a.jsx)(e.mstyle,{scriptlevel:"0",displaystyle:"true",children:(0,a.jsx)(e.mi,{children:"s"})})}),(0,a.jsx)(e.mtd,{children:(0,a.jsx)(e.mstyle,{scriptlevel:"0",displaystyle:"true",children:(0,a.jsxs)(e.mrow,{children:[(0,a.jsx)(e.mrow,{}),(0,a.jsx)(e.mo,{children:"="}),(0,a.jsx)(e.mn,{children:"4"}),(0,a.jsx)(e.mo,{children:"\u22c5"}),(0,a.jsx)(e.mi,{children:"n"}),(0,a.jsx)(e.mo,{children:"\u22c5"}),(0,a.jsx)(e.mi,{children:"log"}),(0,a.jsx)(e.mo,{children:"\u2061"}),(0,a.jsx)(e.mi,{children:"n"})]})})})]})]}),(0,a.jsx)(e.annotation,{encoding:"application/x-tex",children:"\\begin{align*}\ns &= n \\cdot 2 \\cdot \\left( 2 \\cdot \\log{n} \\right) \\\\\ns &= 4 \\cdot n \\cdot \\log{n}\n\\end{align*}"})]})})}),(0,a.jsx)(e.span,{className:"katex-html","aria-hidden":"true",children:(0,a.jsxs)(e.span,{className:"base",children:[(0,a.jsx)(e.span,{className:"strut",style:{height:"3em",verticalAlign:"-1.25em"}}),(0,a.jsx)(e.span,{className:"mord",children:(0,a.jsxs)(e.span,{className:"mtable",children:[(0,a.jsx)(e.span,{className:"col-align-r",children:(0,a.jsxs)(e.span,{className:"vlist-t vlist-t2",children:[(0,a.jsxs)(e.span,{className:"vlist-r",children:[(0,a.jsxs)(e.span,{className:"vlist",style:{height:"1.75em"},children:[(0,a.jsxs)(e.span,{style:{top:"-3.91em"},children:[(0,a.jsx)(e.span,{className:"pstrut",style:{height:"3em"}}),(0,a.jsx)(e.span,{className:"mord",children:(0,a.jsx)(e.span,{className:"mord mathnormal",children:"s"})})]}),(0,a.jsxs)(e.span,{style:{top:"-2.41em"},children:[(0,a.jsx)(e.span,{className:"pstrut",style:{height:"3em"}}),(0,a.jsx)(e.span,{className:"mord",children:(0,a.jsx)(e.span,{className:"mord mathnormal",children:"s"})})]})]}),(0,a.jsx)(e.span,{className:"vlist-s",children:"\u200b"})]}),(0,a.jsx)(e.span,{className:"vlist-r",children:(0,a.jsx)(e.span,{className:"vlist",style:{height:"1.25em"},children:(0,a.jsx)(e.span,{})})})]})}),(0,a.jsx)(e.span,{className:"col-align-l",children:(0,a.jsxs)(e.span,{className:"vlist-t 
vlist-t2",children:[(0,a.jsxs)(e.span,{className:"vlist-r",children:[(0,a.jsxs)(e.span,{className:"vlist",style:{height:"1.75em"},children:[(0,a.jsxs)(e.span,{style:{top:"-3.91em"},children:[(0,a.jsx)(e.span,{className:"pstrut",style:{height:"3em"}}),(0,a.jsxs)(e.span,{className:"mord",children:[(0,a.jsx)(e.span,{className:"mord"}),(0,a.jsx)(e.span,{className:"mspace",style:{marginRight:"0.2778em"}}),(0,a.jsx)(e.span,{className:"mrel",children:"="}),(0,a.jsx)(e.span,{className:"mspace",style:{marginRight:"0.2778em"}}),(0,a.jsx)(e.span,{className:"mord mathnormal",children:"n"}),(0,a.jsx)(e.span,{className:"mspace",style:{marginRight:"0.2222em"}}),(0,a.jsx)(e.span,{className:"mbin",children:"\u22c5"}),(0,a.jsx)(e.span,{className:"mspace",style:{marginRight:"0.2222em"}}),(0,a.jsx)(e.span,{className:"mord",children:"2"}),(0,a.jsx)(e.span,{className:"mspace",style:{marginRight:"0.2222em"}}),(0,a.jsx)(e.span,{className:"mbin",children:"\u22c5"}),(0,a.jsx)(e.span,{className:"mspace",style:{marginRight:"0.2222em"}}),(0,a.jsxs)(e.span,{className:"minner",children:[(0,a.jsx)(e.span,{className:"mopen delimcenter",style:{top:"0em"},children:"("}),(0,a.jsx)(e.span,{className:"mord",children:"2"}),(0,a.jsx)(e.span,{className:"mspace",style:{marginRight:"0.2222em"}}),(0,a.jsx)(e.span,{className:"mbin",children:"\u22c5"}),(0,a.jsx)(e.span,{className:"mspace",style:{marginRight:"0.2222em"}}),(0,a.jsxs)(e.span,{className:"mop",children:["lo",(0,a.jsx)(e.span,{style:{marginRight:"0.01389em"},children:"g"})]}),(0,a.jsx)(e.span,{className:"mspace",style:{marginRight:"0.1667em"}}),(0,a.jsx)(e.span,{className:"mord",children:(0,a.jsx)(e.span,{className:"mord mathnormal",children:"n"})}),(0,a.jsx)(e.span,{className:"mclose delimcenter",style:{top:"0em"},children:")"})]})]})]}),(0,a.jsxs)(e.span,{style:{top:"-2.41em"},children:[(0,a.jsx)(e.span,{className:"pstrut",style:{height:"3em"}}),(0,a.jsxs)(e.span,{className:"mord",children:[(0,a.jsx)(e.span,{className:"mord"}),(0,a.jsx)(e.span,{className:"mspace",style:{marginRight:"0.2778em"}}),(0,a.jsx)(e.span,{className:"mrel",children:"="}),(0,a.jsx)(e.span,{className:"mspace",style:{marginRight:"0.2778em"}}),(0,a.jsx)(e.span,{className:"mord",children:"4"}),(0,a.jsx)(e.span,{className:"mspace",style:{marginRight:"0.2222em"}}),(0,a.jsx)(e.span,{className:"mbin",children:"\u22c5"}),(0,a.jsx)(e.span,{className:"mspace",style:{marginRight:"0.2222em"}}),(0,a.jsx)(e.span,{className:"mord mathnormal",children:"n"}),(0,a.jsx)(e.span,{className:"mspace",style:{marginRight:"0.2222em"}}),(0,a.jsx)(e.span,{className:"mbin",children:"\u22c5"}),(0,a.jsx)(e.span,{className:"mspace",style:{marginRight:"0.2222em"}}),(0,a.jsxs)(e.span,{className:"mop",children:["lo",(0,a.jsx)(e.span,{style:{marginRight:"0.01389em"},children:"g"})]}),(0,a.jsx)(e.span,{className:"mspace",style:{marginRight:"0.1667em"}}),(0,a.jsx)(e.span,{className:"mord",children:(0,a.jsx)(e.span,{className:"mord mathnormal",children:"n"})})]})]})]}),(0,a.jsx)(e.span,{className:"vlist-s",children:"\u200b"})]}),(0,a.jsx)(e.span,{className:"vlist-r",children:(0,a.jsx)(e.span,{className:"vlist",style:{height:"1.25em"},children:(0,a.jsx)(e.span,{})})})]})})]})})]})})]})}),"\n",(0,a.jsxs)(e.admonition,{type:"tip",children:[(0,a.jsxs)(e.p,{children:["You might've noticed that lookups actually take more time than the construction\nof the results. 
This is not entirely true, since we have included the\n",(0,a.jsx)(e.code,{children:".containsKey()"})," and ",(0,a.jsx)(e.code,{children:".get()"})," from the ",(0,a.jsx)(e.code,{children:"return"})," statement in the second part."]}),(0,a.jsx)(e.p,{children:"If we were to represent this more precisely, we could've gone with:"}),(0,a.jsx)(e.span,{className:"katex-display",children:(0,a.jsxs)(e.span,{className:"katex",children:[(0,a.jsx)(e.span,{className:"katex-mathml",children:(0,a.jsx)(e.math,{xmlns:"http://www.w3.org/1998/Math/MathML",display:"block",children:(0,a.jsxs)(e.semantics,{children:[(0,a.jsxs)(e.mtable,{rowspacing:"0.25em",columnalign:"right left",columnspacing:"0em",children:[(0,a.jsxs)(e.mtr,{children:[(0,a.jsx)(e.mtd,{children:(0,a.jsx)(e.mstyle,{scriptlevel:"0",displaystyle:"true",children:(0,a.jsx)(e.mi,{children:"r"})})}),(0,a.jsx)(e.mtd,{children:(0,a.jsx)(e.mstyle,{scriptlevel:"0",displaystyle:"true",children:(0,a.jsxs)(e.mrow,{children:[(0,a.jsx)(e.mrow,{}),(0,a.jsx)(e.mo,{children:"="}),(0,a.jsx)(e.mn,{children:"3"}),(0,a.jsx)(e.mo,{children:"\u22c5"}),(0,a.jsx)(e.mi,{children:"n"}),(0,a.jsx)(e.mo,{children:"\u22c5"}),(0,a.jsx)(e.mi,{children:"log"}),(0,a.jsx)(e.mo,{children:"\u2061"}),(0,a.jsx)(e.mi,{children:"n"})]})})})]}),(0,a.jsxs)(e.mtr,{children:[(0,a.jsx)(e.mtd,{children:(0,a.jsx)(e.mstyle,{scriptlevel:"0",displaystyle:"true",children:(0,a.jsx)(e.mi,{children:"s"})})}),(0,a.jsx)(e.mtd,{children:(0,a.jsx)(e.mstyle,{scriptlevel:"0",displaystyle:"true",children:(0,a.jsxs)(e.mrow,{children:[(0,a.jsx)(e.mrow,{}),(0,a.jsx)(e.mo,{children:"="}),(0,a.jsx)(e.mn,{children:"2"}),(0,a.jsx)(e.mo,{children:"\u22c5"}),(0,a.jsx)(e.mi,{children:"n"}),(0,a.jsx)(e.mo,{children:"\u22c5"}),(0,a.jsx)(e.mi,{children:"log"}),(0,a.jsx)(e.mo,{children:"\u2061"}),(0,a.jsx)(e.mi,{children:"n"})]})})})]})]}),(0,a.jsx)(e.annotation,{encoding:"application/x-tex",children:"\\begin{align*}\nr &= 3 \\cdot n \\cdot \\log{n} \\\\\ns &= 2 \\cdot n \\cdot \\log{n}\n\\end{align*}"})]})})}),(0,a.jsx)(e.span,{className:"katex-html","aria-hidden":"true",children:(0,a.jsxs)(e.span,{className:"base",children:[(0,a.jsx)(e.span,{className:"strut",style:{height:"3em",verticalAlign:"-1.25em"}}),(0,a.jsx)(e.span,{className:"mord",children:(0,a.jsxs)(e.span,{className:"mtable",children:[(0,a.jsx)(e.span,{className:"col-align-r",children:(0,a.jsxs)(e.span,{className:"vlist-t vlist-t2",children:[(0,a.jsxs)(e.span,{className:"vlist-r",children:[(0,a.jsxs)(e.span,{className:"vlist",style:{height:"1.75em"},children:[(0,a.jsxs)(e.span,{style:{top:"-3.91em"},children:[(0,a.jsx)(e.span,{className:"pstrut",style:{height:"3em"}}),(0,a.jsx)(e.span,{className:"mord",children:(0,a.jsx)(e.span,{className:"mord mathnormal",style:{marginRight:"0.02778em"},children:"r"})})]}),(0,a.jsxs)(e.span,{style:{top:"-2.41em"},children:[(0,a.jsx)(e.span,{className:"pstrut",style:{height:"3em"}}),(0,a.jsx)(e.span,{className:"mord",children:(0,a.jsx)(e.span,{className:"mord mathnormal",children:"s"})})]})]}),(0,a.jsx)(e.span,{className:"vlist-s",children:"\u200b"})]}),(0,a.jsx)(e.span,{className:"vlist-r",children:(0,a.jsx)(e.span,{className:"vlist",style:{height:"1.25em"},children:(0,a.jsx)(e.span,{})})})]})}),(0,a.jsx)(e.span,{className:"col-align-l",children:(0,a.jsxs)(e.span,{className:"vlist-t 
vlist-t2",children:[(0,a.jsxs)(e.span,{className:"vlist-r",children:[(0,a.jsxs)(e.span,{className:"vlist",style:{height:"1.75em"},children:[(0,a.jsxs)(e.span,{style:{top:"-3.91em"},children:[(0,a.jsx)(e.span,{className:"pstrut",style:{height:"3em"}}),(0,a.jsxs)(e.span,{className:"mord",children:[(0,a.jsx)(e.span,{className:"mord"}),(0,a.jsx)(e.span,{className:"mspace",style:{marginRight:"0.2778em"}}),(0,a.jsx)(e.span,{className:"mrel",children:"="}),(0,a.jsx)(e.span,{className:"mspace",style:{marginRight:"0.2778em"}}),(0,a.jsx)(e.span,{className:"mord",children:"3"}),(0,a.jsx)(e.span,{className:"mspace",style:{marginRight:"0.2222em"}}),(0,a.jsx)(e.span,{className:"mbin",children:"\u22c5"}),(0,a.jsx)(e.span,{className:"mspace",style:{marginRight:"0.2222em"}}),(0,a.jsx)(e.span,{className:"mord mathnormal",children:"n"}),(0,a.jsx)(e.span,{className:"mspace",style:{marginRight:"0.2222em"}}),(0,a.jsx)(e.span,{className:"mbin",children:"\u22c5"}),(0,a.jsx)(e.span,{className:"mspace",style:{marginRight:"0.2222em"}}),(0,a.jsxs)(e.span,{className:"mop",children:["lo",(0,a.jsx)(e.span,{style:{marginRight:"0.01389em"},children:"g"})]}),(0,a.jsx)(e.span,{className:"mspace",style:{marginRight:"0.1667em"}}),(0,a.jsx)(e.span,{className:"mord",children:(0,a.jsx)(e.span,{className:"mord mathnormal",children:"n"})})]})]}),(0,a.jsxs)(e.span,{style:{top:"-2.41em"},children:[(0,a.jsx)(e.span,{className:"pstrut",style:{height:"3em"}}),(0,a.jsxs)(e.span,{className:"mord",children:[(0,a.jsx)(e.span,{className:"mord"}),(0,a.jsx)(e.span,{className:"mspace",style:{marginRight:"0.2778em"}}),(0,a.jsx)(e.span,{className:"mrel",children:"="}),(0,a.jsx)(e.span,{className:"mspace",style:{marginRight:"0.2778em"}}),(0,a.jsx)(e.span,{className:"mord",children:"2"}),(0,a.jsx)(e.span,{className:"mspace",style:{marginRight:"0.2222em"}}),(0,a.jsx)(e.span,{className:"mbin",children:"\u22c5"}),(0,a.jsx)(e.span,{className:"mspace",style:{marginRight:"0.2222em"}}),(0,a.jsx)(e.span,{className:"mord mathnormal",children:"n"}),(0,a.jsx)(e.span,{className:"mspace",style:{marginRight:"0.2222em"}}),(0,a.jsx)(e.span,{className:"mbin",children:"\u22c5"}),(0,a.jsx)(e.span,{className:"mspace",style:{marginRight:"0.2222em"}}),(0,a.jsxs)(e.span,{className:"mop",children:["lo",(0,a.jsx)(e.span,{style:{marginRight:"0.01389em"},children:"g"})]}),(0,a.jsx)(e.span,{className:"mspace",style:{marginRight:"0.1667em"}}),(0,a.jsx)(e.span,{className:"mord",children:(0,a.jsx)(e.span,{className:"mord mathnormal",children:"n"})})]})]})]}),(0,a.jsx)(e.span,{className:"vlist-s",children:"\u200b"})]}),(0,a.jsx)(e.span,{className:"vlist-r",children:(0,a.jsx)(e.span,{className:"vlist",style:{height:"1.25em"},children:(0,a.jsx)(e.span,{})})})]})})]})})]})})]})}),(0,a.jsx)(e.p,{children:"On the other hand we are summing both numbers together, therefore in the end it\ndoesn't really matter."}),(0,a.jsxs)(e.p,{children:["(",(0,a.jsx)(e.em,{children:"Feel free to compare the sums of both \u201csplits\u201d."}),")"]})]}),"\n",(0,a.jsxs)(e.p,{children:["And so our final time complexity for the whole ",(0,a.jsx)(e.em,{children:"top-down dynamic programming"}),"\napproach 
is:"]}),"\n",(0,a.jsx)(e.span,{className:"katex-display",children:(0,a.jsxs)(e.span,{className:"katex",children:[(0,a.jsx)(e.span,{className:"katex-mathml",children:(0,a.jsx)(e.math,{xmlns:"http://www.w3.org/1998/Math/MathML",display:"block",children:(0,a.jsxs)(e.semantics,{children:[(0,a.jsxs)(e.mrow,{children:[(0,a.jsx)(e.mi,{mathvariant:"script",children:"O"}),(0,a.jsx)(e.mo,{stretchy:"false",children:"("}),(0,a.jsx)(e.mi,{children:"r"}),(0,a.jsx)(e.mo,{children:"+"}),(0,a.jsx)(e.mi,{children:"s"}),(0,a.jsx)(e.mo,{stretchy:"false",children:")"}),(0,a.jsx)(e.mspace,{linebreak:"newline"}),(0,a.jsx)(e.mi,{mathvariant:"script",children:"O"}),(0,a.jsx)(e.mo,{stretchy:"false",children:"("}),(0,a.jsx)(e.mi,{children:"n"}),(0,a.jsx)(e.mo,{children:"\u22c5"}),(0,a.jsx)(e.mi,{children:"log"}),(0,a.jsx)(e.mo,{children:"\u2061"}),(0,a.jsx)(e.mi,{children:"n"}),(0,a.jsx)(e.mo,{children:"+"}),(0,a.jsx)(e.mn,{children:"4"}),(0,a.jsx)(e.mo,{children:"\u22c5"}),(0,a.jsx)(e.mi,{children:"n"}),(0,a.jsx)(e.mo,{children:"\u22c5"}),(0,a.jsx)(e.mi,{children:"log"}),(0,a.jsx)(e.mo,{children:"\u2061"}),(0,a.jsx)(e.mi,{children:"n"}),(0,a.jsx)(e.mo,{stretchy:"false",children:")"}),(0,a.jsx)(e.mspace,{linebreak:"newline"}),(0,a.jsx)(e.mi,{mathvariant:"script",children:"O"}),(0,a.jsx)(e.mo,{stretchy:"false",children:"("}),(0,a.jsx)(e.mn,{children:"5"}),(0,a.jsx)(e.mo,{children:"\u22c5"}),(0,a.jsx)(e.mi,{children:"n"}),(0,a.jsx)(e.mo,{children:"\u22c5"}),(0,a.jsx)(e.mi,{children:"log"}),(0,a.jsx)(e.mo,{children:"\u2061"}),(0,a.jsx)(e.mi,{children:"n"}),(0,a.jsx)(e.mo,{stretchy:"false",children:")"}),(0,a.jsx)(e.mspace,{linebreak:"newline"}),(0,a.jsx)(e.mi,{mathvariant:"script",children:"O"}),(0,a.jsx)(e.mo,{stretchy:"false",children:"("}),(0,a.jsx)(e.mi,{children:"n"}),(0,a.jsx)(e.mo,{children:"\u22c5"}),(0,a.jsx)(e.mi,{children:"log"}),(0,a.jsx)(e.mo,{children:"\u2061"}),(0,a.jsx)(e.mi,{children:"n"}),(0,a.jsx)(e.mo,{stretchy:"false",children:")"})]}),(0,a.jsx)(e.annotation,{encoding:"application/x-tex",children:"\\mathcal{O}(r + s) \\\\\n\\mathcal{O}(n \\cdot \\log{n} + 4 \\cdot n \\cdot \\log{n}) \\\\\n\\mathcal{O}(5 \\cdot n \\cdot \\log{n}) \\\\\n\\mathcal{O}(n \\cdot \\log{n})"})]})})}),(0,a.jsxs)(e.span,{className:"katex-html","aria-hidden":"true",children:[(0,a.jsxs)(e.span,{className:"base",children:[(0,a.jsx)(e.span,{className:"strut",style:{height:"1em",verticalAlign:"-0.25em"}}),(0,a.jsx)(e.span,{className:"mord mathcal",style:{marginRight:"0.02778em"},children:"O"}),(0,a.jsx)(e.span,{className:"mopen",children:"("}),(0,a.jsx)(e.span,{className:"mord mathnormal",style:{marginRight:"0.02778em"},children:"r"}),(0,a.jsx)(e.span,{className:"mspace",style:{marginRight:"0.2222em"}}),(0,a.jsx)(e.span,{className:"mbin",children:"+"}),(0,a.jsx)(e.span,{className:"mspace",style:{marginRight:"0.2222em"}})]}),(0,a.jsxs)(e.span,{className:"base",children:[(0,a.jsx)(e.span,{className:"strut",style:{height:"1em",verticalAlign:"-0.25em"}}),(0,a.jsx)(e.span,{className:"mord mathnormal",children:"s"}),(0,a.jsx)(e.span,{className:"mclose",children:")"})]}),(0,a.jsx)(e.span,{className:"mspace newline"}),(0,a.jsxs)(e.span,{className:"base",children:[(0,a.jsx)(e.span,{className:"strut",style:{height:"1em",verticalAlign:"-0.25em"}}),(0,a.jsx)(e.span,{className:"mord mathcal",style:{marginRight:"0.02778em"},children:"O"}),(0,a.jsx)(e.span,{className:"mopen",children:"("}),(0,a.jsx)(e.span,{className:"mord 
mathnormal",children:"n"}),(0,a.jsx)(e.span,{className:"mspace",style:{marginRight:"0.2222em"}}),(0,a.jsx)(e.span,{className:"mbin",children:"\u22c5"}),(0,a.jsx)(e.span,{className:"mspace",style:{marginRight:"0.2222em"}})]}),(0,a.jsxs)(e.span,{className:"base",children:[(0,a.jsx)(e.span,{className:"strut",style:{height:"0.8889em",verticalAlign:"-0.1944em"}}),(0,a.jsxs)(e.span,{className:"mop",children:["lo",(0,a.jsx)(e.span,{style:{marginRight:"0.01389em"},children:"g"})]}),(0,a.jsx)(e.span,{className:"mspace",style:{marginRight:"0.1667em"}}),(0,a.jsx)(e.span,{className:"mord",children:(0,a.jsx)(e.span,{className:"mord mathnormal",children:"n"})}),(0,a.jsx)(e.span,{className:"mspace",style:{marginRight:"0.2222em"}}),(0,a.jsx)(e.span,{className:"mbin",children:"+"}),(0,a.jsx)(e.span,{className:"mspace",style:{marginRight:"0.2222em"}})]}),(0,a.jsxs)(e.span,{className:"base",children:[(0,a.jsx)(e.span,{className:"strut",style:{height:"0.6444em"}}),(0,a.jsx)(e.span,{className:"mord",children:"4"}),(0,a.jsx)(e.span,{className:"mspace",style:{marginRight:"0.2222em"}}),(0,a.jsx)(e.span,{className:"mbin",children:"\u22c5"}),(0,a.jsx)(e.span,{className:"mspace",style:{marginRight:"0.2222em"}})]}),(0,a.jsxs)(e.span,{className:"base",children:[(0,a.jsx)(e.span,{className:"strut",style:{height:"0.4445em"}}),(0,a.jsx)(e.span,{className:"mord mathnormal",children:"n"}),(0,a.jsx)(e.span,{className:"mspace",style:{marginRight:"0.2222em"}}),(0,a.jsx)(e.span,{className:"mbin",children:"\u22c5"}),(0,a.jsx)(e.span,{className:"mspace",style:{marginRight:"0.2222em"}})]}),(0,a.jsxs)(e.span,{className:"base",children:[(0,a.jsx)(e.span,{className:"strut",style:{height:"1em",verticalAlign:"-0.25em"}}),(0,a.jsxs)(e.span,{className:"mop",children:["lo",(0,a.jsx)(e.span,{style:{marginRight:"0.01389em"},children:"g"})]}),(0,a.jsx)(e.span,{className:"mspace",style:{marginRight:"0.1667em"}}),(0,a.jsx)(e.span,{className:"mord",children:(0,a.jsx)(e.span,{className:"mord mathnormal",children:"n"})}),(0,a.jsx)(e.span,{className:"mclose",children:")"})]}),(0,a.jsx)(e.span,{className:"mspace newline"}),(0,a.jsxs)(e.span,{className:"base",children:[(0,a.jsx)(e.span,{className:"strut",style:{height:"1em",verticalAlign:"-0.25em"}}),(0,a.jsx)(e.span,{className:"mord mathcal",style:{marginRight:"0.02778em"},children:"O"}),(0,a.jsx)(e.span,{className:"mopen",children:"("}),(0,a.jsx)(e.span,{className:"mord",children:"5"}),(0,a.jsx)(e.span,{className:"mspace",style:{marginRight:"0.2222em"}}),(0,a.jsx)(e.span,{className:"mbin",children:"\u22c5"}),(0,a.jsx)(e.span,{className:"mspace",style:{marginRight:"0.2222em"}})]}),(0,a.jsxs)(e.span,{className:"base",children:[(0,a.jsx)(e.span,{className:"strut",style:{height:"0.4445em"}}),(0,a.jsx)(e.span,{className:"mord mathnormal",children:"n"}),(0,a.jsx)(e.span,{className:"mspace",style:{marginRight:"0.2222em"}}),(0,a.jsx)(e.span,{className:"mbin",children:"\u22c5"}),(0,a.jsx)(e.span,{className:"mspace",style:{marginRight:"0.2222em"}})]}),(0,a.jsxs)(e.span,{className:"base",children:[(0,a.jsx)(e.span,{className:"strut",style:{height:"1em",verticalAlign:"-0.25em"}}),(0,a.jsxs)(e.span,{className:"mop",children:["lo",(0,a.jsx)(e.span,{style:{marginRight:"0.01389em"},children:"g"})]}),(0,a.jsx)(e.span,{className:"mspace",style:{marginRight:"0.1667em"}}),(0,a.jsx)(e.span,{className:"mord",children:(0,a.jsx)(e.span,{className:"mord mathnormal",children:"n"})}),(0,a.jsx)(e.span,{className:"mclose",children:")"})]}),(0,a.jsx)(e.span,{className:"mspace 
newline"}),(0,a.jsxs)(e.span,{className:"base",children:[(0,a.jsx)(e.span,{className:"strut",style:{height:"1em",verticalAlign:"-0.25em"}}),(0,a.jsx)(e.span,{className:"mord mathcal",style:{marginRight:"0.02778em"},children:"O"}),(0,a.jsx)(e.span,{className:"mopen",children:"("}),(0,a.jsx)(e.span,{className:"mord mathnormal",children:"n"}),(0,a.jsx)(e.span,{className:"mspace",style:{marginRight:"0.2222em"}}),(0,a.jsx)(e.span,{className:"mbin",children:"\u22c5"}),(0,a.jsx)(e.span,{className:"mspace",style:{marginRight:"0.2222em"}})]}),(0,a.jsxs)(e.span,{className:"base",children:[(0,a.jsx)(e.span,{className:"strut",style:{height:"1em",verticalAlign:"-0.25em"}}),(0,a.jsxs)(e.span,{className:"mop",children:["lo",(0,a.jsx)(e.span,{style:{marginRight:"0.01389em"},children:"g"})]}),(0,a.jsx)(e.span,{className:"mspace",style:{marginRight:"0.1667em"}}),(0,a.jsx)(e.span,{className:"mord",children:(0,a.jsx)(e.span,{className:"mord mathnormal",children:"n"})}),(0,a.jsx)(e.span,{className:"mclose",children:")"})]})]})]})}),"\n",(0,a.jsxs)(e.p,{children:["As you can see, this is worse than our ",(0,a.jsx)(e.em,{children:"greedy"})," solution that was incorrect, but\nit's better than the ",(0,a.jsx)(e.em,{children:"na\xefve"})," one."]}),"\n",(0,a.jsx)(e.h3,{id:"memory-complexity",children:"Memory complexity"}),"\n",(0,a.jsxs)(e.p,{children:["With this approach we need to talk about the memory complexity too, because we\nhave introduced cache. If you think that the memory complexity is linear to the\ninput, you are right. We start at the top and try to find each and every slide\ndown. At the end we get the final result for ",(0,a.jsx)(e.code,{children:"new Position(0, 0)"}),", so we need to\ncompute everything below."]}),"\n",(0,a.jsx)(e.p,{children:"That's how we obtain:"}),"\n",(0,a.jsx)(e.span,{className:"katex-display",children:(0,a.jsxs)(e.span,{className:"katex",children:[(0,a.jsx)(e.span,{className:"katex-mathml",children:(0,a.jsx)(e.math,{xmlns:"http://www.w3.org/1998/Math/MathML",display:"block",children:(0,a.jsxs)(e.semantics,{children:[(0,a.jsxs)(e.mrow,{children:[(0,a.jsx)(e.mi,{mathvariant:"script",children:"O"}),(0,a.jsx)(e.mo,{stretchy:"false",children:"("}),(0,a.jsx)(e.mi,{children:"n"}),(0,a.jsx)(e.mo,{stretchy:"false",children:")"})]}),(0,a.jsx)(e.annotation,{encoding:"application/x-tex",children:"\\mathcal{O}(n)"})]})})}),(0,a.jsx)(e.span,{className:"katex-html","aria-hidden":"true",children:(0,a.jsxs)(e.span,{className:"base",children:[(0,a.jsx)(e.span,{className:"strut",style:{height:"1em",verticalAlign:"-0.25em"}}),(0,a.jsx)(e.span,{className:"mord mathcal",style:{marginRight:"0.02778em"},children:"O"}),(0,a.jsx)(e.span,{className:"mopen",children:"("}),(0,a.jsx)(e.span,{className:"mord mathnormal",children:"n"}),(0,a.jsx)(e.span,{className:"mclose",children:")"})]})})]})}),"\n",(0,a.jsxs)(e.p,{children:[(0,a.jsxs)(e.span,{className:"katex",children:[(0,a.jsx)(e.span,{className:"katex-mathml",children:(0,a.jsx)(e.math,{xmlns:"http://www.w3.org/1998/Math/MathML",children:(0,a.jsxs)(e.semantics,{children:[(0,a.jsx)(e.mrow,{children:(0,a.jsx)(e.mi,{children:"n"})}),(0,a.jsx)(e.annotation,{encoding:"application/x-tex",children:"n"})]})})}),(0,a.jsx)(e.span,{className:"katex-html","aria-hidden":"true",children:(0,a.jsxs)(e.span,{className:"base",children:[(0,a.jsx)(e.span,{className:"strut",style:{height:"0.4306em"}}),(0,a.jsx)(e.span,{className:"mord mathnormal",children:"n"})]})})]})," represents the total amount of cells in the pyramid, 
i.e."]}),"\n",(0,a.jsx)(e.span,{className:"katex-display",children:(0,a.jsxs)(e.span,{className:"katex",children:[(0,a.jsx)(e.span,{className:"katex-mathml",children:(0,a.jsx)(e.math,{xmlns:"http://www.w3.org/1998/Math/MathML",display:"block",children:(0,a.jsxs)(e.semantics,{children:[(0,a.jsxs)(e.mrow,{children:[(0,a.jsxs)(e.munderover,{children:[(0,a.jsx)(e.mo,{children:"\u2211"}),(0,a.jsxs)(e.mrow,{children:[(0,a.jsx)(e.mi,{children:"y"}),(0,a.jsx)(e.mo,{children:"="}),(0,a.jsx)(e.mn,{children:"0"})]}),(0,a.jsxs)(e.mrow,{children:[(0,a.jsxs)(e.mrow,{children:[(0,a.jsx)(e.mi,{mathvariant:"monospace",children:"p"}),(0,a.jsx)(e.mi,{mathvariant:"monospace",children:"y"}),(0,a.jsx)(e.mi,{mathvariant:"monospace",children:"r"}),(0,a.jsx)(e.mi,{mathvariant:"monospace",children:"a"}),(0,a.jsx)(e.mi,{mathvariant:"monospace",children:"m"}),(0,a.jsx)(e.mi,{mathvariant:"monospace",children:"i"}),(0,a.jsx)(e.mi,{mathvariant:"monospace",children:"d"}),(0,a.jsx)(e.mi,{mathvariant:"monospace",children:"."}),(0,a.jsx)(e.mi,{mathvariant:"monospace",children:"l"}),(0,a.jsx)(e.mi,{mathvariant:"monospace",children:"e"}),(0,a.jsx)(e.mi,{mathvariant:"monospace",children:"n"}),(0,a.jsx)(e.mi,{mathvariant:"monospace",children:"g"}),(0,a.jsx)(e.mi,{mathvariant:"monospace",children:"t"}),(0,a.jsx)(e.mi,{mathvariant:"monospace",children:"h"})]}),(0,a.jsx)(e.mo,{children:"\u2212"}),(0,a.jsx)(e.mn,{children:"1"})]})]}),(0,a.jsxs)(e.mrow,{children:[(0,a.jsx)(e.mi,{mathvariant:"monospace",children:"p"}),(0,a.jsx)(e.mi,{mathvariant:"monospace",children:"y"}),(0,a.jsx)(e.mi,{mathvariant:"monospace",children:"r"}),(0,a.jsx)(e.mi,{mathvariant:"monospace",children:"a"}),(0,a.jsx)(e.mi,{mathvariant:"monospace",children:"m"}),(0,a.jsx)(e.mi,{mathvariant:"monospace",children:"i"}),(0,a.jsx)(e.mi,{mathvariant:"monospace",children:"d"})]}),(0,a.jsxs)(e.mrow,{children:[(0,a.jsx)(e.mo,{fence:"true",children:"["}),(0,a.jsx)(e.mi,{children:"y"}),(0,a.jsx)(e.mo,{fence:"true",children:"]"})]}),(0,a.jsxs)(e.mrow,{children:[(0,a.jsx)(e.mi,{mathvariant:"monospace",children:"."}),(0,a.jsx)(e.mi,{mathvariant:"monospace",children:"l"}),(0,a.jsx)(e.mi,{mathvariant:"monospace",children:"e"}),(0,a.jsx)(e.mi,{mathvariant:"monospace",children:"n"}),(0,a.jsx)(e.mi,{mathvariant:"monospace",children:"g"}),(0,a.jsx)(e.mi,{mathvariant:"monospace",children:"t"}),(0,a.jsx)(e.mi,{mathvariant:"monospace",children:"h"})]})]}),(0,a.jsx)(e.annotation,{encoding:"application/x-tex",children:"\\sum_{y=0}^{\\mathtt{pyramid.length} - 1} \\mathtt{pyramid}\\left[y\\right]\\mathtt{.length}"})]})})}),(0,a.jsx)(e.span,{className:"katex-html","aria-hidden":"true",children:(0,a.jsxs)(e.span,{className:"base",children:[(0,a.jsx)(e.span,{className:"strut",style:{height:"3.2709em",verticalAlign:"-1.4032em"}}),(0,a.jsx)(e.span,{className:"mop op-limits",children:(0,a.jsxs)(e.span,{className:"vlist-t vlist-t2",children:[(0,a.jsxs)(e.span,{className:"vlist-r",children:[(0,a.jsxs)(e.span,{className:"vlist",style:{height:"1.8677em"},children:[(0,a.jsxs)(e.span,{style:{top:"-1.8829em",marginLeft:"0em"},children:[(0,a.jsx)(e.span,{className:"pstrut",style:{height:"3.05em"}}),(0,a.jsx)(e.span,{className:"sizing reset-size6 size3 mtight",children:(0,a.jsxs)(e.span,{className:"mord mtight",children:[(0,a.jsx)(e.span,{className:"mord mathnormal mtight",style:{marginRight:"0.03588em"},children:"y"}),(0,a.jsx)(e.span,{className:"mrel mtight",children:"="}),(0,a.jsx)(e.span,{className:"mord 
mtight",children:"0"})]})})]}),(0,a.jsxs)(e.span,{style:{top:"-3.05em"},children:[(0,a.jsx)(e.span,{className:"pstrut",style:{height:"3.05em"}}),(0,a.jsx)(e.span,{children:(0,a.jsx)(e.span,{className:"mop op-symbol large-op",children:"\u2211"})})]}),(0,a.jsxs)(e.span,{style:{top:"-4.3666em",marginLeft:"0em"},children:[(0,a.jsx)(e.span,{className:"pstrut",style:{height:"3.05em"}}),(0,a.jsx)(e.span,{className:"sizing reset-size6 size3 mtight",children:(0,a.jsxs)(e.span,{className:"mord mtight",children:[(0,a.jsx)(e.span,{className:"mord mtight",children:(0,a.jsx)(e.span,{className:"mord mathtt mtight",children:"pyramid.length"})}),(0,a.jsx)(e.span,{className:"mbin mtight",children:"\u2212"}),(0,a.jsx)(e.span,{className:"mord mtight",children:"1"})]})})]})]}),(0,a.jsx)(e.span,{className:"vlist-s",children:"\u200b"})]}),(0,a.jsx)(e.span,{className:"vlist-r",children:(0,a.jsx)(e.span,{className:"vlist",style:{height:"1.4032em"},children:(0,a.jsx)(e.span,{})})})]})}),(0,a.jsx)(e.span,{className:"mspace",style:{marginRight:"0.1667em"}}),(0,a.jsx)(e.span,{className:"mord",children:(0,a.jsx)(e.span,{className:"mord mathtt",children:"pyramid"})}),(0,a.jsx)(e.span,{className:"mspace",style:{marginRight:"0.1667em"}}),(0,a.jsxs)(e.span,{className:"minner",children:[(0,a.jsx)(e.span,{className:"mopen delimcenter",style:{top:"0em"},children:"["}),(0,a.jsx)(e.span,{className:"mord mathnormal",style:{marginRight:"0.03588em"},children:"y"}),(0,a.jsx)(e.span,{className:"mclose delimcenter",style:{top:"0em"},children:"]"})]}),(0,a.jsx)(e.span,{className:"mspace",style:{marginRight:"0.1667em"}}),(0,a.jsx)(e.span,{className:"mord",children:(0,a.jsx)(e.span,{className:"mord mathtt",children:".length"})})]})})]})}),"\n",(0,a.jsx)(e.admonition,{type:"caution",children:(0,a.jsxs)(e.p,{children:["If you're wondering whether it's correct because of the second ",(0,a.jsx)(e.code,{children:"if"})," in our\nfunction, your guess is right. However we are expressing the complexity in the\nBachmann-Landau notation, so we care about the ",(0,a.jsx)(e.strong,{children:"upper bound"}),", not the exact\nnumber."]})}),"\n",(0,a.jsxs)(e.admonition,{title:"Can this be optimized?",type:"tip",children:[(0,a.jsx)(e.p,{children:"Yes, it can! Try to think about a way, how can you minimize the memory\ncomplexity of this approach. 
I'll give you a hint:"}),(0,a.jsx)(e.span,{className:"katex-display",children:(0,a.jsxs)(e.span,{className:"katex",children:[(0,a.jsx)(e.span,{className:"katex-mathml",children:(0,a.jsx)(e.math,{xmlns:"http://www.w3.org/1998/Math/MathML",display:"block",children:(0,a.jsxs)(e.semantics,{children:[(0,a.jsxs)(e.mrow,{children:[(0,a.jsx)(e.mi,{mathvariant:"script",children:"O"}),(0,a.jsx)(e.mo,{stretchy:"false",children:"("}),(0,a.jsx)(e.mi,{children:"r"}),(0,a.jsx)(e.mi,{children:"o"}),(0,a.jsx)(e.mi,{children:"w"}),(0,a.jsx)(e.mi,{children:"s"}),(0,a.jsx)(e.mo,{stretchy:"false",children:")"})]}),(0,a.jsx)(e.annotation,{encoding:"application/x-tex",children:"\\mathcal{O}(rows)"})]})})}),(0,a.jsx)(e.span,{className:"katex-html","aria-hidden":"true",children:(0,a.jsxs)(e.span,{className:"base",children:[(0,a.jsx)(e.span,{className:"strut",style:{height:"1em",verticalAlign:"-0.25em"}}),(0,a.jsx)(e.span,{className:"mord mathcal",style:{marginRight:"0.02778em"},children:"O"}),(0,a.jsx)(e.span,{className:"mopen",children:"("}),(0,a.jsx)(e.span,{className:"mord mathnormal",children:"ro"}),(0,a.jsx)(e.span,{className:"mord mathnormal",style:{marginRight:"0.02691em"},children:"w"}),(0,a.jsx)(e.span,{className:"mord mathnormal",children:"s"}),(0,a.jsx)(e.span,{className:"mclose",children:")"})]})})]})})]}),"\n",(0,a.jsx)(e.h2,{id:"bottom-up-dp",children:"Bottom-up DP"}),"\n",(0,a.jsxs)(e.p,{children:["If you try to think in depth about the top-down DP solution, you might notice\nthat the ",(0,a.jsx)(e.em,{children:"core"})," of it stands on caching the calculations that have been already\ndone on the lower \u201clevels\u201d of the pyramid. Our bottom-up implementation will be\nusing this fact!"]}),"\n",(0,a.jsxs)(e.admonition,{type:"tip",children:[(0,a.jsxs)(e.p,{children:["As I have said in the ",(0,a.jsx)(e.em,{children:"top-down DP"})," section, it is the easiest way to implement\nDP (unless the cached function has complicated parameters, in that case it might\nget messy)."]}),(0,a.jsx)(e.p,{children:"Bottom-up dynamic programming can be more effective, but may be more complicated\nto implement right from the beginning."})]}),"\n",(0,a.jsx)(e.p,{children:"Let's see how we can implement it:"}),"\n",(0,a.jsx)(e.pre,{children:(0,a.jsx)(e.code,{className:"language-java",children:"public static int longestSlideDown(int[][] pyramid) {\n // In the beginning we declare new array. At this point it is easier to just\n // work with the one dimension, i.e. 
just allocating the space for the rows.\n int[][] slideDowns = new int[pyramid.length][];\n\n // Bottom row gets just copied, there's nothing else to do\u2026 It's the base\n // case.\n slideDowns[pyramid.length - 1] = Arrays.copyOf(pyramid[pyramid.length - 1],\n pyramid[pyramid.length - 1].length);\n\n // Then we need to propagate the found slide downs for each of the levels\n // above.\n for (int y = pyramid.length - 2; y >= 0; --y) {\n // We start by copying the values lying in the row we're processing.\n // They get included in the final sum and we need to allocate the space\n // for the precalculated slide downs anyways.\n int[] row = Arrays.copyOf(pyramid[y], pyramid[y].length);\n\n // At this we just need to \u201cfetch\u201d the partial results from \u201cneighbours\u201d\n for (int x = 0; x < row.length; ++x) {\n // We look under our position, since we expect the rows to get\n // shorter, we can safely assume such position exists.\n int under = slideDowns[y + 1][x];\n\n // Then we have a look to the right, such position doesn't have to\n // exist, e.g. on the right edge, so we validate the index, and if\n // it doesn't exist, we just assign minimum of the \u2039int\u203a which makes\n // sure that it doesn't get picked in the \u2039Math.max()\u203a call.\n int toRight = x + 1 < slideDowns[y + 1].length\n ? slideDowns[y + 1][x + 1]\n : Integer.MIN_VALUE;\n\n // Finally we add the best choice at this point.\n row[x] += Math.max(under, toRight);\n }\n\n // And save the row we've just calculated partial results for to the\n // \u201ctable\u201d.\n slideDowns[y] = row;\n }\n\n // At the end we can find our seeked slide down at the top cell.\n return slideDowns[0][0];\n}\n"})}),"\n",(0,a.jsx)(e.p,{children:"I've tried to explain the code as much as possible within the comments, since it\nmight be more beneficial to see right next to the \u201coffending\u201d lines."}),"\n",(0,a.jsxs)(e.p,{children:["As you can see, in this approach we go from the other side",(0,a.jsx)(e.sup,{children:(0,a.jsx)(e.a,{href:"#user-content-fn-3",id:"user-content-fnref-3","data-footnote-ref":!0,"aria-describedby":"footnote-label",children:"3"})}),", the bottom of\nthe pyramid and propagate the partial results up."]}),"\n",(0,a.jsxs)(e.admonition,{type:"info",children:[(0,a.jsxs)(e.mdxAdmonitionTitle,{children:["How is this different from the ",(0,a.jsx)(e.em,{children:"greedy"})," solution???"]}),(0,a.jsxs)(e.p,{children:["If you try to compare them, you might find a very noticable difference. The\ngreedy approach is going from the top to the bottom without ",(0,a.jsx)(e.strong,{children:"any"})," knowledge of\nwhat's going on below. On the other hand, bottom-up DP is going from the bottom\n(",(0,a.jsx)(e.em,{children:"DUH\u2026"}),") and ",(0,a.jsx)(e.strong,{children:"propagates"})," the partial results to the top. The propagation is\nwhat makes sure that at the top I don't choose the best ",(0,a.jsx)(e.strong,{children:"local"})," choice, but\nthe best ",(0,a.jsx)(e.strong,{children:"overall"})," result I can achieve."]})]}),"\n",(0,a.jsx)(e.h3,{id:"time-complexity-3",children:"Time complexity"}),"\n",(0,a.jsx)(e.p,{children:"Time complexity of this solution is rather simple. We allocate an array for the\nrows and then for each row, we copy it and adjust the partial results. 
Doing\nthis we get:"}),"\n",(0,a.jsx)(e.span,{className:"katex-display",children:(0,a.jsxs)(e.span,{className:"katex",children:[(0,a.jsx)(e.span,{className:"katex-mathml",children:(0,a.jsx)(e.math,{xmlns:"http://www.w3.org/1998/Math/MathML",display:"block",children:(0,a.jsxs)(e.semantics,{children:[(0,a.jsxs)(e.mrow,{children:[(0,a.jsx)(e.mi,{mathvariant:"script",children:"O"}),(0,a.jsx)(e.mo,{stretchy:"false",children:"("}),(0,a.jsx)(e.mi,{children:"r"}),(0,a.jsx)(e.mi,{children:"o"}),(0,a.jsx)(e.mi,{children:"w"}),(0,a.jsx)(e.mi,{children:"s"}),(0,a.jsx)(e.mo,{children:"+"}),(0,a.jsx)(e.mn,{children:"2"}),(0,a.jsx)(e.mi,{children:"n"}),(0,a.jsx)(e.mo,{stretchy:"false",children:")"})]}),(0,a.jsx)(e.annotation,{encoding:"application/x-tex",children:"\\mathcal{O}(rows + 2n)"})]})})}),(0,a.jsxs)(e.span,{className:"katex-html","aria-hidden":"true",children:[(0,a.jsxs)(e.span,{className:"base",children:[(0,a.jsx)(e.span,{className:"strut",style:{height:"1em",verticalAlign:"-0.25em"}}),(0,a.jsx)(e.span,{className:"mord mathcal",style:{marginRight:"0.02778em"},children:"O"}),(0,a.jsx)(e.span,{className:"mopen",children:"("}),(0,a.jsx)(e.span,{className:"mord mathnormal",children:"ro"}),(0,a.jsx)(e.span,{className:"mord mathnormal",style:{marginRight:"0.02691em"},children:"w"}),(0,a.jsx)(e.span,{className:"mord mathnormal",children:"s"}),(0,a.jsx)(e.span,{className:"mspace",style:{marginRight:"0.2222em"}}),(0,a.jsx)(e.span,{className:"mbin",children:"+"}),(0,a.jsx)(e.span,{className:"mspace",style:{marginRight:"0.2222em"}})]}),(0,a.jsxs)(e.span,{className:"base",children:[(0,a.jsx)(e.span,{className:"strut",style:{height:"1em",verticalAlign:"-0.25em"}}),(0,a.jsx)(e.span,{className:"mord",children:"2"}),(0,a.jsx)(e.span,{className:"mord mathnormal",children:"n"}),(0,a.jsx)(e.span,{className:"mclose",children:")"})]})]})]})}),"\n",(0,a.jsx)(e.p,{children:"Of course, this is an upper bound, since we iterate through the bottom row only\nonce."}),"\n",(0,a.jsx)(e.h3,{id:"memory-complexity-1",children:"Memory complexity"}),"\n",(0,a.jsxs)(e.p,{children:["We're allocating an array for the pyramid ",(0,a.jsx)(e.strong,{children:"again"})," for our partial results, so\nwe get:"]}),"\n",(0,a.jsx)(e.span,{className:"katex-display",children:(0,a.jsxs)(e.span,{className:"katex",children:[(0,a.jsx)(e.span,{className:"katex-mathml",children:(0,a.jsx)(e.math,{xmlns:"http://www.w3.org/1998/Math/MathML",display:"block",children:(0,a.jsxs)(e.semantics,{children:[(0,a.jsxs)(e.mrow,{children:[(0,a.jsx)(e.mi,{mathvariant:"script",children:"O"}),(0,a.jsx)(e.mo,{stretchy:"false",children:"("}),(0,a.jsx)(e.mi,{children:"n"}),(0,a.jsx)(e.mo,{stretchy:"false",children:")"})]}),(0,a.jsx)(e.annotation,{encoding:"application/x-tex",children:"\\mathcal{O}(n)"})]})})}),(0,a.jsx)(e.span,{className:"katex-html","aria-hidden":"true",children:(0,a.jsxs)(e.span,{className:"base",children:[(0,a.jsx)(e.span,{className:"strut",style:{height:"1em",verticalAlign:"-0.25em"}}),(0,a.jsx)(e.span,{className:"mord mathcal",style:{marginRight:"0.02778em"},children:"O"}),(0,a.jsx)(e.span,{className:"mopen",children:"("}),(0,a.jsx)(e.span,{className:"mord mathnormal",children:"n"}),(0,a.jsx)(e.span,{className:"mclose",children:")"})]})})]})}),"\n",(0,a.jsxs)(e.admonition,{type:"tip",children:[(0,a.jsx)(e.p,{children:"If we were writing this in C++ or Rust, we could've avoided that, but not\nreally."}),(0,a.jsxs)(e.p,{children:["C++ would allow us to ",(0,a.jsx)(e.strong,{children:"copy"})," the pyramid rightaway into the parameter, so we\nwould 
be able to directly change it. However it's still a copy, even though we\ndon't need to allocate anything ourselves. It's just implicitly done for us."]}),(0,a.jsxs)(e.p,{children:["Rust is more funny in this case. If the pyramids weren't used after the call of\n",(0,a.jsx)(e.code,{children:"longest_slide_down"}),", it would simply ",(0,a.jsx)(e.strong,{children:"move"})," them into the functions. If they\nwere used afterwards, the compiler would force you to either borrow it, or\n",(0,a.jsx)(e.em,{children:"clone-and-move"})," for the function."]}),(0,a.jsx)(e.hr,{}),(0,a.jsxs)(e.p,{children:["Since we're doing it in Java, we get a reference to the ",(0,a.jsx)(e.em,{children:"original"})," array and we\ncan't do whatever we want with it."]})]}),"\n",(0,a.jsx)(e.h2,{id:"summary",children:"Summary"}),"\n",(0,a.jsxs)(e.p,{children:["And we've finally reached the end. We have seen 4 different \u201csolutions\u201d",(0,a.jsx)(e.sup,{children:(0,a.jsx)(e.a,{href:"#user-content-fn-4",id:"user-content-fnref-4","data-footnote-ref":!0,"aria-describedby":"footnote-label",children:"4"})})," of\nthe same problem using different approaches. Different approaches follow the\norder in which you might come up with them, each approach influences its\nsuccessor and represents the way we can enhance the existing implementation."]}),"\n",(0,a.jsx)(e.hr,{}),"\n",(0,a.jsx)(e.admonition,{title:"source",type:"info",children:(0,a.jsxs)(e.p,{children:["You can find source code referenced in the text\n",(0,a.jsx)(e.a,{href:"pathname:///files/algorithms/recursion/pyramid-slide-down.tar.gz",children:"here"}),"."]})}),"\n",(0,a.jsxs)(e.section,{"data-footnotes":!0,className:"footnotes",children:[(0,a.jsx)(e.h2,{className:"sr-only",id:"footnote-label",children:"Footnotes"}),"\n",(0,a.jsxs)(e.ol,{children:["\n",(0,a.jsxs)(e.li,{id:"user-content-fn-1",children:["\n",(0,a.jsxs)(e.p,{children:["cause why not, right!? 
",(0,a.jsx)(e.a,{href:"#user-content-fnref-1","data-footnote-backref":"","aria-label":"Back to reference 1",className:"data-footnote-backref",children:"\u21a9"})]}),"\n"]}),"\n",(0,a.jsxs)(e.li,{id:"user-content-fn-2",children:["\n",(0,a.jsxs)(e.p,{children:["except the bottom row ",(0,a.jsx)(e.a,{href:"#user-content-fnref-2","data-footnote-backref":"","aria-label":"Back to reference 2",className:"data-footnote-backref",children:"\u21a9"})]}),"\n"]}),"\n",(0,a.jsxs)(e.li,{id:"user-content-fn-3",children:["\n",(0,a.jsxs)(e.p,{children:["definitely not an RHCP reference ","\ud83d\ude09 ",(0,a.jsx)(e.a,{href:"#user-content-fnref-3","data-footnote-backref":"","aria-label":"Back to reference 3",className:"data-footnote-backref",children:"\u21a9"})]}),"\n"]}),"\n",(0,a.jsxs)(e.li,{id:"user-content-fn-4",children:["\n",(0,a.jsxs)(e.p,{children:["one was not correct, thus the quotes ",(0,a.jsx)(e.a,{href:"#user-content-fnref-4","data-footnote-backref":"","aria-label":"Back to reference 4",className:"data-footnote-backref",children:"\u21a9"})]}),"\n"]}),"\n"]}),"\n"]})]})}function o(s={}){const{wrapper:e}={...(0,i.a)(),...s.components};return e?(0,a.jsx)(e,{...s,children:(0,a.jsx)(h,{...s})}):h(s)}},1151:(s,e,n)=>{n.d(e,{Z:()=>r,a:()=>t});var a=n(7294);const i={},l=a.createContext(i);function t(s){const e=a.useContext(l);return a.useMemo((function(){return"function"==typeof s?s(e):{...e,...s}}),[e,s])}function r(s){let e;return e=s.disableParentContext?"function"==typeof s.components?s.components(i):s.components||i:t(s.components),a.createElement(l.Provider,{value:e},s.children)}}}]); \ No newline at end of file +"use strict";(self.webpackChunkfi=self.webpackChunkfi||[]).push([[2619],{4457:(s,e,n)=>{n.r(e),n.d(e,{assets:()=>c,contentTitle:()=>t,default:()=>o,frontMatter:()=>l,metadata:()=>r,toc:()=>m});var a=n(5893),i=n(1151);const l={id:"pyramid-slide-down",title:"Introduction to dynamic programming",description:"Solving a problem in different ways.\n",tags:["java","recursion","exponential","greedy","dynamic-programming","top-down-dp","bottom-up-dp"],last_updated:{date:new Date("2023-08-17T00:00:00.000Z")}},t=void 0,r={id:"recursion/pyramid-slide-down",title:"Introduction to dynamic programming",description:"Solving a problem in different ways.\n",source:"@site/algorithms/04-recursion/2023-08-17-pyramid-slide-down.md",sourceDirName:"04-recursion",slug:"/recursion/pyramid-slide-down",permalink:"/algorithms/recursion/pyramid-slide-down",draft:!1,unlisted:!1,editUrl:"https://github.com/mfocko/blog/tree/main/algorithms/04-recursion/2023-08-17-pyramid-slide-down.md",tags:[{label:"java",permalink:"/algorithms/tags/java"},{label:"recursion",permalink:"/algorithms/tags/recursion"},{label:"exponential",permalink:"/algorithms/tags/exponential"},{label:"greedy",permalink:"/algorithms/tags/greedy"},{label:"dynamic-programming",permalink:"/algorithms/tags/dynamic-programming"},{label:"top-down-dp",permalink:"/algorithms/tags/top-down-dp"},{label:"bottom-up-dp",permalink:"/algorithms/tags/bottom-up-dp"}],version:"current",lastUpdatedAt:1700944879,formattedLastUpdatedAt:"Nov 25, 2023",frontMatter:{id:"pyramid-slide-down",title:"Introduction to dynamic programming",description:"Solving a problem in different ways.\n",tags:["java","recursion","exponential","greedy","dynamic-programming","top-down-dp","bottom-up-dp"],last_updated:{date:"2023-08-17T00:00:00.000Z"}},sidebar:"autogeneratedBar",previous:{title:"Recursion and backtracking with Robot 
Karel",permalink:"/algorithms/recursion/karel-1"},next:{title:"Red-Black Trees",permalink:"/algorithms/category/red-black-trees"}},c={},m=[{value:"Problem",id:"problem",level:2},{value:"Solving the problem",id:"solving-the-problem",level:2},{value:"Na\xefve solution",id:"na\xefve-solution",level:2},{value:"Time complexity",id:"time-complexity",level:3},{value:"Greedy solution",id:"greedy-solution",level:2},{value:"Time complexity",id:"time-complexity-1",level:3},{value:"Running the tests",id:"running-the-tests",level:3},{value:"Top-down DP",id:"top-down-dp",level:2},{value:"Time complexity",id:"time-complexity-2",level:3},{value:"Memory complexity",id:"memory-complexity",level:3},{value:"Bottom-up DP",id:"bottom-up-dp",level:2},{value:"Time complexity",id:"time-complexity-3",level:3},{value:"Memory complexity",id:"memory-complexity-1",level:3},{value:"Summary",id:"summary",level:2}];function h(s){const e={a:"a",admonition:"admonition",annotation:"annotation",code:"code",em:"em",h2:"h2",h3:"h3",hr:"hr",li:"li",math:"math",mdxAdmonitionTitle:"mdxAdmonitionTitle",mi:"mi",mn:"mn",mo:"mo",mrow:"mrow",mspace:"mspace",mstyle:"mstyle",msub:"msub",msup:"msup",mtable:"mtable",mtd:"mtd",mtext:"mtext",mtr:"mtr",munderover:"munderover",ol:"ol",p:"p",pre:"pre",section:"section",semantics:"semantics",span:"span",strong:"strong",sup:"sup",...(0,i.a)(),...s.components};return(0,a.jsxs)(a.Fragment,{children:[(0,a.jsx)(e.p,{children:"In this post we will try to solve one problem in different ways."}),"\n",(0,a.jsx)(e.h2,{id:"problem",children:"Problem"}),"\n",(0,a.jsxs)(e.p,{children:["The problem we are going to solve is one of ",(0,a.jsx)(e.em,{children:"CodeWars"})," katas and is called\n",(0,a.jsx)(e.a,{href:"https://www.codewars.com/kata/551f23362ff852e2ab000037",children:"Pyramid Slide Down"}),"."]}),"\n",(0,a.jsxs)(e.p,{children:["We are given a 2D array of integers and we are to find the ",(0,a.jsx)(e.em,{children:"slide down"}),".\n",(0,a.jsx)(e.em,{children:"Slide down"})," is a maximum sum of consecutive numbers from the top to the bottom."]}),"\n",(0,a.jsx)(e.p,{children:"Let's have a look at few examples. 
Consider the following pyramid:"}),"\n",(0,a.jsx)(e.pre,{children:(0,a.jsx)(e.code,{children:" 3\n 7 4\n 2 4 6\n8 5 9 3\n"})}),"\n",(0,a.jsx)(e.p,{children:"This pyramid has following slide down:"}),"\n",(0,a.jsx)(e.pre,{children:(0,a.jsx)(e.code,{children:" *3\n *7 4\n 2 *4 6\n8 5 *9 3\n"})}),"\n",(0,a.jsxs)(e.p,{children:["And its value is ",(0,a.jsx)(e.code,{children:"23"}),"."]}),"\n",(0,a.jsxs)(e.p,{children:["We can also have a look at a ",(0,a.jsx)(e.em,{children:"bigger"})," example:"]}),"\n",(0,a.jsx)(e.pre,{children:(0,a.jsx)(e.code,{children:" 75\n 95 64\n 17 47 82\n 18 35 87 10\n 20 4 82 47 65\n 19 1 23 3 34\n 88 2 77 73 7 63 67\n 99 65 4 28 6 16 70 92\n 41 41 26 56 83 40 80 70 33\n 41 48 72 33 47 32 37 16 94 29\n 53 71 44 65 25 43 91 52 97 51 14\n 70 11 33 28 77 73 17 78 39 68 17 57\n 91 71 52 38 17 14 91 43 58 50 27 29 48\n 63 66 4 68 89 53 67 30 73 16 69 87 40 31\n 4 62 98 27 23 9 70 98 73 93 38 53 60 4 23\n"})}),"\n",(0,a.jsxs)(e.p,{children:["Slide down in this case is equal to ",(0,a.jsx)(e.code,{children:"1074"}),"."]}),"\n",(0,a.jsx)(e.h2,{id:"solving-the-problem",children:"Solving the problem"}),"\n",(0,a.jsx)(e.admonition,{type:"caution",children:(0,a.jsxs)(e.p,{children:["I will describe the following ways you can approach this problem and implement\nthem in ",(0,a.jsx)(e.em,{children:"Java"}),(0,a.jsx)(e.sup,{children:(0,a.jsx)(e.a,{href:"#user-content-fn-1",id:"user-content-fnref-1","data-footnote-ref":!0,"aria-describedby":"footnote-label",children:"1"})}),"."]})}),"\n",(0,a.jsxs)(e.p,{children:["For all of the following solutions I will be using basic ",(0,a.jsx)(e.code,{children:"main"})," function that\nwill output ",(0,a.jsx)(e.code,{children:"true"}),"/",(0,a.jsx)(e.code,{children:"false"})," based on the expected output of our algorithm. Any\nother differences will lie only in the solutions of the problem. You can see the\n",(0,a.jsx)(e.code,{children:"main"})," here:"]}),"\n",(0,a.jsx)(e.pre,{children:(0,a.jsx)(e.code,{className:"language-java",children:'public static void main(String[] args) {\n System.out.print("Test #1: ");\n System.out.println(longestSlideDown(new int[][] {\n { 3 },\n { 7, 4 },\n { 2, 4, 6 },\n { 8, 5, 9, 3 }\n }) == 23 ? "passed" : "failed");\n\n System.out.print("Test #2: ");\n System.out.println(longestSlideDown(new int[][] {\n { 75 },\n { 95, 64 },\n { 17, 47, 82 },\n { 18, 35, 87, 10 },\n { 20, 4, 82, 47, 65 },\n { 19, 1, 23, 75, 3, 34 },\n { 88, 2, 77, 73, 7, 63, 67 },\n { 99, 65, 4, 28, 6, 16, 70, 92 },\n { 41, 41, 26, 56, 83, 40, 80, 70, 33 },\n { 41, 48, 72, 33, 47, 32, 37, 16, 94, 29 },\n { 53, 71, 44, 65, 25, 43, 91, 52, 97, 51, 14 },\n { 70, 11, 33, 28, 77, 73, 17, 78, 39, 68, 17, 57 },\n { 91, 71, 52, 38, 17, 14, 91, 43, 58, 50, 27, 29, 48 },\n { 63, 66, 4, 68, 89, 53, 67, 30, 73, 16, 69, 87, 40, 31 },\n { 4, 62, 98, 27, 23, 9, 70, 98, 73, 93, 38, 53, 60, 4, 23 },\n }) == 1074 ? 
"passed" : "failed");\n}\n'})}),"\n",(0,a.jsx)(e.h2,{id:"na\xefve-solution",children:"Na\xefve solution"}),"\n",(0,a.jsx)(e.p,{children:"Our na\xefve solution consists of trying out all the possible slides and finding\nthe one with maximum sum."}),"\n",(0,a.jsx)(e.pre,{children:(0,a.jsx)(e.code,{className:"language-java",children:"public static int longestSlideDown(int[][] pyramid, int row, int col) {\n if (row >= pyramid.length || col < 0 || col >= pyramid[row].length) {\n // BASE: We have gotten out of bounds, there's no reasonable value to\n // return, so we just return the \u2039MIN_VALUE\u203a to ensure that it cannot\n // be maximum.\n return Integer.MIN_VALUE;\n }\n\n if (row == pyramid.length - 1) {\n // BASE: Bottom of the pyramid, we just return the value, there's\n // nowhere to slide anymore.\n return pyramid[row][col];\n }\n\n // Otherwise we account for the current position and return maximum of the\n // available \u201cslides\u201d.\n return pyramid[row][col] + Math.max(\n longestSlideDown(pyramid, row + 1, col),\n longestSlideDown(pyramid, row + 1, col + 1));\n}\n\npublic static int longestSlideDown(int[][] pyramid) {\n // We start the slide in the top cell of the pyramid.\n return longestSlideDown(pyramid, 0, 0);\n}\n"})}),"\n",(0,a.jsx)(e.p,{children:"As you can see, we have 2 overloads:"}),"\n",(0,a.jsx)(e.pre,{children:(0,a.jsx)(e.code,{className:"language-java",children:"int longestSlideDown(int[][] pyramid);\nint longestSlideDown(int[][] pyramid, int row, int col);\n"})}),"\n",(0,a.jsxs)(e.p,{children:["First one is used as a ",(0,a.jsx)(e.em,{children:"public interface"})," to the solution, you just pass in the\npyramid itself. Second one is the recursive \u201calgorithm\u201d that finds the slide\ndown."]}),"\n",(0,a.jsxs)(e.p,{children:["It is a relatively simple solution\u2026 There's nothing to do at the bottom of the\npyramid, so we just return the value in the ",(0,a.jsx)(e.em,{children:"cell"}),". Otherwise we add it and try\nto slide down the available cells below the current row."]}),"\n",(0,a.jsx)(e.h3,{id:"time-complexity",children:"Time complexity"}),"\n",(0,a.jsx)(e.p,{children:"If you get the source code and run it yourself, it runs rather fine\u2026 I hope you\nare wondering about the time complexity of the proposed solution and, since it\nreally is a na\xefve solution, the time complexity is pretty bad. 
Let's find the\nworst case scenario."}),"\n",(0,a.jsx)(e.p,{children:"Let's start with the first overload:"}),"\n",(0,a.jsx)(e.pre,{children:(0,a.jsx)(e.code,{className:"language-java",children:"public static int longestSlideDown(int[][] pyramid) {\n return longestSlideDown(pyramid, 0, 0);\n}\n"})}),"\n",(0,a.jsxs)(e.p,{children:["There's not much to do here, so we can safely say that the time complexity of\nthis function is bounded by ",(0,a.jsxs)(e.span,{className:"katex",children:[(0,a.jsx)(e.span,{className:"katex-mathml",children:(0,a.jsx)(e.math,{xmlns:"http://www.w3.org/1998/Math/MathML",children:(0,a.jsxs)(e.semantics,{children:[(0,a.jsxs)(e.mrow,{children:[(0,a.jsx)(e.mi,{children:"T"}),(0,a.jsx)(e.mo,{stretchy:"false",children:"("}),(0,a.jsx)(e.mi,{children:"n"}),(0,a.jsx)(e.mo,{stretchy:"false",children:")"})]}),(0,a.jsx)(e.annotation,{encoding:"application/x-tex",children:"T(n)"})]})})}),(0,a.jsx)(e.span,{className:"katex-html","aria-hidden":"true",children:(0,a.jsxs)(e.span,{className:"base",children:[(0,a.jsx)(e.span,{className:"strut",style:{height:"1em",verticalAlign:"-0.25em"}}),(0,a.jsx)(e.span,{className:"mord mathnormal",style:{marginRight:"0.13889em"},children:"T"}),(0,a.jsx)(e.span,{className:"mopen",children:"("}),(0,a.jsx)(e.span,{className:"mord mathnormal",children:"n"}),(0,a.jsx)(e.span,{className:"mclose",children:")"})]})})]}),", where ",(0,a.jsxs)(e.span,{className:"katex",children:[(0,a.jsx)(e.span,{className:"katex-mathml",children:(0,a.jsx)(e.math,{xmlns:"http://www.w3.org/1998/Math/MathML",children:(0,a.jsxs)(e.semantics,{children:[(0,a.jsx)(e.mrow,{children:(0,a.jsx)(e.mi,{children:"T"})}),(0,a.jsx)(e.annotation,{encoding:"application/x-tex",children:"T"})]})})}),(0,a.jsx)(e.span,{className:"katex-html","aria-hidden":"true",children:(0,a.jsxs)(e.span,{className:"base",children:[(0,a.jsx)(e.span,{className:"strut",style:{height:"0.6833em"}}),(0,a.jsx)(e.span,{className:"mord mathnormal",style:{marginRight:"0.13889em"},children:"T"})]})})]})," is our second overload. 
This\ndoesn't tell us anything, so let's move on to the second overload where we are\ngoing to define the ",(0,a.jsxs)(e.span,{className:"katex",children:[(0,a.jsx)(e.span,{className:"katex-mathml",children:(0,a.jsx)(e.math,{xmlns:"http://www.w3.org/1998/Math/MathML",children:(0,a.jsxs)(e.semantics,{children:[(0,a.jsxs)(e.mrow,{children:[(0,a.jsx)(e.mi,{children:"T"}),(0,a.jsx)(e.mo,{stretchy:"false",children:"("}),(0,a.jsx)(e.mi,{children:"n"}),(0,a.jsx)(e.mo,{stretchy:"false",children:")"})]}),(0,a.jsx)(e.annotation,{encoding:"application/x-tex",children:"T(n)"})]})})}),(0,a.jsx)(e.span,{className:"katex-html","aria-hidden":"true",children:(0,a.jsxs)(e.span,{className:"base",children:[(0,a.jsx)(e.span,{className:"strut",style:{height:"1em",verticalAlign:"-0.25em"}}),(0,a.jsx)(e.span,{className:"mord mathnormal",style:{marginRight:"0.13889em"},children:"T"}),(0,a.jsx)(e.span,{className:"mopen",children:"("}),(0,a.jsx)(e.span,{className:"mord mathnormal",children:"n"}),(0,a.jsx)(e.span,{className:"mclose",children:")"})]})})]})," function."]}),"\n",(0,a.jsx)(e.pre,{children:(0,a.jsx)(e.code,{className:"language-java",children:"public static int longestSlideDown(int[][] pyramid, int row, int col) {\n if (row >= pyramid.length || col < 0 || col >= pyramid[row].length) {\n // BASE: We have gotten out of bounds, there's no reasonable value to\n // return, so we just return the \u2039MIN_VALUE\u203a to ensure that it cannot\n // be maximum.\n return Integer.MIN_VALUE;\n }\n\n if (row == pyramid.length - 1) {\n // BASE: Bottom of the pyramid, we just return the value, there's\n // nowhere to slide anymore.\n return pyramid[row][col];\n }\n\n // Otherwise we account for the current position and return maximum of the\n // available \u201cslides\u201d.\n return pyramid[row][col] + Math.max(\n longestSlideDown(pyramid, row + 1, col),\n longestSlideDown(pyramid, row + 1, col + 1));\n}\n"})}),"\n",(0,a.jsxs)(e.p,{children:["Fun fact is that the whole \u201calgorithm\u201d consists of just 2 ",(0,a.jsx)(e.code,{children:"return"})," statements\nand nothing else. Let's dissect them!"]}),"\n",(0,a.jsxs)(e.p,{children:["First ",(0,a.jsx)(e.code,{children:"return"})," statement is the base case, so it has a constant time complexity."]}),"\n",(0,a.jsxs)(e.p,{children:["Second one a bit tricky. We add two numbers together, which we'll consider as\nconstant, but for the right part of the expression we take maximum from the left\nand right paths. OK\u2026 So what happens? We evaluate the ",(0,a.jsx)(e.code,{children:"longestSlideDown"})," while\nchoosing the under and right both. They are separate computations though, so we\nare branching from each call of ",(0,a.jsx)(e.code,{children:"longestSlideDown"}),", unless it's a base case."]}),"\n",(0,a.jsx)(e.p,{children:"What does that mean for us then? 
We basically get"}),"\n",(0,a.jsx)(e.span,{className:"katex-display",children:(0,a.jsxs)(e.span,{className:"katex",children:[(0,a.jsx)(e.span,{className:"katex-mathml",children:(0,a.jsx)(e.math,{xmlns:"http://www.w3.org/1998/Math/MathML",display:"block",children:(0,a.jsxs)(e.semantics,{children:[(0,a.jsxs)(e.mrow,{children:[(0,a.jsx)(e.mi,{children:"T"}),(0,a.jsx)(e.mo,{stretchy:"false",children:"("}),(0,a.jsx)(e.mi,{children:"y"}),(0,a.jsx)(e.mo,{stretchy:"false",children:")"}),(0,a.jsx)(e.mo,{children:"="}),(0,a.jsxs)(e.mrow,{children:[(0,a.jsx)(e.mo,{fence:"true",children:"{"}),(0,a.jsxs)(e.mtable,{rowspacing:"0.36em",columnalign:"left left",columnspacing:"1em",children:[(0,a.jsxs)(e.mtr,{children:[(0,a.jsx)(e.mtd,{children:(0,a.jsx)(e.mstyle,{scriptlevel:"0",displaystyle:"false",children:(0,a.jsx)(e.mn,{children:"1"})})}),(0,a.jsx)(e.mtd,{children:(0,a.jsx)(e.mstyle,{scriptlevel:"0",displaystyle:"false",children:(0,a.jsxs)(e.mrow,{children:[(0,a.jsx)(e.mtext,{children:",\xa0if\xa0"}),(0,a.jsx)(e.mi,{children:"y"}),(0,a.jsx)(e.mo,{children:"="}),(0,a.jsx)(e.mi,{children:"r"}),(0,a.jsx)(e.mi,{children:"o"}),(0,a.jsx)(e.mi,{children:"w"}),(0,a.jsx)(e.mi,{children:"s"})]})})})]}),(0,a.jsxs)(e.mtr,{children:[(0,a.jsx)(e.mtd,{children:(0,a.jsx)(e.mstyle,{scriptlevel:"0",displaystyle:"false",children:(0,a.jsxs)(e.mrow,{children:[(0,a.jsx)(e.mn,{children:"1"}),(0,a.jsx)(e.mo,{children:"+"}),(0,a.jsx)(e.mn,{children:"2"}),(0,a.jsx)(e.mo,{children:"\u22c5"}),(0,a.jsx)(e.mi,{children:"T"}),(0,a.jsx)(e.mo,{stretchy:"false",children:"("}),(0,a.jsx)(e.mi,{children:"y"}),(0,a.jsx)(e.mo,{children:"+"}),(0,a.jsx)(e.mn,{children:"1"}),(0,a.jsx)(e.mo,{stretchy:"false",children:")"})]})})}),(0,a.jsx)(e.mtd,{children:(0,a.jsx)(e.mstyle,{scriptlevel:"0",displaystyle:"false",children:(0,a.jsx)(e.mtext,{children:",\xa0otherwise"})})})]})]})]})]}),(0,a.jsx)(e.annotation,{encoding:"application/x-tex",children:"T(y) =\n\\begin{cases}\n1 & \\text{, if } y = rows \\\\\n1 + 2 \\cdot T(y + 1) & \\text{, otherwise}\n\\end{cases}"})]})})}),(0,a.jsxs)(e.span,{className:"katex-html","aria-hidden":"true",children:[(0,a.jsxs)(e.span,{className:"base",children:[(0,a.jsx)(e.span,{className:"strut",style:{height:"1em",verticalAlign:"-0.25em"}}),(0,a.jsx)(e.span,{className:"mord mathnormal",style:{marginRight:"0.13889em"},children:"T"}),(0,a.jsx)(e.span,{className:"mopen",children:"("}),(0,a.jsx)(e.span,{className:"mord mathnormal",style:{marginRight:"0.03588em"},children:"y"}),(0,a.jsx)(e.span,{className:"mclose",children:")"}),(0,a.jsx)(e.span,{className:"mspace",style:{marginRight:"0.2778em"}}),(0,a.jsx)(e.span,{className:"mrel",children:"="}),(0,a.jsx)(e.span,{className:"mspace",style:{marginRight:"0.2778em"}})]}),(0,a.jsxs)(e.span,{className:"base",children:[(0,a.jsx)(e.span,{className:"strut",style:{height:"3em",verticalAlign:"-1.25em"}}),(0,a.jsxs)(e.span,{className:"minner",children:[(0,a.jsx)(e.span,{className:"mopen delimcenter",style:{top:"0em"},children:(0,a.jsx)(e.span,{className:"delimsizing size4",children:"{"})}),(0,a.jsx)(e.span,{className:"mord",children:(0,a.jsxs)(e.span,{className:"mtable",children:[(0,a.jsx)(e.span,{className:"col-align-l",children:(0,a.jsxs)(e.span,{className:"vlist-t 
vlist-t2",children:[(0,a.jsxs)(e.span,{className:"vlist-r",children:[(0,a.jsxs)(e.span,{className:"vlist",style:{height:"1.69em"},children:[(0,a.jsxs)(e.span,{style:{top:"-3.69em"},children:[(0,a.jsx)(e.span,{className:"pstrut",style:{height:"3.008em"}}),(0,a.jsx)(e.span,{className:"mord",children:(0,a.jsx)(e.span,{className:"mord",children:"1"})})]}),(0,a.jsxs)(e.span,{style:{top:"-2.25em"},children:[(0,a.jsx)(e.span,{className:"pstrut",style:{height:"3.008em"}}),(0,a.jsxs)(e.span,{className:"mord",children:[(0,a.jsx)(e.span,{className:"mord",children:"1"}),(0,a.jsx)(e.span,{className:"mspace",style:{marginRight:"0.2222em"}}),(0,a.jsx)(e.span,{className:"mbin",children:"+"}),(0,a.jsx)(e.span,{className:"mspace",style:{marginRight:"0.2222em"}}),(0,a.jsx)(e.span,{className:"mord",children:"2"}),(0,a.jsx)(e.span,{className:"mspace",style:{marginRight:"0.2222em"}}),(0,a.jsx)(e.span,{className:"mbin",children:"\u22c5"}),(0,a.jsx)(e.span,{className:"mspace",style:{marginRight:"0.2222em"}}),(0,a.jsx)(e.span,{className:"mord mathnormal",style:{marginRight:"0.13889em"},children:"T"}),(0,a.jsx)(e.span,{className:"mopen",children:"("}),(0,a.jsx)(e.span,{className:"mord mathnormal",style:{marginRight:"0.03588em"},children:"y"}),(0,a.jsx)(e.span,{className:"mspace",style:{marginRight:"0.2222em"}}),(0,a.jsx)(e.span,{className:"mbin",children:"+"}),(0,a.jsx)(e.span,{className:"mspace",style:{marginRight:"0.2222em"}}),(0,a.jsx)(e.span,{className:"mord",children:"1"}),(0,a.jsx)(e.span,{className:"mclose",children:")"})]})]})]}),(0,a.jsx)(e.span,{className:"vlist-s",children:"\u200b"})]}),(0,a.jsx)(e.span,{className:"vlist-r",children:(0,a.jsx)(e.span,{className:"vlist",style:{height:"1.19em"},children:(0,a.jsx)(e.span,{})})})]})}),(0,a.jsx)(e.span,{className:"arraycolsep",style:{width:"1em"}}),(0,a.jsx)(e.span,{className:"col-align-l",children:(0,a.jsxs)(e.span,{className:"vlist-t vlist-t2",children:[(0,a.jsxs)(e.span,{className:"vlist-r",children:[(0,a.jsxs)(e.span,{className:"vlist",style:{height:"1.69em"},children:[(0,a.jsxs)(e.span,{style:{top:"-3.69em"},children:[(0,a.jsx)(e.span,{className:"pstrut",style:{height:"3.008em"}}),(0,a.jsxs)(e.span,{className:"mord",children:[(0,a.jsx)(e.span,{className:"mord text",children:(0,a.jsx)(e.span,{className:"mord",children:",\xa0if\xa0"})}),(0,a.jsx)(e.span,{className:"mord mathnormal",style:{marginRight:"0.03588em"},children:"y"}),(0,a.jsx)(e.span,{className:"mspace",style:{marginRight:"0.2778em"}}),(0,a.jsx)(e.span,{className:"mrel",children:"="}),(0,a.jsx)(e.span,{className:"mspace",style:{marginRight:"0.2778em"}}),(0,a.jsx)(e.span,{className:"mord mathnormal",children:"ro"}),(0,a.jsx)(e.span,{className:"mord mathnormal",style:{marginRight:"0.02691em"},children:"w"}),(0,a.jsx)(e.span,{className:"mord mathnormal",children:"s"})]})]}),(0,a.jsxs)(e.span,{style:{top:"-2.25em"},children:[(0,a.jsx)(e.span,{className:"pstrut",style:{height:"3.008em"}}),(0,a.jsx)(e.span,{className:"mord",children:(0,a.jsx)(e.span,{className:"mord text",children:(0,a.jsx)(e.span,{className:"mord",children:",\xa0otherwise"})})})]})]}),(0,a.jsx)(e.span,{className:"vlist-s",children:"\u200b"})]}),(0,a.jsx)(e.span,{className:"vlist-r",children:(0,a.jsx)(e.span,{className:"vlist",style:{height:"1.19em"},children:(0,a.jsx)(e.span,{})})})]})})]})}),(0,a.jsx)(e.span,{className:"mclose nulldelimiter"})]})]})]})]})}),"\n",(0,a.jsx)(e.p,{children:"That looks rather easy to compute, isn't it? 
If you sum it up, you'll get:"}),"\n",(0,a.jsx)(e.span,{className:"katex-display",children:(0,a.jsxs)(e.span,{className:"katex",children:[(0,a.jsx)(e.span,{className:"katex-mathml",children:(0,a.jsx)(e.math,{xmlns:"http://www.w3.org/1998/Math/MathML",display:"block",children:(0,a.jsxs)(e.semantics,{children:[(0,a.jsxs)(e.mrow,{children:[(0,a.jsx)(e.mi,{children:"T"}),(0,a.jsx)(e.mo,{stretchy:"false",children:"("}),(0,a.jsx)(e.mi,{children:"r"}),(0,a.jsx)(e.mi,{children:"o"}),(0,a.jsx)(e.mi,{children:"w"}),(0,a.jsx)(e.mi,{children:"s"}),(0,a.jsx)(e.mo,{stretchy:"false",children:")"}),(0,a.jsx)(e.mo,{children:"\u2208"}),(0,a.jsx)(e.mi,{mathvariant:"script",children:"O"}),(0,a.jsx)(e.mo,{stretchy:"false",children:"("}),(0,a.jsxs)(e.msup,{children:[(0,a.jsx)(e.mn,{children:"2"}),(0,a.jsxs)(e.mrow,{children:[(0,a.jsx)(e.mi,{children:"r"}),(0,a.jsx)(e.mi,{children:"o"}),(0,a.jsx)(e.mi,{children:"w"}),(0,a.jsx)(e.mi,{children:"s"})]})]}),(0,a.jsx)(e.mo,{stretchy:"false",children:")"})]}),(0,a.jsx)(e.annotation,{encoding:"application/x-tex",children:"T(rows) \\in \\mathcal{O}(2^{rows})"})]})})}),(0,a.jsxs)(e.span,{className:"katex-html","aria-hidden":"true",children:[(0,a.jsxs)(e.span,{className:"base",children:[(0,a.jsx)(e.span,{className:"strut",style:{height:"1em",verticalAlign:"-0.25em"}}),(0,a.jsx)(e.span,{className:"mord mathnormal",style:{marginRight:"0.13889em"},children:"T"}),(0,a.jsx)(e.span,{className:"mopen",children:"("}),(0,a.jsx)(e.span,{className:"mord mathnormal",children:"ro"}),(0,a.jsx)(e.span,{className:"mord mathnormal",style:{marginRight:"0.02691em"},children:"w"}),(0,a.jsx)(e.span,{className:"mord mathnormal",children:"s"}),(0,a.jsx)(e.span,{className:"mclose",children:")"}),(0,a.jsx)(e.span,{className:"mspace",style:{marginRight:"0.2778em"}}),(0,a.jsx)(e.span,{className:"mrel",children:"\u2208"}),(0,a.jsx)(e.span,{className:"mspace",style:{marginRight:"0.2778em"}})]}),(0,a.jsxs)(e.span,{className:"base",children:[(0,a.jsx)(e.span,{className:"strut",style:{height:"1em",verticalAlign:"-0.25em"}}),(0,a.jsx)(e.span,{className:"mord mathcal",style:{marginRight:"0.02778em"},children:"O"}),(0,a.jsx)(e.span,{className:"mopen",children:"("}),(0,a.jsxs)(e.span,{className:"mord",children:[(0,a.jsx)(e.span,{className:"mord",children:"2"}),(0,a.jsx)(e.span,{className:"msupsub",children:(0,a.jsx)(e.span,{className:"vlist-t",children:(0,a.jsx)(e.span,{className:"vlist-r",children:(0,a.jsx)(e.span,{className:"vlist",style:{height:"0.7144em"},children:(0,a.jsxs)(e.span,{style:{top:"-3.113em",marginRight:"0.05em"},children:[(0,a.jsx)(e.span,{className:"pstrut",style:{height:"2.7em"}}),(0,a.jsx)(e.span,{className:"sizing reset-size6 size3 mtight",children:(0,a.jsxs)(e.span,{className:"mord mtight",children:[(0,a.jsx)(e.span,{className:"mord mathnormal mtight",children:"ro"}),(0,a.jsx)(e.span,{className:"mord mathnormal mtight",style:{marginRight:"0.02691em"},children:"w"}),(0,a.jsx)(e.span,{className:"mord mathnormal mtight",children:"s"})]})})]})})})})})]}),(0,a.jsx)(e.span,{className:"mclose",children:")"})]})]})]})}),"\n",(0,a.jsx)(e.p,{children:"If you wonder why, I'll try to describe it intuitively:"}),"\n",(0,a.jsxs)(e.ol,{children:["\n",(0,a.jsxs)(e.li,{children:["In each call to ",(0,a.jsx)(e.code,{children:"longestSlideDown"})," we do some work in constant time,\nregardless of being in the base case. 
Those are the ",(0,a.jsx)(e.code,{children:"1"}),"s in both cases."]}),"\n",(0,a.jsxs)(e.li,{children:["If we are not in the base case, we move one row down ",(0,a.jsx)(e.strong,{children:"twice"}),". That's how we\nobtained ",(0,a.jsx)(e.code,{children:"2 *"})," and ",(0,a.jsx)(e.code,{children:"y + 1"})," in the ",(0,a.jsx)(e.em,{children:"otherwise"})," case."]}),"\n",(0,a.jsxs)(e.li,{children:["We move row-by-row, so we move down ",(0,a.jsx)(e.code,{children:"y"}),"-times and each call splits to two\nsubtrees."]}),"\n",(0,a.jsxs)(e.li,{children:["Overall, if we were to represent the calls as a tree, we would get a full\nbinary tree of height ",(0,a.jsx)(e.code,{children:"y"}),", in each node we do some work in constant time,\ntherefore we can just sum the ones."]}),"\n"]}),"\n",(0,a.jsx)(e.admonition,{type:"warning",children:(0,a.jsx)(e.p,{children:"It would've been more complicated to get an exact result. In the equation above\nwe are assuming that the width of the pyramid is bound by the height."})}),"\n",(0,a.jsxs)(e.p,{children:["Hopefully we can agree that this is not the best we can do. ","\ud83d\ude09"]}),"\n",(0,a.jsx)(e.h2,{id:"greedy-solution",children:"Greedy solution"}),"\n",(0,a.jsxs)(e.p,{children:["We will try to optimize it a bit. Let's start with a relatively simple ",(0,a.jsx)(e.em,{children:"greedy"}),"\napproach."]}),"\n",(0,a.jsx)(e.admonition,{title:"Greedy algorithms",type:"info",children:(0,a.jsxs)(e.p,{children:[(0,a.jsx)(e.em,{children:"Greedy algorithms"})," can be described as algorithms that decide the action on the\noptimal option at the moment."]})}),"\n",(0,a.jsx)(e.p,{children:"We can try to adjust the na\xefve solution. The most problematic part are the\nrecursive calls. Let's apply the greedy approach there:"}),"\n",(0,a.jsx)(e.pre,{children:(0,a.jsx)(e.code,{className:"language-java",children:"public static int longestSlideDown(int[][] pyramid, int row, int col) {\n if (row == pyramid.length - 1) {\n // BASE: We're at the bottom\n return pyramid[row][col];\n }\n\n if (col + 1 >= pyramid[row + 1].length\n || pyramid[row + 1][col] > pyramid[row + 1][col + 1]) {\n // If we cannot go right or it's not feasible, we continue to the left.\n return pyramid[row][col] + longestSlideDown(pyramid, row + 1, col);\n }\n\n // Otherwise we just move to the right.\n return pyramid[row][col] + longestSlideDown(pyramid, row + 1, col + 1);\n}\n"})}),"\n",(0,a.jsxs)(e.p,{children:["OK, if we cannot go right ",(0,a.jsx)(e.strong,{children:"or"})," the right path adds smaller value to the sum,\nwe simply go left."]}),"\n",(0,a.jsx)(e.h3,{id:"time-complexity-1",children:"Time complexity"}),"\n",(0,a.jsxs)(e.p,{children:["We have switched from ",(0,a.jsx)(e.em,{children:"adding the maximum"})," to ",(0,a.jsx)(e.em,{children:"following the \u201cbigger\u201d path"}),", so\nwe improved the time complexity tremendously. We just go down the pyramid all\nthe way to the bottom. 
Therefore we are getting:"]}),"\n",(0,a.jsx)(e.span,{className:"katex-display",children:(0,a.jsxs)(e.span,{className:"katex",children:[(0,a.jsx)(e.span,{className:"katex-mathml",children:(0,a.jsx)(e.math,{xmlns:"http://www.w3.org/1998/Math/MathML",display:"block",children:(0,a.jsxs)(e.semantics,{children:[(0,a.jsxs)(e.mrow,{children:[(0,a.jsx)(e.mi,{mathvariant:"script",children:"O"}),(0,a.jsx)(e.mo,{stretchy:"false",children:"("}),(0,a.jsx)(e.mi,{children:"r"}),(0,a.jsx)(e.mi,{children:"o"}),(0,a.jsx)(e.mi,{children:"w"}),(0,a.jsx)(e.mi,{children:"s"}),(0,a.jsx)(e.mo,{stretchy:"false",children:")"})]}),(0,a.jsx)(e.annotation,{encoding:"application/x-tex",children:"\\mathcal{O}(rows)"})]})})}),(0,a.jsx)(e.span,{className:"katex-html","aria-hidden":"true",children:(0,a.jsxs)(e.span,{className:"base",children:[(0,a.jsx)(e.span,{className:"strut",style:{height:"1em",verticalAlign:"-0.25em"}}),(0,a.jsx)(e.span,{className:"mord mathcal",style:{marginRight:"0.02778em"},children:"O"}),(0,a.jsx)(e.span,{className:"mopen",children:"("}),(0,a.jsx)(e.span,{className:"mord mathnormal",children:"ro"}),(0,a.jsx)(e.span,{className:"mord mathnormal",style:{marginRight:"0.02691em"},children:"w"}),(0,a.jsx)(e.span,{className:"mord mathnormal",children:"s"}),(0,a.jsx)(e.span,{className:"mclose",children:")"})]})})]})}),"\n",(0,a.jsx)(e.p,{children:"We have managed to convert our exponential solution into a linear one."}),"\n",(0,a.jsx)(e.h3,{id:"running-the-tests",children:"Running the tests"}),"\n",(0,a.jsx)(e.p,{children:"However, if we run the tests, we notice that the second test failed:"}),"\n",(0,a.jsx)(e.pre,{children:(0,a.jsx)(e.code,{children:"Test #1: passed\nTest #2: failed\n"})}),"\n",(0,a.jsxs)(e.p,{children:["What's going on? Well, we have improved the time complexity, but greedy\nalgorithms are not the ideal solution to ",(0,a.jsx)(e.strong,{children:"all"})," problems. 
In this case there\nmay be a solution that is bigger than the one found using the greedy algorithm."]}),"\n",(0,a.jsx)(e.p,{children:"Imagine the following pyramid:"}),"\n",(0,a.jsx)(e.pre,{children:(0,a.jsx)(e.code,{children:" 1\n 2 3\n 5 6 7\n 8 9 10 11\n99 13 14 15 16\n"})}),"\n",(0,a.jsx)(e.p,{children:"We start at the top:"}),"\n",(0,a.jsxs)(e.ol,{children:["\n",(0,a.jsxs)(e.li,{children:["Current cell: ",(0,a.jsx)(e.code,{children:"1"}),", we can choose from ",(0,a.jsx)(e.code,{children:"2"})," and ",(0,a.jsx)(e.code,{children:"3"}),", ",(0,a.jsx)(e.code,{children:"3"})," looks better, so we\nchoose it."]}),"\n",(0,a.jsxs)(e.li,{children:["Current cell: ",(0,a.jsx)(e.code,{children:"3"}),", we can choose from ",(0,a.jsx)(e.code,{children:"6"})," and ",(0,a.jsx)(e.code,{children:"7"}),", ",(0,a.jsx)(e.code,{children:"7"})," looks better, so we\nchoose it."]}),"\n",(0,a.jsxs)(e.li,{children:["Current cell: ",(0,a.jsx)(e.code,{children:"7"}),", we can choose from ",(0,a.jsx)(e.code,{children:"10"})," and ",(0,a.jsx)(e.code,{children:"11"}),", ",(0,a.jsx)(e.code,{children:"11"})," looks better, so we\nchoose it."]}),"\n",(0,a.jsxs)(e.li,{children:["Current cell: ",(0,a.jsx)(e.code,{children:"11"}),", we can choose from ",(0,a.jsx)(e.code,{children:"15"})," and ",(0,a.jsx)(e.code,{children:"16"}),", ",(0,a.jsx)(e.code,{children:"16"})," looks better, so\nwe choose it."]}),"\n"]}),"\n",(0,a.jsxs)(e.p,{children:["Our final sum is: ",(0,a.jsx)(e.code,{children:"1 + 3 + 7 + 11 + 16 = 38"}),", but in the bottom left cell we\nhave a ",(0,a.jsx)(e.code,{children:"99"})," that is bigger than our whole sum."]}),"\n",(0,a.jsx)(e.admonition,{type:"tip",children:(0,a.jsx)(e.p,{children:"Dijkstra's algorithm is a greedy algorithm too, try to think why it is correct."})}),"\n",(0,a.jsx)(e.h2,{id:"top-down-dp",children:"Top-down DP"}),"\n",(0,a.jsxs)(e.p,{children:[(0,a.jsx)(e.em,{children:"Top-down dynamic programming"})," is probably the most common approach, since (at\nleast looks like) is the easiest to implement. The whole point is avoiding the\nunnecessary computations that we have already done."]}),"\n",(0,a.jsxs)(e.p,{children:["In our case, we can use our na\xefve solution and put a ",(0,a.jsx)(e.em,{children:"cache"})," on top of it that\nwill make sure, we don't do unnecessary calculations."]}),"\n",(0,a.jsx)(e.pre,{children:(0,a.jsx)(e.code,{className:"language-java",children:"// This \u201cstructure\u201d is required, since I have decided to use \u2039TreeMap\u203a which\n// requires the ordering on the keys. 
It represents one position in the pyramid.\nrecord Position(int row, int col) implements Comparable {\n public int compareTo(Position r) {\n if (row != r.row) {\n return Integer.valueOf(row).compareTo(r.row);\n }\n\n if (col != r.col) {\n return Integer.valueOf(col).compareTo(r.col);\n }\n\n return 0;\n }\n}\n\npublic static int longestSlideDown(\n int[][] pyramid,\n TreeMap cache,\n Position position) {\n int row = position.row;\n int col = position.col;\n\n if (row >= pyramid.length || col < 0 || col >= pyramid[row].length) {\n // BASE: out of bounds\n return Integer.MIN_VALUE;\n }\n\n if (row == pyramid.length - 1) {\n // BASE: bottom of the pyramid\n return pyramid[position.row][position.col];\n }\n\n if (!cache.containsKey(position)) {\n // We haven't computed the position yet, so we run the same \u201cformula\u201d as\n // in the na\xefve version \xbband\xab we put calculated slide into the cache.\n // Next time we want the slide down from given position, it will be just\n // retrieved from the cache.\n int slideDown = Math.max(\n longestSlideDown(pyramid, cache, new Position(row + 1, col)),\n longestSlideDown(pyramid, cache, new Position(row + 1, col + 1)));\n cache.put(position, pyramid[row][col] + slideDown);\n }\n\n return cache.get(position);\n}\n\npublic static int longestSlideDown(int[][] pyramid) {\n // At the beginning we need to create a cache and share it across the calls.\n TreeMap cache = new TreeMap<>();\n return longestSlideDown(pyramid, cache, new Position(0, 0));\n}\n"})}),"\n",(0,a.jsxs)(e.p,{children:["You have probably noticed that ",(0,a.jsx)(e.code,{children:"record Position"})," have appeared. Since we are\ncaching the already computed values, we need a \u201creasonable\u201d key. In this case we\nshare the cache only for one ",(0,a.jsx)(e.em,{children:"run"})," (i.e. pyramid) of the ",(0,a.jsx)(e.code,{children:"longestSlideDown"}),", so\nwe can cache just with the indices within the pyramid, i.e. the ",(0,a.jsx)(e.code,{children:"Position"}),"."]}),"\n",(0,a.jsx)(e.admonition,{title:"Record",type:"tip",children:(0,a.jsxs)(e.p,{children:[(0,a.jsx)(e.em,{children:"Record"})," is relatively new addition to the Java language. It is basically an\nimmutable structure with implicitly defined ",(0,a.jsx)(e.code,{children:".equals()"}),", ",(0,a.jsx)(e.code,{children:".hashCode()"}),",\n",(0,a.jsx)(e.code,{children:".toString()"})," and getters for the attributes."]})}),"\n",(0,a.jsxs)(e.p,{children:["Because of the choice of ",(0,a.jsx)(e.code,{children:"TreeMap"}),", we had to additionally define the ordering\non it."]}),"\n",(0,a.jsxs)(e.p,{children:["In the ",(0,a.jsx)(e.code,{children:"longestSlideDown"})," you can notice that the computation which used to be\nat the end of the na\xefve version above, is now wrapped in an ",(0,a.jsx)(e.code,{children:"if"})," statement that\nchecks for the presence of the position in the cache and computes the slide down\njust when it's needed."]}),"\n",(0,a.jsx)(e.h3,{id:"time-complexity-2",children:"Time complexity"}),"\n",(0,a.jsx)(e.p,{children:"If you think that evaluating time complexity for this approach is a bit more\ntricky, you are right. Keeping the cache in mind, it is not the easiest thing\nto do. 
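(0,a.jsx)(e.p,{children:"A small side note on the data-structure choice before we dive in: because a record already provides .equals() and .hashCode(), the cache could just as well be a plain HashMap, with no Comparable implementation at all. The following sketch is only an illustration of that swap; the analysis below sticks with the TreeMap version shown above."}),"\n",(0,a.jsx)(e.pre,{children:(0,a.jsx)(e.code,{className:"language-java",children:"import java.util.HashMap;\nimport java.util.Map;\n\n// No Comparable needed, the implicitly generated equals()/hashCode() of the\n// record are enough for hashing-based lookups.\nrecord Position(int row, int col) {}\n\npublic static int longestSlideDown(\n        int[][] pyramid, Map<Position, Integer> cache, Position position) {\n    int row = position.row();\n    int col = position.col();\n\n    if (row >= pyramid.length || col < 0 || col >= pyramid[row].length) {\n        // BASE: out of bounds\n        return Integer.MIN_VALUE;\n    }\n    if (row == pyramid.length - 1) {\n        // BASE: bottom of the pyramid\n        return pyramid[row][col];\n    }\n\n    if (!cache.containsKey(position)) {\n        // Same formula as before, only the cache type differs.\n        int slideDown = Math.max(\n                longestSlideDown(pyramid, cache, new Position(row + 1, col)),\n                longestSlideDown(pyramid, cache, new Position(row + 1, col + 1)));\n        cache.put(position, pyramid[row][col] + slideDown);\n    }\n    return cache.get(position);\n}\n\npublic static int longestSlideDown(int[][] pyramid) {\n    return longestSlideDown(pyramid, new HashMap<>(), new Position(0, 0));\n}\n"})}),"\n",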
However there are some observations that might help us figure this out:"}),"\n",(0,a.jsxs)(e.ol,{children:["\n",(0,a.jsx)(e.li,{children:"Slide down from each position is calculated only once."}),"\n",(0,a.jsx)(e.li,{children:"Once calculated, we use the result from the cache."}),"\n"]}),"\n",(0,a.jsxs)(e.p,{children:["Knowing this, we still cannot, at least easily, describe the time complexity of\nfinding the best slide down from a specific position, ",(0,a.jsx)(e.strong,{children:"but"})," we can bound it\nfrom above for the ",(0,a.jsx)(e.strong,{children:"whole"})," run from the top. Now the question is how we can do\nthat!"]}),"\n",(0,a.jsxs)(e.p,{children:["Overall we are doing the same things for almost",(0,a.jsx)(e.sup,{children:(0,a.jsx)(e.a,{href:"#user-content-fn-2",id:"user-content-fnref-2","data-footnote-ref":!0,"aria-describedby":"footnote-label",children:"2"})})," all of the positions within\nthe pyramid:"]}),"\n",(0,a.jsxs)(e.ol,{children:["\n",(0,a.jsxs)(e.li,{children:["\n",(0,a.jsx)(e.p,{children:"We calculate and store it (using the partial results stored in cache). This\nis done only once."}),"\n",(0,a.jsxs)(e.p,{children:["For each calculation we take 2 values from the cache and insert one value.\nBecause we have chosen ",(0,a.jsx)(e.code,{children:"TreeMap"}),", these 3 operations have logarithmic time\ncomplexity and therefore this step is equivalent to ",(0,a.jsxs)(e.span,{className:"katex",children:[(0,a.jsx)(e.span,{className:"katex-mathml",children:(0,a.jsx)(e.math,{xmlns:"http://www.w3.org/1998/Math/MathML",children:(0,a.jsxs)(e.semantics,{children:[(0,a.jsxs)(e.mrow,{children:[(0,a.jsx)(e.mn,{children:"3"}),(0,a.jsx)(e.mo,{children:"\u22c5"}),(0,a.jsxs)(e.msub,{children:[(0,a.jsxs)(e.mrow,{children:[(0,a.jsx)(e.mi,{children:"log"}),(0,a.jsx)(e.mo,{children:"\u2061"})]}),(0,a.jsx)(e.mn,{children:"2"})]}),(0,a.jsx)(e.mi,{children:"n"})]}),(0,a.jsx)(e.annotation,{encoding:"application/x-tex",children:"3 \\cdot \\log_2{n}"})]})})}),(0,a.jsxs)(e.span,{className:"katex-html","aria-hidden":"true",children:[(0,a.jsxs)(e.span,{className:"base",children:[(0,a.jsx)(e.span,{className:"strut",style:{height:"0.6444em"}}),(0,a.jsx)(e.span,{className:"mord",children:"3"}),(0,a.jsx)(e.span,{className:"mspace",style:{marginRight:"0.2222em"}}),(0,a.jsx)(e.span,{className:"mbin",children:"\u22c5"}),(0,a.jsx)(e.span,{className:"mspace",style:{marginRight:"0.2222em"}})]}),(0,a.jsxs)(e.span,{className:"base",children:[(0,a.jsx)(e.span,{className:"strut",style:{height:"0.9386em",verticalAlign:"-0.2441em"}}),(0,a.jsxs)(e.span,{className:"mop",children:[(0,a.jsxs)(e.span,{className:"mop",children:["lo",(0,a.jsx)(e.span,{style:{marginRight:"0.01389em"},children:"g"})]}),(0,a.jsx)(e.span,{className:"msupsub",children:(0,a.jsxs)(e.span,{className:"vlist-t vlist-t2",children:[(0,a.jsxs)(e.span,{className:"vlist-r",children:[(0,a.jsx)(e.span,{className:"vlist",style:{height:"0.207em"},children:(0,a.jsxs)(e.span,{style:{top:"-2.4559em",marginRight:"0.05em"},children:[(0,a.jsx)(e.span,{className:"pstrut",style:{height:"2.7em"}}),(0,a.jsx)(e.span,{className:"sizing reset-size6 size3 mtight",children:(0,a.jsx)(e.span,{className:"mord 
mtight",children:"2"})})]})}),(0,a.jsx)(e.span,{className:"vlist-s",children:"\u200b"})]}),(0,a.jsx)(e.span,{className:"vlist-r",children:(0,a.jsx)(e.span,{className:"vlist",style:{height:"0.2441em"},children:(0,a.jsx)(e.span,{})})})]})})]}),(0,a.jsx)(e.span,{className:"mspace",style:{marginRight:"0.1667em"}}),(0,a.jsx)(e.span,{className:"mord",children:(0,a.jsx)(e.span,{className:"mord mathnormal",children:"n"})})]})]})]}),"."]}),"\n",(0,a.jsx)(e.p,{children:"However for the sake of simplicity, we are going to account only for the\ninsertion, the reason is rather simple, if we include the 2 retrievals here,\nit will be interleaved with the next step, therefore it is easier to keep the\nretrievals in the following point."}),"\n",(0,a.jsx)(e.admonition,{type:"caution",children:(0,a.jsx)(e.p,{children:"You might have noticed it's still not that easy, cause we're not having full\ncache right from the beginning, but the sum of those logarithms cannot be\nexpressed in a nice way, so taking the upper bound, i.e. expecting the cache\nto be full at all times, is the best option for nice and readable complexity\nof the whole approach."})}),"\n",(0,a.jsxs)(e.p,{children:["Our final upper bound of this work is therefore ",(0,a.jsxs)(e.span,{className:"katex",children:[(0,a.jsx)(e.span,{className:"katex-mathml",children:(0,a.jsx)(e.math,{xmlns:"http://www.w3.org/1998/Math/MathML",children:(0,a.jsxs)(e.semantics,{children:[(0,a.jsxs)(e.mrow,{children:[(0,a.jsxs)(e.msub,{children:[(0,a.jsxs)(e.mrow,{children:[(0,a.jsx)(e.mi,{children:"log"}),(0,a.jsx)(e.mo,{children:"\u2061"})]}),(0,a.jsx)(e.mn,{children:"2"})]}),(0,a.jsx)(e.mi,{children:"n"})]}),(0,a.jsx)(e.annotation,{encoding:"application/x-tex",children:"\\log_2{n}"})]})})}),(0,a.jsx)(e.span,{className:"katex-html","aria-hidden":"true",children:(0,a.jsxs)(e.span,{className:"base",children:[(0,a.jsx)(e.span,{className:"strut",style:{height:"0.9386em",verticalAlign:"-0.2441em"}}),(0,a.jsxs)(e.span,{className:"mop",children:[(0,a.jsxs)(e.span,{className:"mop",children:["lo",(0,a.jsx)(e.span,{style:{marginRight:"0.01389em"},children:"g"})]}),(0,a.jsx)(e.span,{className:"msupsub",children:(0,a.jsxs)(e.span,{className:"vlist-t vlist-t2",children:[(0,a.jsxs)(e.span,{className:"vlist-r",children:[(0,a.jsx)(e.span,{className:"vlist",style:{height:"0.207em"},children:(0,a.jsxs)(e.span,{style:{top:"-2.4559em",marginRight:"0.05em"},children:[(0,a.jsx)(e.span,{className:"pstrut",style:{height:"2.7em"}}),(0,a.jsx)(e.span,{className:"sizing reset-size6 size3 mtight",children:(0,a.jsx)(e.span,{className:"mord mtight",children:"2"})})]})}),(0,a.jsx)(e.span,{className:"vlist-s",children:"\u200b"})]}),(0,a.jsx)(e.span,{className:"vlist-r",children:(0,a.jsx)(e.span,{className:"vlist",style:{height:"0.2441em"},children:(0,a.jsx)(e.span,{})})})]})})]}),(0,a.jsx)(e.span,{className:"mspace",style:{marginRight:"0.1667em"}}),(0,a.jsx)(e.span,{className:"mord",children:(0,a.jsx)(e.span,{className:"mord mathnormal",children:"n"})})]})})]}),"."]}),"\n"]}),"\n",(0,a.jsxs)(e.li,{children:["\n",(0,a.jsxs)(e.p,{children:["We retrieve it from the cache. 
Same as in first point, but only twice, so we\nget ",(0,a.jsxs)(e.span,{className:"katex",children:[(0,a.jsx)(e.span,{className:"katex-mathml",children:(0,a.jsx)(e.math,{xmlns:"http://www.w3.org/1998/Math/MathML",children:(0,a.jsxs)(e.semantics,{children:[(0,a.jsxs)(e.mrow,{children:[(0,a.jsx)(e.mn,{children:"2"}),(0,a.jsx)(e.mo,{children:"\u22c5"}),(0,a.jsxs)(e.msub,{children:[(0,a.jsxs)(e.mrow,{children:[(0,a.jsx)(e.mi,{children:"log"}),(0,a.jsx)(e.mo,{children:"\u2061"})]}),(0,a.jsx)(e.mn,{children:"2"})]}),(0,a.jsx)(e.mi,{children:"n"})]}),(0,a.jsx)(e.annotation,{encoding:"application/x-tex",children:"2 \\cdot \\log_2{n}"})]})})}),(0,a.jsxs)(e.span,{className:"katex-html","aria-hidden":"true",children:[(0,a.jsxs)(e.span,{className:"base",children:[(0,a.jsx)(e.span,{className:"strut",style:{height:"0.6444em"}}),(0,a.jsx)(e.span,{className:"mord",children:"2"}),(0,a.jsx)(e.span,{className:"mspace",style:{marginRight:"0.2222em"}}),(0,a.jsx)(e.span,{className:"mbin",children:"\u22c5"}),(0,a.jsx)(e.span,{className:"mspace",style:{marginRight:"0.2222em"}})]}),(0,a.jsxs)(e.span,{className:"base",children:[(0,a.jsx)(e.span,{className:"strut",style:{height:"0.9386em",verticalAlign:"-0.2441em"}}),(0,a.jsxs)(e.span,{className:"mop",children:[(0,a.jsxs)(e.span,{className:"mop",children:["lo",(0,a.jsx)(e.span,{style:{marginRight:"0.01389em"},children:"g"})]}),(0,a.jsx)(e.span,{className:"msupsub",children:(0,a.jsxs)(e.span,{className:"vlist-t vlist-t2",children:[(0,a.jsxs)(e.span,{className:"vlist-r",children:[(0,a.jsx)(e.span,{className:"vlist",style:{height:"0.207em"},children:(0,a.jsxs)(e.span,{style:{top:"-2.4559em",marginRight:"0.05em"},children:[(0,a.jsx)(e.span,{className:"pstrut",style:{height:"2.7em"}}),(0,a.jsx)(e.span,{className:"sizing reset-size6 size3 mtight",children:(0,a.jsx)(e.span,{className:"mord mtight",children:"2"})})]})}),(0,a.jsx)(e.span,{className:"vlist-s",children:"\u200b"})]}),(0,a.jsx)(e.span,{className:"vlist-r",children:(0,a.jsx)(e.span,{className:"vlist",style:{height:"0.2441em"},children:(0,a.jsx)(e.span,{})})})]})})]}),(0,a.jsx)(e.span,{className:"mspace",style:{marginRight:"0.1667em"}}),(0,a.jsx)(e.span,{className:"mord",children:(0,a.jsx)(e.span,{className:"mord mathnormal",children:"n"})})]})]})]}),"."]}),"\n",(0,a.jsx)(e.admonition,{type:"caution",children:(0,a.jsxs)(e.p,{children:["It's done twice because of the ",(0,a.jsx)(e.code,{children:".containsKey()"})," in the ",(0,a.jsx)(e.code,{children:"if"})," condition."]})}),"\n"]}),"\n"]}),"\n",(0,a.jsx)(e.p,{children:"Okay, we have evaluated work done for each of the cells in the pyramid and now\nwe need to put it together."}),"\n",(0,a.jsx)(e.p,{children:"Let's split the time complexity of our solution into two operands:"}),"\n",(0,a.jsx)(e.span,{className:"katex-display",children:(0,a.jsxs)(e.span,{className:"katex",children:[(0,a.jsx)(e.span,{className:"katex-mathml",children:(0,a.jsx)(e.math,{xmlns:"http://www.w3.org/1998/Math/MathML",display:"block",children:(0,a.jsxs)(e.semantics,{children:[(0,a.jsxs)(e.mrow,{children:[(0,a.jsx)(e.mi,{mathvariant:"script",children:"O"}),(0,a.jsx)(e.mo,{stretchy:"false",children:"("}),(0,a.jsx)(e.mi,{children:"r"}),(0,a.jsx)(e.mo,{children:"+"}),(0,a.jsx)(e.mi,{children:"s"}),(0,a.jsx)(e.mo,{stretchy:"false",children:")"})]}),(0,a.jsx)(e.annotation,{encoding:"application/x-tex",children:"\\mathcal{O}(r + 
s)"})]})})}),(0,a.jsxs)(e.span,{className:"katex-html","aria-hidden":"true",children:[(0,a.jsxs)(e.span,{className:"base",children:[(0,a.jsx)(e.span,{className:"strut",style:{height:"1em",verticalAlign:"-0.25em"}}),(0,a.jsx)(e.span,{className:"mord mathcal",style:{marginRight:"0.02778em"},children:"O"}),(0,a.jsx)(e.span,{className:"mopen",children:"("}),(0,a.jsx)(e.span,{className:"mord mathnormal",style:{marginRight:"0.02778em"},children:"r"}),(0,a.jsx)(e.span,{className:"mspace",style:{marginRight:"0.2222em"}}),(0,a.jsx)(e.span,{className:"mbin",children:"+"}),(0,a.jsx)(e.span,{className:"mspace",style:{marginRight:"0.2222em"}})]}),(0,a.jsxs)(e.span,{className:"base",children:[(0,a.jsx)(e.span,{className:"strut",style:{height:"1em",verticalAlign:"-0.25em"}}),(0,a.jsx)(e.span,{className:"mord mathnormal",children:"s"}),(0,a.jsx)(e.span,{className:"mclose",children:")"})]})]})]})}),"\n",(0,a.jsxs)(e.p,{children:[(0,a.jsxs)(e.span,{className:"katex",children:[(0,a.jsx)(e.span,{className:"katex-mathml",children:(0,a.jsx)(e.math,{xmlns:"http://www.w3.org/1998/Math/MathML",children:(0,a.jsxs)(e.semantics,{children:[(0,a.jsx)(e.mrow,{children:(0,a.jsx)(e.mi,{children:"r"})}),(0,a.jsx)(e.annotation,{encoding:"application/x-tex",children:"r"})]})})}),(0,a.jsx)(e.span,{className:"katex-html","aria-hidden":"true",children:(0,a.jsxs)(e.span,{className:"base",children:[(0,a.jsx)(e.span,{className:"strut",style:{height:"0.4306em"}}),(0,a.jsx)(e.span,{className:"mord mathnormal",style:{marginRight:"0.02778em"},children:"r"})]})})]})," will represent the ",(0,a.jsx)(e.em,{children:"actual"})," calculation of the cells and ",(0,a.jsxs)(e.span,{className:"katex",children:[(0,a.jsx)(e.span,{className:"katex-mathml",children:(0,a.jsx)(e.math,{xmlns:"http://www.w3.org/1998/Math/MathML",children:(0,a.jsxs)(e.semantics,{children:[(0,a.jsx)(e.mrow,{children:(0,a.jsx)(e.mi,{children:"s"})}),(0,a.jsx)(e.annotation,{encoding:"application/x-tex",children:"s"})]})})}),(0,a.jsx)(e.span,{className:"katex-html","aria-hidden":"true",children:(0,a.jsxs)(e.span,{className:"base",children:[(0,a.jsx)(e.span,{className:"strut",style:{height:"0.4306em"}}),(0,a.jsx)(e.span,{className:"mord mathnormal",children:"s"})]})})]})," will represent\nthe additional retrievals on top of the calculation."]}),"\n",(0,a.jsxs)(e.p,{children:["We calculate the values only ",(0,a.jsx)(e.strong,{children:"once"}),", therefore we can safely agree on:"]}),"\n",(0,a.jsx)(e.span,{className:"katex-display",children:(0,a.jsxs)(e.span,{className:"katex",children:[(0,a.jsx)(e.span,{className:"katex-mathml",children:(0,a.jsx)(e.math,{xmlns:"http://www.w3.org/1998/Math/MathML",display:"block",children:(0,a.jsxs)(e.semantics,{children:[(0,a.jsx)(e.mtable,{rowspacing:"0.25em",columnalign:"right left",columnspacing:"0em",children:(0,a.jsxs)(e.mtr,{children:[(0,a.jsx)(e.mtd,{children:(0,a.jsx)(e.mstyle,{scriptlevel:"0",displaystyle:"true",children:(0,a.jsx)(e.mi,{children:"r"})})}),(0,a.jsx)(e.mtd,{children:(0,a.jsx)(e.mstyle,{scriptlevel:"0",displaystyle:"true",children:(0,a.jsxs)(e.mrow,{children:[(0,a.jsx)(e.mrow,{}),(0,a.jsx)(e.mo,{children:"="}),(0,a.jsx)(e.mi,{children:"n"}),(0,a.jsx)(e.mo,{children:"\u22c5"}),(0,a.jsx)(e.mi,{children:"log"}),(0,a.jsx)(e.mo,{children:"\u2061"}),(0,a.jsx)(e.mi,{children:"n"})]})})})]})}),(0,a.jsx)(e.annotation,{encoding:"application/x-tex",children:"\\begin{align*}\nr &= n \\cdot \\log{n} 
\\\\\n\\end{align*}"})]})})}),(0,a.jsx)(e.span,{className:"katex-html","aria-hidden":"true",children:(0,a.jsxs)(e.span,{className:"base",children:[(0,a.jsx)(e.span,{className:"strut",style:{height:"1.5em",verticalAlign:"-0.5em"}}),(0,a.jsx)(e.span,{className:"mord",children:(0,a.jsxs)(e.span,{className:"mtable",children:[(0,a.jsx)(e.span,{className:"col-align-r",children:(0,a.jsxs)(e.span,{className:"vlist-t vlist-t2",children:[(0,a.jsxs)(e.span,{className:"vlist-r",children:[(0,a.jsx)(e.span,{className:"vlist",style:{height:"1em"},children:(0,a.jsxs)(e.span,{style:{top:"-3.16em"},children:[(0,a.jsx)(e.span,{className:"pstrut",style:{height:"3em"}}),(0,a.jsx)(e.span,{className:"mord",children:(0,a.jsx)(e.span,{className:"mord mathnormal",style:{marginRight:"0.02778em"},children:"r"})})]})}),(0,a.jsx)(e.span,{className:"vlist-s",children:"\u200b"})]}),(0,a.jsx)(e.span,{className:"vlist-r",children:(0,a.jsx)(e.span,{className:"vlist",style:{height:"0.5em"},children:(0,a.jsx)(e.span,{})})})]})}),(0,a.jsx)(e.span,{className:"col-align-l",children:(0,a.jsxs)(e.span,{className:"vlist-t vlist-t2",children:[(0,a.jsxs)(e.span,{className:"vlist-r",children:[(0,a.jsx)(e.span,{className:"vlist",style:{height:"1em"},children:(0,a.jsxs)(e.span,{style:{top:"-3.16em"},children:[(0,a.jsx)(e.span,{className:"pstrut",style:{height:"3em"}}),(0,a.jsxs)(e.span,{className:"mord",children:[(0,a.jsx)(e.span,{className:"mord"}),(0,a.jsx)(e.span,{className:"mspace",style:{marginRight:"0.2778em"}}),(0,a.jsx)(e.span,{className:"mrel",children:"="}),(0,a.jsx)(e.span,{className:"mspace",style:{marginRight:"0.2778em"}}),(0,a.jsx)(e.span,{className:"mord mathnormal",children:"n"}),(0,a.jsx)(e.span,{className:"mspace",style:{marginRight:"0.2222em"}}),(0,a.jsx)(e.span,{className:"mbin",children:"\u22c5"}),(0,a.jsx)(e.span,{className:"mspace",style:{marginRight:"0.2222em"}}),(0,a.jsxs)(e.span,{className:"mop",children:["lo",(0,a.jsx)(e.span,{style:{marginRight:"0.01389em"},children:"g"})]}),(0,a.jsx)(e.span,{className:"mspace",style:{marginRight:"0.1667em"}}),(0,a.jsx)(e.span,{className:"mord",children:(0,a.jsx)(e.span,{className:"mord mathnormal",children:"n"})})]})]})}),(0,a.jsx)(e.span,{className:"vlist-s",children:"\u200b"})]}),(0,a.jsx)(e.span,{className:"vlist-r",children:(0,a.jsx)(e.span,{className:"vlist",style:{height:"0.5em"},children:(0,a.jsx)(e.span,{})})})]})})]})})]})})]})}),"\n",(0,a.jsxs)(e.p,{children:["What about the ",(0,a.jsxs)(e.span,{className:"katex",children:[(0,a.jsx)(e.span,{className:"katex-mathml",children:(0,a.jsx)(e.math,{xmlns:"http://www.w3.org/1998/Math/MathML",children:(0,a.jsxs)(e.semantics,{children:[(0,a.jsx)(e.mrow,{children:(0,a.jsx)(e.mi,{children:"s"})}),(0,a.jsx)(e.annotation,{encoding:"application/x-tex",children:"s"})]})})}),(0,a.jsx)(e.span,{className:"katex-html","aria-hidden":"true",children:(0,a.jsxs)(e.span,{className:"base",children:[(0,a.jsx)(e.span,{className:"strut",style:{height:"0.4306em"}}),(0,a.jsx)(e.span,{className:"mord mathnormal",children:"s"})]})})]})," though? 
Key observation here is the fact that we have 2\nlookups on the tree in each of them ",(0,a.jsx)(e.strong,{children:"and"})," we do it twice, cause each cell has\nat most 2 parents:"]}),"\n",(0,a.jsx)(e.span,{className:"katex-display",children:(0,a.jsxs)(e.span,{className:"katex",children:[(0,a.jsx)(e.span,{className:"katex-mathml",children:(0,a.jsx)(e.math,{xmlns:"http://www.w3.org/1998/Math/MathML",display:"block",children:(0,a.jsxs)(e.semantics,{children:[(0,a.jsxs)(e.mtable,{rowspacing:"0.25em",columnalign:"right left",columnspacing:"0em",children:[(0,a.jsxs)(e.mtr,{children:[(0,a.jsx)(e.mtd,{children:(0,a.jsx)(e.mstyle,{scriptlevel:"0",displaystyle:"true",children:(0,a.jsx)(e.mi,{children:"s"})})}),(0,a.jsx)(e.mtd,{children:(0,a.jsx)(e.mstyle,{scriptlevel:"0",displaystyle:"true",children:(0,a.jsxs)(e.mrow,{children:[(0,a.jsx)(e.mrow,{}),(0,a.jsx)(e.mo,{children:"="}),(0,a.jsx)(e.mi,{children:"n"}),(0,a.jsx)(e.mo,{children:"\u22c5"}),(0,a.jsx)(e.mn,{children:"2"}),(0,a.jsx)(e.mo,{children:"\u22c5"}),(0,a.jsxs)(e.mrow,{children:[(0,a.jsx)(e.mo,{fence:"true",children:"("}),(0,a.jsx)(e.mn,{children:"2"}),(0,a.jsx)(e.mo,{children:"\u22c5"}),(0,a.jsx)(e.mi,{children:"log"}),(0,a.jsx)(e.mo,{children:"\u2061"}),(0,a.jsx)(e.mi,{children:"n"}),(0,a.jsx)(e.mo,{fence:"true",children:")"})]})]})})})]}),(0,a.jsxs)(e.mtr,{children:[(0,a.jsx)(e.mtd,{children:(0,a.jsx)(e.mstyle,{scriptlevel:"0",displaystyle:"true",children:(0,a.jsx)(e.mi,{children:"s"})})}),(0,a.jsx)(e.mtd,{children:(0,a.jsx)(e.mstyle,{scriptlevel:"0",displaystyle:"true",children:(0,a.jsxs)(e.mrow,{children:[(0,a.jsx)(e.mrow,{}),(0,a.jsx)(e.mo,{children:"="}),(0,a.jsx)(e.mn,{children:"4"}),(0,a.jsx)(e.mo,{children:"\u22c5"}),(0,a.jsx)(e.mi,{children:"n"}),(0,a.jsx)(e.mo,{children:"\u22c5"}),(0,a.jsx)(e.mi,{children:"log"}),(0,a.jsx)(e.mo,{children:"\u2061"}),(0,a.jsx)(e.mi,{children:"n"})]})})})]})]}),(0,a.jsx)(e.annotation,{encoding:"application/x-tex",children:"\\begin{align*}\ns &= n \\cdot 2 \\cdot \\left( 2 \\cdot \\log{n} \\right) \\\\\ns &= 4 \\cdot n \\cdot \\log{n}\n\\end{align*}"})]})})}),(0,a.jsx)(e.span,{className:"katex-html","aria-hidden":"true",children:(0,a.jsxs)(e.span,{className:"base",children:[(0,a.jsx)(e.span,{className:"strut",style:{height:"3em",verticalAlign:"-1.25em"}}),(0,a.jsx)(e.span,{className:"mord",children:(0,a.jsxs)(e.span,{className:"mtable",children:[(0,a.jsx)(e.span,{className:"col-align-r",children:(0,a.jsxs)(e.span,{className:"vlist-t vlist-t2",children:[(0,a.jsxs)(e.span,{className:"vlist-r",children:[(0,a.jsxs)(e.span,{className:"vlist",style:{height:"1.75em"},children:[(0,a.jsxs)(e.span,{style:{top:"-3.91em"},children:[(0,a.jsx)(e.span,{className:"pstrut",style:{height:"3em"}}),(0,a.jsx)(e.span,{className:"mord",children:(0,a.jsx)(e.span,{className:"mord mathnormal",children:"s"})})]}),(0,a.jsxs)(e.span,{style:{top:"-2.41em"},children:[(0,a.jsx)(e.span,{className:"pstrut",style:{height:"3em"}}),(0,a.jsx)(e.span,{className:"mord",children:(0,a.jsx)(e.span,{className:"mord mathnormal",children:"s"})})]})]}),(0,a.jsx)(e.span,{className:"vlist-s",children:"\u200b"})]}),(0,a.jsx)(e.span,{className:"vlist-r",children:(0,a.jsx)(e.span,{className:"vlist",style:{height:"1.25em"},children:(0,a.jsx)(e.span,{})})})]})}),(0,a.jsx)(e.span,{className:"col-align-l",children:(0,a.jsxs)(e.span,{className:"vlist-t 
vlist-t2",children:[(0,a.jsxs)(e.span,{className:"vlist-r",children:[(0,a.jsxs)(e.span,{className:"vlist",style:{height:"1.75em"},children:[(0,a.jsxs)(e.span,{style:{top:"-3.91em"},children:[(0,a.jsx)(e.span,{className:"pstrut",style:{height:"3em"}}),(0,a.jsxs)(e.span,{className:"mord",children:[(0,a.jsx)(e.span,{className:"mord"}),(0,a.jsx)(e.span,{className:"mspace",style:{marginRight:"0.2778em"}}),(0,a.jsx)(e.span,{className:"mrel",children:"="}),(0,a.jsx)(e.span,{className:"mspace",style:{marginRight:"0.2778em"}}),(0,a.jsx)(e.span,{className:"mord mathnormal",children:"n"}),(0,a.jsx)(e.span,{className:"mspace",style:{marginRight:"0.2222em"}}),(0,a.jsx)(e.span,{className:"mbin",children:"\u22c5"}),(0,a.jsx)(e.span,{className:"mspace",style:{marginRight:"0.2222em"}}),(0,a.jsx)(e.span,{className:"mord",children:"2"}),(0,a.jsx)(e.span,{className:"mspace",style:{marginRight:"0.2222em"}}),(0,a.jsx)(e.span,{className:"mbin",children:"\u22c5"}),(0,a.jsx)(e.span,{className:"mspace",style:{marginRight:"0.2222em"}}),(0,a.jsxs)(e.span,{className:"minner",children:[(0,a.jsx)(e.span,{className:"mopen delimcenter",style:{top:"0em"},children:"("}),(0,a.jsx)(e.span,{className:"mord",children:"2"}),(0,a.jsx)(e.span,{className:"mspace",style:{marginRight:"0.2222em"}}),(0,a.jsx)(e.span,{className:"mbin",children:"\u22c5"}),(0,a.jsx)(e.span,{className:"mspace",style:{marginRight:"0.2222em"}}),(0,a.jsxs)(e.span,{className:"mop",children:["lo",(0,a.jsx)(e.span,{style:{marginRight:"0.01389em"},children:"g"})]}),(0,a.jsx)(e.span,{className:"mspace",style:{marginRight:"0.1667em"}}),(0,a.jsx)(e.span,{className:"mord",children:(0,a.jsx)(e.span,{className:"mord mathnormal",children:"n"})}),(0,a.jsx)(e.span,{className:"mclose delimcenter",style:{top:"0em"},children:")"})]})]})]}),(0,a.jsxs)(e.span,{style:{top:"-2.41em"},children:[(0,a.jsx)(e.span,{className:"pstrut",style:{height:"3em"}}),(0,a.jsxs)(e.span,{className:"mord",children:[(0,a.jsx)(e.span,{className:"mord"}),(0,a.jsx)(e.span,{className:"mspace",style:{marginRight:"0.2778em"}}),(0,a.jsx)(e.span,{className:"mrel",children:"="}),(0,a.jsx)(e.span,{className:"mspace",style:{marginRight:"0.2778em"}}),(0,a.jsx)(e.span,{className:"mord",children:"4"}),(0,a.jsx)(e.span,{className:"mspace",style:{marginRight:"0.2222em"}}),(0,a.jsx)(e.span,{className:"mbin",children:"\u22c5"}),(0,a.jsx)(e.span,{className:"mspace",style:{marginRight:"0.2222em"}}),(0,a.jsx)(e.span,{className:"mord mathnormal",children:"n"}),(0,a.jsx)(e.span,{className:"mspace",style:{marginRight:"0.2222em"}}),(0,a.jsx)(e.span,{className:"mbin",children:"\u22c5"}),(0,a.jsx)(e.span,{className:"mspace",style:{marginRight:"0.2222em"}}),(0,a.jsxs)(e.span,{className:"mop",children:["lo",(0,a.jsx)(e.span,{style:{marginRight:"0.01389em"},children:"g"})]}),(0,a.jsx)(e.span,{className:"mspace",style:{marginRight:"0.1667em"}}),(0,a.jsx)(e.span,{className:"mord",children:(0,a.jsx)(e.span,{className:"mord mathnormal",children:"n"})})]})]})]}),(0,a.jsx)(e.span,{className:"vlist-s",children:"\u200b"})]}),(0,a.jsx)(e.span,{className:"vlist-r",children:(0,a.jsx)(e.span,{className:"vlist",style:{height:"1.25em"},children:(0,a.jsx)(e.span,{})})})]})})]})})]})})]})}),"\n",(0,a.jsxs)(e.admonition,{type:"tip",children:[(0,a.jsxs)(e.p,{children:["You might've noticed that lookups actually take more time than the construction\nof the results. 
This is not entirely true, since we have included the\n",(0,a.jsx)(e.code,{children:".containsKey()"})," and ",(0,a.jsx)(e.code,{children:".get()"})," from the ",(0,a.jsx)(e.code,{children:"return"})," statement in the second part."]}),(0,a.jsx)(e.p,{children:"If we were to represent this more precisely, we could've gone with:"}),(0,a.jsx)(e.span,{className:"katex-display",children:(0,a.jsxs)(e.span,{className:"katex",children:[(0,a.jsx)(e.span,{className:"katex-mathml",children:(0,a.jsx)(e.math,{xmlns:"http://www.w3.org/1998/Math/MathML",display:"block",children:(0,a.jsxs)(e.semantics,{children:[(0,a.jsxs)(e.mtable,{rowspacing:"0.25em",columnalign:"right left",columnspacing:"0em",children:[(0,a.jsxs)(e.mtr,{children:[(0,a.jsx)(e.mtd,{children:(0,a.jsx)(e.mstyle,{scriptlevel:"0",displaystyle:"true",children:(0,a.jsx)(e.mi,{children:"r"})})}),(0,a.jsx)(e.mtd,{children:(0,a.jsx)(e.mstyle,{scriptlevel:"0",displaystyle:"true",children:(0,a.jsxs)(e.mrow,{children:[(0,a.jsx)(e.mrow,{}),(0,a.jsx)(e.mo,{children:"="}),(0,a.jsx)(e.mn,{children:"3"}),(0,a.jsx)(e.mo,{children:"\u22c5"}),(0,a.jsx)(e.mi,{children:"n"}),(0,a.jsx)(e.mo,{children:"\u22c5"}),(0,a.jsx)(e.mi,{children:"log"}),(0,a.jsx)(e.mo,{children:"\u2061"}),(0,a.jsx)(e.mi,{children:"n"})]})})})]}),(0,a.jsxs)(e.mtr,{children:[(0,a.jsx)(e.mtd,{children:(0,a.jsx)(e.mstyle,{scriptlevel:"0",displaystyle:"true",children:(0,a.jsx)(e.mi,{children:"s"})})}),(0,a.jsx)(e.mtd,{children:(0,a.jsx)(e.mstyle,{scriptlevel:"0",displaystyle:"true",children:(0,a.jsxs)(e.mrow,{children:[(0,a.jsx)(e.mrow,{}),(0,a.jsx)(e.mo,{children:"="}),(0,a.jsx)(e.mn,{children:"2"}),(0,a.jsx)(e.mo,{children:"\u22c5"}),(0,a.jsx)(e.mi,{children:"n"}),(0,a.jsx)(e.mo,{children:"\u22c5"}),(0,a.jsx)(e.mi,{children:"log"}),(0,a.jsx)(e.mo,{children:"\u2061"}),(0,a.jsx)(e.mi,{children:"n"})]})})})]})]}),(0,a.jsx)(e.annotation,{encoding:"application/x-tex",children:"\\begin{align*}\nr &= 3 \\cdot n \\cdot \\log{n} \\\\\ns &= 2 \\cdot n \\cdot \\log{n}\n\\end{align*}"})]})})}),(0,a.jsx)(e.span,{className:"katex-html","aria-hidden":"true",children:(0,a.jsxs)(e.span,{className:"base",children:[(0,a.jsx)(e.span,{className:"strut",style:{height:"3em",verticalAlign:"-1.25em"}}),(0,a.jsx)(e.span,{className:"mord",children:(0,a.jsxs)(e.span,{className:"mtable",children:[(0,a.jsx)(e.span,{className:"col-align-r",children:(0,a.jsxs)(e.span,{className:"vlist-t vlist-t2",children:[(0,a.jsxs)(e.span,{className:"vlist-r",children:[(0,a.jsxs)(e.span,{className:"vlist",style:{height:"1.75em"},children:[(0,a.jsxs)(e.span,{style:{top:"-3.91em"},children:[(0,a.jsx)(e.span,{className:"pstrut",style:{height:"3em"}}),(0,a.jsx)(e.span,{className:"mord",children:(0,a.jsx)(e.span,{className:"mord mathnormal",style:{marginRight:"0.02778em"},children:"r"})})]}),(0,a.jsxs)(e.span,{style:{top:"-2.41em"},children:[(0,a.jsx)(e.span,{className:"pstrut",style:{height:"3em"}}),(0,a.jsx)(e.span,{className:"mord",children:(0,a.jsx)(e.span,{className:"mord mathnormal",children:"s"})})]})]}),(0,a.jsx)(e.span,{className:"vlist-s",children:"\u200b"})]}),(0,a.jsx)(e.span,{className:"vlist-r",children:(0,a.jsx)(e.span,{className:"vlist",style:{height:"1.25em"},children:(0,a.jsx)(e.span,{})})})]})}),(0,a.jsx)(e.span,{className:"col-align-l",children:(0,a.jsxs)(e.span,{className:"vlist-t 
vlist-t2",children:[(0,a.jsxs)(e.span,{className:"vlist-r",children:[(0,a.jsxs)(e.span,{className:"vlist",style:{height:"1.75em"},children:[(0,a.jsxs)(e.span,{style:{top:"-3.91em"},children:[(0,a.jsx)(e.span,{className:"pstrut",style:{height:"3em"}}),(0,a.jsxs)(e.span,{className:"mord",children:[(0,a.jsx)(e.span,{className:"mord"}),(0,a.jsx)(e.span,{className:"mspace",style:{marginRight:"0.2778em"}}),(0,a.jsx)(e.span,{className:"mrel",children:"="}),(0,a.jsx)(e.span,{className:"mspace",style:{marginRight:"0.2778em"}}),(0,a.jsx)(e.span,{className:"mord",children:"3"}),(0,a.jsx)(e.span,{className:"mspace",style:{marginRight:"0.2222em"}}),(0,a.jsx)(e.span,{className:"mbin",children:"\u22c5"}),(0,a.jsx)(e.span,{className:"mspace",style:{marginRight:"0.2222em"}}),(0,a.jsx)(e.span,{className:"mord mathnormal",children:"n"}),(0,a.jsx)(e.span,{className:"mspace",style:{marginRight:"0.2222em"}}),(0,a.jsx)(e.span,{className:"mbin",children:"\u22c5"}),(0,a.jsx)(e.span,{className:"mspace",style:{marginRight:"0.2222em"}}),(0,a.jsxs)(e.span,{className:"mop",children:["lo",(0,a.jsx)(e.span,{style:{marginRight:"0.01389em"},children:"g"})]}),(0,a.jsx)(e.span,{className:"mspace",style:{marginRight:"0.1667em"}}),(0,a.jsx)(e.span,{className:"mord",children:(0,a.jsx)(e.span,{className:"mord mathnormal",children:"n"})})]})]}),(0,a.jsxs)(e.span,{style:{top:"-2.41em"},children:[(0,a.jsx)(e.span,{className:"pstrut",style:{height:"3em"}}),(0,a.jsxs)(e.span,{className:"mord",children:[(0,a.jsx)(e.span,{className:"mord"}),(0,a.jsx)(e.span,{className:"mspace",style:{marginRight:"0.2778em"}}),(0,a.jsx)(e.span,{className:"mrel",children:"="}),(0,a.jsx)(e.span,{className:"mspace",style:{marginRight:"0.2778em"}}),(0,a.jsx)(e.span,{className:"mord",children:"2"}),(0,a.jsx)(e.span,{className:"mspace",style:{marginRight:"0.2222em"}}),(0,a.jsx)(e.span,{className:"mbin",children:"\u22c5"}),(0,a.jsx)(e.span,{className:"mspace",style:{marginRight:"0.2222em"}}),(0,a.jsx)(e.span,{className:"mord mathnormal",children:"n"}),(0,a.jsx)(e.span,{className:"mspace",style:{marginRight:"0.2222em"}}),(0,a.jsx)(e.span,{className:"mbin",children:"\u22c5"}),(0,a.jsx)(e.span,{className:"mspace",style:{marginRight:"0.2222em"}}),(0,a.jsxs)(e.span,{className:"mop",children:["lo",(0,a.jsx)(e.span,{style:{marginRight:"0.01389em"},children:"g"})]}),(0,a.jsx)(e.span,{className:"mspace",style:{marginRight:"0.1667em"}}),(0,a.jsx)(e.span,{className:"mord",children:(0,a.jsx)(e.span,{className:"mord mathnormal",children:"n"})})]})]})]}),(0,a.jsx)(e.span,{className:"vlist-s",children:"\u200b"})]}),(0,a.jsx)(e.span,{className:"vlist-r",children:(0,a.jsx)(e.span,{className:"vlist",style:{height:"1.25em"},children:(0,a.jsx)(e.span,{})})})]})})]})})]})})]})}),(0,a.jsx)(e.p,{children:"On the other hand we are summing both numbers together, therefore in the end it\ndoesn't really matter."}),(0,a.jsxs)(e.p,{children:["(",(0,a.jsx)(e.em,{children:"Feel free to compare the sums of both \u201csplits\u201d."}),")"]})]}),"\n",(0,a.jsxs)(e.p,{children:["And so our final time complexity for the whole ",(0,a.jsx)(e.em,{children:"top-down dynamic programming"}),"\napproach 
is:"]}),"\n",(0,a.jsx)(e.span,{className:"katex-display",children:(0,a.jsxs)(e.span,{className:"katex",children:[(0,a.jsx)(e.span,{className:"katex-mathml",children:(0,a.jsx)(e.math,{xmlns:"http://www.w3.org/1998/Math/MathML",display:"block",children:(0,a.jsxs)(e.semantics,{children:[(0,a.jsxs)(e.mrow,{children:[(0,a.jsx)(e.mi,{mathvariant:"script",children:"O"}),(0,a.jsx)(e.mo,{stretchy:"false",children:"("}),(0,a.jsx)(e.mi,{children:"r"}),(0,a.jsx)(e.mo,{children:"+"}),(0,a.jsx)(e.mi,{children:"s"}),(0,a.jsx)(e.mo,{stretchy:"false",children:")"}),(0,a.jsx)(e.mspace,{linebreak:"newline"}),(0,a.jsx)(e.mi,{mathvariant:"script",children:"O"}),(0,a.jsx)(e.mo,{stretchy:"false",children:"("}),(0,a.jsx)(e.mi,{children:"n"}),(0,a.jsx)(e.mo,{children:"\u22c5"}),(0,a.jsx)(e.mi,{children:"log"}),(0,a.jsx)(e.mo,{children:"\u2061"}),(0,a.jsx)(e.mi,{children:"n"}),(0,a.jsx)(e.mo,{children:"+"}),(0,a.jsx)(e.mn,{children:"4"}),(0,a.jsx)(e.mo,{children:"\u22c5"}),(0,a.jsx)(e.mi,{children:"n"}),(0,a.jsx)(e.mo,{children:"\u22c5"}),(0,a.jsx)(e.mi,{children:"log"}),(0,a.jsx)(e.mo,{children:"\u2061"}),(0,a.jsx)(e.mi,{children:"n"}),(0,a.jsx)(e.mo,{stretchy:"false",children:")"}),(0,a.jsx)(e.mspace,{linebreak:"newline"}),(0,a.jsx)(e.mi,{mathvariant:"script",children:"O"}),(0,a.jsx)(e.mo,{stretchy:"false",children:"("}),(0,a.jsx)(e.mn,{children:"5"}),(0,a.jsx)(e.mo,{children:"\u22c5"}),(0,a.jsx)(e.mi,{children:"n"}),(0,a.jsx)(e.mo,{children:"\u22c5"}),(0,a.jsx)(e.mi,{children:"log"}),(0,a.jsx)(e.mo,{children:"\u2061"}),(0,a.jsx)(e.mi,{children:"n"}),(0,a.jsx)(e.mo,{stretchy:"false",children:")"}),(0,a.jsx)(e.mspace,{linebreak:"newline"}),(0,a.jsx)(e.mi,{mathvariant:"script",children:"O"}),(0,a.jsx)(e.mo,{stretchy:"false",children:"("}),(0,a.jsx)(e.mi,{children:"n"}),(0,a.jsx)(e.mo,{children:"\u22c5"}),(0,a.jsx)(e.mi,{children:"log"}),(0,a.jsx)(e.mo,{children:"\u2061"}),(0,a.jsx)(e.mi,{children:"n"}),(0,a.jsx)(e.mo,{stretchy:"false",children:")"})]}),(0,a.jsx)(e.annotation,{encoding:"application/x-tex",children:"\\mathcal{O}(r + s) \\\\\n\\mathcal{O}(n \\cdot \\log{n} + 4 \\cdot n \\cdot \\log{n}) \\\\\n\\mathcal{O}(5 \\cdot n \\cdot \\log{n}) \\\\\n\\mathcal{O}(n \\cdot \\log{n})"})]})})}),(0,a.jsxs)(e.span,{className:"katex-html","aria-hidden":"true",children:[(0,a.jsxs)(e.span,{className:"base",children:[(0,a.jsx)(e.span,{className:"strut",style:{height:"1em",verticalAlign:"-0.25em"}}),(0,a.jsx)(e.span,{className:"mord mathcal",style:{marginRight:"0.02778em"},children:"O"}),(0,a.jsx)(e.span,{className:"mopen",children:"("}),(0,a.jsx)(e.span,{className:"mord mathnormal",style:{marginRight:"0.02778em"},children:"r"}),(0,a.jsx)(e.span,{className:"mspace",style:{marginRight:"0.2222em"}}),(0,a.jsx)(e.span,{className:"mbin",children:"+"}),(0,a.jsx)(e.span,{className:"mspace",style:{marginRight:"0.2222em"}})]}),(0,a.jsxs)(e.span,{className:"base",children:[(0,a.jsx)(e.span,{className:"strut",style:{height:"1em",verticalAlign:"-0.25em"}}),(0,a.jsx)(e.span,{className:"mord mathnormal",children:"s"}),(0,a.jsx)(e.span,{className:"mclose",children:")"})]}),(0,a.jsx)(e.span,{className:"mspace newline"}),(0,a.jsxs)(e.span,{className:"base",children:[(0,a.jsx)(e.span,{className:"strut",style:{height:"1em",verticalAlign:"-0.25em"}}),(0,a.jsx)(e.span,{className:"mord mathcal",style:{marginRight:"0.02778em"},children:"O"}),(0,a.jsx)(e.span,{className:"mopen",children:"("}),(0,a.jsx)(e.span,{className:"mord 
mathnormal",children:"n"}),(0,a.jsx)(e.span,{className:"mspace",style:{marginRight:"0.2222em"}}),(0,a.jsx)(e.span,{className:"mbin",children:"\u22c5"}),(0,a.jsx)(e.span,{className:"mspace",style:{marginRight:"0.2222em"}})]}),(0,a.jsxs)(e.span,{className:"base",children:[(0,a.jsx)(e.span,{className:"strut",style:{height:"0.8889em",verticalAlign:"-0.1944em"}}),(0,a.jsxs)(e.span,{className:"mop",children:["lo",(0,a.jsx)(e.span,{style:{marginRight:"0.01389em"},children:"g"})]}),(0,a.jsx)(e.span,{className:"mspace",style:{marginRight:"0.1667em"}}),(0,a.jsx)(e.span,{className:"mord",children:(0,a.jsx)(e.span,{className:"mord mathnormal",children:"n"})}),(0,a.jsx)(e.span,{className:"mspace",style:{marginRight:"0.2222em"}}),(0,a.jsx)(e.span,{className:"mbin",children:"+"}),(0,a.jsx)(e.span,{className:"mspace",style:{marginRight:"0.2222em"}})]}),(0,a.jsxs)(e.span,{className:"base",children:[(0,a.jsx)(e.span,{className:"strut",style:{height:"0.6444em"}}),(0,a.jsx)(e.span,{className:"mord",children:"4"}),(0,a.jsx)(e.span,{className:"mspace",style:{marginRight:"0.2222em"}}),(0,a.jsx)(e.span,{className:"mbin",children:"\u22c5"}),(0,a.jsx)(e.span,{className:"mspace",style:{marginRight:"0.2222em"}})]}),(0,a.jsxs)(e.span,{className:"base",children:[(0,a.jsx)(e.span,{className:"strut",style:{height:"0.4445em"}}),(0,a.jsx)(e.span,{className:"mord mathnormal",children:"n"}),(0,a.jsx)(e.span,{className:"mspace",style:{marginRight:"0.2222em"}}),(0,a.jsx)(e.span,{className:"mbin",children:"\u22c5"}),(0,a.jsx)(e.span,{className:"mspace",style:{marginRight:"0.2222em"}})]}),(0,a.jsxs)(e.span,{className:"base",children:[(0,a.jsx)(e.span,{className:"strut",style:{height:"1em",verticalAlign:"-0.25em"}}),(0,a.jsxs)(e.span,{className:"mop",children:["lo",(0,a.jsx)(e.span,{style:{marginRight:"0.01389em"},children:"g"})]}),(0,a.jsx)(e.span,{className:"mspace",style:{marginRight:"0.1667em"}}),(0,a.jsx)(e.span,{className:"mord",children:(0,a.jsx)(e.span,{className:"mord mathnormal",children:"n"})}),(0,a.jsx)(e.span,{className:"mclose",children:")"})]}),(0,a.jsx)(e.span,{className:"mspace newline"}),(0,a.jsxs)(e.span,{className:"base",children:[(0,a.jsx)(e.span,{className:"strut",style:{height:"1em",verticalAlign:"-0.25em"}}),(0,a.jsx)(e.span,{className:"mord mathcal",style:{marginRight:"0.02778em"},children:"O"}),(0,a.jsx)(e.span,{className:"mopen",children:"("}),(0,a.jsx)(e.span,{className:"mord",children:"5"}),(0,a.jsx)(e.span,{className:"mspace",style:{marginRight:"0.2222em"}}),(0,a.jsx)(e.span,{className:"mbin",children:"\u22c5"}),(0,a.jsx)(e.span,{className:"mspace",style:{marginRight:"0.2222em"}})]}),(0,a.jsxs)(e.span,{className:"base",children:[(0,a.jsx)(e.span,{className:"strut",style:{height:"0.4445em"}}),(0,a.jsx)(e.span,{className:"mord mathnormal",children:"n"}),(0,a.jsx)(e.span,{className:"mspace",style:{marginRight:"0.2222em"}}),(0,a.jsx)(e.span,{className:"mbin",children:"\u22c5"}),(0,a.jsx)(e.span,{className:"mspace",style:{marginRight:"0.2222em"}})]}),(0,a.jsxs)(e.span,{className:"base",children:[(0,a.jsx)(e.span,{className:"strut",style:{height:"1em",verticalAlign:"-0.25em"}}),(0,a.jsxs)(e.span,{className:"mop",children:["lo",(0,a.jsx)(e.span,{style:{marginRight:"0.01389em"},children:"g"})]}),(0,a.jsx)(e.span,{className:"mspace",style:{marginRight:"0.1667em"}}),(0,a.jsx)(e.span,{className:"mord",children:(0,a.jsx)(e.span,{className:"mord mathnormal",children:"n"})}),(0,a.jsx)(e.span,{className:"mclose",children:")"})]}),(0,a.jsx)(e.span,{className:"mspace 
newline"}),(0,a.jsxs)(e.span,{className:"base",children:[(0,a.jsx)(e.span,{className:"strut",style:{height:"1em",verticalAlign:"-0.25em"}}),(0,a.jsx)(e.span,{className:"mord mathcal",style:{marginRight:"0.02778em"},children:"O"}),(0,a.jsx)(e.span,{className:"mopen",children:"("}),(0,a.jsx)(e.span,{className:"mord mathnormal",children:"n"}),(0,a.jsx)(e.span,{className:"mspace",style:{marginRight:"0.2222em"}}),(0,a.jsx)(e.span,{className:"mbin",children:"\u22c5"}),(0,a.jsx)(e.span,{className:"mspace",style:{marginRight:"0.2222em"}})]}),(0,a.jsxs)(e.span,{className:"base",children:[(0,a.jsx)(e.span,{className:"strut",style:{height:"1em",verticalAlign:"-0.25em"}}),(0,a.jsxs)(e.span,{className:"mop",children:["lo",(0,a.jsx)(e.span,{style:{marginRight:"0.01389em"},children:"g"})]}),(0,a.jsx)(e.span,{className:"mspace",style:{marginRight:"0.1667em"}}),(0,a.jsx)(e.span,{className:"mord",children:(0,a.jsx)(e.span,{className:"mord mathnormal",children:"n"})}),(0,a.jsx)(e.span,{className:"mclose",children:")"})]})]})]})}),"\n",(0,a.jsxs)(e.p,{children:["As you can see, this is worse than our ",(0,a.jsx)(e.em,{children:"greedy"})," solution that was incorrect, but\nit's better than the ",(0,a.jsx)(e.em,{children:"na\xefve"})," one."]}),"\n",(0,a.jsx)(e.h3,{id:"memory-complexity",children:"Memory complexity"}),"\n",(0,a.jsxs)(e.p,{children:["With this approach we need to talk about the memory complexity too, because we\nhave introduced cache. If you think that the memory complexity is linear to the\ninput, you are right. We start at the top and try to find each and every slide\ndown. At the end we get the final result for ",(0,a.jsx)(e.code,{children:"new Position(0, 0)"}),", so we need to\ncompute everything below."]}),"\n",(0,a.jsx)(e.p,{children:"That's how we obtain:"}),"\n",(0,a.jsx)(e.span,{className:"katex-display",children:(0,a.jsxs)(e.span,{className:"katex",children:[(0,a.jsx)(e.span,{className:"katex-mathml",children:(0,a.jsx)(e.math,{xmlns:"http://www.w3.org/1998/Math/MathML",display:"block",children:(0,a.jsxs)(e.semantics,{children:[(0,a.jsxs)(e.mrow,{children:[(0,a.jsx)(e.mi,{mathvariant:"script",children:"O"}),(0,a.jsx)(e.mo,{stretchy:"false",children:"("}),(0,a.jsx)(e.mi,{children:"n"}),(0,a.jsx)(e.mo,{stretchy:"false",children:")"})]}),(0,a.jsx)(e.annotation,{encoding:"application/x-tex",children:"\\mathcal{O}(n)"})]})})}),(0,a.jsx)(e.span,{className:"katex-html","aria-hidden":"true",children:(0,a.jsxs)(e.span,{className:"base",children:[(0,a.jsx)(e.span,{className:"strut",style:{height:"1em",verticalAlign:"-0.25em"}}),(0,a.jsx)(e.span,{className:"mord mathcal",style:{marginRight:"0.02778em"},children:"O"}),(0,a.jsx)(e.span,{className:"mopen",children:"("}),(0,a.jsx)(e.span,{className:"mord mathnormal",children:"n"}),(0,a.jsx)(e.span,{className:"mclose",children:")"})]})})]})}),"\n",(0,a.jsxs)(e.p,{children:[(0,a.jsxs)(e.span,{className:"katex",children:[(0,a.jsx)(e.span,{className:"katex-mathml",children:(0,a.jsx)(e.math,{xmlns:"http://www.w3.org/1998/Math/MathML",children:(0,a.jsxs)(e.semantics,{children:[(0,a.jsx)(e.mrow,{children:(0,a.jsx)(e.mi,{children:"n"})}),(0,a.jsx)(e.annotation,{encoding:"application/x-tex",children:"n"})]})})}),(0,a.jsx)(e.span,{className:"katex-html","aria-hidden":"true",children:(0,a.jsxs)(e.span,{className:"base",children:[(0,a.jsx)(e.span,{className:"strut",style:{height:"0.4306em"}}),(0,a.jsx)(e.span,{className:"mord mathnormal",children:"n"})]})})]})," represents the total amount of cells in the pyramid, 
i.e."]}),"\n",(0,a.jsx)(e.span,{className:"katex-display",children:(0,a.jsxs)(e.span,{className:"katex",children:[(0,a.jsx)(e.span,{className:"katex-mathml",children:(0,a.jsx)(e.math,{xmlns:"http://www.w3.org/1998/Math/MathML",display:"block",children:(0,a.jsxs)(e.semantics,{children:[(0,a.jsxs)(e.mrow,{children:[(0,a.jsxs)(e.munderover,{children:[(0,a.jsx)(e.mo,{children:"\u2211"}),(0,a.jsxs)(e.mrow,{children:[(0,a.jsx)(e.mi,{children:"y"}),(0,a.jsx)(e.mo,{children:"="}),(0,a.jsx)(e.mn,{children:"0"})]}),(0,a.jsxs)(e.mrow,{children:[(0,a.jsxs)(e.mrow,{children:[(0,a.jsx)(e.mi,{mathvariant:"monospace",children:"p"}),(0,a.jsx)(e.mi,{mathvariant:"monospace",children:"y"}),(0,a.jsx)(e.mi,{mathvariant:"monospace",children:"r"}),(0,a.jsx)(e.mi,{mathvariant:"monospace",children:"a"}),(0,a.jsx)(e.mi,{mathvariant:"monospace",children:"m"}),(0,a.jsx)(e.mi,{mathvariant:"monospace",children:"i"}),(0,a.jsx)(e.mi,{mathvariant:"monospace",children:"d"}),(0,a.jsx)(e.mi,{mathvariant:"monospace",children:"."}),(0,a.jsx)(e.mi,{mathvariant:"monospace",children:"l"}),(0,a.jsx)(e.mi,{mathvariant:"monospace",children:"e"}),(0,a.jsx)(e.mi,{mathvariant:"monospace",children:"n"}),(0,a.jsx)(e.mi,{mathvariant:"monospace",children:"g"}),(0,a.jsx)(e.mi,{mathvariant:"monospace",children:"t"}),(0,a.jsx)(e.mi,{mathvariant:"monospace",children:"h"})]}),(0,a.jsx)(e.mo,{children:"\u2212"}),(0,a.jsx)(e.mn,{children:"1"})]})]}),(0,a.jsxs)(e.mrow,{children:[(0,a.jsx)(e.mi,{mathvariant:"monospace",children:"p"}),(0,a.jsx)(e.mi,{mathvariant:"monospace",children:"y"}),(0,a.jsx)(e.mi,{mathvariant:"monospace",children:"r"}),(0,a.jsx)(e.mi,{mathvariant:"monospace",children:"a"}),(0,a.jsx)(e.mi,{mathvariant:"monospace",children:"m"}),(0,a.jsx)(e.mi,{mathvariant:"monospace",children:"i"}),(0,a.jsx)(e.mi,{mathvariant:"monospace",children:"d"})]}),(0,a.jsxs)(e.mrow,{children:[(0,a.jsx)(e.mo,{fence:"true",children:"["}),(0,a.jsx)(e.mi,{children:"y"}),(0,a.jsx)(e.mo,{fence:"true",children:"]"})]}),(0,a.jsxs)(e.mrow,{children:[(0,a.jsx)(e.mi,{mathvariant:"monospace",children:"."}),(0,a.jsx)(e.mi,{mathvariant:"monospace",children:"l"}),(0,a.jsx)(e.mi,{mathvariant:"monospace",children:"e"}),(0,a.jsx)(e.mi,{mathvariant:"monospace",children:"n"}),(0,a.jsx)(e.mi,{mathvariant:"monospace",children:"g"}),(0,a.jsx)(e.mi,{mathvariant:"monospace",children:"t"}),(0,a.jsx)(e.mi,{mathvariant:"monospace",children:"h"})]})]}),(0,a.jsx)(e.annotation,{encoding:"application/x-tex",children:"\\sum_{y=0}^{\\mathtt{pyramid.length} - 1} \\mathtt{pyramid}\\left[y\\right]\\mathtt{.length}"})]})})}),(0,a.jsx)(e.span,{className:"katex-html","aria-hidden":"true",children:(0,a.jsxs)(e.span,{className:"base",children:[(0,a.jsx)(e.span,{className:"strut",style:{height:"3.2709em",verticalAlign:"-1.4032em"}}),(0,a.jsx)(e.span,{className:"mop op-limits",children:(0,a.jsxs)(e.span,{className:"vlist-t vlist-t2",children:[(0,a.jsxs)(e.span,{className:"vlist-r",children:[(0,a.jsxs)(e.span,{className:"vlist",style:{height:"1.8677em"},children:[(0,a.jsxs)(e.span,{style:{top:"-1.8829em",marginLeft:"0em"},children:[(0,a.jsx)(e.span,{className:"pstrut",style:{height:"3.05em"}}),(0,a.jsx)(e.span,{className:"sizing reset-size6 size3 mtight",children:(0,a.jsxs)(e.span,{className:"mord mtight",children:[(0,a.jsx)(e.span,{className:"mord mathnormal mtight",style:{marginRight:"0.03588em"},children:"y"}),(0,a.jsx)(e.span,{className:"mrel mtight",children:"="}),(0,a.jsx)(e.span,{className:"mord 
mtight",children:"0"})]})})]}),(0,a.jsxs)(e.span,{style:{top:"-3.05em"},children:[(0,a.jsx)(e.span,{className:"pstrut",style:{height:"3.05em"}}),(0,a.jsx)(e.span,{children:(0,a.jsx)(e.span,{className:"mop op-symbol large-op",children:"\u2211"})})]}),(0,a.jsxs)(e.span,{style:{top:"-4.3666em",marginLeft:"0em"},children:[(0,a.jsx)(e.span,{className:"pstrut",style:{height:"3.05em"}}),(0,a.jsx)(e.span,{className:"sizing reset-size6 size3 mtight",children:(0,a.jsxs)(e.span,{className:"mord mtight",children:[(0,a.jsx)(e.span,{className:"mord mtight",children:(0,a.jsx)(e.span,{className:"mord mathtt mtight",children:"pyramid.length"})}),(0,a.jsx)(e.span,{className:"mbin mtight",children:"\u2212"}),(0,a.jsx)(e.span,{className:"mord mtight",children:"1"})]})})]})]}),(0,a.jsx)(e.span,{className:"vlist-s",children:"\u200b"})]}),(0,a.jsx)(e.span,{className:"vlist-r",children:(0,a.jsx)(e.span,{className:"vlist",style:{height:"1.4032em"},children:(0,a.jsx)(e.span,{})})})]})}),(0,a.jsx)(e.span,{className:"mspace",style:{marginRight:"0.1667em"}}),(0,a.jsx)(e.span,{className:"mord",children:(0,a.jsx)(e.span,{className:"mord mathtt",children:"pyramid"})}),(0,a.jsx)(e.span,{className:"mspace",style:{marginRight:"0.1667em"}}),(0,a.jsxs)(e.span,{className:"minner",children:[(0,a.jsx)(e.span,{className:"mopen delimcenter",style:{top:"0em"},children:"["}),(0,a.jsx)(e.span,{className:"mord mathnormal",style:{marginRight:"0.03588em"},children:"y"}),(0,a.jsx)(e.span,{className:"mclose delimcenter",style:{top:"0em"},children:"]"})]}),(0,a.jsx)(e.span,{className:"mspace",style:{marginRight:"0.1667em"}}),(0,a.jsx)(e.span,{className:"mord",children:(0,a.jsx)(e.span,{className:"mord mathtt",children:".length"})})]})})]})}),"\n",(0,a.jsx)(e.admonition,{type:"caution",children:(0,a.jsxs)(e.p,{children:["If you're wondering whether it's correct because of the second ",(0,a.jsx)(e.code,{children:"if"})," in our\nfunction, your guess is right. However we are expressing the complexity in the\nBachmann-Landau notation, so we care about the ",(0,a.jsx)(e.strong,{children:"upper bound"}),", not the exact\nnumber."]})}),"\n",(0,a.jsxs)(e.admonition,{title:"Can this be optimized?",type:"tip",children:[(0,a.jsx)(e.p,{children:"Yes, it can! Try to think about a way, how can you minimize the memory\ncomplexity of this approach. 
I'll give you a hint:"}),(0,a.jsx)(e.span,{className:"katex-display",children:(0,a.jsxs)(e.span,{className:"katex",children:[(0,a.jsx)(e.span,{className:"katex-mathml",children:(0,a.jsx)(e.math,{xmlns:"http://www.w3.org/1998/Math/MathML",display:"block",children:(0,a.jsxs)(e.semantics,{children:[(0,a.jsxs)(e.mrow,{children:[(0,a.jsx)(e.mi,{mathvariant:"script",children:"O"}),(0,a.jsx)(e.mo,{stretchy:"false",children:"("}),(0,a.jsx)(e.mi,{children:"r"}),(0,a.jsx)(e.mi,{children:"o"}),(0,a.jsx)(e.mi,{children:"w"}),(0,a.jsx)(e.mi,{children:"s"}),(0,a.jsx)(e.mo,{stretchy:"false",children:")"})]}),(0,a.jsx)(e.annotation,{encoding:"application/x-tex",children:"\\mathcal{O}(rows)"})]})})}),(0,a.jsx)(e.span,{className:"katex-html","aria-hidden":"true",children:(0,a.jsxs)(e.span,{className:"base",children:[(0,a.jsx)(e.span,{className:"strut",style:{height:"1em",verticalAlign:"-0.25em"}}),(0,a.jsx)(e.span,{className:"mord mathcal",style:{marginRight:"0.02778em"},children:"O"}),(0,a.jsx)(e.span,{className:"mopen",children:"("}),(0,a.jsx)(e.span,{className:"mord mathnormal",children:"ro"}),(0,a.jsx)(e.span,{className:"mord mathnormal",style:{marginRight:"0.02691em"},children:"w"}),(0,a.jsx)(e.span,{className:"mord mathnormal",children:"s"}),(0,a.jsx)(e.span,{className:"mclose",children:")"})]})})]})})]}),"\n",(0,a.jsx)(e.h2,{id:"bottom-up-dp",children:"Bottom-up DP"}),"\n",(0,a.jsxs)(e.p,{children:["If you try to think in depth about the top-down DP solution, you might notice\nthat the ",(0,a.jsx)(e.em,{children:"core"})," of it stands on caching the calculations that have been already\ndone on the lower \u201clevels\u201d of the pyramid. Our bottom-up implementation will be\nusing this fact!"]}),"\n",(0,a.jsxs)(e.admonition,{type:"tip",children:[(0,a.jsxs)(e.p,{children:["As I have said in the ",(0,a.jsx)(e.em,{children:"top-down DP"})," section, it is the easiest way to implement\nDP (unless the cached function has complicated parameters, in that case it might\nget messy)."]}),(0,a.jsx)(e.p,{children:"Bottom-up dynamic programming can be more effective, but may be more complicated\nto implement right from the beginning."})]}),"\n",(0,a.jsx)(e.p,{children:"Let's see how we can implement it:"}),"\n",(0,a.jsx)(e.pre,{children:(0,a.jsx)(e.code,{className:"language-java",children:"public static int longestSlideDown(int[][] pyramid) {\n // In the beginning we declare new array. At this point it is easier to just\n // work with the one dimension, i.e. 
just allocating the space for the rows.\n int[][] slideDowns = new int[pyramid.length][];\n\n // Bottom row gets just copied, there's nothing else to do\u2026 It's the base\n // case.\n slideDowns[pyramid.length - 1] = Arrays.copyOf(pyramid[pyramid.length - 1],\n pyramid[pyramid.length - 1].length);\n\n // Then we need to propagate the found slide downs for each of the levels\n // above.\n for (int y = pyramid.length - 2; y >= 0; --y) {\n // We start by copying the values lying in the row we're processing.\n // They get included in the final sum and we need to allocate the space\n // for the precalculated slide downs anyways.\n int[] row = Arrays.copyOf(pyramid[y], pyramid[y].length);\n\n // At this we just need to \u201cfetch\u201d the partial results from \u201cneighbours\u201d\n for (int x = 0; x < row.length; ++x) {\n // We look under our position, since we expect the rows to get\n // shorter, we can safely assume such position exists.\n int under = slideDowns[y + 1][x];\n\n // Then we have a look to the right, such position doesn't have to\n // exist, e.g. on the right edge, so we validate the index, and if\n // it doesn't exist, we just assign minimum of the \u2039int\u203a which makes\n // sure that it doesn't get picked in the \u2039Math.max()\u203a call.\n int toRight = x + 1 < slideDowns[y + 1].length\n ? slideDowns[y + 1][x + 1]\n : Integer.MIN_VALUE;\n\n // Finally we add the best choice at this point.\n row[x] += Math.max(under, toRight);\n }\n\n // And save the row we've just calculated partial results for to the\n // \u201ctable\u201d.\n slideDowns[y] = row;\n }\n\n // At the end we can find our seeked slide down at the top cell.\n return slideDowns[0][0];\n}\n"})}),"\n",(0,a.jsx)(e.p,{children:"I've tried to explain the code as much as possible within the comments, since it\nmight be more beneficial to see right next to the \u201coffending\u201d lines."}),"\n",(0,a.jsxs)(e.p,{children:["As you can see, in this approach we go from the other side",(0,a.jsx)(e.sup,{children:(0,a.jsx)(e.a,{href:"#user-content-fn-3",id:"user-content-fnref-3","data-footnote-ref":!0,"aria-describedby":"footnote-label",children:"3"})}),", the bottom of\nthe pyramid and propagate the partial results up."]}),"\n",(0,a.jsxs)(e.admonition,{type:"info",children:[(0,a.jsxs)(e.mdxAdmonitionTitle,{children:["How is this different from the ",(0,a.jsx)(e.em,{children:"greedy"})," solution???"]}),(0,a.jsxs)(e.p,{children:["If you try to compare them, you might find a very noticable difference. The\ngreedy approach is going from the top to the bottom without ",(0,a.jsx)(e.strong,{children:"any"})," knowledge of\nwhat's going on below. On the other hand, bottom-up DP is going from the bottom\n(",(0,a.jsx)(e.em,{children:"DUH\u2026"}),") and ",(0,a.jsx)(e.strong,{children:"propagates"})," the partial results to the top. The propagation is\nwhat makes sure that at the top I don't choose the best ",(0,a.jsx)(e.strong,{children:"local"})," choice, but\nthe best ",(0,a.jsx)(e.strong,{children:"overall"})," result I can achieve."]})]}),"\n",(0,a.jsx)(e.h3,{id:"time-complexity-3",children:"Time complexity"}),"\n",(0,a.jsx)(e.p,{children:"Time complexity of this solution is rather simple. We allocate an array for the\nrows and then for each row, we copy it and adjust the partial results. 
Doing\nthis we get:"}),"\n",(0,a.jsx)(e.span,{className:"katex-display",children:(0,a.jsxs)(e.span,{className:"katex",children:[(0,a.jsx)(e.span,{className:"katex-mathml",children:(0,a.jsx)(e.math,{xmlns:"http://www.w3.org/1998/Math/MathML",display:"block",children:(0,a.jsxs)(e.semantics,{children:[(0,a.jsxs)(e.mrow,{children:[(0,a.jsx)(e.mi,{mathvariant:"script",children:"O"}),(0,a.jsx)(e.mo,{stretchy:"false",children:"("}),(0,a.jsx)(e.mi,{children:"r"}),(0,a.jsx)(e.mi,{children:"o"}),(0,a.jsx)(e.mi,{children:"w"}),(0,a.jsx)(e.mi,{children:"s"}),(0,a.jsx)(e.mo,{children:"+"}),(0,a.jsx)(e.mn,{children:"2"}),(0,a.jsx)(e.mi,{children:"n"}),(0,a.jsx)(e.mo,{stretchy:"false",children:")"})]}),(0,a.jsx)(e.annotation,{encoding:"application/x-tex",children:"\\mathcal{O}(rows + 2n)"})]})})}),(0,a.jsxs)(e.span,{className:"katex-html","aria-hidden":"true",children:[(0,a.jsxs)(e.span,{className:"base",children:[(0,a.jsx)(e.span,{className:"strut",style:{height:"1em",verticalAlign:"-0.25em"}}),(0,a.jsx)(e.span,{className:"mord mathcal",style:{marginRight:"0.02778em"},children:"O"}),(0,a.jsx)(e.span,{className:"mopen",children:"("}),(0,a.jsx)(e.span,{className:"mord mathnormal",children:"ro"}),(0,a.jsx)(e.span,{className:"mord mathnormal",style:{marginRight:"0.02691em"},children:"w"}),(0,a.jsx)(e.span,{className:"mord mathnormal",children:"s"}),(0,a.jsx)(e.span,{className:"mspace",style:{marginRight:"0.2222em"}}),(0,a.jsx)(e.span,{className:"mbin",children:"+"}),(0,a.jsx)(e.span,{className:"mspace",style:{marginRight:"0.2222em"}})]}),(0,a.jsxs)(e.span,{className:"base",children:[(0,a.jsx)(e.span,{className:"strut",style:{height:"1em",verticalAlign:"-0.25em"}}),(0,a.jsx)(e.span,{className:"mord",children:"2"}),(0,a.jsx)(e.span,{className:"mord mathnormal",children:"n"}),(0,a.jsx)(e.span,{className:"mclose",children:")"})]})]})]})}),"\n",(0,a.jsx)(e.p,{children:"Of course, this is an upper bound, since we iterate through the bottom row only\nonce."}),"\n",(0,a.jsx)(e.h3,{id:"memory-complexity-1",children:"Memory complexity"}),"\n",(0,a.jsxs)(e.p,{children:["We're allocating an array for the pyramid ",(0,a.jsx)(e.strong,{children:"again"})," for our partial results, so\nwe get:"]}),"\n",(0,a.jsx)(e.span,{className:"katex-display",children:(0,a.jsxs)(e.span,{className:"katex",children:[(0,a.jsx)(e.span,{className:"katex-mathml",children:(0,a.jsx)(e.math,{xmlns:"http://www.w3.org/1998/Math/MathML",display:"block",children:(0,a.jsxs)(e.semantics,{children:[(0,a.jsxs)(e.mrow,{children:[(0,a.jsx)(e.mi,{mathvariant:"script",children:"O"}),(0,a.jsx)(e.mo,{stretchy:"false",children:"("}),(0,a.jsx)(e.mi,{children:"n"}),(0,a.jsx)(e.mo,{stretchy:"false",children:")"})]}),(0,a.jsx)(e.annotation,{encoding:"application/x-tex",children:"\\mathcal{O}(n)"})]})})}),(0,a.jsx)(e.span,{className:"katex-html","aria-hidden":"true",children:(0,a.jsxs)(e.span,{className:"base",children:[(0,a.jsx)(e.span,{className:"strut",style:{height:"1em",verticalAlign:"-0.25em"}}),(0,a.jsx)(e.span,{className:"mord mathcal",style:{marginRight:"0.02778em"},children:"O"}),(0,a.jsx)(e.span,{className:"mopen",children:"("}),(0,a.jsx)(e.span,{className:"mord mathnormal",children:"n"}),(0,a.jsx)(e.span,{className:"mclose",children:")"})]})})]})}),"\n",(0,a.jsxs)(e.admonition,{type:"tip",children:[(0,a.jsx)(e.p,{children:"If we were writing this in C++ or Rust, we could've avoided that, but not\nreally."}),(0,a.jsxs)(e.p,{children:["C++ would allow us to ",(0,a.jsx)(e.strong,{children:"copy"})," the pyramid rightaway into the parameter, so we\nwould 
be able to directly change it. However it's still a copy, even though we\ndon't need to allocate anything ourselves. It's just implicitly done for us."]}),(0,a.jsxs)(e.p,{children:["Rust is more funny in this case. If the pyramids weren't used after the call of\n",(0,a.jsx)(e.code,{children:"longest_slide_down"}),", it would simply ",(0,a.jsx)(e.strong,{children:"move"})," them into the functions. If they\nwere used afterwards, the compiler would force you to either borrow it, or\n",(0,a.jsx)(e.em,{children:"clone-and-move"})," for the function."]}),(0,a.jsx)(e.hr,{}),(0,a.jsxs)(e.p,{children:["Since we're doing it in Java, we get a reference to the ",(0,a.jsx)(e.em,{children:"original"})," array and we\ncan't do whatever we want with it."]})]}),"\n",(0,a.jsx)(e.h2,{id:"summary",children:"Summary"}),"\n",(0,a.jsxs)(e.p,{children:["And we've finally reached the end. We have seen 4 different \u201csolutions\u201d",(0,a.jsx)(e.sup,{children:(0,a.jsx)(e.a,{href:"#user-content-fn-4",id:"user-content-fnref-4","data-footnote-ref":!0,"aria-describedby":"footnote-label",children:"4"})})," of\nthe same problem using different approaches. Different approaches follow the\norder in which you might come up with them, each approach influences its\nsuccessor and represents the way we can enhance the existing implementation."]}),"\n",(0,a.jsx)(e.hr,{}),"\n",(0,a.jsx)(e.admonition,{title:"source",type:"info",children:(0,a.jsxs)(e.p,{children:["You can find source code referenced in the text\n",(0,a.jsx)(e.a,{href:"pathname:///files/algorithms/recursion/pyramid-slide-down.tar.gz",children:"here"}),"."]})}),"\n",(0,a.jsxs)(e.section,{"data-footnotes":!0,className:"footnotes",children:[(0,a.jsx)(e.h2,{className:"sr-only",id:"footnote-label",children:"Footnotes"}),"\n",(0,a.jsxs)(e.ol,{children:["\n",(0,a.jsxs)(e.li,{id:"user-content-fn-1",children:["\n",(0,a.jsxs)(e.p,{children:["cause why not, right!? 
",(0,a.jsx)(e.a,{href:"#user-content-fnref-1","data-footnote-backref":"","aria-label":"Back to reference 1",className:"data-footnote-backref",children:"\u21a9"})]}),"\n"]}),"\n",(0,a.jsxs)(e.li,{id:"user-content-fn-2",children:["\n",(0,a.jsxs)(e.p,{children:["except the bottom row ",(0,a.jsx)(e.a,{href:"#user-content-fnref-2","data-footnote-backref":"","aria-label":"Back to reference 2",className:"data-footnote-backref",children:"\u21a9"})]}),"\n"]}),"\n",(0,a.jsxs)(e.li,{id:"user-content-fn-3",children:["\n",(0,a.jsxs)(e.p,{children:["definitely not an RHCP reference ","\ud83d\ude09 ",(0,a.jsx)(e.a,{href:"#user-content-fnref-3","data-footnote-backref":"","aria-label":"Back to reference 3",className:"data-footnote-backref",children:"\u21a9"})]}),"\n"]}),"\n",(0,a.jsxs)(e.li,{id:"user-content-fn-4",children:["\n",(0,a.jsxs)(e.p,{children:["one was not correct, thus the quotes ",(0,a.jsx)(e.a,{href:"#user-content-fnref-4","data-footnote-backref":"","aria-label":"Back to reference 4",className:"data-footnote-backref",children:"\u21a9"})]}),"\n"]}),"\n"]}),"\n"]})]})}function o(s={}){const{wrapper:e}={...(0,i.a)(),...s.components};return e?(0,a.jsx)(e,{...s,children:(0,a.jsx)(h,{...s})}):h(s)}},1151:(s,e,n)=>{n.d(e,{Z:()=>r,a:()=>t});var a=n(7294);const i={},l=a.createContext(i);function t(s){const e=a.useContext(l);return a.useMemo((function(){return"function"==typeof s?s(e):{...e,...s}}),[e,s])}function r(s){let e;return e=s.disableParentContext?"function"==typeof s.components?s.components(i):s.components||i:t(s.components),a.createElement(l.Provider,{value:e},s.children)}}}]); \ No newline at end of file diff --git a/assets/js/7052c0bc.45d31ad9.js b/assets/js/7052c0bc.3681b043.js similarity index 90% rename from assets/js/7052c0bc.45d31ad9.js rename to assets/js/7052c0bc.3681b043.js index ad3ffce..b32cf0a 100644 --- a/assets/js/7052c0bc.45d31ad9.js +++ b/assets/js/7052c0bc.3681b043.js @@ -1 +1 @@ -"use strict";(self.webpackChunkfi=self.webpackChunkfi||[]).push([[9731],{2286:(t,e,n)=>{n.r(e),n.d(e,{assets:()=>a,contentTitle:()=>c,default:()=>d,frontMatter:()=>i,metadata:()=>s,toc:()=>p});var o=n(5893),r=n(1151);const i={id:"cpp-intro",title:"Introduction",slug:"/"},c=void 0,s={id:"cpp-intro",title:"Introduction",description:"",source:"@site/cpp/00-intro.md",sourceDirName:".",slug:"/",permalink:"/cpp/",draft:!1,unlisted:!1,editUrl:"https://github.com/mfocko/blog/tree/main/cpp/00-intro.md",tags:[],version:"current",lastUpdatedAt:1700847079,formattedLastUpdatedAt:"Nov 24, 2023",sidebarPosition:0,frontMatter:{id:"cpp-intro",title:"Introduction",slug:"/"},sidebar:"autogeneratedBar",next:{title:"Exceptions and RAII",permalink:"/cpp/category/exceptions-and-raii"}},a={},p=[];function u(t){return(0,o.jsx)(o.Fragment,{})}function d(t={}){const{wrapper:e}={...(0,r.a)(),...t.components};return e?(0,o.jsx)(e,{...t,children:(0,o.jsx)(u,{...t})}):u()}},1151:(t,e,n)=>{n.d(e,{Z:()=>s,a:()=>c});var o=n(7294);const r={},i=o.createContext(r);function c(t){const e=o.useContext(i);return o.useMemo((function(){return"function"==typeof t?t(e):{...e,...t}}),[e,t])}function s(t){let e;return e=t.disableParentContext?"function"==typeof t.components?t.components(r):t.components||r:c(t.components),o.createElement(i.Provider,{value:e},t.children)}}}]); \ No newline at end of file +"use strict";(self.webpackChunkfi=self.webpackChunkfi||[]).push([[9731],{2286:(t,e,n)=>{n.r(e),n.d(e,{assets:()=>a,contentTitle:()=>c,default:()=>d,frontMatter:()=>i,metadata:()=>s,toc:()=>p});var o=n(5893),r=n(1151);const 
i={id:"cpp-intro",title:"Introduction",slug:"/"},c=void 0,s={id:"cpp-intro",title:"Introduction",description:"",source:"@site/cpp/00-intro.md",sourceDirName:".",slug:"/",permalink:"/cpp/",draft:!1,unlisted:!1,editUrl:"https://github.com/mfocko/blog/tree/main/cpp/00-intro.md",tags:[],version:"current",lastUpdatedAt:1700944879,formattedLastUpdatedAt:"Nov 25, 2023",sidebarPosition:0,frontMatter:{id:"cpp-intro",title:"Introduction",slug:"/"},sidebar:"autogeneratedBar",next:{title:"Exceptions and RAII",permalink:"/cpp/category/exceptions-and-raii"}},a={},p=[];function u(t){return(0,o.jsx)(o.Fragment,{})}function d(t={}){const{wrapper:e}={...(0,r.a)(),...t.components};return e?(0,o.jsx)(e,{...t,children:(0,o.jsx)(u,{...t})}):u()}},1151:(t,e,n)=>{n.d(e,{Z:()=>s,a:()=>c});var o=n(7294);const r={},i=o.createContext(r);function c(t){const e=o.useContext(i);return o.useMemo((function(){return"function"==typeof t?t(e):{...e,...t}}),[e,t])}function s(t){let e;return e=t.disableParentContext?"function"==typeof t.components?t.components(r):t.components||r:c(t.components),o.createElement(i.Provider,{value:e},t.children)}}}]); \ No newline at end of file diff --git a/assets/js/794ef108.6e8dbf85.js b/assets/js/794ef108.49bf3dba.js similarity index 95% rename from assets/js/794ef108.6e8dbf85.js rename to assets/js/794ef108.49bf3dba.js index 2ed5b10..0bb6def 100644 --- a/assets/js/794ef108.6e8dbf85.js +++ b/assets/js/794ef108.49bf3dba.js @@ -1 +1 @@ -"use strict";(self.webpackChunkfi=self.webpackChunkfi||[]).push([[3803],{6427:(t,e,n)=>{n.r(e),n.d(e,{assets:()=>a,contentTitle:()=>s,default:()=>l,frontMatter:()=>i,metadata:()=>c,toc:()=>u});var o=n(5893),r=n(1151);const i={id:"c-intro",title:"Introduction",slug:"/"},s=void 0,c={id:"c-intro",title:"Introduction",description:"",source:"@site/c/00-intro.md",sourceDirName:".",slug:"/",permalink:"/c/",draft:!1,unlisted:!1,editUrl:"https://github.com/mfocko/blog/tree/main/c/00-intro.md",tags:[],version:"current",lastUpdatedAt:1700847079,formattedLastUpdatedAt:"Nov 24, 2023",sidebarPosition:0,frontMatter:{id:"c-intro",title:"Introduction",slug:"/"},sidebar:"autogeneratedBar",next:{title:"Bonuses",permalink:"/c/category/bonuses"}},a={},u=[];function d(t){return(0,o.jsx)(o.Fragment,{})}function l(t={}){const{wrapper:e}={...(0,r.a)(),...t.components};return e?(0,o.jsx)(e,{...t,children:(0,o.jsx)(d,{...t})}):d()}},1151:(t,e,n)=>{n.d(e,{Z:()=>c,a:()=>s});var o=n(7294);const r={},i=o.createContext(r);function s(t){const e=o.useContext(i);return o.useMemo((function(){return"function"==typeof t?t(e):{...e,...t}}),[e,t])}function c(t){let e;return e=t.disableParentContext?"function"==typeof t.components?t.components(r):t.components||r:s(t.components),o.createElement(i.Provider,{value:e},t.children)}}}]); \ No newline at end of file +"use strict";(self.webpackChunkfi=self.webpackChunkfi||[]).push([[3803],{6427:(t,e,n)=>{n.r(e),n.d(e,{assets:()=>a,contentTitle:()=>s,default:()=>l,frontMatter:()=>i,metadata:()=>c,toc:()=>u});var o=n(5893),r=n(1151);const i={id:"c-intro",title:"Introduction",slug:"/"},s=void 0,c={id:"c-intro",title:"Introduction",description:"",source:"@site/c/00-intro.md",sourceDirName:".",slug:"/",permalink:"/c/",draft:!1,unlisted:!1,editUrl:"https://github.com/mfocko/blog/tree/main/c/00-intro.md",tags:[],version:"current",lastUpdatedAt:1700944879,formattedLastUpdatedAt:"Nov 25, 2023",sidebarPosition:0,frontMatter:{id:"c-intro",title:"Introduction",slug:"/"},sidebar:"autogeneratedBar",next:{title:"Bonuses",permalink:"/c/category/bonuses"}},a={},u=[];function 
d(t){return(0,o.jsx)(o.Fragment,{})}function l(t={}){const{wrapper:e}={...(0,r.a)(),...t.components};return e?(0,o.jsx)(e,{...t,children:(0,o.jsx)(d,{...t})}):d()}},1151:(t,e,n)=>{n.d(e,{Z:()=>c,a:()=>s});var o=n(7294);const r={},i=o.createContext(r);function s(t){const e=o.useContext(i);return o.useMemo((function(){return"function"==typeof t?t(e):{...e,...t}}),[e,t])}function c(t){let e;return e=t.disableParentContext?"function"==typeof t.components?t.components(r):t.components||r:s(t.components),o.createElement(i.Provider,{value:e},t.children)}}}]); \ No newline at end of file diff --git a/assets/js/84d1e0d8.25601c58.js b/assets/js/84d1e0d8.ad0891ba.js similarity index 97% rename from assets/js/84d1e0d8.25601c58.js rename to assets/js/84d1e0d8.ad0891ba.js index 8071676..8b40682 100644 --- a/assets/js/84d1e0d8.25601c58.js +++ b/assets/js/84d1e0d8.ad0891ba.js @@ -1 +1 @@ -"use strict";(self.webpackChunkfi=self.webpackChunkfi||[]).push([[1885],{9713:(t,e,n)=>{n.r(e),n.d(e,{assets:()=>c,contentTitle:()=>i,default:()=>u,frontMatter:()=>r,metadata:()=>a,toc:()=>d});var o=n(5893),s=n(1151);const r={id:"algorithms-intro",title:"Introduction",slug:"/"},i=void 0,a={id:"algorithms-intro",title:"Introduction",description:"In this part you can find \u201crandom\u201d additional materials I have written over the",source:"@site/algorithms/00-intro.md",sourceDirName:".",slug:"/",permalink:"/algorithms/",draft:!1,unlisted:!1,editUrl:"https://github.com/mfocko/blog/tree/main/algorithms/00-intro.md",tags:[],version:"current",lastUpdatedAt:1700847079,formattedLastUpdatedAt:"Nov 24, 2023",sidebarPosition:0,frontMatter:{id:"algorithms-intro",title:"Introduction",slug:"/"},sidebar:"autogeneratedBar",next:{title:"Algorithms and Correctness",permalink:"/algorithms/category/algorithms-and-correctness"}},c={},d=[];function l(t){const e={a:"a",em:"em",p:"p",...(0,s.a)(),...t.components};return(0,o.jsxs)(o.Fragment,{children:[(0,o.jsxs)(e.p,{children:["In this part you can find \u201crandom\u201d additional materials I have written over the\ncourse of teaching ",(0,o.jsx)(e.em,{children:"Algorithms and data structures I"}),"."]}),"\n",(0,o.jsx)(e.p,{children:"It is a various mix of stuff that may have been produced as a follow-up on some\nquestion asked at the seminar or spontanously."}),"\n",(0,o.jsxs)(e.p,{children:["If you have some ideas for posts, please do not hesitate to submit them as issues\nin the linked ",(0,o.jsx)(e.a,{href:"https://gitlab.fi.muni.cz/xfocko/kb/issues",children:"GitLab"}),"."]})]})}function u(t={}){const{wrapper:e}={...(0,s.a)(),...t.components};return e?(0,o.jsx)(e,{...t,children:(0,o.jsx)(l,{...t})}):l(t)}},1151:(t,e,n)=>{n.d(e,{Z:()=>a,a:()=>i});var o=n(7294);const s={},r=o.createContext(s);function i(t){const e=o.useContext(r);return o.useMemo((function(){return"function"==typeof t?t(e):{...e,...t}}),[e,t])}function a(t){let e;return e=t.disableParentContext?"function"==typeof t.components?t.components(s):t.components||s:i(t.components),o.createElement(r.Provider,{value:e},t.children)}}}]); \ No newline at end of file +"use strict";(self.webpackChunkfi=self.webpackChunkfi||[]).push([[1885],{9713:(t,e,n)=>{n.r(e),n.d(e,{assets:()=>c,contentTitle:()=>i,default:()=>u,frontMatter:()=>r,metadata:()=>a,toc:()=>d});var o=n(5893),s=n(1151);const r={id:"algorithms-intro",title:"Introduction",slug:"/"},i=void 0,a={id:"algorithms-intro",title:"Introduction",description:"In this part you can find \u201crandom\u201d additional materials I have written over 
the",source:"@site/algorithms/00-intro.md",sourceDirName:".",slug:"/",permalink:"/algorithms/",draft:!1,unlisted:!1,editUrl:"https://github.com/mfocko/blog/tree/main/algorithms/00-intro.md",tags:[],version:"current",lastUpdatedAt:1700944879,formattedLastUpdatedAt:"Nov 25, 2023",sidebarPosition:0,frontMatter:{id:"algorithms-intro",title:"Introduction",slug:"/"},sidebar:"autogeneratedBar",next:{title:"Algorithms and Correctness",permalink:"/algorithms/category/algorithms-and-correctness"}},c={},d=[];function l(t){const e={a:"a",em:"em",p:"p",...(0,s.a)(),...t.components};return(0,o.jsxs)(o.Fragment,{children:[(0,o.jsxs)(e.p,{children:["In this part you can find \u201crandom\u201d additional materials I have written over the\ncourse of teaching ",(0,o.jsx)(e.em,{children:"Algorithms and data structures I"}),"."]}),"\n",(0,o.jsx)(e.p,{children:"It is a various mix of stuff that may have been produced as a follow-up on some\nquestion asked at the seminar or spontanously."}),"\n",(0,o.jsxs)(e.p,{children:["If you have some ideas for posts, please do not hesitate to submit them as issues\nin the linked ",(0,o.jsx)(e.a,{href:"https://gitlab.fi.muni.cz/xfocko/kb/issues",children:"GitLab"}),"."]})]})}function u(t={}){const{wrapper:e}={...(0,s.a)(),...t.components};return e?(0,o.jsx)(e,{...t,children:(0,o.jsx)(l,{...t})}):l(t)}},1151:(t,e,n)=>{n.d(e,{Z:()=>a,a:()=>i});var o=n(7294);const s={},r=o.createContext(s);function i(t){const e=o.useContext(r);return o.useMemo((function(){return"function"==typeof t?t(e):{...e,...t}}),[e,t])}function a(t){let e;return e=t.disableParentContext?"function"==typeof t.components?t.components(s):t.components||s:i(t.components),o.createElement(r.Provider,{value:e},t.children)}}}]); \ No newline at end of file diff --git a/assets/js/b1288602.952a9bbb.js b/assets/js/b1288602.3deae653.js similarity index 98% rename from assets/js/b1288602.952a9bbb.js rename to assets/js/b1288602.3deae653.js index 0bc813b..3a36d12 100644 --- a/assets/js/b1288602.952a9bbb.js +++ b/assets/js/b1288602.3deae653.js @@ -1 +1 @@ -"use strict";(self.webpackChunkfi=self.webpackChunkfi||[]).push([[59],{1456:(e,n,t)=>{t.r(n),t.d(n,{assets:()=>c,contentTitle:()=>i,default:()=>d,frontMatter:()=>o,metadata:()=>a,toc:()=>h});var r=t(5893),s=t(1151);const o={title:"Submitting merge requests"},i="Submitting merge requests for review",a={id:"mr",title:"Submitting merge requests",description:"This tutorial aims to show you how to follow basic git workflow and submit changes",source:"@site/c/mr.md",sourceDirName:".",slug:"/mr",permalink:"/c/mr",draft:!1,unlisted:!1,editUrl:"https://github.com/mfocko/blog/tree/main/c/mr.md",tags:[],version:"current",lastUpdatedAt:1700847079,formattedLastUpdatedAt:"Nov 24, 2023",frontMatter:{title:"Submitting merge requests"},sidebar:"autogeneratedBar",previous:{title:"Practice exam C",permalink:"/c/pexam/cams"}},c={},h=[{value:"Tutorial",id:"tutorial",level:2},{value:"Step #1 - Starting from the clean repository",id:"step-1---starting-from-the-clean-repository",level:3},{value:"Step #2 - Create new branch",id:"step-2---create-new-branch",level:3},{value:"Step #3 - Do the assignment",id:"step-3---do-the-assignment",level:3},{value:"Step #4 - Commit and upload the changes to GitLab",id:"step-4---commit-and-upload-the-changes-to-gitlab",level:3},{value:"Step #5 - Creating a merge request manually",id:"step-5---creating-a-merge-request-manually",level:3},{value:"Step #6 - Set assignees",id:"step-6---set-assignees",level:3},{value:"Step #7 - Return to default 
branch",id:"step-7---return-to-default-branch",level:3}];function l(e){const n={a:"a",blockquote:"blockquote",code:"code",em:"em",h1:"h1",h2:"h2",h3:"h3",hr:"hr",li:"li",ol:"ol",p:"p",pre:"pre",strong:"strong",...(0,s.a)(),...e.components};return(0,r.jsxs)(r.Fragment,{children:[(0,r.jsx)(n.h1,{id:"submitting-merge-requests-for-review",children:"Submitting merge requests for review"}),"\n",(0,r.jsxs)(n.p,{children:["This tutorial aims to show you how to follow basic git workflow and submit changes\nthrough ",(0,r.jsx)(n.em,{children:"Merge Requests"})," for review."]}),"\n",(0,r.jsxs)(n.p,{children:["The rudimentary idea behind aims for changes to be present on a separate branch\nthat is supposedly ",(0,r.jsx)(n.em,{children:"merged"})," into the default branch. Till then changes can be reviewed\non ",(0,r.jsx)(n.em,{children:"Merge Request"})," and additional changes may be made based on the reviews. Once\nthe changes satisfy requirements, the merge request is merged."]}),"\n",(0,r.jsx)(n.h2,{id:"tutorial",children:"Tutorial"}),"\n",(0,r.jsxs)(n.blockquote,{children:["\n",(0,r.jsxs)(n.p,{children:["Use this tutorial only for bonus assignments ",(0,r.jsx)(n.strong,{children:"made by your tutors"})," or in case\nyou need to make up for the absence."]}),"\n"]}),"\n",(0,r.jsx)(n.h3,{id:"step-1---starting-from-the-clean-repository",children:"Step #1 - Starting from the clean repository"}),"\n",(0,r.jsxs)(n.p,{children:["In your repository (either locally or on aisa) type ",(0,r.jsx)(n.code,{children:"git status"})," and check if your\nrepository is clean and you are present on the main branch (",(0,r.jsx)(n.code,{children:"master"}),", ",(0,r.jsx)(n.code,{children:"main"})," or\n",(0,r.jsx)(n.code,{children:"trunk"}),"). If you do not know what your default branch is, it is probably ",(0,r.jsx)(n.code,{children:"master"}),"\nand you should not be on any other branch."]}),"\n",(0,r.jsx)(n.p,{children:"Output of the command should look like this:"}),"\n",(0,r.jsx)(n.pre,{children:(0,r.jsx)(n.code,{children:"aisa$ git status\nOn branch master # Or main or trunk.\nYour branch is up to date with 'origin/master'.\n\nnothing to commit, working tree clean\n"})}),"\n",(0,r.jsxs)(n.blockquote,{children:["\n",(0,r.jsxs)(n.p,{children:["In case you are on different branch or there are uncommitted changes,\n",(0,r.jsx)(n.strong,{children:"do not continue!!!"})," Clean your repository (commit the changes or discard\nthem), before you continue."]}),"\n"]}),"\n",(0,r.jsx)(n.h3,{id:"step-2---create-new-branch",children:"Step #2 - Create new branch"}),"\n",(0,r.jsx)(n.p,{children:"In your repository write command:"}),"\n",(0,r.jsx)(n.pre,{children:(0,r.jsx)(n.code,{children:"aisa$ git checkout -b BRANCH\nSwitched to a new branch 'BRANCH'\n"})}),"\n",(0,r.jsxs)(n.p,{children:["Instead of ",(0,r.jsx)(n.code,{children:"BRANCH"})," use some reasonable name for the branch. For example if you\nare working on the seminar from 3rd week, name the branch ",(0,r.jsx)(n.code,{children:"seminar-03"}),"."]}),"\n",(0,r.jsx)(n.h3,{id:"step-3---do-the-assignment",children:"Step #3 - Do the assignment"}),"\n",(0,r.jsx)(n.p,{children:"Download the skeleton for the seminar assignment, extract and program. 
For example\nif you are working on 3rd seminar, you can do so by:"}),"\n",(0,r.jsx)(n.pre,{children:(0,r.jsx)(n.code,{children:"aisa$ wget https://www.fi.muni.cz/pb071/seminars/seminar-03/pb071-seminar-03.zip\naisa$ unzip pb071-seminar-03.zip\n# Now you should have directory 'seminar-03'.\naisa$ rm pb071-seminar-03.zip\naisa$ cd seminar-03\n# You can work on the assignment.\n"})}),"\n",(0,r.jsx)(n.h3,{id:"step-4---commit-and-upload-the-changes-to-gitlab",children:"Step #4 - Commit and upload the changes to GitLab"}),"\n",(0,r.jsxs)(n.p,{children:["The same way you ",(0,r.jsx)(n.em,{children:"add"})," and ",(0,r.jsx)(n.em,{children:"commit"})," files for the homework assignments, you do for\nthe seminar."]}),"\n",(0,r.jsxs)(n.p,{children:["Now you can upload the changes to GitLab. ",(0,r.jsx)(n.code,{children:"git push"})," is not enough, since repository\non GitLab does not know your new branch. You can solve this by adding arguments:"]}),"\n",(0,r.jsx)(n.pre,{children:(0,r.jsx)(n.code,{children:"aisa$ git push origin BRANCH\n...\nremote: To create a merge request for BRANCH, visit:\nremote: https://gitlab.fi.muni.cz/login/pb071/merge_requests/new?merge_request%5Bsource_branch%5D=BRANCH\n...\n"})}),"\n",(0,r.jsx)(n.p,{children:"In the output you should have a link for creating a merge request. If you see this\nlink, open it and skip next step."}),"\n",(0,r.jsx)(n.h3,{id:"step-5---creating-a-merge-request-manually",children:"Step #5 - Creating a merge request manually"}),"\n",(0,r.jsxs)(n.ol,{children:["\n",(0,r.jsx)(n.li,{children:"Open your repository on GitLab."}),"\n",(0,r.jsxs)(n.li,{children:["On the left panel click on ",(0,r.jsx)(n.em,{children:"Merge Requests"}),"."]}),"\n",(0,r.jsxs)(n.li,{children:["Click on ",(0,r.jsx)(n.em,{children:"New Merge Request"}),"."]}),"\n",(0,r.jsxs)(n.li,{children:["In ",(0,r.jsx)(n.em,{children:"Source branch"})," select ",(0,r.jsx)(n.code,{children:"login/pb071"})," and ",(0,r.jsx)(n.code,{children:"BRANCH"}),", which you created."]}),"\n",(0,r.jsxs)(n.li,{children:["In ",(0,r.jsx)(n.em,{children:"Target branch"})," select ",(0,r.jsx)(n.code,{children:"login/pb071"})," and your default branch you have seen\nin the output of the first command. (most likely ",(0,r.jsx)(n.code,{children:"master"}),")"]}),"\n",(0,r.jsxs)(n.li,{children:["Click on ",(0,r.jsx)(n.em,{children:"Compare branches and continue"}),"."]}),"\n"]}),"\n",(0,r.jsx)(n.h3,{id:"step-6---set-assignees",children:"Step #6 - Set assignees"}),"\n",(0,r.jsxs)(n.p,{children:["On the page that is opened, please check at the top that you are creating merge\nrequest ",(0,r.jsx)(n.strong,{children:"from"})," your new branch ",(0,r.jsx)(n.strong,{children:"to"})," your default branch (one of ",(0,r.jsx)(n.code,{children:"master"}),", ",(0,r.jsx)(n.code,{children:"main"}),"\nor ",(0,r.jsx)(n.code,{children:"trunk"}),")."]}),"\n",(0,r.jsxs)(n.p,{children:["Then in the field ",(0,r.jsx)(n.em,{children:"Assignees"})," set your tutors based on the seminar group. You can\nuse login for a quick look up."]}),"\n",(0,r.jsxs)(n.p,{children:["In the end click on ",(0,r.jsx)(n.em,{children:"Submit merge request"}),"."]}),"\n",(0,r.jsx)(n.h3,{id:"step-7---return-to-default-branch",children:"Step #7 - Return to default branch"}),"\n",(0,r.jsx)(n.p,{children:"Homework assignments can be submitted only from branches specified in the rules\nfor the course. 
Because of that, before you do anything else, you should switch\nback to your default branch."}),"\n",(0,r.jsxs)(n.p,{children:["First of all, same as in step #1, check that your repository is clean with ",(0,r.jsx)(n.code,{children:"git status"}),".\nFor the sake of safety, do not continue without clean repository. Then with command\n",(0,r.jsx)(n.code,{children:"git checkout BRANCH"})," switch to your default branch ",(0,r.jsx)(n.code,{children:"BRANCH"}),"."]}),"\n",(0,r.jsxs)(n.p,{children:["If you do not know which branch is your default, try ",(0,r.jsx)(n.code,{children:"git branch"})," that outputs all branches in your repository. Default branch is typically ",(0,r.jsx)(n.code,{children:"master"}),", but can\nbe ",(0,r.jsx)(n.code,{children:"main"})," or ",(0,r.jsx)(n.code,{children:"trunk"}),"."]}),"\n",(0,r.jsx)(n.pre,{children:(0,r.jsx)(n.code,{children:"aisa$ git status\n# Check if repository is clean\n\n# If you know, what is your default branch, you can skip next command.\naisa$ git branch\n# Find the default branch in the list; should be one of the `master`, `main` or\n# `trunk` and you should not have more than one of those.\n# In case the list clears the terminal and you cannot see shell prompt, you can\n# press `q` to quit the pager.\n\naisa$ git checkout master\n"})}),"\n",(0,r.jsx)(n.hr,{}),"\n",(0,r.jsxs)(n.p,{children:["Adapted from: ",(0,r.jsx)(n.a,{href:"https://www.fi.muni.cz/~xlacko1/pb071/mr.html",children:"https://www.fi.muni.cz/~xlacko1/pb071/mr.html"})]})]})}function d(e={}){const{wrapper:n}={...(0,s.a)(),...e.components};return n?(0,r.jsx)(n,{...e,children:(0,r.jsx)(l,{...e})}):l(e)}},1151:(e,n,t)=>{t.d(n,{Z:()=>a,a:()=>i});var r=t(7294);const s={},o=r.createContext(s);function i(e){const n=r.useContext(o);return r.useMemo((function(){return"function"==typeof e?e(n):{...n,...e}}),[n,e])}function a(e){let n;return n=e.disableParentContext?"function"==typeof e.components?e.components(s):e.components||s:i(e.components),r.createElement(o.Provider,{value:n},e.children)}}}]); \ No newline at end of file +"use strict";(self.webpackChunkfi=self.webpackChunkfi||[]).push([[59],{1456:(e,n,t)=>{t.r(n),t.d(n,{assets:()=>c,contentTitle:()=>i,default:()=>d,frontMatter:()=>o,metadata:()=>a,toc:()=>h});var r=t(5893),s=t(1151);const o={title:"Submitting merge requests"},i="Submitting merge requests for review",a={id:"mr",title:"Submitting merge requests",description:"This tutorial aims to show you how to follow basic git workflow and submit changes",source:"@site/c/mr.md",sourceDirName:".",slug:"/mr",permalink:"/c/mr",draft:!1,unlisted:!1,editUrl:"https://github.com/mfocko/blog/tree/main/c/mr.md",tags:[],version:"current",lastUpdatedAt:1700944879,formattedLastUpdatedAt:"Nov 25, 2023",frontMatter:{title:"Submitting merge requests"},sidebar:"autogeneratedBar",previous:{title:"Practice exam C",permalink:"/c/pexam/cams"}},c={},h=[{value:"Tutorial",id:"tutorial",level:2},{value:"Step #1 - Starting from the clean repository",id:"step-1---starting-from-the-clean-repository",level:3},{value:"Step #2 - Create new branch",id:"step-2---create-new-branch",level:3},{value:"Step #3 - Do the assignment",id:"step-3---do-the-assignment",level:3},{value:"Step #4 - Commit and upload the changes to GitLab",id:"step-4---commit-and-upload-the-changes-to-gitlab",level:3},{value:"Step #5 - Creating a merge request manually",id:"step-5---creating-a-merge-request-manually",level:3},{value:"Step #6 - Set assignees",id:"step-6---set-assignees",level:3},{value:"Step #7 - Return to default 
branch",id:"step-7---return-to-default-branch",level:3}];function l(e){const n={a:"a",blockquote:"blockquote",code:"code",em:"em",h1:"h1",h2:"h2",h3:"h3",hr:"hr",li:"li",ol:"ol",p:"p",pre:"pre",strong:"strong",...(0,s.a)(),...e.components};return(0,r.jsxs)(r.Fragment,{children:[(0,r.jsx)(n.h1,{id:"submitting-merge-requests-for-review",children:"Submitting merge requests for review"}),"\n",(0,r.jsxs)(n.p,{children:["This tutorial aims to show you how to follow basic git workflow and submit changes\nthrough ",(0,r.jsx)(n.em,{children:"Merge Requests"})," for review."]}),"\n",(0,r.jsxs)(n.p,{children:["The rudimentary idea behind aims for changes to be present on a separate branch\nthat is supposedly ",(0,r.jsx)(n.em,{children:"merged"})," into the default branch. Till then changes can be reviewed\non ",(0,r.jsx)(n.em,{children:"Merge Request"})," and additional changes may be made based on the reviews. Once\nthe changes satisfy requirements, the merge request is merged."]}),"\n",(0,r.jsx)(n.h2,{id:"tutorial",children:"Tutorial"}),"\n",(0,r.jsxs)(n.blockquote,{children:["\n",(0,r.jsxs)(n.p,{children:["Use this tutorial only for bonus assignments ",(0,r.jsx)(n.strong,{children:"made by your tutors"})," or in case\nyou need to make up for the absence."]}),"\n"]}),"\n",(0,r.jsx)(n.h3,{id:"step-1---starting-from-the-clean-repository",children:"Step #1 - Starting from the clean repository"}),"\n",(0,r.jsxs)(n.p,{children:["In your repository (either locally or on aisa) type ",(0,r.jsx)(n.code,{children:"git status"})," and check if your\nrepository is clean and you are present on the main branch (",(0,r.jsx)(n.code,{children:"master"}),", ",(0,r.jsx)(n.code,{children:"main"})," or\n",(0,r.jsx)(n.code,{children:"trunk"}),"). If you do not know what your default branch is, it is probably ",(0,r.jsx)(n.code,{children:"master"}),"\nand you should not be on any other branch."]}),"\n",(0,r.jsx)(n.p,{children:"Output of the command should look like this:"}),"\n",(0,r.jsx)(n.pre,{children:(0,r.jsx)(n.code,{children:"aisa$ git status\nOn branch master # Or main or trunk.\nYour branch is up to date with 'origin/master'.\n\nnothing to commit, working tree clean\n"})}),"\n",(0,r.jsxs)(n.blockquote,{children:["\n",(0,r.jsxs)(n.p,{children:["In case you are on different branch or there are uncommitted changes,\n",(0,r.jsx)(n.strong,{children:"do not continue!!!"})," Clean your repository (commit the changes or discard\nthem), before you continue."]}),"\n"]}),"\n",(0,r.jsx)(n.h3,{id:"step-2---create-new-branch",children:"Step #2 - Create new branch"}),"\n",(0,r.jsx)(n.p,{children:"In your repository write command:"}),"\n",(0,r.jsx)(n.pre,{children:(0,r.jsx)(n.code,{children:"aisa$ git checkout -b BRANCH\nSwitched to a new branch 'BRANCH'\n"})}),"\n",(0,r.jsxs)(n.p,{children:["Instead of ",(0,r.jsx)(n.code,{children:"BRANCH"})," use some reasonable name for the branch. For example if you\nare working on the seminar from 3rd week, name the branch ",(0,r.jsx)(n.code,{children:"seminar-03"}),"."]}),"\n",(0,r.jsx)(n.h3,{id:"step-3---do-the-assignment",children:"Step #3 - Do the assignment"}),"\n",(0,r.jsx)(n.p,{children:"Download the skeleton for the seminar assignment, extract and program. 
For example\nif you are working on 3rd seminar, you can do so by:"}),"\n",(0,r.jsx)(n.pre,{children:(0,r.jsx)(n.code,{children:"aisa$ wget https://www.fi.muni.cz/pb071/seminars/seminar-03/pb071-seminar-03.zip\naisa$ unzip pb071-seminar-03.zip\n# Now you should have directory 'seminar-03'.\naisa$ rm pb071-seminar-03.zip\naisa$ cd seminar-03\n# You can work on the assignment.\n"})}),"\n",(0,r.jsx)(n.h3,{id:"step-4---commit-and-upload-the-changes-to-gitlab",children:"Step #4 - Commit and upload the changes to GitLab"}),"\n",(0,r.jsxs)(n.p,{children:["The same way you ",(0,r.jsx)(n.em,{children:"add"})," and ",(0,r.jsx)(n.em,{children:"commit"})," files for the homework assignments, you do for\nthe seminar."]}),"\n",(0,r.jsxs)(n.p,{children:["Now you can upload the changes to GitLab. ",(0,r.jsx)(n.code,{children:"git push"})," is not enough, since repository\non GitLab does not know your new branch. You can solve this by adding arguments:"]}),"\n",(0,r.jsx)(n.pre,{children:(0,r.jsx)(n.code,{children:"aisa$ git push origin BRANCH\n...\nremote: To create a merge request for BRANCH, visit:\nremote: https://gitlab.fi.muni.cz/login/pb071/merge_requests/new?merge_request%5Bsource_branch%5D=BRANCH\n...\n"})}),"\n",(0,r.jsx)(n.p,{children:"In the output you should have a link for creating a merge request. If you see this\nlink, open it and skip next step."}),"\n",(0,r.jsx)(n.h3,{id:"step-5---creating-a-merge-request-manually",children:"Step #5 - Creating a merge request manually"}),"\n",(0,r.jsxs)(n.ol,{children:["\n",(0,r.jsx)(n.li,{children:"Open your repository on GitLab."}),"\n",(0,r.jsxs)(n.li,{children:["On the left panel click on ",(0,r.jsx)(n.em,{children:"Merge Requests"}),"."]}),"\n",(0,r.jsxs)(n.li,{children:["Click on ",(0,r.jsx)(n.em,{children:"New Merge Request"}),"."]}),"\n",(0,r.jsxs)(n.li,{children:["In ",(0,r.jsx)(n.em,{children:"Source branch"})," select ",(0,r.jsx)(n.code,{children:"login/pb071"})," and ",(0,r.jsx)(n.code,{children:"BRANCH"}),", which you created."]}),"\n",(0,r.jsxs)(n.li,{children:["In ",(0,r.jsx)(n.em,{children:"Target branch"})," select ",(0,r.jsx)(n.code,{children:"login/pb071"})," and your default branch you have seen\nin the output of the first command. (most likely ",(0,r.jsx)(n.code,{children:"master"}),")"]}),"\n",(0,r.jsxs)(n.li,{children:["Click on ",(0,r.jsx)(n.em,{children:"Compare branches and continue"}),"."]}),"\n"]}),"\n",(0,r.jsx)(n.h3,{id:"step-6---set-assignees",children:"Step #6 - Set assignees"}),"\n",(0,r.jsxs)(n.p,{children:["On the page that is opened, please check at the top that you are creating merge\nrequest ",(0,r.jsx)(n.strong,{children:"from"})," your new branch ",(0,r.jsx)(n.strong,{children:"to"})," your default branch (one of ",(0,r.jsx)(n.code,{children:"master"}),", ",(0,r.jsx)(n.code,{children:"main"}),"\nor ",(0,r.jsx)(n.code,{children:"trunk"}),")."]}),"\n",(0,r.jsxs)(n.p,{children:["Then in the field ",(0,r.jsx)(n.em,{children:"Assignees"})," set your tutors based on the seminar group. You can\nuse login for a quick look up."]}),"\n",(0,r.jsxs)(n.p,{children:["In the end click on ",(0,r.jsx)(n.em,{children:"Submit merge request"}),"."]}),"\n",(0,r.jsx)(n.h3,{id:"step-7---return-to-default-branch",children:"Step #7 - Return to default branch"}),"\n",(0,r.jsx)(n.p,{children:"Homework assignments can be submitted only from branches specified in the rules\nfor the course. 
Because of that, before you do anything else, you should switch\nback to your default branch."}),"\n",(0,r.jsxs)(n.p,{children:["First of all, same as in step #1, check that your repository is clean with ",(0,r.jsx)(n.code,{children:"git status"}),".\nFor the sake of safety, do not continue without clean repository. Then with command\n",(0,r.jsx)(n.code,{children:"git checkout BRANCH"})," switch to your default branch ",(0,r.jsx)(n.code,{children:"BRANCH"}),"."]}),"\n",(0,r.jsxs)(n.p,{children:["If you do not know which branch is your default, try ",(0,r.jsx)(n.code,{children:"git branch"})," that outputs all branches in your repository. Default branch is typically ",(0,r.jsx)(n.code,{children:"master"}),", but can\nbe ",(0,r.jsx)(n.code,{children:"main"})," or ",(0,r.jsx)(n.code,{children:"trunk"}),"."]}),"\n",(0,r.jsx)(n.pre,{children:(0,r.jsx)(n.code,{children:"aisa$ git status\n# Check if repository is clean\n\n# If you know, what is your default branch, you can skip next command.\naisa$ git branch\n# Find the default branch in the list; should be one of the `master`, `main` or\n# `trunk` and you should not have more than one of those.\n# In case the list clears the terminal and you cannot see shell prompt, you can\n# press `q` to quit the pager.\n\naisa$ git checkout master\n"})}),"\n",(0,r.jsx)(n.hr,{}),"\n",(0,r.jsxs)(n.p,{children:["Adapted from: ",(0,r.jsx)(n.a,{href:"https://www.fi.muni.cz/~xlacko1/pb071/mr.html",children:"https://www.fi.muni.cz/~xlacko1/pb071/mr.html"})]})]})}function d(e={}){const{wrapper:n}={...(0,s.a)(),...e.components};return n?(0,r.jsx)(n,{...e,children:(0,r.jsx)(l,{...e})}):l(e)}},1151:(e,n,t)=>{t.d(n,{Z:()=>a,a:()=>i});var r=t(7294);const s={},o=r.createContext(s);function i(e){const n=r.useContext(o);return r.useMemo((function(){return"function"==typeof e?e(n):{...n,...e}}),[n,e])}function a(e){let n;return n=e.disableParentContext?"function"==typeof e.components?e.components(s):e.components||s:i(e.components),r.createElement(o.Provider,{value:n},e.children)}}}]); \ No newline at end of file diff --git a/assets/js/d05e838c.90f934d6.js b/assets/js/d05e838c.e388e58d.js similarity index 98% rename from assets/js/d05e838c.90f934d6.js rename to assets/js/d05e838c.e388e58d.js index e1aaee7..64aa5ce 100644 --- a/assets/js/d05e838c.90f934d6.js +++ b/assets/js/d05e838c.e388e58d.js @@ -1 +1 @@ -"use strict";(self.webpackChunkfi=self.webpackChunkfi||[]).push([[6544],{3004:(e,n,r)=>{r.r(n),r.d(n,{assets:()=>d,contentTitle:()=>c,default:()=>a,frontMatter:()=>i,metadata:()=>o,toc:()=>l});var s=r(5893),t=r(1151);const i={id:"seminar-05-06",title:"5th and 6th seminar",description:"200IQ encryption.\n"},c=void 0,o={id:"bonuses/seminar-05-06",title:"5th and 6th seminar",description:"200IQ encryption.\n",source:"@site/c/bonuses/05-06.md",sourceDirName:"bonuses",slug:"/bonuses/seminar-05-06",permalink:"/c/bonuses/seminar-05-06",draft:!1,unlisted:!1,editUrl:"https://github.com/mfocko/blog/tree/main/c/bonuses/05-06.md",tags:[],version:"current",lastUpdatedAt:1700847079,formattedLastUpdatedAt:"Nov 24, 2023",frontMatter:{id:"seminar-05-06",title:"5th and 6th seminar",description:"200IQ encryption.\n"},sidebar:"autogeneratedBar",previous:{title:"4th seminar",permalink:"/c/bonuses/seminar-04"},next:{title:"8th seminar",permalink:"/c/bonuses/seminar-08"}},d={},l=[{value:"Introduction",id:"introduction",level:2},{value:"Task no. 1: Reverse (0.5 K\u20a1)",id:"task-no-1-reverse-05-k",level:3},{value:"Task no. 
2: Vigen\xe8re (0.5 K\u20a1)",id:"task-no-2-vigen\xe8re-05-k",level:3},{value:"Bonus part (0.5 K\u20a1)",id:"bonus-part-05-k",level:4},{value:"Task no. 3: Bit madness (0.5 K\u20a1)",id:"task-no-3-bit-madness-05-k",level:3},{value:"Task no. 4: All combined to BMP (0.5 K\u20a1)",id:"task-no-4-all-combined-to-bmp-05-k",level:3},{value:"Submitting",id:"submitting",level:2}];function h(e){const n={a:"a",code:"code",h2:"h2",h3:"h3",h4:"h4",hr:"hr",li:"li",ol:"ol",p:"p",pre:"pre",ul:"ul",...(0,t.a)(),...e.components};return(0,s.jsxs)(s.Fragment,{children:[(0,s.jsx)(n.p,{children:"For this bonus you can get at maximum 2.5 K\u20a1."}),"\n",(0,s.jsx)(n.p,{children:(0,s.jsx)(n.a,{href:"pathname:///files/c/bonuses/05-06.tar.gz",children:"Source"})}),"\n",(0,s.jsx)(n.h2,{id:"introduction",children:"Introduction"}),"\n",(0,s.jsx)(n.p,{children:"In this bonus you will implement few functions that will be used together for\nimplementing a very special cipher."}),"\n",(0,s.jsx)(n.h3,{id:"task-no-1-reverse-05-k",children:"Task no. 1: Reverse (0.5 K\u20a1)"}),"\n",(0,s.jsxs)(n.p,{children:["Write a function ",(0,s.jsx)(n.code,{children:"char* reverse(const char* text)"})," that returns copy of the input\nstring in reversed order (also uppercase)."]}),"\n",(0,s.jsxs)(n.p,{children:["In case you are given ",(0,s.jsx)(n.code,{children:"NULL"}),", return ",(0,s.jsx)(n.code,{children:"NULL"}),"."]}),"\n",(0,s.jsx)(n.p,{children:"Example (more in tests):"}),"\n",(0,s.jsx)(n.pre,{children:(0,s.jsx)(n.code,{className:"language-c",children:'char* reversed = reverse("Hello world!");\n\nprintf("%s\\n", reversed);\n// "!DLROW OLLEH"\n\nif (reversed != NULL) {\n free(reversed);\n}\n'})}),"\n",(0,s.jsx)(n.h3,{id:"task-no-2-vigen\xe8re-05-k",children:"Task no. 2: Vigen\xe8re (0.5 K\u20a1)"}),"\n",(0,s.jsx)(n.p,{children:"Vigen\xe8re cipher is similar to the Caesar cipher, but you also have a key that is\nused for encrypting (or decrypting)."}),"\n",(0,s.jsx)(n.p,{children:"Your task is to write two functions:"}),"\n",(0,s.jsxs)(n.ul,{children:["\n",(0,s.jsxs)(n.li,{children:[(0,s.jsx)(n.code,{children:"char* vigenere_encrypt(const char* key, const char* text)"})," for encrypting"]}),"\n",(0,s.jsxs)(n.li,{children:[(0,s.jsx)(n.code,{children:"char* vigenere_decrypt(const char* key, const char* text)"})," for decrypting"]}),"\n"]}),"\n",(0,s.jsx)(n.p,{children:"In both of those you should return uppercase characters."}),"\n",(0,s.jsx)(n.p,{children:"Meaning of the parameters you are given:"}),"\n",(0,s.jsxs)(n.ul,{children:["\n",(0,s.jsxs)(n.li,{children:[(0,s.jsx)(n.code,{children:"key"})," - String that represents key that is used for *crypting. It consists of\none word and can have only characters of the alphabet. Does not matter if they\nare uppercase or lowercase."]}),"\n",(0,s.jsxs)(n.li,{children:[(0,s.jsx)(n.code,{children:"text"})," - String that is to be *crypted."]}),"\n"]}),"\n",(0,s.jsxs)(n.p,{children:["Function returns address of the encrypted (or decrypted) string. 
Or ",(0,s.jsx)(n.code,{children:"NULL"})," in case\nerror occurs."]}),"\n",(0,s.jsx)(n.p,{children:"Example:"}),"\n",(0,s.jsx)(n.pre,{children:(0,s.jsx)(n.code,{className:"language-c",children:'char *encrypted = vigenere_encrypt("CoMPuTeR", "Hello world!");\n\nprintf("%s\\n", encrypted);\n// "JSXAI PSINR!"\n\nif (encrypted != NULL) {\n free(encrypted)\n}\n'})}),"\n",(0,s.jsx)(n.h4,{id:"bonus-part-05-k",children:"Bonus part (0.5 K\u20a1)"}),"\n",(0,s.jsx)(n.p,{children:"If you can utilize helper function that would do both encrypting and decrypting,\nyou can gain 0.5 K\u20a1."}),"\n",(0,s.jsxs)(n.p,{children:["Usage of ",(0,s.jsx)(n.code,{children:"true"}),"/",(0,s.jsx)(n.code,{children:"false"})," to decide path in code is prohibited. It leads to merging\nof both functions into one. Point of this part is to discover a way to do this\ngenerically in such way that there are no separate paths for one or the other. One\nfunction with no branching for both of them, parametrization is your friend :)"]}),"\n",(0,s.jsx)(n.h3,{id:"task-no-3-bit-madness-05-k",children:"Task no. 3: Bit madness (0.5 K\u20a1)"}),"\n",(0,s.jsx)(n.p,{children:"This is a state of the art crypto. Please do not share :)"}),"\n",(0,s.jsx)(n.p,{children:"For encrypting:"}),"\n",(0,s.jsxs)(n.ol,{children:["\n",(0,s.jsx)(n.li,{children:"Split the character that is to be encrypted in halves (4 and 4 bits each)."}),"\n",(0,s.jsx)(n.li,{children:"Bits in 1st half are to be split into pairs. Swap bits in those pairs."}),"\n",(0,s.jsxs)(n.li,{children:["Then use the 4 bits that you created in the 2nd step for ",(0,s.jsx)(n.code,{children:"XOR"})," with the other\n4 bits."]}),"\n"]}),"\n",(0,s.jsxs)(n.p,{children:["This simple and ingenious principle will be illustrated on the following example.\nString we want to encrypt is ",(0,s.jsx)(n.code,{children:"Hello world!"}),". We need to encrypt each letter separately,\nso we will demonstrate on letter ",(0,s.jsx)(n.code,{children:"H"}),":"]}),"\n",(0,s.jsxs)(n.ol,{children:["\n",(0,s.jsxs)(n.li,{children:["\n",(0,s.jsxs)(n.p,{children:["Letter ",(0,s.jsx)(n.code,{children:"H"})," is represented in ASCII as ",(0,s.jsx)(n.code,{children:"72"}),"."]}),"\n",(0,s.jsxs)(n.p,{children:[(0,s.jsx)(n.code,{children:"72"})," represented in binary is: ",(0,s.jsx)(n.code,{children:"01001000"}),". So first 4 bits are: ",(0,s.jsx)(n.code,{children:"0100"})," and last\n4 bits are ",(0,s.jsx)(n.code,{children:"1000"}),"."]}),"\n"]}),"\n",(0,s.jsxs)(n.li,{children:["\n",(0,s.jsxs)(n.p,{children:["First half of bits (",(0,s.jsx)(n.code,{children:"0100"}),") consists of 2 pairs (",(0,s.jsx)(n.code,{children:"01"})," and ",(0,s.jsx)(n.code,{children:"00"}),") which we swap\n(",(0,s.jsx)(n.code,{children:"01 ~> 10"})," and ",(0,s.jsx)(n.code,{children:"00 ~> 00"}),"). 
That way we get ",(0,s.jsx)(n.code,{children:"1000"}),"."]}),"\n"]}),"\n",(0,s.jsxs)(n.li,{children:["\n",(0,s.jsx)(n.p,{children:"That half is used for xor with the other 4 bits:"}),"\n",(0,s.jsx)(n.pre,{children:(0,s.jsx)(n.code,{children:" 1000 // second half\nXOR 1000 // first half after 2nd step\n--------\n 0000\n"})}),"\n"]}),"\n",(0,s.jsxs)(n.li,{children:["\n",(0,s.jsxs)(n.p,{children:["Now we combine both halves (first one is ",(0,s.jsx)(n.code,{children:"1000"}),", which we got from the 2nd step\nand second one is ",(0,s.jsx)(n.code,{children:"0000"}),", which we got from the 3rd step) and get ",(0,s.jsx)(n.code,{children:"10000000"}),",\nwhich is encrypted character ",(0,s.jsx)(n.code,{children:"H"})," using this method."]}),"\n"]}),"\n"]}),"\n",(0,s.jsx)(n.p,{children:"In case of decryption, reverse those steps."}),"\n",(0,s.jsx)(n.p,{children:"Your task is to implement functions:"}),"\n",(0,s.jsxs)(n.ul,{children:["\n",(0,s.jsx)(n.li,{children:(0,s.jsx)(n.code,{children:"unsigned char* bit_encrypt(const char* text)"})}),"\n",(0,s.jsx)(n.li,{children:(0,s.jsx)(n.code,{children:"char* bit_decrypt(const unsigned char* text)"})}),"\n"]}),"\n",(0,s.jsx)(n.p,{children:"Example:"}),"\n",(0,s.jsx)(n.pre,{children:(0,s.jsx)(n.code,{className:"language-c",children:'unsigned char* encrypted = bit_encrypt("Hello world!");\n\nfor (int i = 0; i < 12;i++) {\n printf("%x ", encrypted[i]);\n //80 9c 95 95 96 11 bc 96 b9 95 9d 10\n}\n\nif (encrypted != NULL) {\n free(encrypted);\n}\n'})}),"\n",(0,s.jsx)(n.h3,{id:"task-no-4-all-combined-to-bmp-05-k",children:"Task no. 4: All combined to BMP (0.5 K\u20a1)"}),"\n",(0,s.jsx)(n.p,{children:"Authors of the BMP cipher are non-disclosed :)"}),"\n",(0,s.jsx)(n.p,{children:"Create pair of functions:"}),"\n",(0,s.jsxs)(n.ul,{children:["\n",(0,s.jsx)(n.li,{children:(0,s.jsx)(n.code,{children:"unsigned char* bmp_encrypt(const char* key, const char* text)"})}),"\n",(0,s.jsx)(n.li,{children:(0,s.jsx)(n.code,{children:"char* bmp_decrypt(const char* key, const unsigned char* text)"})}),"\n"]}),"\n",(0,s.jsx)(n.p,{children:"BMP cipher consists of following steps for encrypting:"}),"\n",(0,s.jsxs)(n.ol,{children:["\n",(0,s.jsx)(n.li,{children:"Reverse the input string"}),"\n",(0,s.jsx)(n.li,{children:"Use Vigenere on the string you got from step #1"}),"\n",(0,s.jsx)(n.li,{children:"Use bit madness on the string you got from step #2"}),"\n"]}),"\n",(0,s.jsx)(n.p,{children:"For decrypting, reverse the steps."}),"\n",(0,s.jsx)(n.h2,{id:"submitting",children:"Submitting"}),"\n",(0,s.jsx)(n.p,{children:"In case you have any questions, feel free to reach out to me."}),"\n",(0,s.jsx)(n.hr,{})]})}function a(e={}){const{wrapper:n}={...(0,t.a)(),...e.components};return n?(0,s.jsx)(n,{...e,children:(0,s.jsx)(h,{...e})}):h(e)}},1151:(e,n,r)=>{r.d(n,{Z:()=>o,a:()=>c});var s=r(7294);const t={},i=s.createContext(t);function c(e){const n=s.useContext(i);return s.useMemo((function(){return"function"==typeof e?e(n):{...n,...e}}),[n,e])}function o(e){let n;return n=e.disableParentContext?"function"==typeof e.components?e.components(t):e.components||t:c(e.components),s.createElement(i.Provider,{value:n},e.children)}}}]); \ No newline at end of file +"use strict";(self.webpackChunkfi=self.webpackChunkfi||[]).push([[6544],{3004:(e,n,r)=>{r.r(n),r.d(n,{assets:()=>d,contentTitle:()=>c,default:()=>a,frontMatter:()=>i,metadata:()=>o,toc:()=>l});var s=r(5893),t=r(1151);const i={id:"seminar-05-06",title:"5th and 6th seminar",description:"200IQ encryption.\n"},c=void 
0,o={id:"bonuses/seminar-05-06",title:"5th and 6th seminar",description:"200IQ encryption.\n",source:"@site/c/bonuses/05-06.md",sourceDirName:"bonuses",slug:"/bonuses/seminar-05-06",permalink:"/c/bonuses/seminar-05-06",draft:!1,unlisted:!1,editUrl:"https://github.com/mfocko/blog/tree/main/c/bonuses/05-06.md",tags:[],version:"current",lastUpdatedAt:1700944879,formattedLastUpdatedAt:"Nov 25, 2023",frontMatter:{id:"seminar-05-06",title:"5th and 6th seminar",description:"200IQ encryption.\n"},sidebar:"autogeneratedBar",previous:{title:"4th seminar",permalink:"/c/bonuses/seminar-04"},next:{title:"8th seminar",permalink:"/c/bonuses/seminar-08"}},d={},l=[{value:"Introduction",id:"introduction",level:2},{value:"Task no. 1: Reverse (0.5 K\u20a1)",id:"task-no-1-reverse-05-k",level:3},{value:"Task no. 2: Vigen\xe8re (0.5 K\u20a1)",id:"task-no-2-vigen\xe8re-05-k",level:3},{value:"Bonus part (0.5 K\u20a1)",id:"bonus-part-05-k",level:4},{value:"Task no. 3: Bit madness (0.5 K\u20a1)",id:"task-no-3-bit-madness-05-k",level:3},{value:"Task no. 4: All combined to BMP (0.5 K\u20a1)",id:"task-no-4-all-combined-to-bmp-05-k",level:3},{value:"Submitting",id:"submitting",level:2}];function h(e){const n={a:"a",code:"code",h2:"h2",h3:"h3",h4:"h4",hr:"hr",li:"li",ol:"ol",p:"p",pre:"pre",ul:"ul",...(0,t.a)(),...e.components};return(0,s.jsxs)(s.Fragment,{children:[(0,s.jsx)(n.p,{children:"For this bonus you can get at maximum 2.5 K\u20a1."}),"\n",(0,s.jsx)(n.p,{children:(0,s.jsx)(n.a,{href:"pathname:///files/c/bonuses/05-06.tar.gz",children:"Source"})}),"\n",(0,s.jsx)(n.h2,{id:"introduction",children:"Introduction"}),"\n",(0,s.jsx)(n.p,{children:"In this bonus you will implement few functions that will be used together for\nimplementing a very special cipher."}),"\n",(0,s.jsx)(n.h3,{id:"task-no-1-reverse-05-k",children:"Task no. 1: Reverse (0.5 K\u20a1)"}),"\n",(0,s.jsxs)(n.p,{children:["Write a function ",(0,s.jsx)(n.code,{children:"char* reverse(const char* text)"})," that returns copy of the input\nstring in reversed order (also uppercase)."]}),"\n",(0,s.jsxs)(n.p,{children:["In case you are given ",(0,s.jsx)(n.code,{children:"NULL"}),", return ",(0,s.jsx)(n.code,{children:"NULL"}),"."]}),"\n",(0,s.jsx)(n.p,{children:"Example (more in tests):"}),"\n",(0,s.jsx)(n.pre,{children:(0,s.jsx)(n.code,{className:"language-c",children:'char* reversed = reverse("Hello world!");\n\nprintf("%s\\n", reversed);\n// "!DLROW OLLEH"\n\nif (reversed != NULL) {\n free(reversed);\n}\n'})}),"\n",(0,s.jsx)(n.h3,{id:"task-no-2-vigen\xe8re-05-k",children:"Task no. 2: Vigen\xe8re (0.5 K\u20a1)"}),"\n",(0,s.jsx)(n.p,{children:"Vigen\xe8re cipher is similar to the Caesar cipher, but you also have a key that is\nused for encrypting (or decrypting)."}),"\n",(0,s.jsx)(n.p,{children:"Your task is to write two functions:"}),"\n",(0,s.jsxs)(n.ul,{children:["\n",(0,s.jsxs)(n.li,{children:[(0,s.jsx)(n.code,{children:"char* vigenere_encrypt(const char* key, const char* text)"})," for encrypting"]}),"\n",(0,s.jsxs)(n.li,{children:[(0,s.jsx)(n.code,{children:"char* vigenere_decrypt(const char* key, const char* text)"})," for decrypting"]}),"\n"]}),"\n",(0,s.jsx)(n.p,{children:"In both of those you should return uppercase characters."}),"\n",(0,s.jsx)(n.p,{children:"Meaning of the parameters you are given:"}),"\n",(0,s.jsxs)(n.ul,{children:["\n",(0,s.jsxs)(n.li,{children:[(0,s.jsx)(n.code,{children:"key"})," - String that represents key that is used for *crypting. It consists of\none word and can have only characters of the alphabet. 
Does not matter if they\nare uppercase or lowercase."]}),"\n",(0,s.jsxs)(n.li,{children:[(0,s.jsx)(n.code,{children:"text"})," - String that is to be *crypted."]}),"\n"]}),"\n",(0,s.jsxs)(n.p,{children:["Function returns address of the encrypted (or decrypted) string. Or ",(0,s.jsx)(n.code,{children:"NULL"})," in case\nerror occurs."]}),"\n",(0,s.jsx)(n.p,{children:"Example:"}),"\n",(0,s.jsx)(n.pre,{children:(0,s.jsx)(n.code,{className:"language-c",children:'char *encrypted = vigenere_encrypt("CoMPuTeR", "Hello world!");\n\nprintf("%s\\n", encrypted);\n// "JSXAI PSINR!"\n\nif (encrypted != NULL) {\n free(encrypted)\n}\n'})}),"\n",(0,s.jsx)(n.h4,{id:"bonus-part-05-k",children:"Bonus part (0.5 K\u20a1)"}),"\n",(0,s.jsx)(n.p,{children:"If you can utilize helper function that would do both encrypting and decrypting,\nyou can gain 0.5 K\u20a1."}),"\n",(0,s.jsxs)(n.p,{children:["Usage of ",(0,s.jsx)(n.code,{children:"true"}),"/",(0,s.jsx)(n.code,{children:"false"})," to decide path in code is prohibited. It leads to merging\nof both functions into one. Point of this part is to discover a way to do this\ngenerically in such way that there are no separate paths for one or the other. One\nfunction with no branching for both of them, parametrization is your friend :)"]}),"\n",(0,s.jsx)(n.h3,{id:"task-no-3-bit-madness-05-k",children:"Task no. 3: Bit madness (0.5 K\u20a1)"}),"\n",(0,s.jsx)(n.p,{children:"This is a state of the art crypto. Please do not share :)"}),"\n",(0,s.jsx)(n.p,{children:"For encrypting:"}),"\n",(0,s.jsxs)(n.ol,{children:["\n",(0,s.jsx)(n.li,{children:"Split the character that is to be encrypted in halves (4 and 4 bits each)."}),"\n",(0,s.jsx)(n.li,{children:"Bits in 1st half are to be split into pairs. Swap bits in those pairs."}),"\n",(0,s.jsxs)(n.li,{children:["Then use the 4 bits that you created in the 2nd step for ",(0,s.jsx)(n.code,{children:"XOR"})," with the other\n4 bits."]}),"\n"]}),"\n",(0,s.jsxs)(n.p,{children:["This simple and ingenious principle will be illustrated on the following example.\nString we want to encrypt is ",(0,s.jsx)(n.code,{children:"Hello world!"}),". We need to encrypt each letter separately,\nso we will demonstrate on letter ",(0,s.jsx)(n.code,{children:"H"}),":"]}),"\n",(0,s.jsxs)(n.ol,{children:["\n",(0,s.jsxs)(n.li,{children:["\n",(0,s.jsxs)(n.p,{children:["Letter ",(0,s.jsx)(n.code,{children:"H"})," is represented in ASCII as ",(0,s.jsx)(n.code,{children:"72"}),"."]}),"\n",(0,s.jsxs)(n.p,{children:[(0,s.jsx)(n.code,{children:"72"})," represented in binary is: ",(0,s.jsx)(n.code,{children:"01001000"}),". So first 4 bits are: ",(0,s.jsx)(n.code,{children:"0100"})," and last\n4 bits are ",(0,s.jsx)(n.code,{children:"1000"}),"."]}),"\n"]}),"\n",(0,s.jsxs)(n.li,{children:["\n",(0,s.jsxs)(n.p,{children:["First half of bits (",(0,s.jsx)(n.code,{children:"0100"}),") consists of 2 pairs (",(0,s.jsx)(n.code,{children:"01"})," and ",(0,s.jsx)(n.code,{children:"00"}),") which we swap\n(",(0,s.jsx)(n.code,{children:"01 ~> 10"})," and ",(0,s.jsx)(n.code,{children:"00 ~> 00"}),"). 
That way we get ",(0,s.jsx)(n.code,{children:"1000"}),"."]}),"\n"]}),"\n",(0,s.jsxs)(n.li,{children:["\n",(0,s.jsx)(n.p,{children:"That half is used for xor with the other 4 bits:"}),"\n",(0,s.jsx)(n.pre,{children:(0,s.jsx)(n.code,{children:" 1000 // second half\nXOR 1000 // first half after 2nd step\n--------\n 0000\n"})}),"\n"]}),"\n",(0,s.jsxs)(n.li,{children:["\n",(0,s.jsxs)(n.p,{children:["Now we combine both halves (first one is ",(0,s.jsx)(n.code,{children:"1000"}),", which we got from the 2nd step\nand second one is ",(0,s.jsx)(n.code,{children:"0000"}),", which we got from the 3rd step) and get ",(0,s.jsx)(n.code,{children:"10000000"}),",\nwhich is encrypted character ",(0,s.jsx)(n.code,{children:"H"})," using this method."]}),"\n"]}),"\n"]}),"\n",(0,s.jsx)(n.p,{children:"In case of decryption, reverse those steps."}),"\n",(0,s.jsx)(n.p,{children:"Your task is to implement functions:"}),"\n",(0,s.jsxs)(n.ul,{children:["\n",(0,s.jsx)(n.li,{children:(0,s.jsx)(n.code,{children:"unsigned char* bit_encrypt(const char* text)"})}),"\n",(0,s.jsx)(n.li,{children:(0,s.jsx)(n.code,{children:"char* bit_decrypt(const unsigned char* text)"})}),"\n"]}),"\n",(0,s.jsx)(n.p,{children:"Example:"}),"\n",(0,s.jsx)(n.pre,{children:(0,s.jsx)(n.code,{className:"language-c",children:'unsigned char* encrypted = bit_encrypt("Hello world!");\n\nfor (int i = 0; i < 12;i++) {\n printf("%x ", encrypted[i]);\n //80 9c 95 95 96 11 bc 96 b9 95 9d 10\n}\n\nif (encrypted != NULL) {\n free(encrypted);\n}\n'})}),"\n",(0,s.jsx)(n.h3,{id:"task-no-4-all-combined-to-bmp-05-k",children:"Task no. 4: All combined to BMP (0.5 K\u20a1)"}),"\n",(0,s.jsx)(n.p,{children:"Authors of the BMP cipher are non-disclosed :)"}),"\n",(0,s.jsx)(n.p,{children:"Create pair of functions:"}),"\n",(0,s.jsxs)(n.ul,{children:["\n",(0,s.jsx)(n.li,{children:(0,s.jsx)(n.code,{children:"unsigned char* bmp_encrypt(const char* key, const char* text)"})}),"\n",(0,s.jsx)(n.li,{children:(0,s.jsx)(n.code,{children:"char* bmp_decrypt(const char* key, const unsigned char* text)"})}),"\n"]}),"\n",(0,s.jsx)(n.p,{children:"BMP cipher consists of following steps for encrypting:"}),"\n",(0,s.jsxs)(n.ol,{children:["\n",(0,s.jsx)(n.li,{children:"Reverse the input string"}),"\n",(0,s.jsx)(n.li,{children:"Use Vigenere on the string you got from step #1"}),"\n",(0,s.jsx)(n.li,{children:"Use bit madness on the string you got from step #2"}),"\n"]}),"\n",(0,s.jsx)(n.p,{children:"For decrypting, reverse the steps."}),"\n",(0,s.jsx)(n.h2,{id:"submitting",children:"Submitting"}),"\n",(0,s.jsx)(n.p,{children:"In case you have any questions, feel free to reach out to me."}),"\n",(0,s.jsx)(n.hr,{})]})}function a(e={}){const{wrapper:n}={...(0,t.a)(),...e.components};return n?(0,s.jsx)(n,{...e,children:(0,s.jsx)(h,{...e})}):h(e)}},1151:(e,n,r)=>{r.d(n,{Z:()=>o,a:()=>c});var s=r(7294);const t={},i=s.createContext(t);function c(e){const n=s.useContext(i);return s.useMemo((function(){return"function"==typeof e?e(n):{...n,...e}}),[n,e])}function o(e){let n;return n=e.disableParentContext?"function"==typeof e.components?e.components(t):e.components||t:c(e.components),s.createElement(i.Provider,{value:n},e.children)}}}]); \ No newline at end of file diff --git a/assets/js/main.1ff5de87.js b/assets/js/main.b71f9097.js similarity index 74% rename from assets/js/main.1ff5de87.js rename to assets/js/main.b71f9097.js index 8a1546f..dedd8cc 100644 --- a/assets/js/main.1ff5de87.js +++ b/assets/js/main.b71f9097.js @@ -1,2 +1,2 @@ -/*! 
For license information please see main.1ff5de87.js.LICENSE.txt */ -(self.webpackChunkfi=self.webpackChunkfi||[]).push([[179],{830:(e,t,n)=>{"use strict";n.d(t,{W:()=>a});var r=n(7294);function a(){return r.createElement("svg",{width:"20",height:"20",className:"DocSearch-Search-Icon",viewBox:"0 0 20 20"},r.createElement("path",{d:"M14.386 14.386l4.0877 4.0877-4.0877-4.0877c-2.9418 2.9419-7.7115 2.9419-10.6533 0-2.9419-2.9418-2.9419-7.7115 0-10.6533 2.9418-2.9419 7.7115-2.9419 10.6533 0 2.9419 2.9418 2.9419 7.7115 0 10.6533z",stroke:"currentColor",fill:"none",fillRule:"evenodd",strokeLinecap:"round",strokeLinejoin:"round"}))}},723:(e,t,n)=>{"use strict";n.d(t,{Z:()=>p});n(7294);var r=n(8356),a=n.n(r),o=n(6887);const i={"0123bc76":[()=>n.e(3734).then(n.t.bind(n,6554,19)),"~docs/algorithms/tag-algorithms-tags-c-e22.json",6554],"0178f9ad":[()=>n.e(9898).then(n.bind(n,5610)),"@site/algorithms/08-rb-trees/2022-04-05-applications.md",5610],"01a85c17":[()=>Promise.all([n.e(532),n.e(4013)]).then(n.bind(n,4524)),"@theme/BlogTagsListPage",4524],"0220f5fc":[()=>n.e(1378).then(n.t.bind(n,5804,19)),"/home/runner/work/blog/blog/.docusaurus/docusaurus-plugin-content-blog/blog/plugin-route-context-module-100.json",5804],"0608d96f":[()=>n.e(7568).then(n.t.bind(n,7158,19)),"~blog/blog/blog-tags-vps-843-list.json",7158],"06c4a8fc":[()=>n.e(2125).then(n.t.bind(n,4697,19)),"~docs/algorithms/tag-algorithms-tags-testing-0c4.json",4697],"0bfe45d5":[()=>n.e(4269).then(n.t.bind(n,3847,19)),"~blog/blog/blog-tags-rust-0c9-list.json",3847],"0fcbc6ca":[()=>Promise.all([n.e(532),n.e(1851)]).then(n.bind(n,9900)),"@site/src/pages/talks.tsx",9900],"146d9b84":[()=>n.e(9300).then(n.t.bind(n,6671,19)),"~blog/blog/blog-tags-admin-b05-list.json",6671],"14eb3368":[()=>Promise.all([n.e(532),n.e(9817)]).then(n.bind(n,4228)),"@theme/DocCategoryGeneratedIndexPage",4228],"1535ede8":[()=>n.e(5376).then(n.bind(n,4969)),"@site/c/bonuses/10.md",4969],"16cbc838":[()=>n.e(1494).then(n.t.bind(n,8252,19)),"~docs/algorithms/tag-algorithms-tags-iterative-d5b.json",8252],17896441:[()=>Promise.all([n.e(532),n.e(1325),n.e(7918)]).then(n.bind(n,5154)),"@theme/DocItem",5154],"19d7c045":[()=>n.e(4637).then(n.t.bind(n,7772,19)),"~blog/blog/blog-tags-advent-of-code-49f.json",7772],"1a4e3797":[()=>Promise.all([n.e(532),n.e(7920)]).then(n.bind(n,9172)),"@theme/SearchPage",9172],"1a606400":[()=>n.e(494).then(n.t.bind(n,2400,19)),"/home/runner/work/blog/blog/.docusaurus/docusaurus-plugin-content-docs/algorithms/plugin-route-context-module-100.json",2400],"1acf65cc":[()=>n.e(8529).then(n.bind(n,4568)),"@site/c/pexam/b-garbage_collect.md",4568],"22a175ec":[()=>Promise.all([n.e(532),n.e(6890)]).then(n.bind(n,707)),"@site/src/pages/contributions.tsx",707],"24fecc0a":[()=>n.e(3707).then(n.bind(n,9383)),"@site/algorithms/03-time-complexity/2021-03-31-extend.md",9383],"28d80ff8":[()=>n.e(6435).then(n.t.bind(n,7465,19)),"~docs/algorithms/tag-algorithms-tags-sorting-d73.json",7465],29694455:[()=>n.e(3388).then(n.t.bind(n,9828,19)),"~blog/blog/blog-tags-iterators-977-list.json",9828],"2b89902a":[()=>n.e(6342).then(n.t.bind(n,5443,19)),"~docs/algorithms/tag-algorithms-tags-recursion-1bd.json",5443],"3011a4c0":[()=>n.e(7926).then(n.t.bind(n,1670,19)),"~blog/blog/blog-tags-copr-70b-list.json",1670],"34ab65f4":[()=>n.e(3220).then(n.t.bind(n,8865,19)),"~docs/algorithms/tag-algorithms-tags-postconditions-1f3.json",8865],"354a7b72":[()=>n.e(9414).then(n.bind(n,6617)),"@site/algorithms/10-graphs/2022-04-30-bfs-tree.md",6617],"3720c009":[()=>Promise.all([n.e(532),n.e(3751)]).
then(n.bind(n,727)),"@theme/DocTagsListPage",727],"377f3aa1":[()=>n.e(1011).then(n.bind(n,7582)),"@site/blog/aoc-2022/02-week-2.md",7582],"3da4b779":[()=>n.e(2177).then(n.bind(n,8737)),"@site/blog/aoc-2022/04-week-4.md",8737],"4200b1a9":[()=>n.e(866).then(n.t.bind(n,4612,19)),"~blog/blog/blog-archive-80c.json",4612],"45c9e308":[()=>n.e(7084).then(n.bind(n,3181)),"@site/cpp/07-exceptions-and-raii/2023-11-24-placeholders.md",3181],"4621632b":[()=>n.e(3519).then(n.t.bind(n,9760,19)),"~blog/blog/blog-tags-cpp-7c7-list.json",9760],"48b268a6":[()=>n.e(1648).then(n.t.bind(n,5067,19)),"~docs/c/category-c-autogeneratedbar-category-bonuses-216.json",5067],"4e546705":[()=>n.e(4327).then(n.t.bind(n,1795,19)),"~docs/c/version-current-metadata-prop-751.json",1795],"4edd2021":[()=>n.e(5975).then(n.t.bind(n,1705,19)),"~blog/blog/blog-tags-cpp-7c7.json",1705],"4f96b16e":[()=>n.e(6306).then(n.bind(n,4693)),"@site/c/pexam/c-cams.md",4693],51624505:[()=>n.e(3731).then(n.bind(n,2609)),"@site/blog/aoc-2022/00-intro.md",2609],"52f2a5bf":[()=>n.e(5430).then(n.t.bind(n,1387,19)),"~blog/blog/blog-tags-red-hat-df4.json",1387],"534d4833":[()=>n.e(9771).then(n.bind(n,3019)),"@site/algorithms/02-algorithms-correctness/2021-03-18-postcondition-ambiguity.md",3019],"595c7293":[()=>n.e(5634).then(n.bind(n,8396)),"@site/c/bonuses/08.md",8396],"5ca803d2":[()=>n.e(9173).then(n.t.bind(n,4890,19)),"/home/runner/work/blog/blog/.docusaurus/docusaurus-plugin-content-docs/c/plugin-route-context-module-100.json",4890],"5e95c892":[()=>n.e(9661).then(n.bind(n,1892)),"@theme/DocsRoot",1892],"5e9f5e1a":[()=>Promise.resolve().then(n.bind(n,6809)),"@generated/docusaurus.config",6809],"5fe5d476":[()=>n.e(2619).then(n.bind(n,4457)),"@site/algorithms/04-recursion/2023-08-17-pyramid-slide-down.md",4457],"62d847b3":[()=>n.e(8520).then(n.t.bind(n,1901,19)),"~blog/blog/blog-tags-advent-of-code-2022-3db-list.json",1901],"66d5ef6c":[()=>n.e(9228).then(n.t.bind(n,4087,19)),"~blog/blog/blog-tags-tags-4c2.json",4087],"686a7a89":[()=>n.e(728).then(n.t.bind(n,7507,19)),"~docs/algorithms/tag-algorithms-tags-graphs-31d.json",7507],"6875c492":[()=>Promise.all([n.e(532),n.e(1325),n.e(130),n.e(8610)]).then(n.bind(n,1714)),"@theme/BlogTagsPostsPage",1714],"6bc697d0":[()=>n.e(5287).then(n.t.bind(n,8529,19)),"/home/runner/work/blog/blog/.docusaurus/docusaurus-plugin-content-docs/cpp/plugin-route-context-module-100.json",8529],"6e3cbca1":[()=>n.e(3276).then(n.t.bind(n,9538,19)),"~docs/algorithms/version-current-metadata-prop-751.json",9538],"7052c0bc":[()=>n.e(9731).then(n.bind(n,2286)),"@site/cpp/00-intro.md",2286],"75cccf44":[()=>n.e(4256).then(n.bind(n,8215)),"@site/blog/leetcode/sort-matrix-diagonally.md?truncated=true",8215],"765ea78b":[()=>n.e(3039).then(n.t.bind(n,3010,19)),"~blog/blog/blog-tags-\ud83c\udfed-551.json",3010],"794ef108":[()=>n.e(3803).then(n.bind(n,6427)),"@site/c/00-intro.md",6427],"7e6d325b":[()=>n.e(3184).then(n.t.bind(n,6139,19)),"~docs/cpp/version-current-metadata-prop-751.json",6139],"84d1e0d8":[()=>n.e(1885).then(n.bind(n,9713)),"@site/algorithms/00-intro.md",9713],"86cd1460":[()=>n.e(1235).then(n.t.bind(n,8968,19)),"~blog/blog/blog-tags-leetcode-042.json",8968],"8b1802c5":[()=>n.e(8480).then(n.t.bind(n,832,19)),"~blog/blog/blog-tags-advent-of-code-49f-list.json",832],"8c0e532b":[()=>n.e(822).then(n.t.bind(n,3968,19)),"~blog/blog/blog-tags-vps-843.json",3968],"8d31a880":[()=>n.e(9066).then(n.t.bind(n,2232,19)),"~docs/algorithms/tag-algorithms-tags-python-48f.json",2232],"8e6bb954":[()=>n.e(5775).then(n.t.bind(n,6206,19)),"~docs/algor
ithms/tag-algorithms-tags-exponential-60a.json",6206],"9287eafd":[()=>n.e(5521).then(n.t.bind(n,716,19)),"~blog/blog/blog-tags-rust-0c9.json",716],"933b95b3":[()=>n.e(3887).then(n.t.bind(n,7405,19)),"~docs/algorithms/category-algorithms-autogeneratedbar-category-recursion-257.json",7405],"947341b7":[()=>n.e(1145).then(n.t.bind(n,2897,19)),"~docs/algorithms/tag-algorithms-tags-bfs-69f.json",2897],"95b96bb9":[()=>n.e(3561).then(n.t.bind(n,4577,19)),"~blog/blog/blog-post-list-prop-blog.json",4577],"95f41f0b":[()=>n.e(9385).then(n.bind(n,3195)),"@site/blog/aoc-2022/01-week-1.md?truncated=true",3195],"962da50c":[()=>n.e(2264).then(n.t.bind(n,9705,19)),"~docs/c/category-c-autogeneratedbar-category-practice-exams-e97.json",9705],"976c4f3b":[()=>n.e(4562).then(n.t.bind(n,9019,19)),"~docs/algorithms/tag-algorithms-tags-java-6c3.json",9019],"97a42631":[()=>n.e(1464).then(n.t.bind(n,7343,19)),"~docs/algorithms/tags-list-current-prop-15a.json",7343],"9a3dc578":[()=>n.e(655).then(n.t.bind(n,9916,19)),"~docs/algorithms/tag-algorithms-tags-dynamic-array-5d3.json",9916],"9df0e937":[()=>n.e(2210).then(n.t.bind(n,5256,19)),"~docs/algorithms/category-algorithms-autogeneratedbar-category-graphs-2e2.json",5256],"9e4087bc":[()=>n.e(3608).then(n.bind(n,3169)),"@theme/BlogArchivePage",3169],a082abd3:[()=>n.e(8786).then(n.t.bind(n,3276,19)),"~blog/blog/blog-tags-admin-b05.json",3276],a4c10cf4:[()=>n.e(4382).then(n.t.bind(n,685,19)),"~docs/algorithms/tag-algorithms-tags-time-complexity-c50.json",685],a6a48ea2:[()=>n.e(3618).then(n.bind(n,1176)),"@site/blog/aoc-2022/02-week-2.md?truncated=true",1176],a6aa9e1f:[()=>Promise.all([n.e(532),n.e(1325),n.e(130),n.e(3089)]).then(n.bind(n,46)),"@theme/BlogListPage",46],a7098721:[()=>n.e(1050).then(n.t.bind(n,6615,19)),"~blog/blog/blog-c06.json",6615],a7bd4aaa:[()=>n.e(8518).then(n.bind(n,8564)),"@theme/DocVersionRoot",8564],a80747a0:[()=>n.e(5824).then(n.t.bind(n,4464,19)),"~blog/blog/blog-tags-advent-of-code-2022-3db.json",4464],a94703ab:[()=>Promise.all([n.e(532),n.e(4368)]).then(n.bind(n,2674)),"@theme/DocRoot",2674],ab2721d4:[()=>n.e(7755).then(n.bind(n,3037)),"@site/blog/aoc-2022/04-week-4.md?truncated=true",3037],af8b72a7:[()=>n.e(5658).then(n.bind(n,507)),"@site/blog/2023-08-02-copr.md?truncated=true",507],b0291f37:[()=>n.e(6097).then(n.t.bind(n,7085,19)),"/home/runner/work/blog/blog/.docusaurus/docusaurus-theme-search-algolia/default/plugin-route-context-module-100.json",7085],b1288602:[()=>n.e(59).then(n.bind(n,1456)),"@site/c/mr.md",1456],b25fbc58:[()=>n.e(9197).then(n.t.bind(n,5617,19)),"~blog/blog/blog-tags-\ud83c\udfed-551-list.json",5617],b45dccf0:[()=>n.e(9679).then(n.t.bind(n,8296,19)),"~blog/blog/blog-tags-copr-70b.json",8296],b5a32f14:[()=>n.e(2433).then(n.bind(n,1976)),"@site/blog/2023-08-02-copr.md",1976],b8cbf382:[()=>n.e(7438).then(n.t.bind(n,4632,19)),"~docs/algorithms/tag-algorithms-tags-greedy-02f.json",4632],b9f7f5c4:[()=>n.e(9179).then(n.bind(n,6699)),"@site/cpp/environment.md",6699],bb882650:[()=>n.e(8091).then(n.bind(n,6765)),"@site/blog/aoc-2022/03-week-3.md?truncated=true",6765],bb984793:[()=>n.e(6864).then(n.t.bind(n,2505,19)),"~docs/algorithms/tag-algorithms-tags-karel-df7.json",2505],bc0c9d90:[()=>n.e(354).then(n.bind(n,476)),"@site/c/bonuses/04.md",476],bc2d22bc:[()=>n.e(6519).then(n.t.bind(n,428,19)),"~docs/algorithms/tag-algorithms-tags-bottom-up-dp-4f9.json",428],c4f5d8e4:[()=>Promise.all([n.e(532),n.e(4195)]).then(n.bind(n,3261)),"@site/src/pages/index.js",3261],c580b66a:[()=>n.e(6573).then(n.t.bind(n,5021,19)),"~docs/algorithms/tag-algo
rithms-tags-top-down-dp-c2f.json",5021],ccc49370:[()=>Promise.all([n.e(532),n.e(1325),n.e(130),n.e(6103)]).then(n.bind(n,5203)),"@theme/BlogPostPage",5203],cfa2b263:[()=>n.e(3086).then(n.bind(n,4437)),"@site/blog/leetcode/sort-matrix-diagonally.md",4437],d05e838c:[()=>n.e(6544).then(n.bind(n,3004)),"@site/c/bonuses/05-06.md",3004],d1aceb2e:[()=>n.e(1353).then(n.bind(n,1466)),"@site/algorithms/04-recursion/2022-11-29-karel-1.md",1466],d255bd7f:[()=>n.e(6292).then(n.t.bind(n,341,19)),"~docs/algorithms/tag-algorithms-tags-red-black-trees-c61.json",341],d309b5b1:[()=>n.e(8908).then(n.t.bind(n,6102,19)),"~docs/algorithms/category-algorithms-autogeneratedbar-category-algorithms-and-correctness-d51.json",6102],d4b1e057:[()=>n.e(1492).then(n.t.bind(n,2842,19)),"~docs/algorithms/tag-algorithms-tags-balanced-trees-b3e.json",2842],d57b4369:[()=>n.e(6179).then(n.t.bind(n,2715,19)),"~docs/algorithms/tag-algorithms-tags-csharp-d1d.json",2715],d675395f:[()=>n.e(2741).then(n.t.bind(n,5745,19)),"/home/runner/work/blog/blog/.docusaurus/docusaurus-plugin-content-pages/default/plugin-route-context-module-100.json",5745],d79dd549:[()=>n.e(5169).then(n.t.bind(n,9261,19)),"~blog/blog/blog-tags-red-hat-df4-list.json",9261],d7f7fb17:[()=>n.e(1171).then(n.bind(n,3455)),"@site/blog/aoc-2022/00-intro.md?truncated=true",3455],dd841e73:[()=>n.e(2482).then(n.t.bind(n,155,19)),"~docs/algorithms/tag-algorithms-tags-dynamic-programming-3e6.json",155],ddc7679f:[()=>n.e(569).then(n.bind(n,4322)),"@site/algorithms/10-graphs/2021-05-18-iterative-and-iterators.md",4322],dead8108:[()=>n.e(8807).then(n.bind(n,1431)),"@site/c/bonuses/03.md",1431],decbf9d1:[()=>n.e(2445).then(n.t.bind(n,8876,19)),"~docs/algorithms/category-algorithms-autogeneratedbar-category-asymptotic-notation-and-time-complexity-e0d.json",8876],df0885f0:[()=>n.e(4343).then(n.t.bind(n,4175,19)),"~docs/algorithms/tag-algorithms-tags-iterators-13a.json",4175],df203c0f:[()=>Promise.all([n.e(532),n.e(9924)]).then(n.bind(n,491)),"@theme/DocTagDocListPage",491],dff2ebad:[()=>n.e(146).then(n.bind(n,2492)),"@site/blog/aoc-2022/01-week-1.md",2492],e1d2ae23:[()=>n.e(1475).then(n.t.bind(n,6302,19)),"~docs/algorithms/tag-algorithms-tags-applications-020.json",6302],e31003e9:[()=>n.e(1960).then(n.t.bind(n,1695,19)),"~docs/cpp/category-cpp-autogeneratedbar-category-exceptions-and-raii-6e9.json",1695],e89da83e:[()=>n.e(8757).then(n.t.bind(n,7416,19)),"~blog/blog/blog-tags-leetcode-042-list.json",7416],eba2374c:[()=>n.e(8387).then(n.t.bind(n,7662,19)),"~docs/algorithms/tag-algorithms-tags-backtracking-bb2.json",7662],f48be158:[()=>n.e(4064).then(n.bind(n,2326)),"@site/blog/aoc-2022/03-week-3.md",2326],fb4361d3:[()=>n.e(6327).then(n.t.bind(n,9631,19)),"~docs/algorithms/category-algorithms-autogeneratedbar-category-red-black-trees-d8a.json",9631],ff472cd9:[()=>n.e(8643).then(n.t.bind(n,7122,19)),"~blog/blog/blog-tags-iterators-977.json",7122],ff82dde7:[()=>Promise.all([n.e(532),n.e(8472)]).then(n.bind(n,158)),"@site/algorithms/08-rb-trees/2023-06-10-rules.md",158]};var s=n(5893);function l(e){let{error:t,retry:n,pastDelay:r}=e;return t?(0,s.jsxs)("div",{style:{textAlign:"center",color:"#fff",backgroundColor:"#fa383e",borderColor:"#fa383e",borderStyle:"solid",borderRadius:"0.25rem",borderWidth:"1px",boxSizing:"border-box",display:"block",padding:"1rem",flex:"0 0 
50%",marginLeft:"25%",marginRight:"25%",marginTop:"5rem",maxWidth:"50%",width:"100%"},children:[(0,s.jsx)("p",{children:String(t)}),(0,s.jsx)("div",{children:(0,s.jsx)("button",{type:"button",onClick:n,children:"Retry"})})]}):r?(0,s.jsx)("div",{style:{display:"flex",justifyContent:"center",alignItems:"center",height:"100vh"},children:(0,s.jsx)("svg",{id:"loader",style:{width:128,height:110,position:"absolute",top:"calc(100vh - 64%)"},viewBox:"0 0 45 45",xmlns:"http://www.w3.org/2000/svg",stroke:"#61dafb",children:(0,s.jsxs)("g",{fill:"none",fillRule:"evenodd",transform:"translate(1 1)",strokeWidth:"2",children:[(0,s.jsxs)("circle",{cx:"22",cy:"22",r:"6",strokeOpacity:"0",children:[(0,s.jsx)("animate",{attributeName:"r",begin:"1.5s",dur:"3s",values:"6;22",calcMode:"linear",repeatCount:"indefinite"}),(0,s.jsx)("animate",{attributeName:"stroke-opacity",begin:"1.5s",dur:"3s",values:"1;0",calcMode:"linear",repeatCount:"indefinite"}),(0,s.jsx)("animate",{attributeName:"stroke-width",begin:"1.5s",dur:"3s",values:"2;0",calcMode:"linear",repeatCount:"indefinite"})]}),(0,s.jsxs)("circle",{cx:"22",cy:"22",r:"6",strokeOpacity:"0",children:[(0,s.jsx)("animate",{attributeName:"r",begin:"3s",dur:"3s",values:"6;22",calcMode:"linear",repeatCount:"indefinite"}),(0,s.jsx)("animate",{attributeName:"stroke-opacity",begin:"3s",dur:"3s",values:"1;0",calcMode:"linear",repeatCount:"indefinite"}),(0,s.jsx)("animate",{attributeName:"stroke-width",begin:"3s",dur:"3s",values:"2;0",calcMode:"linear",repeatCount:"indefinite"})]}),(0,s.jsx)("circle",{cx:"22",cy:"22",r:"8",children:(0,s.jsx)("animate",{attributeName:"r",begin:"0s",dur:"1.5s",values:"6;1;2;3;4;5;6",calcMode:"linear",repeatCount:"indefinite"})})]})})}):null}var c=n(9670),u=n(226);function d(e,t){if("*"===e)return a()({loading:l,loader:()=>n.e(1772).then(n.bind(n,1772)),modules:["@theme/NotFound"],webpack:()=>[1772],render(e,t){const n=e.default;return(0,s.jsx)(u.z,{value:{plugin:{name:"native",id:"default"}},children:(0,s.jsx)(n,{...t})})}});const r=o[`${e}-${t}`],d={},p=[],f=[],g=(0,c.Z)(r);return Object.entries(g).forEach((e=>{let[t,n]=e;const r=i[n];r&&(d[t]=r[0],p.push(r[1]),f.push(r[2]))})),a().Map({loading:l,loader:d,modules:p,webpack:()=>f,render(t,n){const a=JSON.parse(JSON.stringify(r));Object.entries(t).forEach((t=>{let[n,r]=t;const o=r.default;if(!o)throw new Error(`The page component at ${e} doesn't have a default export. This makes it impossible to render anything. 
Consider default-exporting a React component.`);"object"!=typeof o&&"function"!=typeof o||Object.keys(r).filter((e=>"default"!==e)).forEach((e=>{o[e]=r[e]}));let i=a;const s=n.split(".");s.slice(0,-1).forEach((e=>{i=i[e]})),i[s[s.length-1]]=o}));const o=a.__comp;delete a.__comp;const i=a.__context;return delete a.__context,(0,s.jsx)(u.z,{value:i,children:(0,s.jsx)(o,{...a,...n})})}})}const p=[{path:"/blog/",component:d("/blog/","608"),exact:!0},{path:"/blog/2023/08/02/copr/",component:d("/blog/2023/08/02/copr/","69d"),exact:!0},{path:"/blog/aoc-2022/1st-week/",component:d("/blog/aoc-2022/1st-week/","df4"),exact:!0},{path:"/blog/aoc-2022/2nd-week/",component:d("/blog/aoc-2022/2nd-week/","783"),exact:!0},{path:"/blog/aoc-2022/3rd-week/",component:d("/blog/aoc-2022/3rd-week/","7c5"),exact:!0},{path:"/blog/aoc-2022/4th-week/",component:d("/blog/aoc-2022/4th-week/","1ac"),exact:!0},{path:"/blog/aoc-2022/intro/",component:d("/blog/aoc-2022/intro/","ada"),exact:!0},{path:"/blog/archive/",component:d("/blog/archive/","22d"),exact:!0},{path:"/blog/leetcode/sort-diagonally/",component:d("/blog/leetcode/sort-diagonally/","d97"),exact:!0},{path:"/blog/tags/",component:d("/blog/tags/","f23"),exact:!0},{path:"/blog/tags/\ud83c\udfed/",component:d("/blog/tags/\ud83c\udfed/","ffd"),exact:!0},{path:"/blog/tags/admin/",component:d("/blog/tags/admin/","d3a"),exact:!0},{path:"/blog/tags/advent-of-code-2022/",component:d("/blog/tags/advent-of-code-2022/","7bd"),exact:!0},{path:"/blog/tags/advent-of-code/",component:d("/blog/tags/advent-of-code/","313"),exact:!0},{path:"/blog/tags/copr/",component:d("/blog/tags/copr/","959"),exact:!0},{path:"/blog/tags/cpp/",component:d("/blog/tags/cpp/","770"),exact:!0},{path:"/blog/tags/iterators/",component:d("/blog/tags/iterators/","2eb"),exact:!0},{path:"/blog/tags/leetcode/",component:d("/blog/tags/leetcode/","e31"),exact:!0},{path:"/blog/tags/red-hat/",component:d("/blog/tags/red-hat/","a58"),exact:!0},{path:"/blog/tags/rust/",component:d("/blog/tags/rust/","281"),exact:!0},{path:"/blog/tags/vps/",component:d("/blog/tags/vps/","1b8"),exact:!0},{path:"/contributions/",component:d("/contributions/","541"),exact:!0},{path:"/search/",component:d("/search/","c7b"),exact:!0},{path:"/talks/",component:d("/talks/","819"),exact:!0},{path:"/algorithms/",component:d("/algorithms/","67b"),routes:[{path:"/algorithms/",component:d("/algorithms/","96e"),routes:[{path:"/algorithms/tags/",component:d("/algorithms/tags/","bb8"),exact:!0},{path:"/algorithms/tags/applications/",component:d("/algorithms/tags/applications/","b32"),exact:!0},{path:"/algorithms/tags/backtracking/",component:d("/algorithms/tags/backtracking/","e2d"),exact:!0},{path:"/algorithms/tags/balanced-trees/",component:d("/algorithms/tags/balanced-trees/","591"),exact:!0},{path:"/algorithms/tags/bfs/",component:d("/algorithms/tags/bfs/","334"),exact:!0},{path:"/algorithms/tags/bottom-up-dp/",component:d("/algorithms/tags/bottom-up-dp/","9e5"),exact:!0},{path:"/algorithms/tags/c/",component:d("/algorithms/tags/c/","cc5"),exact:!0},{path:"/algorithms/tags/csharp/",component:d("/algorithms/tags/csharp/","7a9"),exact:!0},{path:"/algorithms/tags/dynamic-array/",component:d("/algorithms/tags/dynamic-array/","00e"),exact:!0},{path:"/algorithms/tags/dynamic-programming/",component:d("/algorithms/tags/dynamic-programming/","f82"),exact:!0},{path:"/algorithms/tags/exponential/",component:d("/algorithms/tags/exponential/","a74"),exact:!0},{path:"/algorithms/tags/graphs/",component:d("/algorithms/tags/graphs/","d5b"),exact:!0},{path:"
/algorithms/tags/greedy/",component:d("/algorithms/tags/greedy/","079"),exact:!0},{path:"/algorithms/tags/iterative/",component:d("/algorithms/tags/iterative/","783"),exact:!0},{path:"/algorithms/tags/iterators/",component:d("/algorithms/tags/iterators/","1bc"),exact:!0},{path:"/algorithms/tags/java/",component:d("/algorithms/tags/java/","2b4"),exact:!0},{path:"/algorithms/tags/karel/",component:d("/algorithms/tags/karel/","79f"),exact:!0},{path:"/algorithms/tags/postconditions/",component:d("/algorithms/tags/postconditions/","a27"),exact:!0},{path:"/algorithms/tags/python/",component:d("/algorithms/tags/python/","eb2"),exact:!0},{path:"/algorithms/tags/recursion/",component:d("/algorithms/tags/recursion/","2b0"),exact:!0},{path:"/algorithms/tags/red-black-trees/",component:d("/algorithms/tags/red-black-trees/","9ca"),exact:!0},{path:"/algorithms/tags/sorting/",component:d("/algorithms/tags/sorting/","7ca"),exact:!0},{path:"/algorithms/tags/testing/",component:d("/algorithms/tags/testing/","2af"),exact:!0},{path:"/algorithms/tags/time-complexity/",component:d("/algorithms/tags/time-complexity/","2d3"),exact:!0},{path:"/algorithms/tags/top-down-dp/",component:d("/algorithms/tags/top-down-dp/","779"),exact:!0},{path:"/algorithms/",component:d("/algorithms/","2a9"),routes:[{path:"/algorithms/",component:d("/algorithms/","9b0"),exact:!0,sidebar:"autogeneratedBar"},{path:"/algorithms/algorithms-correctness/postcondition-ambiguity/",component:d("/algorithms/algorithms-correctness/postcondition-ambiguity/","c18"),exact:!0,sidebar:"autogeneratedBar"},{path:"/algorithms/category/algorithms-and-correctness/",component:d("/algorithms/category/algorithms-and-correctness/","ea2"),exact:!0,sidebar:"autogeneratedBar"},{path:"/algorithms/category/asymptotic-notation-and-time-complexity/",component:d("/algorithms/category/asymptotic-notation-and-time-complexity/","fba"),exact:!0,sidebar:"autogeneratedBar"},{path:"/algorithms/category/graphs/",component:d("/algorithms/category/graphs/","a92"),exact:!0,sidebar:"autogeneratedBar"},{path:"/algorithms/category/recursion/",component:d("/algorithms/category/recursion/","61f"),exact:!0,sidebar:"autogeneratedBar"},{path:"/algorithms/category/red-black-trees/",component:d("/algorithms/category/red-black-trees/","0c0"),exact:!0,sidebar:"autogeneratedBar"},{path:"/algorithms/graphs/bfs-tree/",component:d("/algorithms/graphs/bfs-tree/","2fb"),exact:!0,sidebar:"autogeneratedBar"},{path:"/algorithms/graphs/iterative-and-iterators/",component:d("/algorithms/graphs/iterative-and-iterators/","bfd"),exact:!0,sidebar:"autogeneratedBar"},{path:"/algorithms/rb-trees/applications/",component:d("/algorithms/rb-trees/applications/","46a"),exact:!0,sidebar:"autogeneratedBar"},{path:"/algorithms/rb-trees/rules/",component:d("/algorithms/rb-trees/rules/","21a"),exact:!0,sidebar:"autogeneratedBar"},{path:"/algorithms/recursion/karel-1/",component:d("/algorithms/recursion/karel-1/","600"),exact:!0,sidebar:"autogeneratedBar"},{path:"/algorithms/recursion/pyramid-slide-down/",component:d("/algorithms/recursion/pyramid-slide-down/","947"),exact:!0,sidebar:"autogeneratedBar"},{path:"/algorithms/time-complexity/extend/",component:d("/algorithms/time-complexity/extend/","250"),exact:!0,sidebar:"autogeneratedBar"}]}]}]},{path:"/c/",component:d("/c/","dae"),routes:[{path:"/c/",component:d("/c/","fc8"),routes:[{path:"/c/",component:d("/c/","1c4"),routes:[{path:"/c/",component:d("/c/","a0f"),exact:!0,sidebar:"autogeneratedBar"},{path:"/c/bonuses/seminar-03/",component:d("/c/bonuses/seminar-03/","
aaa"),exact:!0,sidebar:"autogeneratedBar"},{path:"/c/bonuses/seminar-04/",component:d("/c/bonuses/seminar-04/","ffe"),exact:!0,sidebar:"autogeneratedBar"},{path:"/c/bonuses/seminar-05-06/",component:d("/c/bonuses/seminar-05-06/","4cd"),exact:!0,sidebar:"autogeneratedBar"},{path:"/c/bonuses/seminar-08/",component:d("/c/bonuses/seminar-08/","09a"),exact:!0,sidebar:"autogeneratedBar"},{path:"/c/bonuses/seminar-10/",component:d("/c/bonuses/seminar-10/","b9e"),exact:!0,sidebar:"autogeneratedBar"},{path:"/c/category/bonuses/",component:d("/c/category/bonuses/","17e"),exact:!0,sidebar:"autogeneratedBar"},{path:"/c/category/practice-exams/",component:d("/c/category/practice-exams/","009"),exact:!0,sidebar:"autogeneratedBar"},{path:"/c/mr/",component:d("/c/mr/","4c5"),exact:!0,sidebar:"autogeneratedBar"},{path:"/c/pexam/cams/",component:d("/c/pexam/cams/","a10"),exact:!0,sidebar:"autogeneratedBar"},{path:"/c/pexam/garbage_collect/",component:d("/c/pexam/garbage_collect/","44e"),exact:!0,sidebar:"autogeneratedBar"}]}]}]},{path:"/cpp/",component:d("/cpp/","269"),routes:[{path:"/cpp/",component:d("/cpp/","187"),routes:[{path:"/cpp/",component:d("/cpp/","102"),routes:[{path:"/cpp/",component:d("/cpp/","fcd"),exact:!0,sidebar:"autogeneratedBar"},{path:"/cpp/category/exceptions-and-raii/",component:d("/cpp/category/exceptions-and-raii/","cfa"),exact:!0,sidebar:"autogeneratedBar"},{path:"/cpp/environment/",component:d("/cpp/environment/","e0b"),exact:!0,sidebar:"autogeneratedBar"},{path:"/cpp/exceptions-and-raii/placeholders/",component:d("/cpp/exceptions-and-raii/placeholders/","9b3"),exact:!0,sidebar:"autogeneratedBar"}]}]}]},{path:"/",component:d("/","dfb"),exact:!0},{path:"*",component:d("*")}]},8934:(e,t,n)=>{"use strict";n.d(t,{_:()=>o,t:()=>i});var r=n(7294),a=n(5893);const o=r.createContext(!1);function i(e){let{children:t}=e;const[n,i]=(0,r.useState)(!1);return(0,r.useEffect)((()=>{i(!0)}),[]),(0,a.jsx)(o.Provider,{value:n,children:t})}},7221:(e,t,n)=>{"use strict";var r=n(7294),a=n(745),o=n(3727),i=n(405),s=n(412);const l=[n(2497),n(3310),n(8320),n(7439),n(7800)];var c=n(723),u=n(6550),d=n(8790),p=n(5893);function f(e){let{children:t}=e;return(0,p.jsx)(p.Fragment,{children:t})}var g=n(5742),m=n(2263),h=n(4996),b=n(6668),y=n(833),v=n(4711),w=n(9727),k=n(3320),x=n(8780),S=n(197);function _(){const{i18n:{currentLocale:e,defaultLocale:t,localeConfigs:n}}=(0,m.Z)(),r=(0,v.l)(),a=n[e].htmlLang,o=e=>e.replace("-","_");return(0,p.jsxs)(g.Z,{children:[Object.entries(n).map((e=>{let[t,{htmlLang:n}]=e;return(0,p.jsx)("link",{rel:"alternate",href:r.createUrl({locale:t,fullyQualified:!0}),hrefLang:n},t)})),(0,p.jsx)("link",{rel:"alternate",href:r.createUrl({locale:t,fullyQualified:!0}),hrefLang:"x-default"}),(0,p.jsx)("meta",{property:"og:locale",content:o(a)}),Object.values(n).filter((e=>a!==e.htmlLang)).map((e=>(0,p.jsx)("meta",{property:"og:locale:alternate",content:o(e.htmlLang)},`meta-og-${e.htmlLang}`)))]})}function E(e){let{permalink:t}=e;const{siteConfig:{url:n}}=(0,m.Z)(),r=function(){const{siteConfig:{url:e,baseUrl:t,trailingSlash:n}}=(0,m.Z)(),{pathname:r}=(0,u.TH)();return e+(0,x.applyTrailingSlash)((0,h.Z)(r),{trailingSlash:n,baseUrl:t})}(),a=t?`${n}${t}`:r;return(0,p.jsxs)(g.Z,{children:[(0,p.jsx)("meta",{property:"og:url",content:a}),(0,p.jsx)("link",{rel:"canonical",href:a})]})}function 
C(){const{i18n:{currentLocale:e}}=(0,m.Z)(),{metadata:t,image:n}=(0,b.L)();return(0,p.jsxs)(p.Fragment,{children:[(0,p.jsxs)(g.Z,{children:[(0,p.jsx)("meta",{name:"twitter:card",content:"summary_large_image"}),(0,p.jsx)("body",{className:w.h})]}),n&&(0,p.jsx)(y.d,{image:n}),(0,p.jsx)(E,{}),(0,p.jsx)(_,{}),(0,p.jsx)(S.Z,{tag:k.HX,locale:e}),(0,p.jsx)(g.Z,{children:t.map(((e,t)=>(0,p.jsx)("meta",{...e},t)))})]})}const T=new Map;function A(e){if(T.has(e.pathname))return{...e,pathname:T.get(e.pathname)};if((0,d.f)(c.Z,e.pathname).some((e=>{let{route:t}=e;return!0===t.exact})))return T.set(e.pathname,e.pathname),e;const t=e.pathname.trim().replace(/(?:\/index)?\.html$/,"")||"/";return T.set(e.pathname,t),{...e,pathname:t}}var N=n(8934),j=n(8940),L=n(469);function P(e){for(var t=arguments.length,n=new Array(t>1?t-1:0),r=1;r{const r=t.default?.[e]??t[e];return r?.(...n)}));return()=>a.forEach((e=>e?.()))}const R=function(e){let{children:t,location:n,previousLocation:r}=e;return(0,L.Z)((()=>{r!==n&&(!function(e){let{location:t,previousLocation:n}=e;if(!n)return;const r=t.pathname===n.pathname,a=t.hash===n.hash,o=t.search===n.search;if(r&&a&&!o)return;const{hash:i}=t;if(i){const e=decodeURIComponent(i.substring(1)),t=document.getElementById(e);t?.scrollIntoView()}else window.scrollTo(0,0)}({location:n,previousLocation:r}),P("onRouteDidUpdate",{previousLocation:r,location:n}))}),[r,n]),t};function O(e){const t=Array.from(new Set([e,decodeURI(e)])).map((e=>(0,d.f)(c.Z,e))).flat();return Promise.all(t.map((e=>e.route.component.preload?.())))}class I extends r.Component{previousLocation;routeUpdateCleanupCb;constructor(e){super(e),this.previousLocation=null,this.routeUpdateCleanupCb=s.Z.canUseDOM?P("onRouteUpdate",{previousLocation:null,location:this.props.location}):()=>{},this.state={nextRouteHasLoaded:!0}}shouldComponentUpdate(e,t){if(e.location===this.props.location)return t.nextRouteHasLoaded;const n=e.location;return this.previousLocation=this.props.location,this.setState({nextRouteHasLoaded:!1}),this.routeUpdateCleanupCb=P("onRouteUpdate",{previousLocation:this.previousLocation,location:n}),O(n.pathname).then((()=>{this.routeUpdateCleanupCb(),this.setState({nextRouteHasLoaded:!0})})).catch((e=>{console.warn(e),window.location.reload()})),!1}render(){const{children:e,location:t}=this.props;return(0,p.jsx)(R,{previousLocation:this.previousLocation,location:t,children:(0,p.jsx)(u.AW,{location:t,render:()=>e})})}}const F=I,M="__docusaurus-base-url-issue-banner-container",D="__docusaurus-base-url-issue-banner",B="__docusaurus-base-url-issue-banner-suggestion-container";function z(e){return`\ndocument.addEventListener('DOMContentLoaded', function maybeInsertBanner() {\n var shouldInsert = typeof window['docusaurus'] === 'undefined';\n shouldInsert && insertBanner();\n});\n\nfunction insertBanner() {\n var bannerContainer = document.createElement('div');\n bannerContainer.id = '${M}';\n var bannerHtml = ${JSON.stringify(function(e){return`\n
    Your Docusaurus site did not load properly.
    A very common reason is a wrong site baseUrl configuration.
    Current configured baseUrl = ${e} ${"/"===e?" (default value)":""}
    We suggest trying baseUrl =
    \n`}(e)).replace(/{if("undefined"==typeof document)return void n();const r=document.createElement("link");r.setAttribute("rel","prefetch"),r.setAttribute("href",e),r.onload=()=>t(),r.onerror=()=>n();const a=document.getElementsByTagName("head")[0]??document.getElementsByName("script")[0]?.parentNode;a?.appendChild(r)}))}:function(e){return new Promise(((t,n)=>{const r=new XMLHttpRequest;r.open("GET",e,!0),r.withCredentials=!0,r.onload=()=>{200===r.status?t():n()},r.send(null)}))};var Y=n(9670);const Q=new Set,X=new Set,J=()=>navigator.connection?.effectiveType.includes("2g")||navigator.connection?.saveData,ee={prefetch(e){if(!(e=>!J()&&!X.has(e)&&!Q.has(e))(e))return!1;Q.add(e);const t=(0,d.f)(c.Z,e).flatMap((e=>{return t=e.route.path,Object.entries(q).filter((e=>{let[n]=e;return n.replace(/-[^-]+$/,"")===t})).flatMap((e=>{let[,t]=e;return Object.values((0,Y.Z)(t))}));var t}));return Promise.all(t.map((e=>{const t=n.gca(e);return t&&!t.includes("undefined")?K(t).catch((()=>{})):Promise.resolve()})))},preload:e=>!!(e=>!J()&&!X.has(e))(e)&&(X.add(e),O(e))},te=Object.freeze(ee),ne=Boolean(!0);if(s.Z.canUseDOM){window.docusaurus=te;const e=document.getElementById("__docusaurus"),t=(0,p.jsx)(i.B6,{children:(0,p.jsx)(o.VK,{children:(0,p.jsx)(G,{})})}),n=(e,t)=>{console.error("Docusaurus React Root onRecoverableError:",e,t)},s=()=>{if(ne)r.startTransition((()=>{a.hydrateRoot(e,t,{onRecoverableError:n})}));else{const o=a.createRoot(e,{onRecoverableError:n});r.startTransition((()=>{o.render(t)}))}};O(window.location.pathname).then(s)}},8940:(e,t,n)=>{"use strict";n.d(t,{_:()=>d,M:()=>p});var r=n(7294),a=n(6809);const o=JSON.parse('{"docusaurus-plugin-content-docs":{"algorithms":{"path":"/algorithms","versions":[{"name":"current","label":"Next","isLast":true,"path":"/algorithms","mainDocId":"algorithms-intro","docs":[{"id":"algorithms-correctness/postcondition-ambiguity","path":"/algorithms/algorithms-correctness/postcondition-ambiguity","sidebar":"autogeneratedBar"},{"id":"algorithms-intro","path":"/algorithms/","sidebar":"autogeneratedBar"},{"id":"graphs/bfs-tree","path":"/algorithms/graphs/bfs-tree","sidebar":"autogeneratedBar"},{"id":"graphs/iterative-and-iterators","path":"/algorithms/graphs/iterative-and-iterators","sidebar":"autogeneratedBar"},{"id":"rb-trees/applications","path":"/algorithms/rb-trees/applications","sidebar":"autogeneratedBar"},{"id":"rb-trees/rules","path":"/algorithms/rb-trees/rules","sidebar":"autogeneratedBar"},{"id":"recursion/karel-1","path":"/algorithms/recursion/karel-1","sidebar":"autogeneratedBar"},{"id":"recursion/pyramid-slide-down","path":"/algorithms/recursion/pyramid-slide-down","sidebar":"autogeneratedBar"},{"id":"time-complexity/extend","path":"/algorithms/time-complexity/extend","sidebar":"autogeneratedBar"},{"id":"/category/algorithms-and-correctness","path":"/algorithms/category/algorithms-and-correctness","sidebar":"autogeneratedBar"},{"id":"/category/asymptotic-notation-and-time-complexity","path":"/algorithms/category/asymptotic-notation-and-time-complexity","sidebar":"autogeneratedBar"},{"id":"/category/recursion","path":"/algorithms/category/recursion","sidebar":"autogeneratedBar"},{"id":"/category/red-black-trees","path":"/algorithms/category/red-black-trees","sidebar":"autogeneratedBar"},{"id":"/category/graphs","path":"/algorithms/category/graphs","sidebar":"autogeneratedBar"}],"draftIds":[],"sidebars":{"autogeneratedBar":{"link":{"path":"/algorithms/","label":"algorithms-intro"}}}}],"breadcrumbs":true},"cpp":{"path":"/cpp","versions":[{"name":"
current","label":"Next","isLast":true,"path":"/cpp","mainDocId":"cpp-intro","docs":[{"id":"cpp-intro","path":"/cpp/","sidebar":"autogeneratedBar"},{"id":"environment","path":"/cpp/environment","sidebar":"autogeneratedBar"},{"id":"exceptions-and-raii/2023-11-24-placeholders","path":"/cpp/exceptions-and-raii/placeholders","sidebar":"autogeneratedBar"},{"id":"/category/exceptions-and-raii","path":"/cpp/category/exceptions-and-raii","sidebar":"autogeneratedBar"}],"draftIds":[],"sidebars":{"autogeneratedBar":{"link":{"path":"/cpp/","label":"cpp-intro"}}}}],"breadcrumbs":true},"c":{"path":"/c","versions":[{"name":"current","label":"Next","isLast":true,"path":"/c","mainDocId":"c-intro","docs":[{"id":"bonuses/seminar-03","path":"/c/bonuses/seminar-03","sidebar":"autogeneratedBar"},{"id":"bonuses/seminar-04","path":"/c/bonuses/seminar-04","sidebar":"autogeneratedBar"},{"id":"bonuses/seminar-05-06","path":"/c/bonuses/seminar-05-06","sidebar":"autogeneratedBar"},{"id":"bonuses/seminar-08","path":"/c/bonuses/seminar-08","sidebar":"autogeneratedBar"},{"id":"bonuses/seminar-10","path":"/c/bonuses/seminar-10","sidebar":"autogeneratedBar"},{"id":"c-intro","path":"/c/","sidebar":"autogeneratedBar"},{"id":"mr","path":"/c/mr","sidebar":"autogeneratedBar"},{"id":"pexam/b-garbage_collect","path":"/c/pexam/garbage_collect","sidebar":"autogeneratedBar"},{"id":"pexam/c-cams","path":"/c/pexam/cams","sidebar":"autogeneratedBar"},{"id":"/category/bonuses","path":"/c/category/bonuses","sidebar":"autogeneratedBar"},{"id":"/category/practice-exams","path":"/c/category/practice-exams","sidebar":"autogeneratedBar"}],"draftIds":[],"sidebars":{"autogeneratedBar":{"link":{"path":"/c/","label":"c-intro"}}}}],"breadcrumbs":true}}}'),i=JSON.parse('{"defaultLocale":"en","locales":["en"],"path":"i18n","currentLocale":"en","localeConfigs":{"en":{"label":"English","direction":"ltr","htmlLang":"en","calendar":"gregory","path":"en"}}}');var s=n(7529);const l=JSON.parse('{"docusaurusVersion":"3.0.0","siteVersion":"0.0.0","pluginVersions":{"docusaurus-plugin-content-pages":{"type":"package","name":"@docusaurus/plugin-content-pages","version":"3.0.0"},"docusaurus-plugin-sitemap":{"type":"package","name":"@docusaurus/plugin-sitemap","version":"3.0.0"},"docusaurus-theme-classic":{"type":"package","name":"@docusaurus/theme-classic","version":"3.0.0"},"docusaurus-theme-search-algolia":{"type":"package","name":"@docusaurus/theme-search-algolia","version":"3.0.0"},"docusaurus-plugin-content-docs":{"type":"package","name":"@docusaurus/plugin-content-docs","version":"3.0.0"},"docusaurus-plugin-content-blog":{"type":"package","name":"@docusaurus/plugin-content-blog","version":"3.0.0"},"docusaurus-plugin-sass":{"type":"package","name":"docusaurus-plugin-sass","version":"0.2.5"},"docusaurus-plugin-client-redirects":{"type":"package","name":"@docusaurus/plugin-client-redirects","version":"3.0.0"},"docusaurus-theme-mermaid":{"type":"package","name":"@docusaurus/theme-mermaid","version":"3.0.0"}}}');var c=n(5893);const u={siteConfig:a.default,siteMetadata:l,globalData:o,i18n:i,codeTranslations:s},d=r.createContext(u);function p(e){let{children:t}=e;return(0,c.jsx)(d.Provider,{value:u,children:t})}},4763:(e,t,n)=>{"use strict";n.d(t,{Z:()=>f});var r=n(7294),a=n(412),o=n(5742),i=n(8780),s=n(8207),l=n(5893);function c(e){let{error:t,tryAgain:n}=e;return(0,l.jsxs)("div",{style:{display:"flex",flexDirection:"column",justifyContent:"center",alignItems:"flex-start",minHeight:"100vh",width:"100%",maxWidth:"80ch",fontSize:"20px",margin:"0 
auto",padding:"1rem"},children:[(0,l.jsx)("h1",{style:{fontSize:"3rem"},children:"This page crashed"}),(0,l.jsx)("button",{type:"button",onClick:n,style:{margin:"1rem 0",fontSize:"2rem",cursor:"pointer",borderRadius:20,padding:"1rem"},children:"Try again"}),(0,l.jsx)(u,{error:t})]})}function u(e){let{error:t}=e;const n=(0,i.getErrorCausalChain)(t).map((e=>e.message)).join("\n\nCause:\n");return(0,l.jsx)("p",{style:{whiteSpace:"pre-wrap"},children:n})}function d(e){let{error:t,tryAgain:n}=e;return(0,l.jsxs)(f,{fallback:()=>(0,l.jsx)(c,{error:t,tryAgain:n}),children:[(0,l.jsx)(o.Z,{children:(0,l.jsx)("title",{children:"Page Error"})}),(0,l.jsx)(s.Z,{children:(0,l.jsx)(c,{error:t,tryAgain:n})})]})}const p=e=>(0,l.jsx)(d,{...e});class f extends r.Component{constructor(e){super(e),this.state={error:null}}componentDidCatch(e){a.Z.canUseDOM&&this.setState({error:e})}render(){const{children:e}=this.props,{error:t}=this.state;if(t){const e={error:t,tryAgain:()=>this.setState({error:null})};return(this.props.fallback??p)(e)}return e??null}}},412:(e,t,n)=>{"use strict";n.d(t,{Z:()=>a});const r="undefined"!=typeof window&&"document"in window&&"createElement"in window.document,a={canUseDOM:r,canUseEventListeners:r&&("addEventListener"in window||"attachEvent"in window),canUseIntersectionObserver:r&&"IntersectionObserver"in window,canUseViewport:r&&"screen"in window}},5742:(e,t,n)=>{"use strict";n.d(t,{Z:()=>o});n(7294);var r=n(405),a=n(5893);function o(e){return(0,a.jsx)(r.ql,{...e})}},9960:(e,t,n)=>{"use strict";n.d(t,{Z:()=>f});var r=n(7294),a=n(3727),o=n(8780),i=n(2263),s=n(3919),l=n(412),c=n(5893);const u=r.createContext({collectLink:()=>{}});var d=n(4996);function p(e,t){let{isNavLink:n,to:p,href:f,activeClassName:g,isActive:m,"data-noBrokenLinkCheck":h,autoAddBaseUrl:b=!0,...y}=e;const{siteConfig:{trailingSlash:v,baseUrl:w}}=(0,i.Z)(),{withBaseUrl:k}=(0,d.C)(),x=(0,r.useContext)(u),S=(0,r.useRef)(null);(0,r.useImperativeHandle)(t,(()=>S.current));const _=p||f;const E=(0,s.Z)(_),C=_?.replace("pathname://","");let T=void 0!==C?(A=C,b&&(e=>e.startsWith("/"))(A)?k(A):A):void 0;var A;T&&E&&(T=(0,o.applyTrailingSlash)(T,{trailingSlash:v,baseUrl:w}));const N=(0,r.useRef)(!1),j=n?a.OL:a.rU,L=l.Z.canUseIntersectionObserver,P=(0,r.useRef)(),R=()=>{N.current||null==T||(window.docusaurus.preload(T),N.current=!0)};(0,r.useEffect)((()=>(!L&&E&&null!=T&&window.docusaurus.prefetch(T),()=>{L&&P.current&&P.current.disconnect()})),[P,T,L,E]);const O=T?.startsWith("#")??!1,I=!T||!E||O;return I||h||x.collectLink(T),I?(0,c.jsx)("a",{ref:S,href:T,..._&&!E&&{target:"_blank",rel:"noopener noreferrer"},...y}):(0,c.jsx)(j,{...y,onMouseEnter:R,onTouchStart:R,innerRef:e=>{S.current=e,L&&e&&E&&(P.current=new window.IntersectionObserver((t=>{t.forEach((t=>{e===t.target&&(t.isIntersecting||t.intersectionRatio>0)&&(P.current.unobserve(e),P.current.disconnect(),null!=T&&window.docusaurus.prefetch(T))}))})),P.current.observe(e))},to:T,...n&&{isActive:m,activeClassName:g}})}const f=r.forwardRef(p)},5999:(e,t,n)=>{"use strict";n.d(t,{Z:()=>c,I:()=>l});var r=n(7294),a=n(5893);function o(e,t){const n=e.split(/(\{\w+\})/).map(((e,n)=>{if(n%2==1){const n=t?.[e.slice(1,-1)];if(void 0!==n)return n}return e}));return n.some((e=>(0,r.isValidElement)(e)))?n.map(((e,t)=>(0,r.isValidElement)(e)?r.cloneElement(e,{key:t}):e)).filter((e=>""!==e)):n.join("")}var i=n(7529);function s(e){let{id:t,message:n}=e;if(void 0===t&&void 0===n)throw new Error("Docusaurus translation declarations must have at least a translation id or a default translation 
message");return i[t??n]??n??t}function l(e,t){let{message:n,id:r}=e;return o(s({message:n,id:r}),t)}function c(e){let{children:t,id:n,values:r}=e;if(t&&"string"!=typeof t)throw console.warn("Illegal children",t),new Error("The Docusaurus component only accept simple string values");const i=s({message:t,id:n});return(0,a.jsx)(a.Fragment,{children:o(i,r)})}},9935:(e,t,n)=>{"use strict";n.d(t,{m:()=>r});const r="default"},3919:(e,t,n)=>{"use strict";function r(e){return/^(?:\w*:|\/\/)/.test(e)}function a(e){return void 0!==e&&!r(e)}n.d(t,{Z:()=>a,b:()=>r})},4996:(e,t,n)=>{"use strict";n.d(t,{C:()=>i,Z:()=>s});var r=n(7294),a=n(2263),o=n(3919);function i(){const{siteConfig:{baseUrl:e,url:t}}=(0,a.Z)(),n=(0,r.useCallback)(((n,r)=>function(e,t,n,r){let{forcePrependBaseUrl:a=!1,absolute:i=!1}=void 0===r?{}:r;if(!n||n.startsWith("#")||(0,o.b)(n))return n;if(a)return t+n.replace(/^\//,"");if(n===t.replace(/\/$/,""))return t;const s=n.startsWith(t)?n:t+n.replace(/^\//,"");return i?e+s:s}(t,e,n,r)),[t,e]);return{withBaseUrl:n}}function s(e,t){void 0===t&&(t={});const{withBaseUrl:n}=i();return n(e,t)}},2263:(e,t,n)=>{"use strict";n.d(t,{Z:()=>o});var r=n(7294),a=n(8940);function o(){return(0,r.useContext)(a._)}},2389:(e,t,n)=>{"use strict";n.d(t,{Z:()=>o});var r=n(7294),a=n(8934);function o(){return(0,r.useContext)(a._)}},469:(e,t,n)=>{"use strict";n.d(t,{Z:()=>a});var r=n(7294);const a=n(412).Z.canUseDOM?r.useLayoutEffect:r.useEffect},9670:(e,t,n)=>{"use strict";n.d(t,{Z:()=>a});const r=e=>"object"==typeof e&&!!e&&Object.keys(e).length>0;function a(e){const t={};return function e(n,a){Object.entries(n).forEach((n=>{let[o,i]=n;const s=a?`${a}.${o}`:o;r(i)?e(i,s):t[s]=i}))}(e),t}},226:(e,t,n)=>{"use strict";n.d(t,{_:()=>o,z:()=>i});var r=n(7294),a=n(5893);const o=r.createContext(null);function i(e){let{children:t,value:n}=e;const i=r.useContext(o),s=(0,r.useMemo)((()=>function(e){let{parent:t,value:n}=e;if(!t){if(!n)throw new Error("Unexpected: no Docusaurus route context found");if(!("plugin"in n))throw new Error("Unexpected: Docusaurus topmost route context has no `plugin` attribute");return n}const r={...t.data,...n?.data};return{plugin:t.plugin,data:r}}({parent:i,value:n})),[i,n]);return(0,a.jsx)(o.Provider,{value:s,children:t})}},143:(e,t,n)=>{"use strict";n.d(t,{Iw:()=>b,gA:()=>f,WS:()=>g,_r:()=>d,Jo:()=>y,zh:()=>p,yW:()=>h,gB:()=>m});var r=n(6550),a=n(2263),o=n(9935);function i(e,t){void 0===t&&(t={});const n=function(){const{globalData:e}=(0,a.Z)();return e}()[e];if(!n&&t.failfast)throw new Error(`Docusaurus plugin global data not found for "${e}" plugin.`);return n}const s=e=>e.versions.find((e=>e.isLast));function l(e,t){const n=s(e);return[...e.versions.filter((e=>e!==n)),n].find((e=>!!(0,r.LX)(t,{path:e.path,exact:!1,strict:!1})))}function c(e,t){const n=l(e,t),a=n?.docs.find((e=>!!(0,r.LX)(t,{path:e.path,exact:!0,strict:!1})));return{activeVersion:n,activeDoc:a,alternateDocVersions:a?function(t){const n={};return e.versions.forEach((e=>{e.docs.forEach((r=>{r.id===t&&(n[e.name]=r)}))})),n}(a.id):{}}}const u={},d=()=>i("docusaurus-plugin-content-docs")??u,p=e=>function(e,t,n){void 0===t&&(t=o.m),void 0===n&&(n={});const r=i(e),a=r?.[t];if(!a&&n.failfast)throw new Error(`Docusaurus plugin global data not found for "${e}" plugin with id "${t}".`);return a}("docusaurus-plugin-content-docs",e,{failfast:!0});function f(e){void 0===e&&(e={});const t=d(),{pathname:n}=(0,r.TH)();return function(e,t,n){void 0===n&&(n={});const 
a=Object.entries(e).sort(((e,t)=>t[1].path.localeCompare(e[1].path))).find((e=>{let[,n]=e;return!!(0,r.LX)(t,{path:n.path,exact:!1,strict:!1})})),o=a?{pluginId:a[0],pluginData:a[1]}:void 0;if(!o&&n.failfast)throw new Error(`Can't find active docs plugin for "${t}" pathname, while it was expected to be found. Maybe you tried to use a docs feature that can only be used on a docs-related page? Existing docs plugin paths are: ${Object.values(e).map((e=>e.path)).join(", ")}`);return o}(t,n,e)}function g(e){void 0===e&&(e={});const t=f(e),{pathname:n}=(0,r.TH)();if(!t)return;return{activePlugin:t,activeVersion:l(t.pluginData,n)}}function m(e){return p(e).versions}function h(e){const t=p(e);return s(t)}function b(e){const t=p(e),{pathname:n}=(0,r.TH)();return c(t,n)}function y(e){const t=p(e),{pathname:n}=(0,r.TH)();return function(e,t){const n=s(e);return{latestDocSuggestion:c(e,t).alternateDocVersions[n.name],latestVersionSuggestion:n}}(t,n)}},8320:(e,t,n)=>{"use strict";n.r(t),n.d(t,{default:()=>o});var r=n(4865),a=n.n(r);a().configure({showSpinner:!1});const o={onRouteUpdate(e){let{location:t,previousLocation:n}=e;if(n&&t.pathname!==n.pathname){const e=window.setTimeout((()=>{a().start()}),200);return()=>window.clearTimeout(e)}},onRouteDidUpdate(){a().done()}}},3310:(e,t,n)=>{"use strict";n.r(t);var r=n(4965),a=n(6809);!function(e){const{themeConfig:{prism:t}}=a.default,{additionalLanguages:r}=t;globalThis.Prism=e,r.forEach((e=>{"php"===e&&n(6854),n(218)(`./prism-${e}`)})),delete globalThis.Prism}(r.p1)},7955:(e,t,n)=>{"use strict";n.d(t,{Z:()=>c});n(7294);var r=n(6010),a=n(5999),o=n(6668),i=n(9960);const s={anchorWithStickyNavbar:"anchorWithStickyNavbar_LWe7",anchorWithHideOnScrollNavbar:"anchorWithHideOnScrollNavbar_WYt5"};var l=n(5893);function c(e){let{as:t,id:n,...c}=e;const{navbar:{hideOnScroll:u}}=(0,o.L)();if("h1"===t||!n)return(0,l.jsx)(t,{...c,id:void 0});const d=(0,a.I)({id:"theme.common.headingLinkTitle",message:"Direct link to {heading}",description:"Title for link to heading"},{heading:"string"==typeof c.children?c.children:n});return(0,l.jsxs)(t,{...c,className:(0,r.Z)("anchor",u?s.anchorWithHideOnScrollNavbar:s.anchorWithStickyNavbar,c.className),id:n,children:[c.children,(0,l.jsx)(i.Z,{className:"hash-link",to:`#${n}`,"aria-label":d,title:d,children:"\u200b"})]})}},9471:(e,t,n)=>{"use strict";n.d(t,{Z:()=>o});n(7294);const r={iconExternalLink:"iconExternalLink_nPIU"};var a=n(5893);function o(e){let{width:t=13.5,height:n=13.5}=e;return(0,a.jsx)("svg",{width:t,height:n,"aria-hidden":"true",viewBox:"0 0 24 24",className:r.iconExternalLink,children:(0,a.jsx)("path",{fill:"currentColor",d:"M21 13v10h-21v-19h12v2h-10v15h17v-8h2zm3-12h-10.988l4.035 4-6.977 7.07 2.828 2.828 6.977-7.07 4.125 4.172v-11z"})})}},8207:(e,t,n)=>{"use strict";n.d(t,{Z:()=>Tt});var r=n(7294),a=n(6010),o=n(4763),i=n(833),s=n(6550),l=n(5999),c=n(5936),u=n(5893);const d="__docusaurus_skipToContent_fallback";function p(e){e.setAttribute("tabindex","-1"),e.focus(),e.removeAttribute("tabindex")}function f(){const e=(0,r.useRef)(null),{action:t}=(0,s.k6)(),n=(0,r.useCallback)((e=>{e.preventDefault();const t=document.querySelector("main:first-of-type")??document.getElementById(d);t&&p(t)}),[]);return(0,c.S)((n=>{let{location:r}=n;e.current&&!r.hash&&"PUSH"===t&&p(e.current)})),{containerRef:e,onClick:n}}const g=(0,l.I)({id:"theme.common.skipToMainContent",description:"The skip to content label used for accessibility, allowing to rapidly navigate to main content with keyboard tab/enter navigation",message:"Skip to 
main content"});function m(e){const t=e.children??g,{containerRef:n,onClick:r}=f();return(0,u.jsx)("div",{ref:n,role:"region","aria-label":g,children:(0,u.jsx)("a",{...e,href:`#${d}`,onClick:r,children:t})})}var h=n(5281),b=n(9727);const y={skipToContent:"skipToContent_fXgn"};function v(){return(0,u.jsx)(m,{className:y.skipToContent})}var w=n(6668),k=n(9689);function x(e){let{width:t=21,height:n=21,color:r="currentColor",strokeWidth:a=1.2,className:o,...i}=e;return(0,u.jsx)("svg",{viewBox:"0 0 15 15",width:t,height:n,...i,children:(0,u.jsx)("g",{stroke:r,strokeWidth:a,children:(0,u.jsx)("path",{d:"M.75.75l13.5 13.5M14.25.75L.75 14.25"})})})}const S={closeButton:"closeButton_CVFx"};function _(e){return(0,u.jsx)("button",{type:"button","aria-label":(0,l.I)({id:"theme.AnnouncementBar.closeButtonAriaLabel",message:"Close",description:"The ARIA label for close button of announcement bar"}),...e,className:(0,a.Z)("clean-btn close",S.closeButton,e.className),children:(0,u.jsx)(x,{width:14,height:14,strokeWidth:3.1})})}const E={content:"content_knG7"};function C(e){const{announcementBar:t}=(0,w.L)(),{content:n}=t;return(0,u.jsx)("div",{...e,className:(0,a.Z)(E.content,e.className),dangerouslySetInnerHTML:{__html:n}})}const T={announcementBar:"announcementBar_mb4j",announcementBarPlaceholder:"announcementBarPlaceholder_vyr4",announcementBarClose:"announcementBarClose_gvF7",announcementBarContent:"announcementBarContent_xLdY"};function A(){const{announcementBar:e}=(0,w.L)(),{isActive:t,close:n}=(0,k.nT)();if(!t)return null;const{backgroundColor:r,textColor:a,isCloseable:o}=e;return(0,u.jsxs)("div",{className:T.announcementBar,style:{backgroundColor:r,color:a},role:"banner",children:[o&&(0,u.jsx)("div",{className:T.announcementBarPlaceholder}),(0,u.jsx)(C,{className:T.announcementBarContent}),o&&(0,u.jsx)(_,{onClick:n,className:T.announcementBarClose})]})}var N=n(3163),j=n(2466);var L=n(902),P=n(3102);const R=r.createContext(null);function O(e){let{children:t}=e;const n=function(){const e=(0,N.e)(),t=(0,P.HY)(),[n,a]=(0,r.useState)(!1),o=null!==t.component,i=(0,L.D9)(o);return(0,r.useEffect)((()=>{o&&!i&&a(!0)}),[o,i]),(0,r.useEffect)((()=>{o?e.shown||a(!0):a(!1)}),[e.shown,o]),(0,r.useMemo)((()=>[n,a]),[n])}();return(0,u.jsx)(R.Provider,{value:n,children:t})}function I(e){if(e.component){const t=e.component;return(0,u.jsx)(t,{...e.props})}}function F(){const e=(0,r.useContext)(R);if(!e)throw new L.i6("NavbarSecondaryMenuDisplayProvider");const[t,n]=e,a=(0,r.useCallback)((()=>n(!1)),[n]),o=(0,P.HY)();return(0,r.useMemo)((()=>({shown:t,hide:a,content:I(o)})),[a,o,t])}function M(e){let{header:t,primaryMenu:n,secondaryMenu:r}=e;const{shown:o}=F();return(0,u.jsxs)("div",{className:"navbar-sidebar",children:[t,(0,u.jsxs)("div",{className:(0,a.Z)("navbar-sidebar__items",{"navbar-sidebar__items--show-secondary":o}),children:[(0,u.jsx)("div",{className:"navbar-sidebar__item menu",children:n}),(0,u.jsx)("div",{className:"navbar-sidebar__item menu",children:r})]})]})}var D=n(2949),B=n(2389);function z(e){return(0,u.jsx)("svg",{viewBox:"0 0 24 24",width:24,height:24,...e,children:(0,u.jsx)("path",{fill:"currentColor",d:"M12,9c1.65,0,3,1.35,3,3s-1.35,3-3,3s-3-1.35-3-3S10.35,9,12,9 M12,7c-2.76,0-5,2.24-5,5s2.24,5,5,5s5-2.24,5-5 S14.76,7,12,7L12,7z M2,13l2,0c0.55,0,1-0.45,1-1s-0.45-1-1-1l-2,0c-0.55,0-1,0.45-1,1S1.45,13,2,13z M20,13l2,0c0.55,0,1-0.45,1-1 s-0.45-1-1-1l-2,0c-0.55,0-1,0.45-1,1S19.45,13,20,13z M11,2v2c0,0.55,0.45,1,1,1s1-0.45,1-1V2c0-0.55-0.45-1-1-1S11,1.45,11,2z 
M11,20v2c0,0.55,0.45,1,1,1s1-0.45,1-1v-2c0-0.55-0.45-1-1-1C11.45,19,11,19.45,11,20z M5.99,4.58c-0.39-0.39-1.03-0.39-1.41,0 c-0.39,0.39-0.39,1.03,0,1.41l1.06,1.06c0.39,0.39,1.03,0.39,1.41,0s0.39-1.03,0-1.41L5.99,4.58z M18.36,16.95 c-0.39-0.39-1.03-0.39-1.41,0c-0.39,0.39-0.39,1.03,0,1.41l1.06,1.06c0.39,0.39,1.03,0.39,1.41,0c0.39-0.39,0.39-1.03,0-1.41 L18.36,16.95z M19.42,5.99c0.39-0.39,0.39-1.03,0-1.41c-0.39-0.39-1.03-0.39-1.41,0l-1.06,1.06c-0.39,0.39-0.39,1.03,0,1.41 s1.03,0.39,1.41,0L19.42,5.99z M7.05,18.36c0.39-0.39,0.39-1.03,0-1.41c-0.39-0.39-1.03-0.39-1.41,0l-1.06,1.06 c-0.39,0.39-0.39,1.03,0,1.41s1.03,0.39,1.41,0L7.05,18.36z"})})}function $(e){return(0,u.jsx)("svg",{viewBox:"0 0 24 24",width:24,height:24,...e,children:(0,u.jsx)("path",{fill:"currentColor",d:"M9.37,5.51C9.19,6.15,9.1,6.82,9.1,7.5c0,4.08,3.32,7.4,7.4,7.4c0.68,0,1.35-0.09,1.99-0.27C17.45,17.19,14.93,19,12,19 c-3.86,0-7-3.14-7-7C5,9.07,6.81,6.55,9.37,5.51z M12,3c-4.97,0-9,4.03-9,9s4.03,9,9,9s9-4.03,9-9c0-0.46-0.04-0.92-0.1-1.36 c-0.98,1.37-2.58,2.26-4.4,2.26c-2.98,0-5.4-2.42-5.4-5.4c0-1.81,0.89-3.42,2.26-4.4C12.92,3.04,12.46,3,12,3L12,3z"})})}const U={toggle:"toggle_vylO",toggleButton:"toggleButton_gllP",darkToggleIcon:"darkToggleIcon_wfgR",lightToggleIcon:"lightToggleIcon_pyhR",toggleButtonDisabled:"toggleButtonDisabled_aARS"};function Z(e){let{className:t,buttonClassName:n,value:r,onChange:o}=e;const i=(0,B.Z)(),s=(0,l.I)({message:"Switch between dark and light mode (currently {mode})",id:"theme.colorToggle.ariaLabel",description:"The ARIA label for the navbar color mode toggle"},{mode:"dark"===r?(0,l.I)({message:"dark mode",id:"theme.colorToggle.ariaLabel.mode.dark",description:"The name for the dark color mode"}):(0,l.I)({message:"light mode",id:"theme.colorToggle.ariaLabel.mode.light",description:"The name for the light color mode"})});return(0,u.jsx)("div",{className:(0,a.Z)(U.toggle,t),children:(0,u.jsxs)("button",{className:(0,a.Z)("clean-btn",U.toggleButton,!i&&U.toggleButtonDisabled,n),type:"button",onClick:()=>o("dark"===r?"light":"dark"),disabled:!i,title:s,"aria-label":s,"aria-live":"polite",children:[(0,u.jsx)(z,{className:(0,a.Z)(U.toggleIcon,U.lightToggleIcon)}),(0,u.jsx)($,{className:(0,a.Z)(U.toggleIcon,U.darkToggleIcon)})]})})}const H=r.memo(Z),V={darkNavbarColorModeToggle:"darkNavbarColorModeToggle_X3D1"};function W(e){let{className:t}=e;const n=(0,w.L)().navbar.style,r=(0,w.L)().colorMode.disableSwitch,{colorMode:a,setColorMode:o}=(0,D.I)();return r?null:(0,u.jsx)(H,{className:t,buttonClassName:"dark"===n?V.darkNavbarColorModeToggle:void 0,value:a,onChange:o})}var G=n(1327);function q(){return(0,u.jsx)(G.Z,{className:"navbar__brand",imageClassName:"navbar__logo",titleClassName:"navbar__title text--truncate"})}function K(){const e=(0,N.e)();return(0,u.jsx)("button",{type:"button","aria-label":(0,l.I)({id:"theme.docs.sidebar.closeSidebarButtonAriaLabel",message:"Close navigation bar",description:"The ARIA label for close button of mobile sidebar"}),className:"clean-btn navbar-sidebar__close",onClick:()=>e.toggle(),children:(0,u.jsx)(x,{color:"var(--ifm-color-emphasis-600)"})})}function Y(){return(0,u.jsxs)("div",{className:"navbar-sidebar__brand",children:[(0,u.jsx)(q,{}),(0,u.jsx)(W,{className:"margin-right--md"}),(0,u.jsx)(K,{})]})}var Q=n(9960),X=n(4996),J=n(3919),ee=n(8022),te=n(9471);function ne(e){let{activeBasePath:t,activeBaseRegex:n,to:r,href:a,label:o,html:i,isDropdownLink:s,prependBaseUrlToHref:l,...c}=e;const 
d=(0,X.Z)(r),p=(0,X.Z)(t),f=(0,X.Z)(a,{forcePrependBaseUrl:!0}),g=o&&a&&!(0,J.Z)(a),m=i?{dangerouslySetInnerHTML:{__html:i}}:{children:(0,u.jsxs)(u.Fragment,{children:[o,g&&(0,u.jsx)(te.Z,{...s&&{width:12,height:12}})]})};return a?(0,u.jsx)(Q.Z,{href:l?f:a,...c,...m}):(0,u.jsx)(Q.Z,{to:d,isNavLink:!0,...(t||n)&&{isActive:(e,t)=>n?(0,ee.F)(n,t.pathname):t.pathname.startsWith(p)},...c,...m})}function re(e){let{className:t,isDropdownItem:n=!1,...r}=e;const o=(0,u.jsx)(ne,{className:(0,a.Z)(n?"dropdown__link":"navbar__item navbar__link",t),isDropdownLink:n,...r});return n?(0,u.jsx)("li",{children:o}):o}function ae(e){let{className:t,isDropdownItem:n,...r}=e;return(0,u.jsx)("li",{className:"menu__list-item",children:(0,u.jsx)(ne,{className:(0,a.Z)("menu__link",t),...r})})}function oe(e){let{mobile:t=!1,position:n,...r}=e;const a=t?ae:re;return(0,u.jsx)(a,{...r,activeClassName:r.activeClassName??(t?"menu__link--active":"navbar__link--active")})}var ie=n(6043),se=n(8596),le=n(2263);function ce(e,t){return e.some((e=>function(e,t){return!!(0,se.Mg)(e.to,t)||!!(0,ee.F)(e.activeBaseRegex,t)||!(!e.activeBasePath||!t.startsWith(e.activeBasePath))}(e,t)))}function ue(e){let{items:t,position:n,className:o,onClick:i,...s}=e;const l=(0,r.useRef)(null),[c,d]=(0,r.useState)(!1);return(0,r.useEffect)((()=>{const e=e=>{l.current&&!l.current.contains(e.target)&&d(!1)};return document.addEventListener("mousedown",e),document.addEventListener("touchstart",e),document.addEventListener("focusin",e),()=>{document.removeEventListener("mousedown",e),document.removeEventListener("touchstart",e),document.removeEventListener("focusin",e)}}),[l]),(0,u.jsxs)("div",{ref:l,className:(0,a.Z)("navbar__item","dropdown","dropdown--hoverable",{"dropdown--right":"right"===n,"dropdown--show":c}),children:[(0,u.jsx)(ne,{"aria-haspopup":"true","aria-expanded":c,role:"button",href:s.to?void 0:"#",className:(0,a.Z)("navbar__link",o),...s,onClick:s.to?void 0:e=>e.preventDefault(),onKeyDown:e=>{"Enter"===e.key&&(e.preventDefault(),d(!c))},children:s.children??s.label}),(0,u.jsx)("ul",{className:"dropdown__menu",children:t.map(((e,t)=>(0,r.createElement)(Ze,{isDropdownItem:!0,activeClassName:"dropdown__link--active",...e,key:t})))})]})}function de(e){let{items:t,className:n,position:o,onClick:i,...l}=e;const c=function(){const{siteConfig:{baseUrl:e}}=(0,le.Z)(),{pathname:t}=(0,s.TH)();return t.replace(e,"/")}(),d=ce(t,c),{collapsed:p,toggleCollapsed:f,setCollapsed:g}=(0,ie.u)({initialState:()=>!d});return(0,r.useEffect)((()=>{d&&g(!d)}),[c,d,g]),(0,u.jsxs)("li",{className:(0,a.Z)("menu__list-item",{"menu__list-item--collapsed":p}),children:[(0,u.jsx)(ne,{role:"button",className:(0,a.Z)("menu__link menu__link--sublist menu__link--sublist-caret",n),...l,onClick:e=>{e.preventDefault(),f()},children:l.children??l.label}),(0,u.jsx)(ie.z,{lazy:!0,as:"ul",className:"menu__list",collapsed:p,children:t.map(((e,t)=>(0,r.createElement)(Ze,{mobile:!0,isDropdownItem:!0,onClick:i,activeClassName:"menu__link--active",...e,key:t})))})]})}function pe(e){let{mobile:t=!1,...n}=e;const r=t?de:ue;return(0,u.jsx)(r,{...n})}var fe=n(4711);function ge(e){let{width:t=20,height:n=20,...r}=e;return(0,u.jsx)("svg",{viewBox:"0 0 24 24",width:t,height:n,"aria-hidden":!0,...r,children:(0,u.jsx)("path",{fill:"currentColor",d:"M12.87 15.07l-2.54-2.51.03-.03c1.74-1.94 2.98-4.17 3.71-6.53H17V4h-7V2H8v2H1v1.99h11.17C11.5 7.92 10.44 9.75 9 11.35 8.07 10.32 7.3 9.19 6.69 8h-2c.73 1.63 1.73 3.17 2.98 4.56l-5.09 5.02L4 19l5-5 3.11 3.11.76-2.04zM18.5 10h-2L12 
22h2l1.12-3h4.75L21 22h2l-4.5-12zm-2.62 7l1.62-4.33L19.12 17h-3.24z"})})}const me="iconLanguage_nlXk";function he(){return r.createElement("svg",{width:"15",height:"15",className:"DocSearch-Control-Key-Icon"},r.createElement("path",{d:"M4.505 4.496h2M5.505 5.496v5M8.216 4.496l.055 5.993M10 7.5c.333.333.5.667.5 1v2M12.326 4.5v5.996M8.384 4.496c1.674 0 2.116 0 2.116 1.5s-.442 1.5-2.116 1.5M3.205 9.303c-.09.448-.277 1.21-1.241 1.203C1 10.5.5 9.513.5 8V7c0-1.57.5-2.5 1.464-2.494.964.006 1.134.598 1.24 1.342M12.553 10.5h1.953",strokeWidth:"1.2",stroke:"currentColor",fill:"none",strokeLinecap:"square"}))}var be=n(830),ye=["translations"];function ve(){return ve=Object.assign||function(e){for(var t=1;te.length)&&(t=e.length);for(var n=0,r=new Array(t);n=0||(a[n]=e[n]);return a}(e,t);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);for(r=0;r=0||Object.prototype.propertyIsEnumerable.call(e,n)&&(a[n]=e[n])}return a}var Se="Ctrl";var _e=r.forwardRef((function(e,t){var n=e.translations,a=void 0===n?{}:n,o=xe(e,ye),i=a.buttonText,s=void 0===i?"Search":i,l=a.buttonAriaLabel,c=void 0===l?"Search":l,u=we((0,r.useState)(null),2),d=u[0],p=u[1];return(0,r.useEffect)((function(){"undefined"!=typeof navigator&&(/(Mac|iPhone|iPod|iPad)/i.test(navigator.platform)?p("\u2318"):p(Se))}),[]),r.createElement("button",ve({type:"button",className:"DocSearch DocSearch-Button","aria-label":c},o,{ref:t}),r.createElement("span",{className:"DocSearch-Button-Container"},r.createElement(be.W,null),r.createElement("span",{className:"DocSearch-Button-Placeholder"},s)),r.createElement("span",{className:"DocSearch-Button-Keys"},null!==d&&r.createElement(r.Fragment,null,r.createElement("kbd",{className:"DocSearch-Button-Key"},d===Se?r.createElement(he,null):d),r.createElement("kbd",{className:"DocSearch-Button-Key"},"K"))))})),Ee=n(5742),Ce=n(6177),Te=n(239),Ae=n(3320);var Ne=n(3935);const je={button:{buttonText:(0,l.I)({id:"theme.SearchBar.label",message:"Search",description:"The ARIA label and placeholder for search button"}),buttonAriaLabel:(0,l.I)({id:"theme.SearchBar.label",message:"Search",description:"The ARIA label and placeholder for search button"})},modal:{searchBox:{resetButtonTitle:(0,l.I)({id:"theme.SearchModal.searchBox.resetButtonTitle",message:"Clear the query",description:"The label and ARIA label for search box reset button"}),resetButtonAriaLabel:(0,l.I)({id:"theme.SearchModal.searchBox.resetButtonTitle",message:"Clear the query",description:"The label and ARIA label for search box reset button"}),cancelButtonText:(0,l.I)({id:"theme.SearchModal.searchBox.cancelButtonText",message:"Cancel",description:"The label and ARIA label for search box cancel button"}),cancelButtonAriaLabel:(0,l.I)({id:"theme.SearchModal.searchBox.cancelButtonText",message:"Cancel",description:"The label and ARIA label for search box cancel button"})},startScreen:{recentSearchesTitle:(0,l.I)({id:"theme.SearchModal.startScreen.recentSearchesTitle",message:"Recent",description:"The title for recent searches"}),noRecentSearchesText:(0,l.I)({id:"theme.SearchModal.startScreen.noRecentSearchesText",message:"No recent searches",description:"The text when no recent searches"}),saveRecentSearchButtonTitle:(0,l.I)({id:"theme.SearchModal.startScreen.saveRecentSearchButtonTitle",message:"Save this search",description:"The label for save recent search button"}),removeRecentSearchButtonTitle:(0,l.I)({id:"theme.SearchModal.startScreen.removeRecentSearchButtonTitle",message:"Remove this search from history",description:"The label for 
remove recent search button"}),favoriteSearchesTitle:(0,l.I)({id:"theme.SearchModal.startScreen.favoriteSearchesTitle",message:"Favorite",description:"The title for favorite searches"}),removeFavoriteSearchButtonTitle:(0,l.I)({id:"theme.SearchModal.startScreen.removeFavoriteSearchButtonTitle",message:"Remove this search from favorites",description:"The label for remove favorite search button"})},errorScreen:{titleText:(0,l.I)({id:"theme.SearchModal.errorScreen.titleText",message:"Unable to fetch results",description:"The title for error screen of search modal"}),helpText:(0,l.I)({id:"theme.SearchModal.errorScreen.helpText",message:"You might want to check your network connection.",description:"The help text for error screen of search modal"})},footer:{selectText:(0,l.I)({id:"theme.SearchModal.footer.selectText",message:"to select",description:"The explanatory text of the action for the enter key"}),selectKeyAriaLabel:(0,l.I)({id:"theme.SearchModal.footer.selectKeyAriaLabel",message:"Enter key",description:"The ARIA label for the Enter key button that makes the selection"}),navigateText:(0,l.I)({id:"theme.SearchModal.footer.navigateText",message:"to navigate",description:"The explanatory text of the action for the Arrow up and Arrow down key"}),navigateUpKeyAriaLabel:(0,l.I)({id:"theme.SearchModal.footer.navigateUpKeyAriaLabel",message:"Arrow up",description:"The ARIA label for the Arrow up key button that makes the navigation"}),navigateDownKeyAriaLabel:(0,l.I)({id:"theme.SearchModal.footer.navigateDownKeyAriaLabel",message:"Arrow down",description:"The ARIA label for the Arrow down key button that makes the navigation"}),closeText:(0,l.I)({id:"theme.SearchModal.footer.closeText",message:"to close",description:"The explanatory text of the action for Escape key"}),closeKeyAriaLabel:(0,l.I)({id:"theme.SearchModal.footer.closeKeyAriaLabel",message:"Escape key",description:"The ARIA label for the Escape key button that close the modal"}),searchByText:(0,l.I)({id:"theme.SearchModal.footer.searchByText",message:"Search by",description:"The text explain that the search is making by Algolia"})},noResultsScreen:{noResultsText:(0,l.I)({id:"theme.SearchModal.noResultsScreen.noResultsText",message:"No results for",description:"The text explains that there are no results for the following search"}),suggestedQueryText:(0,l.I)({id:"theme.SearchModal.noResultsScreen.suggestedQueryText",message:"Try searching for",description:"The text for the suggested query when no results are found for the following search"}),reportMissingResultsText:(0,l.I)({id:"theme.SearchModal.noResultsScreen.reportMissingResultsText",message:"Believe this query should return results?",description:"The text for the question where the user thinks there are missing results"}),reportMissingResultsLinkText:(0,l.I)({id:"theme.SearchModal.noResultsScreen.reportMissingResultsLinkText",message:"Let us know.",description:"The text for the link to report missing results"})}},placeholder:(0,l.I)({id:"theme.SearchModal.placeholder",message:"Search docs",description:"The placeholder of the input of the DocSearch pop-up modal"})};let Le=null;function Pe(e){let{hit:t,children:n}=e;return(0,u.jsx)(Q.Z,{to:t.url,children:n})}function Re(e){let{state:t,onClose:n}=e;const r=(0,Ce.M)();return(0,u.jsx)(Q.Z,{to:r(t.query),onClick:n,children:(0,u.jsx)(l.Z,{id:"theme.SearchBar.seeAll",values:{count:t.context.nbHits},children:"See all {count} results"})})}function 
Oe(e){let{contextualSearch:t,externalUrlRegex:a,...o}=e;const{siteMetadata:i}=(0,le.Z)(),l=(0,Te.l)(),c=function(){const{locale:e,tags:t}=(0,Ae._q)();return[`language:${e}`,t.map((e=>`docusaurus_tag:${e}`))]}(),d=o.searchParameters?.facetFilters??[],p=t?function(e,t){const n=e=>"string"==typeof e?[e]:e;return[...n(e),...n(t)]}(c,d):d,f={...o.searchParameters,facetFilters:p},g=(0,s.k6)(),m=(0,r.useRef)(null),h=(0,r.useRef)(null),[b,y]=(0,r.useState)(!1),[v,w]=(0,r.useState)(void 0),k=(0,r.useCallback)((()=>Le?Promise.resolve():Promise.all([n.e(1426).then(n.bind(n,1426)),Promise.all([n.e(532),n.e(6945)]).then(n.bind(n,6945)),Promise.all([n.e(532),n.e(8894)]).then(n.bind(n,8894))]).then((e=>{let[{DocSearchModal:t}]=e;Le=t}))),[]),x=(0,r.useCallback)((()=>{k().then((()=>{m.current=document.createElement("div"),document.body.insertBefore(m.current,document.body.firstChild),y(!0)}))}),[k,y]),S=(0,r.useCallback)((()=>{y(!1),m.current?.remove()}),[y]),_=(0,r.useCallback)((e=>{k().then((()=>{y(!0),w(e.key)}))}),[k,y,w]),E=(0,r.useRef)({navigate(e){let{itemUrl:t}=e;(0,ee.F)(a,t)?window.location.href=t:g.push(t)}}).current,C=(0,r.useRef)((e=>o.transformItems?o.transformItems(e):e.map((e=>({...e,url:l(e.url)}))))).current,T=(0,r.useMemo)((()=>e=>(0,u.jsx)(Re,{...e,onClose:S})),[S]),A=(0,r.useCallback)((e=>(e.addAlgoliaAgent("docusaurus",i.docusaurusVersion),e)),[i.docusaurusVersion]);return function(e){var t=e.isOpen,n=e.onOpen,a=e.onClose,o=e.onInput,i=e.searchButtonRef;r.useEffect((function(){function e(e){var r;(27===e.keyCode&&t||"k"===(null===(r=e.key)||void 0===r?void 0:r.toLowerCase())&&(e.metaKey||e.ctrlKey)||!function(e){var t=e.target,n=t.tagName;return t.isContentEditable||"INPUT"===n||"SELECT"===n||"TEXTAREA"===n}(e)&&"/"===e.key&&!t)&&(e.preventDefault(),t?a():document.body.classList.contains("DocSearch--active")||document.body.classList.contains("DocSearch--active")||n()),i&&i.current===document.activeElement&&o&&/[a-zA-Z0-9]/.test(String.fromCharCode(e.keyCode))&&o(e)}return window.addEventListener("keydown",e),function(){window.removeEventListener("keydown",e)}}),[t,n,a,o,i])}({isOpen:b,onOpen:x,onClose:S,onInput:_,searchButtonRef:h}),(0,u.jsxs)(u.Fragment,{children:[(0,u.jsx)(Ee.Z,{children:(0,u.jsx)("link",{rel:"preconnect",href:`https://${o.appId}-dsn.algolia.net`,crossOrigin:"anonymous"})}),(0,u.jsx)(_e,{onTouchStart:k,onFocus:k,onMouseOver:k,onClick:x,ref:h,translations:je.button}),b&&Le&&m.current&&(0,Ne.createPortal)((0,u.jsx)(Le,{onClose:S,initialScrollY:window.scrollY,initialQuery:v,navigator:E,transformItems:C,hitComponent:Pe,transformSearchClient:A,...o.searchPagePath&&{resultsFooterComponent:T},...o,searchParameters:f,placeholder:je.placeholder,translations:je.modal}),m.current)]})}function Ie(){const{siteConfig:e}=(0,le.Z)();return(0,u.jsx)(Oe,{...e.themeConfig.algolia})}const Fe={navbarSearchContainer:"navbarSearchContainer_Bca1"};function Me(e){let{children:t,className:n}=e;return(0,u.jsx)("div",{className:(0,a.Z)(n,Fe.navbarSearchContainer),children:t})}var De=n(143),Be=n(3438);var ze=n(373);const $e=e=>e.docs.find((t=>t.id===e.mainDocId));const Ue={default:oe,localeDropdown:function(e){let{mobile:t,dropdownItemsBefore:n,dropdownItemsAfter:r,queryString:a="",...o}=e;const{i18n:{currentLocale:i,locales:c,localeConfigs:d}}=(0,le.Z)(),p=(0,fe.l)(),{search:f,hash:g}=(0,s.TH)(),m=[...n,...c.map((e=>{const 
n=`${`pathname://${p.createUrl({locale:e,fullyQualified:!1})}`}${f}${g}${a}`;return{label:d[e].label,lang:d[e].htmlLang,to:n,target:"_self",autoAddBaseUrl:!1,className:e===i?t?"menu__link--active":"dropdown__link--active":""}})),...r],h=t?(0,l.I)({message:"Languages",id:"theme.navbar.mobileLanguageDropdown.label",description:"The label for the mobile language switcher dropdown"}):d[i].label;return(0,u.jsx)(pe,{...o,mobile:t,label:(0,u.jsxs)(u.Fragment,{children:[(0,u.jsx)(ge,{className:me}),h]}),items:m})},search:function(e){let{mobile:t,className:n}=e;return t?null:(0,u.jsx)(Me,{className:n,children:(0,u.jsx)(Ie,{})})},dropdown:pe,html:function(e){let{value:t,className:n,mobile:r=!1,isDropdownItem:o=!1}=e;const i=o?"li":"div";return(0,u.jsx)(i,{className:(0,a.Z)({navbar__item:!r&&!o,"menu__list-item":r},n),dangerouslySetInnerHTML:{__html:t}})},doc:function(e){let{docId:t,label:n,docsPluginId:r,...a}=e;const{activeDoc:o}=(0,De.Iw)(r),i=(0,Be.vY)(t,r),s=o?.path===i?.path;return null===i||i.unlisted&&!s?null:(0,u.jsx)(oe,{exact:!0,...a,isActive:()=>s||!!o?.sidebar&&o.sidebar===i.sidebar,label:n??i.id,to:i.path})},docSidebar:function(e){let{sidebarId:t,label:n,docsPluginId:r,...a}=e;const{activeDoc:o}=(0,De.Iw)(r),i=(0,Be.oz)(t,r).link;if(!i)throw new Error(`DocSidebarNavbarItem: Sidebar with ID "${t}" doesn't have anything to be linked to.`);return(0,u.jsx)(oe,{exact:!0,...a,isActive:()=>o?.sidebar===t,label:n??i.label,to:i.path})},docsVersion:function(e){let{label:t,to:n,docsPluginId:r,...a}=e;const o=(0,Be.lO)(r)[0],i=t??o.label,s=n??(e=>e.docs.find((t=>t.id===e.mainDocId)))(o).path;return(0,u.jsx)(oe,{...a,label:i,to:s})},docsVersionDropdown:function(e){let{mobile:t,docsPluginId:n,dropdownActiveClassDisabled:r,dropdownItemsBefore:a,dropdownItemsAfter:o,...i}=e;const{search:c,hash:d}=(0,s.TH)(),p=(0,De.Iw)(n),f=(0,De.gB)(n),{savePreferredVersionName:g}=(0,ze.J)(n),m=[...a,...f.map((e=>{const t=p.alternateDocVersions[e.name]??$e(e);return{label:e.label,to:`${t.path}${c}${d}`,isActive:()=>e===p.activeVersion,onClick:()=>g(e.name)}})),...o],h=(0,Be.lO)(n)[0],b=t&&m.length>1?(0,l.I)({id:"theme.navbar.mobileVersionsDropdown.label",message:"Versions",description:"The label for the navbar versions dropdown on mobile view"}):h.label,y=t&&m.length>1?void 0:$e(h).path;return m.length<=1?(0,u.jsx)(oe,{...i,mobile:t,label:b,to:y,isActive:r?()=>!1:void 0}):(0,u.jsx)(pe,{...i,mobile:t,label:b,to:y,items:m,isActive:r?()=>!1:void 0})}};function Ze(e){let{type:t,...n}=e;const r=function(e,t){return e&&"default"!==e?e:"items"in t?"dropdown":"default"}(t,n),a=Ue[r];if(!a)throw new Error(`No NavbarItem component found for type "${t}".`);return(0,u.jsx)(a,{...n})}function He(){const e=(0,N.e)(),t=(0,w.L)().navbar.items;return(0,u.jsx)("ul",{className:"menu__list",children:t.map(((t,n)=>(0,r.createElement)(Ze,{mobile:!0,...t,onClick:()=>e.toggle(),key:n})))})}function Ve(e){return(0,u.jsx)("button",{...e,type:"button",className:"clean-btn navbar-sidebar__back",children:(0,u.jsx)(l.Z,{id:"theme.navbar.mobileSidebarSecondaryMenu.backButtonLabel",description:"The label of the back button to return to main menu, inside the mobile navbar sidebar secondary menu (notably used to display the docs sidebar)",children:"\u2190 Back to main menu"})})}function We(){const e=0===(0,w.L)().navbar.items.length,t=F();return(0,u.jsxs)(u.Fragment,{children:[!e&&(0,u.jsx)(Ve,{onClick:()=>t.hide()}),t.content]})}function Ge(){const e=(0,N.e)();var t;return void 
0===(t=e.shown)&&(t=!0),(0,r.useEffect)((()=>(document.body.style.overflow=t?"hidden":"visible",()=>{document.body.style.overflow="visible"})),[t]),e.shouldRender?(0,u.jsx)(M,{header:(0,u.jsx)(Y,{}),primaryMenu:(0,u.jsx)(He,{}),secondaryMenu:(0,u.jsx)(We,{})}):null}const qe={navbarHideable:"navbarHideable_m1mJ",navbarHidden:"navbarHidden_jGov"};function Ke(e){return(0,u.jsx)("div",{role:"presentation",...e,className:(0,a.Z)("navbar-sidebar__backdrop",e.className)})}function Ye(e){let{children:t}=e;const{navbar:{hideOnScroll:n,style:o}}=(0,w.L)(),i=(0,N.e)(),{navbarRef:s,isNavbarVisible:d}=function(e){const[t,n]=(0,r.useState)(e),a=(0,r.useRef)(!1),o=(0,r.useRef)(0),i=(0,r.useCallback)((e=>{null!==e&&(o.current=e.getBoundingClientRect().height)}),[]);return(0,j.RF)(((t,r)=>{let{scrollY:i}=t;if(!e)return;if(i=s?n(!1):i+c{if(!e)return;const r=t.location.hash;if(r?document.getElementById(r.substring(1)):void 0)return a.current=!0,void n(!1);n(!0)})),{navbarRef:i,isNavbarVisible:t}}(n);return(0,u.jsxs)("nav",{ref:s,"aria-label":(0,l.I)({id:"theme.NavBar.navAriaLabel",message:"Main",description:"The ARIA label for the main navigation"}),className:(0,a.Z)("navbar","navbar--fixed-top",n&&[qe.navbarHideable,!d&&qe.navbarHidden],{"navbar--dark":"dark"===o,"navbar--primary":"primary"===o,"navbar-sidebar--show":i.shown}),children:[t,(0,u.jsx)(Ke,{onClick:i.toggle}),(0,u.jsx)(Ge,{})]})}var Qe=n(9690);const Xe="right";function Je(e){let{width:t=30,height:n=30,className:r,...a}=e;return(0,u.jsx)("svg",{className:r,width:t,height:n,viewBox:"0 0 30 30","aria-hidden":"true",...a,children:(0,u.jsx)("path",{stroke:"currentColor",strokeLinecap:"round",strokeMiterlimit:"10",strokeWidth:"2",d:"M4 7h22M4 15h22M4 23h22"})})}function et(){const{toggle:e,shown:t}=(0,N.e)();return(0,u.jsx)("button",{onClick:e,"aria-label":(0,l.I)({id:"theme.docs.sidebar.toggleSidebarButtonAriaLabel",message:"Toggle navigation bar",description:"The ARIA label for hamburger menu button of mobile navigation"}),"aria-expanded":t,className:"navbar__toggle clean-btn",type:"button",children:(0,u.jsx)(Je,{})})}const tt={colorModeToggle:"colorModeToggle_DEke"};function nt(e){let{items:t}=e;return(0,u.jsx)(u.Fragment,{children:t.map(((e,t)=>(0,u.jsx)(Qe.QW,{onError:t=>new Error(`A theme navbar item failed to render.\nPlease double-check the following navbar item (themeConfig.navbar.items) of your Docusaurus config:\n${JSON.stringify(e,null,2)}`,{cause:t}),children:(0,u.jsx)(Ze,{...e})},t)))})}function rt(e){let{left:t,right:n}=e;return(0,u.jsxs)("div",{className:"navbar__inner",children:[(0,u.jsx)("div",{className:"navbar__items",children:t}),(0,u.jsx)("div",{className:"navbar__items navbar__items--right",children:n})]})}function at(){const e=(0,N.e)(),t=(0,w.L)().navbar.items,[n,r]=function(e){function t(e){return"left"===(e.position??Xe)}return[e.filter(t),e.filter((e=>!t(e)))]}(t),a=t.find((e=>"search"===e.type));return(0,u.jsx)(rt,{left:(0,u.jsxs)(u.Fragment,{children:[!e.disabled&&(0,u.jsx)(et,{}),(0,u.jsx)(q,{}),(0,u.jsx)(nt,{items:n})]}),right:(0,u.jsxs)(u.Fragment,{children:[(0,u.jsx)(nt,{items:r}),(0,u.jsx)(W,{className:tt.colorModeToggle}),!a&&(0,u.jsx)(Me,{children:(0,u.jsx)(Ie,{})})]})})}function ot(){return(0,u.jsx)(Ye,{children:(0,u.jsx)(at,{})})}function it(e){let{item:t}=e;const{to:n,href:r,label:a,prependBaseUrlToHref:o,...i}=t,s=(0,X.Z)(n),l=(0,X.Z)(r,{forcePrependBaseUrl:!0});return(0,u.jsxs)(Q.Z,{className:"footer__link-item",...r?{href:o?l:r}:{to:s},...i,children:[a,r&&!(0,J.Z)(r)&&(0,u.jsx)(te.Z,{})]})}function 
st(e){let{item:t}=e;return t.html?(0,u.jsx)("li",{className:"footer__item",dangerouslySetInnerHTML:{__html:t.html}}):(0,u.jsx)("li",{className:"footer__item",children:(0,u.jsx)(it,{item:t})},t.href??t.to)}function lt(e){let{column:t}=e;return(0,u.jsxs)("div",{className:"col footer__col",children:[(0,u.jsx)("div",{className:"footer__title",children:t.title}),(0,u.jsx)("ul",{className:"footer__items clean-list",children:t.items.map(((e,t)=>(0,u.jsx)(st,{item:e},t)))})]})}function ct(e){let{columns:t}=e;return(0,u.jsx)("div",{className:"row footer__links",children:t.map(((e,t)=>(0,u.jsx)(lt,{column:e},t)))})}function ut(){return(0,u.jsx)("span",{className:"footer__link-separator",children:"\xb7"})}function dt(e){let{item:t}=e;return t.html?(0,u.jsx)("span",{className:"footer__link-item",dangerouslySetInnerHTML:{__html:t.html}}):(0,u.jsx)(it,{item:t})}function pt(e){let{links:t}=e;return(0,u.jsx)("div",{className:"footer__links text--center",children:(0,u.jsx)("div",{className:"footer__links",children:t.map(((e,n)=>(0,u.jsxs)(r.Fragment,{children:[(0,u.jsx)(dt,{item:e}),t.length!==n+1&&(0,u.jsx)(ut,{})]},n)))})})}function ft(e){let{links:t}=e;return function(e){return"title"in e[0]}(t)?(0,u.jsx)(ct,{columns:t}):(0,u.jsx)(pt,{links:t})}var gt=n(9965);const mt={footerLogoLink:"footerLogoLink_BH7S"};function ht(e){let{logo:t}=e;const{withBaseUrl:n}=(0,X.C)(),r={light:n(t.src),dark:n(t.srcDark??t.src)};return(0,u.jsx)(gt.Z,{className:(0,a.Z)("footer__logo",t.className),alt:t.alt,sources:r,width:t.width,height:t.height,style:t.style})}function bt(e){let{logo:t}=e;return t.href?(0,u.jsx)(Q.Z,{href:t.href,className:mt.footerLogoLink,target:t.target,children:(0,u.jsx)(ht,{logo:t})}):(0,u.jsx)(ht,{logo:t})}function yt(e){let{copyright:t}=e;return(0,u.jsx)("div",{className:"footer__copyright",dangerouslySetInnerHTML:{__html:t}})}function vt(e){let{style:t,links:n,logo:r,copyright:o}=e;return(0,u.jsx)("footer",{className:(0,a.Z)("footer",{"footer--dark":"dark"===t}),children:(0,u.jsxs)("div",{className:"container container-fluid",children:[n,(r||o)&&(0,u.jsxs)("div",{className:"footer__bottom text--center",children:[r&&(0,u.jsx)("div",{className:"margin-bottom--sm",children:r}),o]})]})})}function wt(){const{footer:e}=(0,w.L)();if(!e)return null;const{copyright:t,links:n,logo:r,style:a}=e;return(0,u.jsx)(vt,{style:a,links:n&&n.length>0&&(0,u.jsx)(ft,{links:n}),logo:r&&(0,u.jsx)(bt,{logo:r}),copyright:t&&(0,u.jsx)(yt,{copyright:t})})}const kt=r.memo(wt),xt=(0,L.Qc)([D.S,k.pl,j.OC,ze.L5,i.VC,function(e){let{children:t}=e;return(0,u.jsx)(P.n2,{children:(0,u.jsx)(N.M,{children:(0,u.jsx)(O,{children:t})})})}]);function St(e){let{children:t}=e;return(0,u.jsx)(xt,{children:t})}var _t=n(7955);function Et(e){let{error:t,tryAgain:n}=e;return(0,u.jsx)("main",{className:"container margin-vert--xl",children:(0,u.jsx)("div",{className:"row",children:(0,u.jsxs)("div",{className:"col col--6 col--offset-3",children:[(0,u.jsx)(_t.Z,{as:"h1",className:"hero__title",children:(0,u.jsx)(l.Z,{id:"theme.ErrorPageContent.title",description:"The title of the fallback page when the page crashed",children:"This page crashed."})}),(0,u.jsx)("div",{className:"margin-vert--lg",children:(0,u.jsx)(Qe.Cw,{onClick:n,className:"button button--primary shadow--lw"})}),(0,u.jsx)("hr",{}),(0,u.jsx)("div",{className:"margin-vert--md",children:(0,u.jsx)(Qe.aG,{error:t})})]})})})}const Ct={mainWrapper:"mainWrapper_z2l0"};function 
Tt(e){const{children:t,noFooter:n,wrapperClassName:r,title:s,description:l}=e;return(0,b.t)(),(0,u.jsxs)(St,{children:[(0,u.jsx)(i.d,{title:s,description:l}),(0,u.jsx)(v,{}),(0,u.jsx)(A,{}),(0,u.jsx)(ot,{}),(0,u.jsx)("div",{id:d,className:(0,a.Z)(h.k.wrapper.main,Ct.mainWrapper,r),children:(0,u.jsx)(o.Z,{fallback:e=>(0,u.jsx)(Et,{...e}),children:t})}),!n&&(0,u.jsx)(kt,{})]})}},1327:(e,t,n)=>{"use strict";n.d(t,{Z:()=>u});n(7294);var r=n(9960),a=n(4996),o=n(2263),i=n(6668),s=n(9965),l=n(5893);function c(e){let{logo:t,alt:n,imageClassName:r}=e;const o={light:(0,a.Z)(t.src),dark:(0,a.Z)(t.srcDark||t.src)},i=(0,l.jsx)(s.Z,{className:t.className,sources:o,height:t.height,width:t.width,alt:n,style:t.style});return r?(0,l.jsx)("div",{className:r,children:i}):i}function u(e){const{siteConfig:{title:t}}=(0,o.Z)(),{navbar:{title:n,logo:s}}=(0,i.L)(),{imageClassName:u,titleClassName:d,...p}=e,f=(0,a.Z)(s?.href||"/"),g=n?"":t,m=s?.alt??g;return(0,l.jsxs)(r.Z,{to:f,...p,...s?.target&&{target:s.target},children:[s&&(0,l.jsx)(c,{logo:s,alt:m,imageClassName:u}),null!=n&&(0,l.jsx)("b",{className:d,children:n})]})}},197:(e,t,n)=>{"use strict";n.d(t,{Z:()=>o});n(7294);var r=n(5742),a=n(5893);function o(e){let{locale:t,version:n,tag:o}=e;const i=t;return(0,a.jsxs)(r.Z,{children:[t&&(0,a.jsx)("meta",{name:"docusaurus_locale",content:t}),n&&(0,a.jsx)("meta",{name:"docusaurus_version",content:n}),o&&(0,a.jsx)("meta",{name:"docusaurus_tag",content:o}),i&&(0,a.jsx)("meta",{name:"docsearch:language",content:i}),n&&(0,a.jsx)("meta",{name:"docsearch:version",content:n}),o&&(0,a.jsx)("meta",{name:"docsearch:docusaurus_tag",content:o})]})}},9965:(e,t,n)=>{"use strict";n.d(t,{Z:()=>u});var r=n(7294),a=n(6010),o=n(2389),i=n(2949);const s={themedComponent:"themedComponent_mlkZ","themedComponent--light":"themedComponent--light_NVdE","themedComponent--dark":"themedComponent--dark_xIcU"};var l=n(5893);function c(e){let{className:t,children:n}=e;const c=(0,o.Z)(),{colorMode:u}=(0,i.I)();return(0,l.jsx)(l.Fragment,{children:(c?"dark"===u?["dark"]:["light"]:["light","dark"]).map((e=>{const o=n({theme:e,className:(0,a.Z)(t,s.themedComponent,s[`themedComponent--${e}`])});return(0,l.jsx)(r.Fragment,{children:o},e)}))})}function u(e){const{sources:t,className:n,alt:r,...a}=e;return(0,l.jsx)(c,{className:n,children:e=>{let{theme:n,className:o}=e;return(0,l.jsx)("img",{src:t[n],alt:r,className:o,...a})}})}},6043:(e,t,n)=>{"use strict";n.d(t,{u:()=>c,z:()=>b});var r=n(7294),a=n(412),o=n(469),i=n(1442),s=n(5893);const l="ease-in-out";function c(e){let{initialState:t}=e;const[n,a]=(0,r.useState)(t??!1),o=(0,r.useCallback)((()=>{a((e=>!e))}),[]);return{collapsed:n,setCollapsed:a,toggleCollapsed:o}}const u={display:"none",overflow:"hidden",height:"0px"},d={display:"block",overflow:"visible",height:"auto"};function p(e,t){const n=t?u:d;e.style.display=n.display,e.style.overflow=n.overflow,e.style.height=n.height}function f(e){let{collapsibleRef:t,collapsed:n,animation:a}=e;const o=(0,r.useRef)(!1);(0,r.useEffect)((()=>{const e=t.current;function r(){const t=e.scrollHeight,n=a?.duration??function(e){if((0,i.n)())return 1;const t=e/36;return Math.round(10*(4+15*t**.25+t/5))}(t);return{transition:`height ${n}ms ${a?.easing??l}`,height:`${t}px`}}function s(){const t=r();e.style.transition=t.transition,e.style.height=t.height}if(!o.current)return p(e,n),void(o.current=!0);return e.style.willChange="height",function(){const 
t=requestAnimationFrame((()=>{n?(s(),requestAnimationFrame((()=>{e.style.height=u.height,e.style.overflow=u.overflow}))):(e.style.display="block",requestAnimationFrame((()=>{s()})))}));return()=>cancelAnimationFrame(t)}()}),[t,n,a])}function g(e){if(!a.Z.canUseDOM)return e?u:d}function m(e){let{as:t="div",collapsed:n,children:a,animation:o,onCollapseTransitionEnd:i,className:l,disableSSRStyle:c}=e;const u=(0,r.useRef)(null);return f({collapsibleRef:u,collapsed:n,animation:o}),(0,s.jsx)(t,{ref:u,style:c?void 0:g(n),onTransitionEnd:e=>{"height"===e.propertyName&&(p(u.current,n),i?.(n))},className:l,children:a})}function h(e){let{collapsed:t,...n}=e;const[a,i]=(0,r.useState)(!t),[l,c]=(0,r.useState)(t);return(0,o.Z)((()=>{t||i(!0)}),[t]),(0,o.Z)((()=>{a&&c(t)}),[a,t]),a?(0,s.jsx)(m,{...n,collapsed:l}):null}function b(e){let{lazy:t,...n}=e;const r=t?h:m;return(0,s.jsx)(r,{...n})}},9689:(e,t,n)=>{"use strict";n.d(t,{nT:()=>m,pl:()=>g});var r=n(7294),a=n(2389),o=n(12),i=n(902),s=n(6668),l=n(5893);const c=(0,o.WA)("docusaurus.announcement.dismiss"),u=(0,o.WA)("docusaurus.announcement.id"),d=()=>"true"===c.get(),p=e=>c.set(String(e)),f=r.createContext(null);function g(e){let{children:t}=e;const n=function(){const{announcementBar:e}=(0,s.L)(),t=(0,a.Z)(),[n,o]=(0,r.useState)((()=>!!t&&d()));(0,r.useEffect)((()=>{o(d())}),[]);const i=(0,r.useCallback)((()=>{p(!0),o(!0)}),[]);return(0,r.useEffect)((()=>{if(!e)return;const{id:t}=e;let n=u.get();"annoucement-bar"===n&&(n="announcement-bar");const r=t!==n;u.set(t),r&&p(!1),!r&&d()||o(!1)}),[e]),(0,r.useMemo)((()=>({isActive:!!e&&!n,close:i})),[e,n,i])}();return(0,l.jsx)(f.Provider,{value:n,children:t})}function m(){const e=(0,r.useContext)(f);if(!e)throw new i.i6("AnnouncementBarProvider");return e}},2949:(e,t,n)=>{"use strict";n.d(t,{I:()=>b,S:()=>h});var r=n(7294),a=n(412),o=n(902),i=n(12),s=n(6668),l=n(5893);const c=r.createContext(void 0),u="theme",d=(0,i.WA)(u),p={light:"light",dark:"dark"},f=e=>e===p.dark?p.dark:p.light,g=e=>a.Z.canUseDOM?f(document.documentElement.getAttribute("data-theme")):f(e),m=e=>{d.set(f(e))};function h(e){let{children:t}=e;const n=function(){const{colorMode:{defaultMode:e,disableSwitch:t,respectPrefersColorScheme:n}}=(0,s.L)(),[a,o]=(0,r.useState)(g(e));(0,r.useEffect)((()=>{t&&d.del()}),[t]);const i=(0,r.useCallback)((function(t,r){void 0===r&&(r={});const{persist:a=!0}=r;t?(o(t),a&&m(t)):(o(n?window.matchMedia("(prefers-color-scheme: dark)").matches?p.dark:p.light:e),d.del())}),[n,e]);(0,r.useEffect)((()=>{document.documentElement.setAttribute("data-theme",f(a))}),[a]),(0,r.useEffect)((()=>{if(t)return;const e=e=>{if(e.key!==u)return;const t=d.get();null!==t&&i(f(t))};return window.addEventListener("storage",e),()=>window.removeEventListener("storage",e)}),[t,i]);const l=(0,r.useRef)(!1);return(0,r.useEffect)((()=>{if(t&&!n)return;const e=window.matchMedia("(prefers-color-scheme: dark)"),r=()=>{window.matchMedia("print").matches||l.current?l.current=window.matchMedia("print").matches:i(null)};return e.addListener(r),()=>e.removeListener(r)}),[i,t,n]),(0,r.useMemo)((()=>({colorMode:a,setColorMode:i,get isDarkTheme(){return a===p.dark},setLightTheme(){i(p.light)},setDarkTheme(){i(p.dark)}})),[a,i])}();return(0,l.jsx)(c.Provider,{value:n,children:t})}function b(){const e=(0,r.useContext)(c);if(null==e)throw new o.i6("ColorModeProvider","Please see https://docusaurus.io/docs/api/themes/configuration#use-color-mode.");return e}},373:(e,t,n)=>{"use strict";n.d(t,{J:()=>v,L5:()=>b,Oh:()=>w});var 
r=n(7294),a=n(143),o=n(9935),i=n(6668),s=n(3438),l=n(902),c=n(12),u=n(5893);const d=e=>`docs-preferred-version-${e}`,p={save:(e,t,n)=>{(0,c.WA)(d(e),{persistence:t}).set(n)},read:(e,t)=>(0,c.WA)(d(e),{persistence:t}).get(),clear:(e,t)=>{(0,c.WA)(d(e),{persistence:t}).del()}},f=e=>Object.fromEntries(e.map((e=>[e,{preferredVersionName:null}])));const g=r.createContext(null);function m(){const e=(0,a._r)(),t=(0,i.L)().docs.versionPersistence,n=(0,r.useMemo)((()=>Object.keys(e)),[e]),[o,s]=(0,r.useState)((()=>f(n)));(0,r.useEffect)((()=>{s(function(e){let{pluginIds:t,versionPersistence:n,allDocsData:r}=e;function a(e){const t=p.read(e,n);return r[e].versions.some((e=>e.name===t))?{preferredVersionName:t}:(p.clear(e,n),{preferredVersionName:null})}return Object.fromEntries(t.map((e=>[e,a(e)])))}({allDocsData:e,versionPersistence:t,pluginIds:n}))}),[e,t,n]);return[o,(0,r.useMemo)((()=>({savePreferredVersion:function(e,n){p.save(e,t,n),s((t=>({...t,[e]:{preferredVersionName:n}})))}})),[t])]}function h(e){let{children:t}=e;const n=m();return(0,u.jsx)(g.Provider,{value:n,children:t})}function b(e){let{children:t}=e;return s.cE?(0,u.jsx)(h,{children:t}):(0,u.jsx)(u.Fragment,{children:t})}function y(){const e=(0,r.useContext)(g);if(!e)throw new l.i6("DocsPreferredVersionContextProvider");return e}function v(e){void 0===e&&(e=o.m);const t=(0,a.zh)(e),[n,i]=y(),{preferredVersionName:s}=n[e];return{preferredVersion:t.versions.find((e=>e.name===s))??null,savePreferredVersionName:(0,r.useCallback)((t=>{i.savePreferredVersion(e,t)}),[i,e])}}function w(){const e=(0,a._r)(),[t]=y();function n(n){const r=e[n],{preferredVersionName:a}=t[n];return r.versions.find((e=>e.name===a))??null}const r=Object.keys(e);return Object.fromEntries(r.map((e=>[e,n(e)])))}},1116:(e,t,n)=>{"use strict";n.d(t,{V:()=>c,b:()=>l});var r=n(7294),a=n(902),o=n(5893);const i=Symbol("EmptyContext"),s=r.createContext(i);function l(e){let{children:t,name:n,items:a}=e;const i=(0,r.useMemo)((()=>n&&a?{name:n,items:a}:null),[n,a]);return(0,o.jsx)(s.Provider,{value:i,children:t})}function c(){const e=(0,r.useContext)(s);if(e===i)throw new a.i6("DocsSidebarProvider");return e}},4477:(e,t,n)=>{"use strict";n.d(t,{E:()=>l,q:()=>s});var r=n(7294),a=n(902),o=n(5893);const i=r.createContext(null);function s(e){let{children:t,version:n}=e;return(0,o.jsx)(i.Provider,{value:n,children:t})}function l(){const e=(0,r.useContext)(i);if(null===e)throw new a.i6("DocsVersionProvider");return e}},3163:(e,t,n)=>{"use strict";n.d(t,{M:()=>p,e:()=>f});var r=n(7294),a=n(3102),o=n(7524),i=n(1980),s=n(6668),l=n(902),c=n(5893);const u=r.createContext(void 0);function d(){const e=function(){const e=(0,a.HY)(),{items:t}=(0,s.L)().navbar;return 0===t.length&&!e.component}(),t=(0,o.i)(),n=!e&&"mobile"===t,[l,c]=(0,r.useState)(!1);(0,i.Rb)((()=>{if(l)return c(!1),!1}));const u=(0,r.useCallback)((()=>{c((e=>!e))}),[]);return(0,r.useEffect)((()=>{"desktop"===t&&c(!1)}),[t]),(0,r.useMemo)((()=>({disabled:e,shouldRender:n,toggle:u,shown:l})),[e,n,u,l])}function p(e){let{children:t}=e;const n=d();return(0,c.jsx)(u.Provider,{value:n,children:t})}function f(){const e=r.useContext(u);if(void 0===e)throw new l.i6("NavbarMobileSidebarProvider");return e}},3102:(e,t,n)=>{"use strict";n.d(t,{HY:()=>l,Zo:()=>c,n2:()=>s});var r=n(7294),a=n(902),o=n(5893);const i=r.createContext(null);function s(e){let{children:t}=e;const n=(0,r.useState)({component:null,props:null});return(0,o.jsx)(i.Provider,{value:n,children:t})}function l(){const e=(0,r.useContext)(i);if(!e)throw new 
a.i6("NavbarSecondaryMenuContentProvider");return e[0]}function c(e){let{component:t,props:n}=e;const o=(0,r.useContext)(i);if(!o)throw new a.i6("NavbarSecondaryMenuContentProvider");const[,s]=o,l=(0,a.Ql)(n);return(0,r.useEffect)((()=>{s({component:t,props:l})}),[s,t,l]),(0,r.useEffect)((()=>()=>s({component:null,props:null})),[s]),null}},9727:(e,t,n)=>{"use strict";n.d(t,{h:()=>a,t:()=>o});var r=n(7294);const a="navigation-with-keyboard";function o(){(0,r.useEffect)((()=>{function e(e){"keydown"===e.type&&"Tab"===e.key&&document.body.classList.add(a),"mousedown"===e.type&&document.body.classList.remove(a)}return document.addEventListener("keydown",e),document.addEventListener("mousedown",e),()=>{document.body.classList.remove(a),document.removeEventListener("keydown",e),document.removeEventListener("mousedown",e)}}),[])}},6177:(e,t,n)=>{"use strict";n.d(t,{K:()=>s,M:()=>l});var r=n(7294),a=n(2263),o=n(1980);const i="q";function s(){return(0,o.Nc)(i)}function l(){const{siteConfig:{baseUrl:e,themeConfig:t}}=(0,a.Z)(),{algolia:{searchPagePath:n}}=t;return(0,r.useCallback)((t=>`${e}${n}?${i}=${encodeURIComponent(t)}`),[e,n])}},7524:(e,t,n)=>{"use strict";n.d(t,{i:()=>s});var r=n(7294),a=n(412);const o={desktop:"desktop",mobile:"mobile",ssr:"ssr"},i=996;function s(){const[e,t]=(0,r.useState)((()=>"ssr"));return(0,r.useEffect)((()=>{function e(){t(function(){if(!a.Z.canUseDOM)throw new Error("getWindowSize() should only be called after React hydration");return window.innerWidth>i?o.desktop:o.mobile}())}return e(),window.addEventListener("resize",e),()=>{window.removeEventListener("resize",e)}}),[]),e}},5281:(e,t,n)=>{"use strict";n.d(t,{k:()=>r});const r={page:{blogListPage:"blog-list-page",blogPostPage:"blog-post-page",blogTagsListPage:"blog-tags-list-page",blogTagPostListPage:"blog-tags-post-list-page",docsDocPage:"docs-doc-page",docsTagsListPage:"docs-tags-list-page",docsTagDocListPage:"docs-tags-doc-list-page",mdxPage:"mdx-page"},wrapper:{main:"main-wrapper",blogPages:"blog-wrapper",docsPages:"docs-wrapper",mdxPages:"mdx-wrapper"},common:{editThisPage:"theme-edit-this-page",lastUpdated:"theme-last-updated",backToTopButton:"theme-back-to-top-button",codeBlock:"theme-code-block",admonition:"theme-admonition",unlistedBanner:"theme-unlisted-banner",admonitionType:e=>`theme-admonition-${e}`},layout:{},docs:{docVersionBanner:"theme-doc-version-banner",docVersionBadge:"theme-doc-version-badge",docBreadcrumbs:"theme-doc-breadcrumbs",docMarkdown:"theme-doc-markdown",docTocMobile:"theme-doc-toc-mobile",docTocDesktop:"theme-doc-toc-desktop",docFooter:"theme-doc-footer",docFooterTagsRow:"theme-doc-footer-tags-row",docFooterEditMetaRow:"theme-doc-footer-edit-meta-row",docSidebarContainer:"theme-doc-sidebar-container",docSidebarMenu:"theme-doc-sidebar-menu",docSidebarItemCategory:"theme-doc-sidebar-item-category",docSidebarItemLink:"theme-doc-sidebar-item-link",docSidebarItemCategoryLevel:e=>`theme-doc-sidebar-item-category-level-${e}`,docSidebarItemLinkLevel:e=>`theme-doc-sidebar-item-link-level-${e}`},blog:{}}},1442:(e,t,n)=>{"use strict";function r(){return window.matchMedia("(prefers-reduced-motion: reduce)").matches}n.d(t,{n:()=>r})},3438:(e,t,n)=>{"use strict";n.d(t,{LM:()=>g,MN:()=>T,SN:()=>C,_F:()=>y,cE:()=>p,f:()=>w,jA:()=>m,lO:()=>S,oz:()=>_,s1:()=>x,vY:()=>E,xz:()=>f});var r=n(7294),a=n(6550),o=n(8790),i=n(143),s=n(373),l=n(4477),c=n(1116),u=n(7392),d=n(8596);const p=!!i._r;function f(e){const t=(0,l.E)();if(!e)return;const n=t.docs[e];if(!n)throw new Error(`no version doc found by 
id=${e}`);return n}function g(e){return"link"!==e.type||e.unlisted?"category"===e.type?function(e){if(e.href&&!e.linkUnlisted)return e.href;for(const t of e.items){const e=g(t);if(e)return e}}(e):void 0:e.href}function m(){const{pathname:e}=(0,a.TH)(),t=(0,c.V)();if(!t)throw new Error("Unexpected: cant find current sidebar in context");const n=k({sidebarItems:t.items,pathname:e,onlyCategories:!0}).slice(-1)[0];if(!n)throw new Error(`${e} is not associated with a category. useCurrentSidebarCategory() should only be used on category index pages.`);return n}const h=(e,t)=>void 0!==e&&(0,d.Mg)(e,t),b=(e,t)=>e.some((e=>y(e,t)));function y(e,t){return"link"===e.type?h(e.href,t):"category"===e.type&&(h(e.href,t)||b(e.items,t))}function v(e,t){switch(e.type){case"category":return y(e,t)||e.items.some((e=>v(e,t)));case"link":return!e.unlisted||y(e,t);default:return!1}}function w(e,t){return(0,r.useMemo)((()=>e.filter((e=>v(e,t)))),[e,t])}function k(e){let{sidebarItems:t,pathname:n,onlyCategories:r=!1}=e;const a=[];return function e(t){for(const o of t)if("category"===o.type&&((0,d.Mg)(o.href,n)||e(o.items))||"link"===o.type&&(0,d.Mg)(o.href,n)){return r&&"category"!==o.type||a.unshift(o),!0}return!1}(t),a}function x(){const e=(0,c.V)(),{pathname:t}=(0,a.TH)(),n=(0,i.gA)()?.pluginData.breadcrumbs;return!1!==n&&e?k({sidebarItems:e.items,pathname:t}):null}function S(e){const{activeVersion:t}=(0,i.Iw)(e),{preferredVersion:n}=(0,s.J)(e),a=(0,i.yW)(e);return(0,r.useMemo)((()=>(0,u.j)([t,n,a].filter(Boolean))),[t,n,a])}function _(e,t){const n=S(t);return(0,r.useMemo)((()=>{const t=n.flatMap((e=>e.sidebars?Object.entries(e.sidebars):[])),r=t.find((t=>t[0]===e));if(!r)throw new Error(`Can't find any sidebar with id "${e}" in version${n.length>1?"s":""} ${n.map((e=>e.name)).join(", ")}".\nAvailable sidebar ids are:\n- ${t.map((e=>e[0])).join("\n- ")}`);return r[1]}),[e,n])}function E(e,t){const n=S(t);return(0,r.useMemo)((()=>{const t=n.flatMap((e=>e.docs)),r=t.find((t=>t.id===e));if(!r){if(n.flatMap((e=>e.draftIds)).includes(e))return null;throw new Error(`Couldn't find any doc with id "${e}" in version${n.length>1?"s":""} "${n.map((e=>e.name)).join(", ")}".\nAvailable doc ids are:\n- ${(0,u.j)(t.map((e=>e.id))).join("\n- ")}`)}return r}),[e,n])}function C(e){let{route:t}=e;const n=(0,a.TH)(),r=(0,l.E)(),i=t.routes,s=i.find((e=>(0,a.LX)(n.pathname,e)));if(!s)return null;const c=s.sidebar,u=c?r.docsSidebars[c]:void 0;return{docElement:(0,o.H)(i),sidebarName:c,sidebarItems:u}}function T(e){return e.filter((e=>!("category"===e.type||"link"===e.type)||!!g(e)))}},9690:(e,t,n)=>{"use strict";n.d(t,{aG:()=>u,Ac:()=>c,Cw:()=>l,QW:()=>d});var r=n(7294),a=n(5999),o=n(8780);const i={errorBoundaryError:"errorBoundaryError_a6uf",errorBoundaryFallback:"errorBoundaryFallback_VBag"};var s=n(5893);function l(e){return(0,s.jsx)("button",{type:"button",...e,children:(0,s.jsx)(a.Z,{id:"theme.ErrorPageContent.tryAgain",description:"The label of the button to try again rendering when the React error boundary captures an error",children:"Try again"})})}function c(e){let{error:t,tryAgain:n}=e;return(0,s.jsxs)("div",{className:i.errorBoundaryFallback,children:[(0,s.jsx)("p",{children:t.message}),(0,s.jsx)(l,{onClick:n})]})}function u(e){let{error:t}=e;const n=(0,o.getErrorCausalChain)(t).map((e=>e.message)).join("\n\nCause:\n");return(0,s.jsx)("p",{className:i.errorBoundaryError,children:n})}class d extends r.Component{componentDidCatch(e,t){throw this.props.onError(e,t)}render(){return this.props.children}}},2128:(e,t,n)=>{"use 
strict";n.d(t,{p:()=>a});var r=n(2263);function a(e){const{siteConfig:t}=(0,r.Z)(),{title:n,titleDelimiter:a}=t;return e?.trim().length?`${e.trim()} ${a} ${n}`:n}},1980:(e,t,n)=>{"use strict";n.d(t,{Nc:()=>l,Rb:()=>i,_X:()=>s});var r=n(7294),a=n(6550),o=n(902);function i(e){!function(e){const t=(0,a.k6)(),n=(0,o.zX)(e);(0,r.useEffect)((()=>t.block(((e,t)=>n(e,t)))),[t,n])}(((t,n)=>{if("POP"===n)return e(t,n)}))}function s(e){return function(e){const t=(0,a.k6)();return(0,r.useSyncExternalStore)(t.listen,(()=>e(t)),(()=>e(t)))}((t=>null===e?null:new URLSearchParams(t.location.search).get(e)))}function l(e){const t=s(e)??"",n=function(){const e=(0,a.k6)();return(0,r.useCallback)(((t,n,r)=>{const a=new URLSearchParams(e.location.search);n?a.set(t,n):a.delete(t),(r?.push?e.push:e.replace)({search:a.toString()})}),[e])}();return[t,(0,r.useCallback)(((t,r)=>{n(e,t,r)}),[n,e])]}},7392:(e,t,n)=>{"use strict";function r(e,t){return void 0===t&&(t=(e,t)=>e===t),e.filter(((n,r)=>e.findIndex((e=>t(e,n)))!==r))}function a(e){return Array.from(new Set(e))}n.d(t,{j:()=>a,l:()=>r})},833:(e,t,n)=>{"use strict";n.d(t,{FG:()=>f,d:()=>d,VC:()=>g});var r=n(7294),a=n(6010),o=n(5742),i=n(226);function s(){const e=r.useContext(i._);if(!e)throw new Error("Unexpected: no Docusaurus route context found");return e}var l=n(4996),c=n(2128),u=n(5893);function d(e){let{title:t,description:n,keywords:r,image:a,children:i}=e;const s=(0,c.p)(t),{withBaseUrl:d}=(0,l.C)(),p=a?d(a,{absolute:!0}):void 0;return(0,u.jsxs)(o.Z,{children:[t&&(0,u.jsx)("title",{children:s}),t&&(0,u.jsx)("meta",{property:"og:title",content:s}),n&&(0,u.jsx)("meta",{name:"description",content:n}),n&&(0,u.jsx)("meta",{property:"og:description",content:n}),r&&(0,u.jsx)("meta",{name:"keywords",content:Array.isArray(r)?r.join(","):r}),p&&(0,u.jsx)("meta",{property:"og:image",content:p}),p&&(0,u.jsx)("meta",{name:"twitter:image",content:p}),i]})}const p=r.createContext(void 0);function f(e){let{className:t,children:n}=e;const i=r.useContext(p),s=(0,a.Z)(i,t);return(0,u.jsxs)(p.Provider,{value:s,children:[(0,u.jsx)(o.Z,{children:(0,u.jsx)("html",{className:s})}),n]})}function g(e){let{children:t}=e;const n=s(),r=`plugin-${n.plugin.name.replace(/docusaurus-(?:plugin|theme)-(?:content-)?/gi,"")}`;const o=`plugin-id-${n.plugin.id}`;return(0,u.jsx)(f,{className:(0,a.Z)(r,o),children:t})}},902:(e,t,n)=>{"use strict";n.d(t,{D9:()=>s,Qc:()=>u,Ql:()=>c,i6:()=>l,zX:()=>i});var r=n(7294),a=n(469),o=n(5893);function i(e){const t=(0,r.useRef)(e);return(0,a.Z)((()=>{t.current=e}),[e]),(0,r.useCallback)((function(){return t.current(...arguments)}),[])}function s(e){const t=(0,r.useRef)();return(0,a.Z)((()=>{t.current=e})),t.current}class l extends Error{constructor(e,t){super(),this.name="ReactContextError",this.message=`Hook ${this.stack?.split("\n")[1]?.match(/at (?:\w+\.)?(?\w+)/)?.groups.name??""} is called outside the <${e}>. 
${t??""}`}}function c(e){const t=Object.entries(e);return t.sort(((e,t)=>e[0].localeCompare(t[0]))),(0,r.useMemo)((()=>e),t.flat())}function u(e){return t=>{let{children:n}=t;return(0,o.jsx)(o.Fragment,{children:e.reduceRight(((e,t)=>(0,o.jsx)(t,{children:e})),n)})}}},8022:(e,t,n)=>{"use strict";function r(e,t){return void 0!==e&&void 0!==t&&new RegExp(e,"gi").test(t)}n.d(t,{F:()=>r})},8596:(e,t,n)=>{"use strict";n.d(t,{Mg:()=>i,Ns:()=>s});var r=n(7294),a=n(723),o=n(2263);function i(e,t){const n=e=>(!e||e.endsWith("/")?e:`${e}/`)?.toLowerCase();return n(e)===n(t)}function s(){const{baseUrl:e}=(0,o.Z)().siteConfig;return(0,r.useMemo)((()=>function(e){let{baseUrl:t,routes:n}=e;function r(e){return e.path===t&&!0===e.exact}function a(e){return e.path===t&&!e.exact}return function e(t){if(0===t.length)return;return t.find(r)||e(t.filter(a).flatMap((e=>e.routes??[])))}(n)}({routes:a.Z,baseUrl:e})),[e])}},2466:(e,t,n)=>{"use strict";n.d(t,{Ct:()=>m,OC:()=>u,RF:()=>f,o5:()=>g});var r=n(7294),a=n(412),o=n(2389),i=n(469),s=n(902),l=n(5893);const c=r.createContext(void 0);function u(e){let{children:t}=e;const n=function(){const e=(0,r.useRef)(!0);return(0,r.useMemo)((()=>({scrollEventsEnabledRef:e,enableScrollEvents:()=>{e.current=!0},disableScrollEvents:()=>{e.current=!1}})),[])}();return(0,l.jsx)(c.Provider,{value:n,children:t})}function d(){const e=(0,r.useContext)(c);if(null==e)throw new s.i6("ScrollControllerProvider");return e}const p=()=>a.Z.canUseDOM?{scrollX:window.pageXOffset,scrollY:window.pageYOffset}:null;function f(e,t){void 0===t&&(t=[]);const{scrollEventsEnabledRef:n}=d(),a=(0,r.useRef)(p()),o=(0,s.zX)(e);(0,r.useEffect)((()=>{const e=()=>{if(!n.current)return;const e=p();o(e,a.current),a.current=e},t={passive:!0};return e(),window.addEventListener("scroll",e,t),()=>window.removeEventListener("scroll",e,t)}),[o,n,...t])}function g(){const e=d(),t=function(){const e=(0,r.useRef)({elem:null,top:0}),t=(0,r.useCallback)((t=>{e.current={elem:t,top:t.getBoundingClientRect().top}}),[]),n=(0,r.useCallback)((()=>{const{current:{elem:t,top:n}}=e;if(!t)return{restored:!1};const r=t.getBoundingClientRect().top-n;return r&&window.scrollBy({left:0,top:r}),e.current={elem:null,top:0},{restored:0!==r}}),[]);return(0,r.useMemo)((()=>({save:t,restore:n})),[n,t])}(),n=(0,r.useRef)(void 0),a=(0,r.useCallback)((r=>{t.save(r),e.disableScrollEvents(),n.current=()=>{const{restored:r}=t.restore();if(n.current=void 0,r){const t=()=>{e.enableScrollEvents(),window.removeEventListener("scroll",t)};window.addEventListener("scroll",t)}else e.enableScrollEvents()}}),[e,t]);return(0,i.Z)((()=>{queueMicrotask((()=>n.current?.()))})),{blockElementScrollPositionUntilNextRender:a}}function m(){const e=(0,r.useRef)(null),t=(0,o.Z)()&&"smooth"===getComputedStyle(document.documentElement).scrollBehavior;return{startScroll:n=>{e.current=t?function(e){return window.scrollTo({top:e,behavior:"smooth"}),()=>{}}(n):function(e){let t=null;const n=document.documentElement.scrollTop>e;return function r(){const a=document.documentElement.scrollTop;(n&&a>e||!n&&at&&cancelAnimationFrame(t)}(n)},cancelScroll:()=>e.current?.()}}},3320:(e,t,n)=>{"use strict";n.d(t,{HX:()=>i,_q:()=>l,os:()=>s});var r=n(143),a=n(2263),o=n(373);const i="default";function s(e,t){return`docs-${e}-${t}`}function l(){const{i18n:e}=(0,a.Z)(),t=(0,r._r)(),n=(0,r.WS)(),l=(0,o.Oh)();const c=[i,...Object.keys(t).map((function(e){const r=n?.activePlugin.pluginId===e?n.activeVersion:void 0,a=l[e],o=t[e].versions.find((e=>e.isLast));return 
s(e,(r??a??o).name)}))];return{locale:e.currentLocale,tags:c}}},12:(e,t,n)=>{"use strict";n.d(t,{Nk:()=>u,WA:()=>c});var r=n(7294);const a="localStorage";function o(e){let{key:t,oldValue:n,newValue:r,storage:a}=e;if(n===r)return;const o=document.createEvent("StorageEvent");o.initStorageEvent("storage",!1,!1,t,n,r,window.location.href,a),window.dispatchEvent(o)}function i(e){if(void 0===e&&(e=a),"undefined"==typeof window)throw new Error("Browser storage is not available on Node.js/Docusaurus SSR process.");if("none"===e)return null;try{return window[e]}catch(n){return t=n,s||(console.warn("Docusaurus browser storage is not available.\nPossible reasons: running Docusaurus in an iframe, in an incognito browser session, or using too strict browser privacy settings.",t),s=!0),null}var t}let s=!1;const l={get:()=>null,set:()=>{},del:()=>{},listen:()=>()=>{}};function c(e,t){if("undefined"==typeof window)return function(e){function t(){throw new Error(`Illegal storage API usage for storage key "${e}".\nDocusaurus storage APIs are not supposed to be called on the server-rendering process.\nPlease only call storage APIs in effects and event handlers.`)}return{get:t,set:t,del:t,listen:t}}(e);const n=i(t?.persistence);return null===n?l:{get:()=>{try{return n.getItem(e)}catch(t){return console.error(`Docusaurus storage error, can't get key=${e}`,t),null}},set:t=>{try{const r=n.getItem(e);n.setItem(e,t),o({key:e,oldValue:r,newValue:t,storage:n})}catch(r){console.error(`Docusaurus storage error, can't set ${e}=${t}`,r)}},del:()=>{try{const t=n.getItem(e);n.removeItem(e),o({key:e,oldValue:t,newValue:null,storage:n})}catch(t){console.error(`Docusaurus storage error, can't delete key=${e}`,t)}},listen:t=>{try{const r=r=>{r.storageArea===n&&r.key===e&&t(r)};return window.addEventListener("storage",r),()=>window.removeEventListener("storage",r)}catch(r){return console.error(`Docusaurus storage error, can't listen for changes of key=${e}`,r),()=>{}}}}}function u(e,t){const n=(0,r.useRef)((()=>null===e?l:c(e,t))).current(),a=(0,r.useCallback)((e=>"undefined"==typeof window?()=>{}:n.listen(e)),[n]);return[(0,r.useSyncExternalStore)(a,(()=>"undefined"==typeof window?null:n.get()),(()=>null)),n]}},4711:(e,t,n)=>{"use strict";n.d(t,{l:()=>i});var r=n(2263),a=n(6550),o=n(8780);function i(){const{siteConfig:{baseUrl:e,url:t,trailingSlash:n},i18n:{defaultLocale:i,currentLocale:s}}=(0,r.Z)(),{pathname:l}=(0,a.TH)(),c=(0,o.applyTrailingSlash)(l,{trailingSlash:n,baseUrl:e}),u=s===i?e:e.replace(`/${s}/`,"/"),d=c.replace(e,"");return{createUrl:function(e){let{locale:n,fullyQualified:r}=e;return`${r?t:""}${function(e){return e===i?`${u}`:`${u}${e}/`}(n)}${d}`}}}},5936:(e,t,n)=>{"use strict";n.d(t,{S:()=>i});var r=n(7294),a=n(6550),o=n(902);function i(e){const t=(0,a.TH)(),n=(0,o.D9)(t),i=(0,o.zX)(e);(0,r.useEffect)((()=>{n&&t!==n&&i({location:t,previousLocation:n})}),[i,t,n])}},6668:(e,t,n)=>{"use strict";n.d(t,{L:()=>a});var r=n(2263);function a(){return(0,r.Z)().siteConfig.themeConfig}},6278:(e,t,n)=>{"use strict";n.d(t,{L:()=>a});var r=n(2263);function a(){const{siteConfig:{themeConfig:e}}=(0,r.Z)();return e}},239:(e,t,n)=>{"use strict";n.d(t,{l:()=>s});var r=n(7294),a=n(8022),o=n(4996),i=n(6278);function s(){const{withBaseUrl:e}=(0,o.C)(),{algolia:{externalUrlRegex:t,replaceSearchResultPathname:n}}=(0,i.L)();return(0,r.useCallback)((r=>{const o=new URL(r);if((0,a.F)(t,o.href))return r;const i=`${o.pathname+o.hash}`;return e(function(e,t){return t?e.replaceAll(new 
RegExp(t.from,"g"),t.to):e}(i,n))}),[e,t,n])}},8802:(e,t)=>{"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.default=function(e,t){const{trailingSlash:n,baseUrl:r}=t;if(e.startsWith("#"))return e;if(void 0===n)return e;const[a]=e.split(/[#?]/),o="/"===a||a===r?a:(i=a,n?function(e){return e.endsWith("/")?e:`${e}/`}(i):function(e){return e.endsWith("/")?e.slice(0,-1):e}(i));var i;return e.replace(a,o)}},4143:(e,t)=>{"use strict";Object.defineProperty(t,"__esModule",{value:!0}),t.getErrorCausalChain=void 0,t.getErrorCausalChain=function e(t){return t.cause?[t,...e(t.cause)]:[t]}},8780:function(e,t,n){"use strict";var r=this&&this.__importDefault||function(e){return e&&e.__esModule?e:{default:e}};Object.defineProperty(t,"__esModule",{value:!0}),t.getErrorCausalChain=t.applyTrailingSlash=t.blogPostContainerID=void 0,t.blogPostContainerID="__blog-post-container";var a=n(8802);Object.defineProperty(t,"applyTrailingSlash",{enumerable:!0,get:function(){return r(a).default}});var o=n(4143);Object.defineProperty(t,"getErrorCausalChain",{enumerable:!0,get:function(){return o.getErrorCausalChain}})},6010:(e,t,n)=>{"use strict";function r(e){var t,n,a="";if("string"==typeof e||"number"==typeof e)a+=e;else if("object"==typeof e)if(Array.isArray(e))for(t=0;ta});const a=function(){for(var e,t,n=0,a="";n{"use strict";n.d(t,{lX:()=>w,q_:()=>C,ob:()=>f,PP:()=>A,Ep:()=>p});var r=n(7462);function a(e){return"/"===e.charAt(0)}function o(e,t){for(var n=t,r=n+1,a=e.length;r=0;p--){var f=i[p];"."===f?o(i,p):".."===f?(o(i,p),d++):d&&(o(i,p),d--)}if(!c)for(;d--;d)i.unshift("..");!c||""===i[0]||i[0]&&a(i[0])||i.unshift("");var g=i.join("/");return n&&"/"!==g.substr(-1)&&(g+="/"),g};var s=n(8776);function l(e){return"/"===e.charAt(0)?e:"/"+e}function c(e){return"/"===e.charAt(0)?e.substr(1):e}function u(e,t){return function(e,t){return 0===e.toLowerCase().indexOf(t.toLowerCase())&&-1!=="/?#".indexOf(e.charAt(t.length))}(e,t)?e.substr(t.length):e}function d(e){return"/"===e.charAt(e.length-1)?e.slice(0,-1):e}function p(e){var t=e.pathname,n=e.search,r=e.hash,a=t||"/";return n&&"?"!==n&&(a+="?"===n.charAt(0)?n:"?"+n),r&&"#"!==r&&(a+="#"===r.charAt(0)?r:"#"+r),a}function f(e,t,n,a){var o;"string"==typeof e?(o=function(e){var t=e||"/",n="",r="",a=t.indexOf("#");-1!==a&&(r=t.substr(a),t=t.substr(0,a));var o=t.indexOf("?");return-1!==o&&(n=t.substr(o),t=t.substr(0,o)),{pathname:t,search:"?"===n?"":n,hash:"#"===r?"":r}}(e),o.state=t):(void 0===(o=(0,r.Z)({},e)).pathname&&(o.pathname=""),o.search?"?"!==o.search.charAt(0)&&(o.search="?"+o.search):o.search="",o.hash?"#"!==o.hash.charAt(0)&&(o.hash="#"+o.hash):o.hash="",void 0!==t&&void 0===o.state&&(o.state=t));try{o.pathname=decodeURI(o.pathname)}catch(s){throw s instanceof URIError?new URIError('Pathname "'+o.pathname+'" could not be decoded. 
This is likely caused by an invalid percent-encoding.'):s}return n&&(o.key=n),a?o.pathname?"/"!==o.pathname.charAt(0)&&(o.pathname=i(o.pathname,a.pathname)):o.pathname=a.pathname:o.pathname||(o.pathname="/"),o}function g(){var e=null;var t=[];return{setPrompt:function(t){return e=t,function(){e===t&&(e=null)}},confirmTransitionTo:function(t,n,r,a){if(null!=e){var o="function"==typeof e?e(t,n):e;"string"==typeof o?"function"==typeof r?r(o,a):a(!0):a(!1!==o)}else a(!0)},appendListener:function(e){var n=!0;function r(){n&&e.apply(void 0,arguments)}return t.push(r),function(){n=!1,t=t.filter((function(e){return e!==r}))}},notifyListeners:function(){for(var e=arguments.length,n=new Array(e),r=0;rt?n.splice(t,n.length-t,a):n.push(a),d({action:r,location:a,index:t,entries:n})}}))},replace:function(e,t){var r="REPLACE",a=f(e,t,m(),w.location);u.confirmTransitionTo(a,r,n,(function(e){e&&(w.entries[w.index]=a,d({action:r,location:a}))}))},go:v,goBack:function(){v(-1)},goForward:function(){v(1)},canGo:function(e){var t=w.index+e;return t>=0&&t{"use strict";var r=n(9864),a={childContextTypes:!0,contextType:!0,contextTypes:!0,defaultProps:!0,displayName:!0,getDefaultProps:!0,getDerivedStateFromError:!0,getDerivedStateFromProps:!0,mixins:!0,propTypes:!0,type:!0},o={name:!0,length:!0,prototype:!0,caller:!0,callee:!0,arguments:!0,arity:!0},i={$$typeof:!0,compare:!0,defaultProps:!0,displayName:!0,propTypes:!0,type:!0},s={};function l(e){return r.isMemo(e)?i:s[e.$$typeof]||a}s[r.ForwardRef]={$$typeof:!0,render:!0,defaultProps:!0,displayName:!0,propTypes:!0},s[r.Memo]=i;var c=Object.defineProperty,u=Object.getOwnPropertyNames,d=Object.getOwnPropertySymbols,p=Object.getOwnPropertyDescriptor,f=Object.getPrototypeOf,g=Object.prototype;e.exports=function e(t,n,r){if("string"!=typeof n){if(g){var a=f(n);a&&a!==g&&e(t,a,r)}var i=u(n);d&&(i=i.concat(d(n)));for(var s=l(t),m=l(n),h=0;h{"use strict";e.exports=function(e,t,n,r,a,o,i,s){if(!e){var l;if(void 0===t)l=new Error("Minified exception occurred; use the non-minified dev environment for the full error message and additional helpful warnings.");else{var c=[n,r,a,o,i,s],u=0;(l=new Error(t.replace(/%s/g,(function(){return c[u++]})))).name="Invariant Violation"}throw l.framesToPop=1,l}}},5826:e=>{e.exports=Array.isArray||function(e){return"[object Array]"==Object.prototype.toString.call(e)}},7439:(e,t,n)=>{"use strict";n.r(t)},2497:(e,t,n)=>{"use strict";n.r(t)},7800:(e,t,n)=>{"use strict";n.r(t)},4865:function(e,t,n){var r,a;r=function(){var e,t,n={version:"0.2.0"},r=n.settings={minimum:.08,easing:"ease",positionUsing:"",speed:200,trickle:!0,trickleRate:.02,trickleSpeed:800,showSpinner:!0,barSelector:'[role="bar"]',spinnerSelector:'[role="spinner"]',parent:"body",template:'
    '};function a(e,t,n){return en?n:e}function o(e){return 100*(-1+e)}function i(e,t,n){var a;return(a="translate3d"===r.positionUsing?{transform:"translate3d("+o(e)+"%,0,0)"}:"translate"===r.positionUsing?{transform:"translate("+o(e)+"%,0)"}:{"margin-left":o(e)+"%"}).transition="all "+t+"ms "+n,a}n.configure=function(e){var t,n;for(t in e)void 0!==(n=e[t])&&e.hasOwnProperty(t)&&(r[t]=n);return this},n.status=null,n.set=function(e){var t=n.isStarted();e=a(e,r.minimum,1),n.status=1===e?null:e;var o=n.render(!t),c=o.querySelector(r.barSelector),u=r.speed,d=r.easing;return o.offsetWidth,s((function(t){""===r.positionUsing&&(r.positionUsing=n.getPositioningCSS()),l(c,i(e,u,d)),1===e?(l(o,{transition:"none",opacity:1}),o.offsetWidth,setTimeout((function(){l(o,{transition:"all "+u+"ms linear",opacity:0}),setTimeout((function(){n.remove(),t()}),u)}),u)):setTimeout(t,u)})),this},n.isStarted=function(){return"number"==typeof n.status},n.start=function(){n.status||n.set(0);var e=function(){setTimeout((function(){n.status&&(n.trickle(),e())}),r.trickleSpeed)};return r.trickle&&e(),this},n.done=function(e){return e||n.status?n.inc(.3+.5*Math.random()).set(1):this},n.inc=function(e){var t=n.status;return t?("number"!=typeof e&&(e=(1-t)*a(Math.random()*t,.1,.95)),t=a(t+e,0,.994),n.set(t)):n.start()},n.trickle=function(){return n.inc(Math.random()*r.trickleRate)},e=0,t=0,n.promise=function(r){return r&&"resolved"!==r.state()?(0===t&&n.start(),e++,t++,r.always((function(){0==--t?(e=0,n.done()):n.set((e-t)/e)})),this):this},n.render=function(e){if(n.isRendered())return document.getElementById("nprogress");u(document.documentElement,"nprogress-busy");var t=document.createElement("div");t.id="nprogress",t.innerHTML=r.template;var a,i=t.querySelector(r.barSelector),s=e?"-100":o(n.status||0),c=document.querySelector(r.parent);return l(i,{transition:"all 0 linear",transform:"translate3d("+s+"%,0,0)"}),r.showSpinner||(a=t.querySelector(r.spinnerSelector))&&f(a),c!=document.body&&u(c,"nprogress-custom-parent"),c.appendChild(t),t},n.remove=function(){d(document.documentElement,"nprogress-busy"),d(document.querySelector(r.parent),"nprogress-custom-parent");var e=document.getElementById("nprogress");e&&f(e)},n.isRendered=function(){return!!document.getElementById("nprogress")},n.getPositioningCSS=function(){var e=document.body.style,t="WebkitTransform"in e?"Webkit":"MozTransform"in e?"Moz":"msTransform"in e?"ms":"OTransform"in e?"O":"";return t+"Perspective"in e?"translate3d":t+"Transform"in e?"translate":"margin"};var s=function(){var e=[];function t(){var n=e.shift();n&&n(t)}return function(n){e.push(n),1==e.length&&t()}}(),l=function(){var e=["Webkit","O","Moz","ms"],t={};function n(e){return e.replace(/^-ms-/,"ms-").replace(/-([\da-z])/gi,(function(e,t){return t.toUpperCase()}))}function r(t){var n=document.body.style;if(t in n)return t;for(var r,a=e.length,o=t.charAt(0).toUpperCase()+t.slice(1);a--;)if((r=e[a]+o)in n)return r;return t}function a(e){return e=n(e),t[e]||(t[e]=r(e))}function o(e,t,n){t=a(t),e.style[t]=n}return function(e,t){var n,r,a=arguments;if(2==a.length)for(n in t)void 0!==(r=t[n])&&t.hasOwnProperty(n)&&o(e,n,r);else o(e,a[1],a[2])}}();function c(e,t){return("string"==typeof e?e:p(e)).indexOf(" "+t+" ")>=0}function u(e,t){var n=p(e),r=n+t;c(n,t)||(e.className=r.substring(1))}function d(e,t){var n,r=p(e);c(e,t)&&(n=r.replace(" "+t+" "," "),e.className=n.substring(1,n.length-1))}function p(e){return(" "+(e.className||"")+" ").replace(/\s+/gi," ")}function 
f(e){e&&e.parentNode&&e.parentNode.removeChild(e)}return n},void 0===(a="function"==typeof r?r.call(t,n,t,e):r)||(e.exports=a)},5795:()=>{Prism.languages.ada={comment:/--.*/,string:/"(?:""|[^"\r\f\n])*"/,number:[{pattern:/\b\d(?:_?\d)*#[\dA-F](?:_?[\dA-F])*(?:\.[\dA-F](?:_?[\dA-F])*)?#(?:E[+-]?\d(?:_?\d)*)?/i},{pattern:/\b\d(?:_?\d)*(?:\.\d(?:_?\d)*)?(?:E[+-]?\d(?:_?\d)*)?\b/i}],attribute:{pattern:/\b'\w+/,alias:"attr-name"},keyword:/\b(?:abort|abs|abstract|accept|access|aliased|all|and|array|at|begin|body|case|constant|declare|delay|delta|digits|do|else|elsif|end|entry|exception|exit|for|function|generic|goto|if|in|interface|is|limited|loop|mod|new|not|null|of|or|others|out|overriding|package|pragma|private|procedure|protected|raise|range|record|rem|renames|requeue|return|reverse|select|separate|some|subtype|synchronized|tagged|task|terminate|then|type|until|use|when|while|with|xor)\b/i,boolean:/\b(?:false|true)\b/i,operator:/<[=>]?|>=?|=>?|:=|\/=?|\*\*?|[&+-]/,punctuation:/\.\.?|[,;():]/,char:/'.'/,variable:/\b[a-z](?:\w)*\b/i}},7874:()=>{!function(e){var t="\\b(?:BASH|BASHOPTS|BASH_ALIASES|BASH_ARGC|BASH_ARGV|BASH_CMDS|BASH_COMPLETION_COMPAT_DIR|BASH_LINENO|BASH_REMATCH|BASH_SOURCE|BASH_VERSINFO|BASH_VERSION|COLORTERM|COLUMNS|COMP_WORDBREAKS|DBUS_SESSION_BUS_ADDRESS|DEFAULTS_PATH|DESKTOP_SESSION|DIRSTACK|DISPLAY|EUID|GDMSESSION|GDM_LANG|GNOME_KEYRING_CONTROL|GNOME_KEYRING_PID|GPG_AGENT_INFO|GROUPS|HISTCONTROL|HISTFILE|HISTFILESIZE|HISTSIZE|HOME|HOSTNAME|HOSTTYPE|IFS|INSTANCE|JOB|LANG|LANGUAGE|LC_ADDRESS|LC_ALL|LC_IDENTIFICATION|LC_MEASUREMENT|LC_MONETARY|LC_NAME|LC_NUMERIC|LC_PAPER|LC_TELEPHONE|LC_TIME|LESSCLOSE|LESSOPEN|LINES|LOGNAME|LS_COLORS|MACHTYPE|MAILCHECK|MANDATORY_PATH|NO_AT_BRIDGE|OLDPWD|OPTERR|OPTIND|ORBIT_SOCKETDIR|OSTYPE|PAPERSIZE|PATH|PIPESTATUS|PPID|PS1|PS2|PS3|PS4|PWD|RANDOM|REPLY|SECONDS|SELINUX_INIT|SESSION|SESSIONTYPE|SESSION_MANAGER|SHELL|SHELLOPTS|SHLVL|SSH_AUTH_SOCK|TERM|UID|UPSTART_EVENTS|UPSTART_INSTANCE|UPSTART_JOB|UPSTART_SESSION|USER|WINDOWID|XAUTHORITY|XDG_CONFIG_DIRS|XDG_CURRENT_DESKTOP|XDG_DATA_DIRS|XDG_GREETER_DATA_DIR|XDG_MENU_PREFIX|XDG_RUNTIME_DIR|XDG_SEAT|XDG_SEAT_PATH|XDG_SESSION_DESKTOP|XDG_SESSION_ID|XDG_SESSION_PATH|XDG_SESSION_TYPE|XDG_VTNR|XMODIFIERS)\\b",n={pattern:/(^(["']?)\w+\2)[ 
\t]+\S.*/,lookbehind:!0,alias:"punctuation",inside:null},r={bash:n,environment:{pattern:RegExp("\\$"+t),alias:"constant"},variable:[{pattern:/\$?\(\([\s\S]+?\)\)/,greedy:!0,inside:{variable:[{pattern:/(^\$\(\([\s\S]+)\)\)/,lookbehind:!0},/^\$\(\(/],number:/\b0x[\dA-Fa-f]+\b|(?:\b\d+(?:\.\d*)?|\B\.\d+)(?:[Ee]-?\d+)?/,operator:/--|\+\+|\*\*=?|<<=?|>>=?|&&|\|\||[=!+\-*/%<>^&|]=?|[?~:]/,punctuation:/\(\(?|\)\)?|,|;/}},{pattern:/\$\((?:\([^)]+\)|[^()])+\)|`[^`]+`/,greedy:!0,inside:{variable:/^\$\(|^`|\)$|`$/}},{pattern:/\$\{[^}]+\}/,greedy:!0,inside:{operator:/:[-=?+]?|[!\/]|##?|%%?|\^\^?|,,?/,punctuation:/[\[\]]/,environment:{pattern:RegExp("(\\{)"+t),lookbehind:!0,alias:"constant"}}},/\$(?:\w+|[#?*!@$])/],entity:/\\(?:[abceEfnrtv\\"]|O?[0-7]{1,3}|U[0-9a-fA-F]{8}|u[0-9a-fA-F]{4}|x[0-9a-fA-F]{1,2})/};e.languages.bash={shebang:{pattern:/^#!\s*\/.*/,alias:"important"},comment:{pattern:/(^|[^"{\\$])#.*/,lookbehind:!0},"function-name":[{pattern:/(\bfunction\s+)[\w-]+(?=(?:\s*\(?:\s*\))?\s*\{)/,lookbehind:!0,alias:"function"},{pattern:/\b[\w-]+(?=\s*\(\s*\)\s*\{)/,alias:"function"}],"for-or-select":{pattern:/(\b(?:for|select)\s+)\w+(?=\s+in\s)/,alias:"variable",lookbehind:!0},"assign-left":{pattern:/(^|[\s;|&]|[<>]\()\w+(?:\.\w+)*(?=\+?=)/,inside:{environment:{pattern:RegExp("(^|[\\s;|&]|[<>]\\()"+t),lookbehind:!0,alias:"constant"}},alias:"variable",lookbehind:!0},parameter:{pattern:/(^|\s)-{1,2}(?:\w+:[+-]?)?\w+(?:\.\w+)*(?=[=\s]|$)/,alias:"variable",lookbehind:!0},string:[{pattern:/((?:^|[^<])<<-?\s*)(\w+)\s[\s\S]*?(?:\r?\n|\r)\2/,lookbehind:!0,greedy:!0,inside:r},{pattern:/((?:^|[^<])<<-?\s*)(["'])(\w+)\2\s[\s\S]*?(?:\r?\n|\r)\3/,lookbehind:!0,greedy:!0,inside:{bash:n}},{pattern:/(^|[^\\](?:\\\\)*)"(?:\\[\s\S]|\$\([^)]+\)|\$(?!\()|`[^`]+`|[^"\\`$])*"/,lookbehind:!0,greedy:!0,inside:r},{pattern:/(^|[^$\\])'[^']*'/,lookbehind:!0,greedy:!0},{pattern:/\$'(?:[^'\\]|\\[\s\S])*'/,greedy:!0,inside:{entity:r.entity}}],environment:{pattern:RegExp("\\$?"+t),alias:"constant"},variable:r.variable,function:{pattern:/(^|[\s;|&]|[<>]\()(?:add|apropos|apt|apt-cache|apt-get|aptitude|aspell|automysqlbackup|awk|basename|bash|bc|bconsole|bg|bzip2|cal|cargo|cat|cfdisk|chgrp|chkconfig|chmod|chown|chroot|cksum|clear|cmp|column|comm|composer|cp|cron|crontab|csplit|curl|cut|date|dc|dd|ddrescue|debootstrap|df|diff|diff3|dig|dir|dircolors|dirname|dirs|dmesg|docker|docker-compose|du|egrep|eject|env|ethtool|expand|expect|expr|fdformat|fdisk|fg|fgrep|file|find|fmt|fold|format|free|fsck|ftp|fuser|gawk|git|gparted|grep|groupadd|groupdel|groupmod|groups|grub-mkconfig|gzip|halt|head|hg|history|host|hostname|htop|iconv|id|ifconfig|ifdown|ifup|import|install|ip|java|jobs|join|kill|killall|less|link|ln|locate|logname|logrotate|look|lpc|lpr|lprint|lprintd|lprintq|lprm|ls|lsof|lynx|make|man|mc|mdadm|mkconfig|mkdir|mke2fs|mkfifo|mkfs|mkisofs|mknod|mkswap|mmv|more|most|mount|mtools|mtr|mutt|mv|nano|nc|netstat|nice|nl|node|nohup|notify-send|npm|nslookup|op|open|parted|passwd|paste|pathchk|ping|pkill|pnpm|podman|podman-compose|popd|pr|printcap|printenv|ps|pushd|pv|quota|quotacheck|quotactl|ram|rar|rcp|reboot|remsync|rename|renice|rev|rm|rmdir|rpm|rsync|scp|screen|sdiff|sed|sendmail|seq|service|sftp|sh|shellcheck|shuf|shutdown|sleep|slocate|sort|split|ssh|stat|strace|su|sudo|sum|suspend|swapon|sync|sysctl|tac|tail|tar|tee|time|timeout|top|touch|tr|traceroute|tsort|tty|umount|uname|unexpand|uniq|units|unrar|unshar|unzip|update-grub|uptime|useradd|userdel|usermod|users|uudecode|uuencode|v|vcpkg|vdir|vi|vim|virsh|vmstat|wait|watch|wc|wget|where
is|which|who|whoami|write|xargs|xdg-open|yarn|yes|zenity|zip|zsh|zypper)(?=$|[)\s;|&])/,lookbehind:!0},keyword:{pattern:/(^|[\s;|&]|[<>]\()(?:case|do|done|elif|else|esac|fi|for|function|if|in|select|then|until|while)(?=$|[)\s;|&])/,lookbehind:!0},builtin:{pattern:/(^|[\s;|&]|[<>]\()(?:\.|:|alias|bind|break|builtin|caller|cd|command|continue|declare|echo|enable|eval|exec|exit|export|getopts|hash|help|let|local|logout|mapfile|printf|pwd|read|readarray|readonly|return|set|shift|shopt|source|test|times|trap|type|typeset|ulimit|umask|unalias|unset)(?=$|[)\s;|&])/,lookbehind:!0,alias:"class-name"},boolean:{pattern:/(^|[\s;|&]|[<>]\()(?:false|true)(?=$|[)\s;|&])/,lookbehind:!0},"file-descriptor":{pattern:/\B&\d\b/,alias:"important"},operator:{pattern:/\d?<>|>\||\+=|=[=~]?|!=?|<<[<-]?|[&\d]?>>|\d[<>]&?|[<>][&=]?|&[>&]?|\|[&|]?/,inside:{"file-descriptor":{pattern:/^\d/,alias:"important"}}},punctuation:/\$?\(\(?|\)\)?|\.\.|[{}[\];\\]/,number:{pattern:/(^|\s)(?:[1-9]\d*|0)(?:[.,]\d+)?\b/,lookbehind:!0}},n.inside=e.languages.bash;for(var a=["comment","function-name","for-or-select","assign-left","parameter","string","environment","function","keyword","builtin","boolean","file-descriptor","operator","punctuation","number"],o=r.variable[1].inside,i=0;i{!function(e){function t(e,t){return e.replace(/<<(\d+)>>/g,(function(e,n){return"(?:"+t[+n]+")"}))}function n(e,n,r){return RegExp(t(e,n),r||"")}function r(e,t){for(var n=0;n>/g,(function(){return"(?:"+e+")"}));return e.replace(/<>/g,"[^\\s\\S]")}var a="bool byte char decimal double dynamic float int long object sbyte short string uint ulong ushort var void",o="class enum interface record struct",i="add alias and ascending async await by descending from(?=\\s*(?:\\w|$)) get global group into init(?=\\s*;) join let nameof not notnull on or orderby partial remove select set unmanaged value when where with(?=\\s*{)",s="abstract as base break case catch checked const continue default delegate do else event explicit extern finally fixed for foreach goto if implicit in internal is lock namespace new null operator out override params private protected public readonly ref return sealed sizeof stackalloc static switch this throw try typeof unchecked unsafe using virtual volatile while yield";function l(e){return"\\b(?:"+e.trim().replace(/ /g,"|")+")\\b"}var c=l(o),u=RegExp(l(a+" "+o+" "+i+" "+s)),d=l(o+" "+i+" "+s),p=l(a+" "+o+" 
"+s),f=r(/<(?:[^<>;=+\-*/%&|^]|<>)*>/.source,2),g=r(/\((?:[^()]|<>)*\)/.source,2),m=/@?\b[A-Za-z_]\w*\b/.source,h=t(/<<0>>(?:\s*<<1>>)?/.source,[m,f]),b=t(/(?!<<0>>)<<1>>(?:\s*\.\s*<<1>>)*/.source,[d,h]),y=/\[\s*(?:,\s*)*\]/.source,v=t(/<<0>>(?:\s*(?:\?\s*)?<<1>>)*(?:\s*\?)?/.source,[b,y]),w=t(/[^,()<>[\];=+\-*/%&|^]|<<0>>|<<1>>|<<2>>/.source,[f,g,y]),k=t(/\(<<0>>+(?:,<<0>>+)+\)/.source,[w]),x=t(/(?:<<0>>|<<1>>)(?:\s*(?:\?\s*)?<<2>>)*(?:\s*\?)?/.source,[k,b,y]),S={keyword:u,punctuation:/[<>()?,.:[\]]/},_=/'(?:[^\r\n'\\]|\\.|\\[Uux][\da-fA-F]{1,8})'/.source,E=/"(?:\\.|[^\\"\r\n])*"/.source,C=/@"(?:""|\\[\s\S]|[^\\"])*"(?!")/.source;e.languages.csharp=e.languages.extend("clike",{string:[{pattern:n(/(^|[^$\\])<<0>>/.source,[C]),lookbehind:!0,greedy:!0},{pattern:n(/(^|[^@$\\])<<0>>/.source,[E]),lookbehind:!0,greedy:!0}],"class-name":[{pattern:n(/(\busing\s+static\s+)<<0>>(?=\s*;)/.source,[b]),lookbehind:!0,inside:S},{pattern:n(/(\busing\s+<<0>>\s*=\s*)<<1>>(?=\s*;)/.source,[m,x]),lookbehind:!0,inside:S},{pattern:n(/(\busing\s+)<<0>>(?=\s*=)/.source,[m]),lookbehind:!0},{pattern:n(/(\b<<0>>\s+)<<1>>/.source,[c,h]),lookbehind:!0,inside:S},{pattern:n(/(\bcatch\s*\(\s*)<<0>>/.source,[b]),lookbehind:!0,inside:S},{pattern:n(/(\bwhere\s+)<<0>>/.source,[m]),lookbehind:!0},{pattern:n(/(\b(?:is(?:\s+not)?|as)\s+)<<0>>/.source,[v]),lookbehind:!0,inside:S},{pattern:n(/\b<<0>>(?=\s+(?!<<1>>|with\s*\{)<<2>>(?:\s*[=,;:{)\]]|\s+(?:in|when)\b))/.source,[x,p,m]),inside:S}],keyword:u,number:/(?:\b0(?:x[\da-f_]*[\da-f]|b[01_]*[01])|(?:\B\.\d+(?:_+\d+)*|\b\d+(?:_+\d+)*(?:\.\d+(?:_+\d+)*)?)(?:e[-+]?\d+(?:_+\d+)*)?)(?:[dflmu]|lu|ul)?\b/i,operator:/>>=?|<<=?|[-=]>|([-+&|])\1|~|\?\?=?|[-+*/%&|^!=<>]=?/,punctuation:/\?\.?|::|[{}[\];(),.:]/}),e.languages.insertBefore("csharp","number",{range:{pattern:/\.\./,alias:"operator"}}),e.languages.insertBefore("csharp","punctuation",{"named-parameter":{pattern:n(/([(,]\s*)<<0>>(?=\s*:)/.source,[m]),lookbehind:!0,alias:"punctuation"}}),e.languages.insertBefore("csharp","class-name",{namespace:{pattern:n(/(\b(?:namespace|using)\s+)<<0>>(?:\s*\.\s*<<0>>)*(?=\s*[;{])/.source,[m]),lookbehind:!0,inside:{punctuation:/\./}},"type-expression":{pattern:n(/(\b(?:default|sizeof|typeof)\s*\(\s*(?!\s))(?:[^()\s]|\s(?!\s)|<<0>>)*(?=\s*\))/.source,[g]),lookbehind:!0,alias:"class-name",inside:S},"return-type":{pattern:n(/<<0>>(?=\s+(?:<<1>>\s*(?:=>|[({]|\.\s*this\s*\[)|this\s*\[))/.source,[x,b]),inside:S,alias:"class-name"},"constructor-invocation":{pattern:n(/(\bnew\s+)<<0>>(?=\s*[[({])/.source,[x]),lookbehind:!0,inside:S,alias:"class-name"},"generic-method":{pattern:n(/<<0>>\s*<<1>>(?=\s*\()/.source,[m,f]),inside:{function:n(/^<<0>>/.source,[m]),generic:{pattern:RegExp(f),alias:"class-name",inside:S}}},"type-list":{pattern:n(/\b((?:<<0>>\s+<<1>>|record\s+<<1>>\s*<<5>>|where\s+<<2>>)\s*:\s*)(?:<<3>>|<<4>>|<<1>>\s*<<5>>|<<6>>)(?:\s*,\s*(?:<<3>>|<<4>>|<<6>>))*(?=\s*(?:where|[{;]|=>|$))/.source,[c,h,m,x,u.source,g,/\bnew\s*\(\s*\)/.source]),lookbehind:!0,inside:{"record-arguments":{pattern:n(/(^(?!new\s*\()<<0>>\s*)<<1>>/.source,[h,g]),lookbehind:!0,greedy:!0,inside:e.languages.csharp},keyword:u,"class-name":{pattern:RegExp(x),greedy:!0,inside:S},punctuation:/[,()]/}},preprocessor:{pattern:/(^[\t ]*)#.*/m,lookbehind:!0,alias:"property",inside:{directive:{pattern:/(#)\b(?:define|elif|else|endif|endregion|error|if|line|nullable|pragma|region|undef|warning)\b/,lookbehind:!0,alias:"keyword"}}}});var 
T=E+"|"+_,A=t(/\/(?![*/])|\/\/[^\r\n]*[\r\n]|\/\*(?:[^*]|\*(?!\/))*\*\/|<<0>>/.source,[T]),N=r(t(/[^"'/()]|<<0>>|\(<>*\)/.source,[A]),2),j=/\b(?:assembly|event|field|method|module|param|property|return|type)\b/.source,L=t(/<<0>>(?:\s*\(<<1>>*\))?/.source,[b,N]);e.languages.insertBefore("csharp","class-name",{attribute:{pattern:n(/((?:^|[^\s\w>)?])\s*\[\s*)(?:<<0>>\s*:\s*)?<<1>>(?:\s*,\s*<<1>>)*(?=\s*\])/.source,[j,L]),lookbehind:!0,greedy:!0,inside:{target:{pattern:n(/^<<0>>(?=\s*:)/.source,[j]),alias:"keyword"},"attribute-arguments":{pattern:n(/\(<<0>>*\)/.source,[N]),inside:e.languages.csharp},"class-name":{pattern:RegExp(b),inside:{punctuation:/\./}},punctuation:/[:,]/}}});var P=/:[^}\r\n]+/.source,R=r(t(/[^"'/()]|<<0>>|\(<>*\)/.source,[A]),2),O=t(/\{(?!\{)(?:(?![}:])<<0>>)*<<1>>?\}/.source,[R,P]),I=r(t(/[^"'/()]|\/(?!\*)|\/\*(?:[^*]|\*(?!\/))*\*\/|<<0>>|\(<>*\)/.source,[T]),2),F=t(/\{(?!\{)(?:(?![}:])<<0>>)*<<1>>?\}/.source,[I,P]);function M(t,r){return{interpolation:{pattern:n(/((?:^|[^{])(?:\{\{)*)<<0>>/.source,[t]),lookbehind:!0,inside:{"format-string":{pattern:n(/(^\{(?:(?![}:])<<0>>)*)<<1>>(?=\}$)/.source,[r,P]),lookbehind:!0,inside:{punctuation:/^:/}},punctuation:/^\{|\}$/,expression:{pattern:/[\s\S]+/,alias:"language-csharp",inside:e.languages.csharp}}},string:/[\s\S]+/}}e.languages.insertBefore("csharp","string",{"interpolation-string":[{pattern:n(/(^|[^\\])(?:\$@|@\$)"(?:""|\\[\s\S]|\{\{|<<0>>|[^\\{"])*"/.source,[O]),lookbehind:!0,greedy:!0,inside:M(O,R)},{pattern:n(/(^|[^@\\])\$"(?:\\.|\{\{|<<0>>|[^\\"{])*"/.source,[F]),lookbehind:!0,greedy:!0,inside:M(F,I)}],char:{pattern:RegExp(_),greedy:!0}}),e.languages.dotnet=e.languages.cs=e.languages.csharp}(Prism)},397:()=>{!function(e){var t="(?:"+[/[a-zA-Z_\x80-\uFFFF][\w\x80-\uFFFF]*/.source,/-?(?:\.\d+|\d+(?:\.\d*)?)/.source,/"[^"\\]*(?:\\[\s\S][^"\\]*)*"/.source,/<(?:[^<>]|(?!)*>/.source].join("|")+")",n={markup:{pattern:/(^<)[\s\S]+(?=>$)/,lookbehind:!0,alias:["language-markup","language-html","language-xml"],inside:e.languages.markup}};function r(e,n){return RegExp(e.replace(//g,(function(){return t})),n)}e.languages.dot={comment:{pattern:/\/\/.*|\/\*[\s\S]*?\*\/|^#.*/m,greedy:!0},"graph-name":{pattern:r(/(\b(?:digraph|graph|subgraph)[ \t\r\n]+)/.source,"i"),lookbehind:!0,greedy:!0,alias:"class-name",inside:n},"attr-value":{pattern:r(/(=[ \t\r\n]*)/.source),lookbehind:!0,greedy:!0,inside:n},"attr-name":{pattern:r(/([\[;, \t\r\n])(?=[ \t\r\n]*=)/.source),lookbehind:!0,greedy:!0,inside:n},keyword:/\b(?:digraph|edge|graph|node|strict|subgraph)\b/i,"compass-point":{pattern:/(:[ \t\r\n]*)(?:[ewc_]|[ns][ew]?)(?![\w\x80-\uFFFF])/,lookbehind:!0,alias:"builtin"},node:{pattern:r(/(^|[^-.\w\x80-\uFFFF\\])/.source),lookbehind:!0,greedy:!0,inside:n},operator:/[=:]|-[->]/,punctuation:/[\[\]{};,]/},e.languages.gv=e.languages.dot}(Prism)},1295:()=>{Prism.languages.haskell={comment:{pattern:/(^|[^-!#$%*+=?&@|~.:<>^\\\/])(?:--(?:(?=.)[^-!#$%*+=?&@|~.:<>^\\\/].*|$)|\{-[\s\S]*?-\})/m,lookbehind:!0},char:{pattern:/'(?:[^\\']|\\(?:[abfnrtv\\"'&]|\^[A-Z@[\]^_]|ACK|BEL|BS|CAN|CR|DC1|DC2|DC3|DC4|DEL|DLE|EM|ENQ|EOT|ESC|ETB|ETX|FF|FS|GS|HT|LF|NAK|NUL|RS|SI|SO|SOH|SP|STX|SUB|SYN|US|VT|\d+|o[0-7]+|x[0-9a-fA-F]+))'/,alias:"string"},string:{pattern:/"(?:[^\\"]|\\(?:\S|\s+\\))*"/,greedy:!0},keyword:/\b(?:case|class|data|deriving|do|else|if|in|infixl|infixr|instance|let|module|newtype|of|primitive|then|type|where)\b/,"import-statement":{pattern:/(^[\t 
]*)import\s+(?:qualified\s+)?(?:[A-Z][\w']*)(?:\.[A-Z][\w']*)*(?:\s+as\s+(?:[A-Z][\w']*)(?:\.[A-Z][\w']*)*)?(?:\s+hiding\b)?/m,lookbehind:!0,inside:{keyword:/\b(?:as|hiding|import|qualified)\b/,punctuation:/\./}},builtin:/\b(?:abs|acos|acosh|all|and|any|appendFile|approxRational|asTypeOf|asin|asinh|atan|atan2|atanh|basicIORun|break|catch|ceiling|chr|compare|concat|concatMap|const|cos|cosh|curry|cycle|decodeFloat|denominator|digitToInt|div|divMod|drop|dropWhile|either|elem|encodeFloat|enumFrom|enumFromThen|enumFromThenTo|enumFromTo|error|even|exp|exponent|fail|filter|flip|floatDigits|floatRadix|floatRange|floor|fmap|foldl|foldl1|foldr|foldr1|fromDouble|fromEnum|fromInt|fromInteger|fromIntegral|fromRational|fst|gcd|getChar|getContents|getLine|group|head|id|inRange|index|init|intToDigit|interact|ioError|isAlpha|isAlphaNum|isAscii|isControl|isDenormalized|isDigit|isHexDigit|isIEEE|isInfinite|isLower|isNaN|isNegativeZero|isOctDigit|isPrint|isSpace|isUpper|iterate|last|lcm|length|lex|lexDigits|lexLitChar|lines|log|logBase|lookup|map|mapM|mapM_|max|maxBound|maximum|maybe|min|minBound|minimum|mod|negate|not|notElem|null|numerator|odd|or|ord|otherwise|pack|pi|pred|primExitWith|print|product|properFraction|putChar|putStr|putStrLn|quot|quotRem|range|rangeSize|read|readDec|readFile|readFloat|readHex|readIO|readInt|readList|readLitChar|readLn|readOct|readParen|readSigned|reads|readsPrec|realToFrac|recip|rem|repeat|replicate|return|reverse|round|scaleFloat|scanl|scanl1|scanr|scanr1|seq|sequence|sequence_|show|showChar|showInt|showList|showLitChar|showParen|showSigned|showString|shows|showsPrec|significand|signum|sin|sinh|snd|sort|span|splitAt|sqrt|subtract|succ|sum|tail|take|takeWhile|tan|tanh|threadToIOResult|toEnum|toInt|toInteger|toLower|toRational|toUpper|truncate|uncurry|undefined|unlines|until|unwords|unzip|unzip3|userError|words|writeFile|zip|zip3|zipWith|zipWith3)\b/,number:/\b(?:\d+(?:\.\d+)?(?:e[+-]?\d+)?|0o[0-7]+|0x[0-9a-f]+)\b/i,operator:[{pattern:/`(?:[A-Z][\w']*\.)*[_a-z][\w']*`/,greedy:!0},{pattern:/(\s)\.(?=\s)/,lookbehind:!0},/[-!#$%*+=?&@|~:<>^\\\/][-!#$%*+=?&@|~.:<>^\\\/]*|\.[-!#$%*+=?&@|~.:<>^\\\/]+/],hvariable:{pattern:/\b(?:[A-Z][\w']*\.)*[_a-z][\w']*/,inside:{punctuation:/\./}},constant:{pattern:/\b(?:[A-Z][\w']*\.)*[A-Z][\w']*/,inside:{punctuation:/\./}},punctuation:/[{}[\];(),.:]/},Prism.languages.hs=Prism.languages.haskell},2503:()=>{!function(e){var 
t=/\b(?:abstract|assert|boolean|break|byte|case|catch|char|class|const|continue|default|do|double|else|enum|exports|extends|final|finally|float|for|goto|if|implements|import|instanceof|int|interface|long|module|native|new|non-sealed|null|open|opens|package|permits|private|protected|provides|public|record(?!\s*[(){}[\]<>=%~.:,;?+\-*/&|^])|requires|return|sealed|short|static|strictfp|super|switch|synchronized|this|throw|throws|to|transient|transitive|try|uses|var|void|volatile|while|with|yield)\b/,n=/(?:[a-z]\w*\s*\.\s*)*(?:[A-Z]\w*\s*\.\s*)*/.source,r={pattern:RegExp(/(^|[^\w.])/.source+n+/[A-Z](?:[\d_A-Z]*[a-z]\w*)?\b/.source),lookbehind:!0,inside:{namespace:{pattern:/^[a-z]\w*(?:\s*\.\s*[a-z]\w*)*(?:\s*\.)?/,inside:{punctuation:/\./}},punctuation:/\./}};e.languages.java=e.languages.extend("clike",{string:{pattern:/(^|[^\\])"(?:\\.|[^"\\\r\n])*"/,lookbehind:!0,greedy:!0},"class-name":[r,{pattern:RegExp(/(^|[^\w.])/.source+n+/[A-Z]\w*(?=\s+\w+\s*[;,=()]|\s*(?:\[[\s,]*\]\s*)?::\s*new\b)/.source),lookbehind:!0,inside:r.inside},{pattern:RegExp(/(\b(?:class|enum|extends|implements|instanceof|interface|new|record|throws)\s+)/.source+n+/[A-Z]\w*\b/.source),lookbehind:!0,inside:r.inside}],keyword:t,function:[e.languages.clike.function,{pattern:/(::\s*)[a-z_]\w*/,lookbehind:!0}],number:/\b0b[01][01_]*L?\b|\b0x(?:\.[\da-f_p+-]+|[\da-f_]+(?:\.[\da-f_p+-]+)?)\b|(?:\b\d[\d_]*(?:\.[\d_]*)?|\B\.\d[\d_]*)(?:e[+-]?\d[\d_]*)?[dfl]?/i,operator:{pattern:/(^|[^.])(?:<<=?|>>>?=?|->|--|\+\+|&&|\|\||::|[?:~]|[-+*/%&|^!=<>]=?)/m,lookbehind:!0},constant:/\b[A-Z][A-Z_\d]+\b/}),e.languages.insertBefore("java","string",{"triple-quoted-string":{pattern:/"""[ \t]*[\r\n](?:(?:"|"")?(?:\\.|[^"\\]))*"""/,greedy:!0,alias:"string"},char:{pattern:/'(?:\\.|[^'\\\r\n]){1,6}'/,greedy:!0}}),e.languages.insertBefore("java","class-name",{annotation:{pattern:/(^|[^.])@\w+(?:\s*\.\s*\w+)*/,lookbehind:!0,alias:"punctuation"},generics:{pattern:/<(?:[\w\s,.?]|&(?!&)|<(?:[\w\s,.?]|&(?!&)|<(?:[\w\s,.?]|&(?!&)|<(?:[\w\s,.?]|&(?!&))*>)*>)*>)*>/,inside:{"class-name":r,keyword:t,punctuation:/[<>(),.:]/,operator:/[?&|]/}},import:[{pattern:RegExp(/(\bimport\s+)/.source+n+/(?:[A-Z]\w*|\*)(?=\s*;)/.source),lookbehind:!0,inside:{namespace:r.inside.namespace,punctuation:/\./,operator:/\*/,"class-name":/\w+/}},{pattern:RegExp(/(\bimport\s+static\s+)/.source+n+/(?:\w+|\*)(?=\s*;)/.source),lookbehind:!0,alias:"static",inside:{namespace:r.inside.namespace,static:/\b\w+$/,punctuation:/\./,operator:/\*/,"class-name":/\w+/}}],namespace:{pattern:RegExp(/(\b(?:exports|import(?:\s+static)?|module|open|opens|package|provides|requires|to|transitive|uses|with)\s+)(?!)[a-z]\w*(?:\.[a-z]\w*)*\.?/.source.replace(//g,(function(){return t.source}))),lookbehind:!0,inside:{punctuation:/\./}}})}(Prism)},6854:()=>{!function(e){function t(e,t){return"___"+e.toUpperCase()+t+"___"}Object.defineProperties(e.languages["markup-templating"]={},{buildPlaceholders:{value:function(n,r,a,o){if(n.language===r){var i=n.tokenStack=[];n.code=n.code.replace(a,(function(e){if("function"==typeof o&&!o(e))return e;for(var a,s=i.length;-1!==n.code.indexOf(a=t(r,s));)++s;return i[s]=e,a})),n.grammar=e.languages.markup}}},tokenizePlaceholders:{value:function(n,r){if(n.language===r&&n.tokenStack){n.grammar=e.languages[r];var a=0,o=Object.keys(n.tokenStack);!function i(s){for(var l=0;l=o.length);l++){var c=s[l];if("string"==typeof c||c.content&&"string"==typeof c.content){var u=o[a],d=n.tokenStack[u],p="string"==typeof c?c:c.content,f=t(r,u),g=p.indexOf(f);if(g>-1){++a;var 
m=p.substring(0,g),h=new e.Token(r,e.tokenize(d,n.grammar),"language-"+r,d),b=p.substring(g+f.length),y=[];m&&y.push.apply(y,i([m])),y.push(h),b&&y.push.apply(y,i([b])),"string"==typeof c?s.splice.apply(s,[l,1].concat(y)):c.content=y}}else c.content&&i(c.content)}return s}(n.tokens)}}}})}(Prism)},8704:()=>{Prism.languages.nix={comment:{pattern:/\/\*[\s\S]*?\*\/|#.*/,greedy:!0},string:{pattern:/"(?:[^"\\]|\\[\s\S])*"|''(?:(?!'')[\s\S]|''(?:'|\\|\$\{))*''/,greedy:!0,inside:{interpolation:{pattern:/(^|(?:^|(?!'').)[^\\])\$\{(?:[^{}]|\{[^}]*\})*\}/,lookbehind:!0,inside:null}}},url:[/\b(?:[a-z]{3,7}:\/\/)[\w\-+%~\/.:#=?&]+/,{pattern:/([^\/])(?:[\w\-+%~.:#=?&]*(?!\/\/)[\w\-+%~\/.:#=?&])?(?!\/\/)\/[\w\-+%~\/.:#=?&]*/,lookbehind:!0}],antiquotation:{pattern:/\$(?=\{)/,alias:"important"},number:/\b\d+\b/,keyword:/\b(?:assert|builtins|else|if|in|inherit|let|null|or|then|with)\b/,function:/\b(?:abort|add|all|any|attrNames|attrValues|baseNameOf|compareVersions|concatLists|currentSystem|deepSeq|derivation|dirOf|div|elem(?:At)?|fetch(?:Tarball|url)|filter(?:Source)?|fromJSON|genList|getAttr|getEnv|hasAttr|hashString|head|import|intersectAttrs|is(?:Attrs|Bool|Function|Int|List|Null|String)|length|lessThan|listToAttrs|map|mul|parseDrvName|pathExists|read(?:Dir|File)|removeAttrs|replaceStrings|seq|sort|stringLength|sub(?:string)?|tail|throw|to(?:File|JSON|Path|String|XML)|trace|typeOf)\b|\bfoldl'\B/,boolean:/\b(?:false|true)\b/,operator:/[=!<>]=?|\+\+?|\|\||&&|\/\/|->?|[?@]/,punctuation:/[{}()[\].,:;]/},Prism.languages.nix.string.inside.interpolation.inside=Prism.languages.nix},3210:()=>{Prism.languages.pascal={directive:{pattern:/\{\$[\s\S]*?\}/,greedy:!0,alias:["marco","property"]},comment:{pattern:/\(\*[\s\S]*?\*\)|\{[\s\S]*?\}|\/\/.*/,greedy:!0},string:{pattern:/(?:'(?:''|[^'\r\n])*'(?!')|#[&$%]?[a-f\d]+)+|\^[a-z]/i,greedy:!0},asm:{pattern:/(\basm\b)[\s\S]+?(?=\bend\s*[;[])/i,lookbehind:!0,greedy:!0,inside:null},keyword:[{pattern:/(^|[^&])\b(?:absolute|array|asm|begin|case|const|constructor|destructor|do|downto|else|end|file|for|function|goto|if|implementation|inherited|inline|interface|label|nil|object|of|operator|packed|procedure|program|record|reintroduce|repeat|self|set|string|then|to|type|unit|until|uses|var|while|with)\b/i,lookbehind:!0},{pattern:/(^|[^&])\b(?:dispose|exit|false|new|true)\b/i,lookbehind:!0},{pattern:/(^|[^&])\b(?:class|dispinterface|except|exports|finalization|finally|initialization|inline|library|on|out|packed|property|raise|resourcestring|threadvar|try)\b/i,lookbehind:!0},{pattern:/(^|[^&])\b(?:absolute|abstract|alias|assembler|bitpacked|break|cdecl|continue|cppdecl|cvar|default|deprecated|dynamic|enumerator|experimental|export|external|far|far16|forward|generic|helper|implements|index|interrupt|iochecks|local|message|name|near|nodefault|noreturn|nostackframe|oldfpccall|otherwise|overload|override|pascal|platform|private|protected|public|published|read|register|reintroduce|result|safecall|saveregisters|softfloat|specialize|static|stdcall|stored|strict|unaligned|unimplemented|varargs|virtual|write)\b/i,lookbehind:!0}],number:[/(?:[&%]\d+|\$[a-f\d]+)/i,/\b\d+(?:\.\d+)?(?:e[+-]?\d+)?/i],operator:[/\.\.|\*\*|:=|<[<=>]?|>[>=]?|[+\-*\/]=?|[@^=]/,{pattern:/(^|[^&])\b(?:and|as|div|exclude|in|include|is|mod|not|or|shl|shr|xor)\b/,lookbehind:!0}],punctuation:/\(\.|\.\)|[()\[\]:;,.]/},Prism.languages.pascal.asm.inside=Prism.languages.extend("pascal",{asm:void 0,keyword:void 0,operator:void 
0}),Prism.languages.objectpascal=Prism.languages.pascal},366:()=>{Prism.languages.python={comment:{pattern:/(^|[^\\])#.*/,lookbehind:!0,greedy:!0},"string-interpolation":{pattern:/(?:f|fr|rf)(?:("""|''')[\s\S]*?\1|("|')(?:\\.|(?!\2)[^\\\r\n])*\2)/i,greedy:!0,inside:{interpolation:{pattern:/((?:^|[^{])(?:\{\{)*)\{(?!\{)(?:[^{}]|\{(?!\{)(?:[^{}]|\{(?!\{)(?:[^{}])+\})+\})+\}/,lookbehind:!0,inside:{"format-spec":{pattern:/(:)[^:(){}]+(?=\}$)/,lookbehind:!0},"conversion-option":{pattern:/![sra](?=[:}]$)/,alias:"punctuation"},rest:null}},string:/[\s\S]+/}},"triple-quoted-string":{pattern:/(?:[rub]|br|rb)?("""|''')[\s\S]*?\1/i,greedy:!0,alias:"string"},string:{pattern:/(?:[rub]|br|rb)?("|')(?:\\.|(?!\1)[^\\\r\n])*\1/i,greedy:!0},function:{pattern:/((?:^|\s)def[ \t]+)[a-zA-Z_]\w*(?=\s*\()/g,lookbehind:!0},"class-name":{pattern:/(\bclass\s+)\w+/i,lookbehind:!0},decorator:{pattern:/(^[\t ]*)@\w+(?:\.\w+)*/m,lookbehind:!0,alias:["annotation","punctuation"],inside:{punctuation:/\./}},keyword:/\b(?:_(?=\s*:)|and|as|assert|async|await|break|case|class|continue|def|del|elif|else|except|exec|finally|for|from|global|if|import|in|is|lambda|match|nonlocal|not|or|pass|print|raise|return|try|while|with|yield)\b/,builtin:/\b(?:__import__|abs|all|any|apply|ascii|basestring|bin|bool|buffer|bytearray|bytes|callable|chr|classmethod|cmp|coerce|compile|complex|delattr|dict|dir|divmod|enumerate|eval|execfile|file|filter|float|format|frozenset|getattr|globals|hasattr|hash|help|hex|id|input|int|intern|isinstance|issubclass|iter|len|list|locals|long|map|max|memoryview|min|next|object|oct|open|ord|pow|property|range|raw_input|reduce|reload|repr|reversed|round|set|setattr|slice|sorted|staticmethod|str|sum|super|tuple|type|unichr|unicode|vars|xrange|zip)\b/,boolean:/\b(?:False|None|True)\b/,number:/\b0(?:b(?:_?[01])+|o(?:_?[0-7])+|x(?:_?[a-f0-9])+)\b|(?:\b\d+(?:_\d+)*(?:\.(?:\d+(?:_\d+)*)?)?|\B\.\d+(?:_\d+)*)(?:e[+-]?\d+(?:_\d+)*)?j?(?!\w)/i,operator:/[-+%=]=?|!=|:=|\*\*?=?|\/\/?=?|<[<=>]?|>[=>]?|[&|^~]/,punctuation:/[{}[\];(),.:]/},Prism.languages.python["string-interpolation"].inside.interpolation.inside.rest=Prism.languages.python,Prism.languages.py=Prism.languages.python},9385:()=>{!function(e){e.languages.ruby=e.languages.extend("clike",{comment:{pattern:/#.*|^=begin\s[\s\S]*?^=end/m,greedy:!0},"class-name":{pattern:/(\b(?:class|module)\s+|\bcatch\s+\()[\w.\\]+|\b[A-Z_]\w*(?=\s*\.\s*new\b)/,lookbehind:!0,inside:{punctuation:/[.\\]/}},keyword:/\b(?:BEGIN|END|alias|and|begin|break|case|class|def|define_method|defined|do|each|else|elsif|end|ensure|extend|for|if|in|include|module|new|next|nil|not|or|prepend|private|protected|public|raise|redo|require|rescue|retry|return|self|super|then|throw|undef|unless|until|when|while|yield)\b/,operator:/\.{2,3}|&\.|===||[!=]?~|(?:&&|\|\||<<|>>|\*\*|[+\-*/%<>!^&|=])=?|[?:]/,punctuation:/[(){}[\].,;]/}),e.languages.insertBefore("ruby","operator",{"double-colon":{pattern:/::/,alias:"punctuation"}});var t={pattern:/((?:^|[^\\])(?:\\{2})*)#\{(?:[^{}]|\{[^{}]*\})*\}/,lookbehind:!0,inside:{content:{pattern:/^(#\{)[\s\S]+(?=\}$)/,lookbehind:!0,inside:e.languages.ruby},delimiter:{pattern:/^#\{|\}$/,alias:"punctuation"}}};delete e.languages.ruby.function;var 
n="(?:"+[/([^a-zA-Z0-9\s{(\[<=])(?:(?!\1)[^\\]|\\[\s\S])*\1/.source,/\((?:[^()\\]|\\[\s\S]|\((?:[^()\\]|\\[\s\S])*\))*\)/.source,/\{(?:[^{}\\]|\\[\s\S]|\{(?:[^{}\\]|\\[\s\S])*\})*\}/.source,/\[(?:[^\[\]\\]|\\[\s\S]|\[(?:[^\[\]\\]|\\[\s\S])*\])*\]/.source,/<(?:[^<>\\]|\\[\s\S]|<(?:[^<>\\]|\\[\s\S])*>)*>/.source].join("|")+")",r=/(?:"(?:\\.|[^"\\\r\n])*"|(?:\b[a-zA-Z_]\w*|[^\s\0-\x7F]+)[?!]?|\$.)/.source;e.languages.insertBefore("ruby","keyword",{"regex-literal":[{pattern:RegExp(/%r/.source+n+/[egimnosux]{0,6}/.source),greedy:!0,inside:{interpolation:t,regex:/[\s\S]+/}},{pattern:/(^|[^/])\/(?!\/)(?:\[[^\r\n\]]+\]|\\.|[^[/\\\r\n])+\/[egimnosux]{0,6}(?=\s*(?:$|[\r\n,.;})#]))/,lookbehind:!0,greedy:!0,inside:{interpolation:t,regex:/[\s\S]+/}}],variable:/[@$]+[a-zA-Z_]\w*(?:[?!]|\b)/,symbol:[{pattern:RegExp(/(^|[^:]):/.source+r),lookbehind:!0,greedy:!0},{pattern:RegExp(/([\r\n{(,][ \t]*)/.source+r+/(?=:(?!:))/.source),lookbehind:!0,greedy:!0}],"method-definition":{pattern:/(\bdef\s+)\w+(?:\s*\.\s*\w+)?/,lookbehind:!0,inside:{function:/\b\w+$/,keyword:/^self\b/,"class-name":/^\w+/,punctuation:/\./}}}),e.languages.insertBefore("ruby","string",{"string-literal":[{pattern:RegExp(/%[qQiIwWs]?/.source+n),greedy:!0,inside:{interpolation:t,string:/[\s\S]+/}},{pattern:/("|')(?:#\{[^}]+\}|#(?!\{)|\\(?:\r\n|[\s\S])|(?!\1)[^\\#\r\n])*\1/,greedy:!0,inside:{interpolation:t,string:/[\s\S]+/}},{pattern:/<<[-~]?([a-z_]\w*)[\r\n](?:.*[\r\n])*?[\t ]*\1/i,alias:"heredoc-string",greedy:!0,inside:{delimiter:{pattern:/^<<[-~]?[a-z_]\w*|\b[a-z_]\w*$/i,inside:{symbol:/\b\w+/,punctuation:/^<<[-~]?/}},interpolation:t,string:/[\s\S]+/}},{pattern:/<<[-~]?'([a-z_]\w*)'[\r\n](?:.*[\r\n])*?[\t ]*\1/i,alias:"heredoc-string",greedy:!0,inside:{delimiter:{pattern:/^<<[-~]?'[a-z_]\w*'|\b[a-z_]\w*$/i,inside:{symbol:/\b\w+/,punctuation:/^<<[-~]?'|'$/}},string:/[\s\S]+/}}],"command-literal":[{pattern:RegExp(/%x/.source+n),greedy:!0,inside:{interpolation:t,command:{pattern:/[\s\S]+/,alias:"string"}}},{pattern:/`(?:#\{[^}]+\}|#(?!\{)|\\(?:\r\n|[\s\S])|[^\\`#\r\n])*`/,greedy:!0,inside:{interpolation:t,command:{pattern:/[\s\S]+/,alias:"string"}}}]}),delete e.languages.ruby.string,e.languages.insertBefore("ruby","number",{builtin:/\b(?:Array|Bignum|Binding|Class|Continuation|Dir|Exception|FalseClass|File|Fixnum|Float|Hash|IO|Integer|MatchData|Method|Module|NilClass|Numeric|Object|Proc|Range|Regexp|Stat|String|Struct|Symbol|TMS|Thread|ThreadGroup|Time|TrueClass)\b/,constant:/\b[A-Z][A-Z0-9_]*(?:[?!]|\b)/}),e.languages.rb=e.languages.ruby}(Prism)},767:()=>{!function(e){for(var t=/\/\*(?:[^*/]|\*(?!\/)|\/(?!\*)|)*\*\//.source,n=0;n<2;n++)t=t.replace(//g,(function(){return 
t}));t=t.replace(//g,(function(){return/[^\s\S]/.source})),e.languages.rust={comment:[{pattern:RegExp(/(^|[^\\])/.source+t),lookbehind:!0,greedy:!0},{pattern:/(^|[^\\:])\/\/.*/,lookbehind:!0,greedy:!0}],string:{pattern:/b?"(?:\\[\s\S]|[^\\"])*"|b?r(#*)"(?:[^"]|"(?!\1))*"\1/,greedy:!0},char:{pattern:/b?'(?:\\(?:x[0-7][\da-fA-F]|u\{(?:[\da-fA-F]_*){1,6}\}|.)|[^\\\r\n\t'])'/,greedy:!0},attribute:{pattern:/#!?\[(?:[^\[\]"]|"(?:\\[\s\S]|[^\\"])*")*\]/,greedy:!0,alias:"attr-name",inside:{string:null}},"closure-params":{pattern:/([=(,:]\s*|\bmove\s*)\|[^|]*\||\|[^|]*\|(?=\s*(?:\{|->))/,lookbehind:!0,greedy:!0,inside:{"closure-punctuation":{pattern:/^\||\|$/,alias:"punctuation"},rest:null}},"lifetime-annotation":{pattern:/'\w+/,alias:"symbol"},"fragment-specifier":{pattern:/(\$\w+:)[a-z]+/,lookbehind:!0,alias:"punctuation"},variable:/\$\w+/,"function-definition":{pattern:/(\bfn\s+)\w+/,lookbehind:!0,alias:"function"},"type-definition":{pattern:/(\b(?:enum|struct|trait|type|union)\s+)\w+/,lookbehind:!0,alias:"class-name"},"module-declaration":[{pattern:/(\b(?:crate|mod)\s+)[a-z][a-z_\d]*/,lookbehind:!0,alias:"namespace"},{pattern:/(\b(?:crate|self|super)\s*)::\s*[a-z][a-z_\d]*\b(?:\s*::(?:\s*[a-z][a-z_\d]*\s*::)*)?/,lookbehind:!0,alias:"namespace",inside:{punctuation:/::/}}],keyword:[/\b(?:Self|abstract|as|async|await|become|box|break|const|continue|crate|do|dyn|else|enum|extern|final|fn|for|if|impl|in|let|loop|macro|match|mod|move|mut|override|priv|pub|ref|return|self|static|struct|super|trait|try|type|typeof|union|unsafe|unsized|use|virtual|where|while|yield)\b/,/\b(?:bool|char|f(?:32|64)|[ui](?:8|16|32|64|128|size)|str)\b/],function:/\b[a-z_]\w*(?=\s*(?:::\s*<|\())/,macro:{pattern:/\b\w+!/,alias:"property"},constant:/\b[A-Z_][A-Z_\d]+\b/,"class-name":/\b[A-Z]\w*\b/,namespace:{pattern:/(?:\b[a-z][a-z_\d]*\s*::\s*)*\b[a-z][a-z_\d]*\s*::(?!\s*<)/,inside:{punctuation:/::/}},number:/\b(?:0x[\dA-Fa-f](?:_?[\dA-Fa-f])*|0o[0-7](?:_?[0-7])*|0b[01](?:_?[01])*|(?:(?:\d(?:_?\d)*)?\.)?\d(?:_?\d)*(?:[Ee][+-]?\d+)?)(?:_?(?:f32|f64|[iu](?:8|16|32|64|size)?))?\b/,boolean:/\b(?:false|true)\b/,punctuation:/->|\.\.=|\.{1,3}|::|[{}[\];(),:]/,operator:/[-+*\/%!^]=?|=[=>]?|&[&=]?|\|[|=]?|<>?=?|[@?]/},e.languages.rust["closure-params"].inside.rest=e.languages.rust,e.languages.rust.attribute.inside.string=e.languages.rust.string}(Prism)},218:(e,t,n)=>{var r={"./prism-ada":5795,"./prism-bash":7874,"./prism-csharp":9016,"./prism-dot":397,"./prism-haskell":1295,"./prism-java":2503,"./prism-nix":8704,"./prism-pascal":3210,"./prism-python":366,"./prism-ruby":9385,"./prism-rust":767};function a(e){var t=o(e);return n(t)}function o(e){if(!n.o(r,e)){var t=new Error("Cannot find module '"+e+"'");throw t.code="MODULE_NOT_FOUND",t}return r[e]}a.keys=function(){return Object.keys(r)},a.resolve=o,e.exports=a,a.id=218},2703:(e,t,n)=>{"use strict";var r=n(414);function a(){}function o(){}o.resetWarningCache=a,e.exports=function(){function e(e,t,n,a,o,i){if(i!==r){var s=new Error("Calling PropTypes validators directly is not supported by the `prop-types` package. Use PropTypes.checkPropTypes() to call them. 
Read more at http://fb.me/use-check-prop-types");throw s.name="Invariant Violation",s}}function t(){return e}e.isRequired=e;var n={array:e,bigint:e,bool:e,func:e,number:e,object:e,string:e,symbol:e,any:e,arrayOf:t,element:e,elementType:e,instanceOf:t,node:e,objectOf:t,oneOf:t,oneOfType:t,shape:t,exact:t,checkPropTypes:o,resetWarningCache:a};return n.PropTypes=n,n}},5697:(e,t,n)=>{e.exports=n(2703)()},414:e=>{"use strict";e.exports="SECRET_DO_NOT_PASS_THIS_OR_YOU_WILL_BE_FIRED"},4448:(e,t,n)=>{"use strict";var r=n(7294),a=n(3840);function o(e){for(var t="https://reactjs.org/docs/error-decoder.html?invariant="+e,n=1;n

    As I have said in the tl;dr, we are looking for the shortest path, but the start and goal differ between parts 1 and 2. So I have decided to refactor my solution into a BFS algorithm that takes the necessary parameters via functions:

    -
    fn bfs<F, G>(
    graph: &[Vec<char>], start: &Position, has_edge: F, is_target: G
    ) -> Option<usize>
    where
    F: Fn(&[Vec<char>], &Position, &Position) -> bool,
    G: Fn(&[Vec<char>], &Position) -> bool
    +
    fn bfs<F, G>(
    graph: &[Vec<char>], start: &Position, has_edge: F, is_target: G
    ) -> Option<usize>
    where
    F: Fn(&[Vec<char>], &Position, &Position) -> bool,
    G: Fn(&[Vec<char>], &Position) -> bool

    We pass the initial vertex from the caller and everything else is left to the BFS algorithm, which relies on the has_edge and is_target functions.
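    The body of the BFS is not shown in this excerpt, so the following is only a minimal sketch of what a BFS with this signature could look like; the Position type is assumed to be a plain (row, column) pair, which may differ from the post's actual definition.

    use std::collections::{HashSet, VecDeque};

    // Assumed stand-in for the post's `Position` type: a (row, column) pair.
    type Position = (usize, usize);

    fn bfs<F, G>(graph: &[Vec<char>], start: &Position, has_edge: F, is_target: G) -> Option<usize>
    where
        F: Fn(&[Vec<char>], &Position, &Position) -> bool,
        G: Fn(&[Vec<char>], &Position) -> bool,
    {
        let mut visited: HashSet<Position> = HashSet::from([*start]);
        let mut queue: VecDeque<(Position, usize)> = VecDeque::from([(*start, 0)]);

        while let Some((position, distance)) = queue.pop_front() {
            // `is_target` decides when the search is done.
            if is_target(graph, &position) {
                return Some(distance);
            }

            let (row, col) = position;

            // Candidate neighbours in the four cardinal directions, bounds-checked.
            let mut neighbours = Vec::new();
            if row > 0 { neighbours.push((row - 1, col)); }
            if col > 0 { neighbours.push((row, col - 1)); }
            if row + 1 < graph.len() { neighbours.push((row + 1, col)); }
            if col + 1 < graph[row].len() { neighbours.push((row, col + 1)); }

            for next in neighbours {
                // `has_edge` encodes the puzzle-specific rule, so the BFS itself stays generic.
                if !visited.contains(&next) && has_edge(graph, &position, &next) {
                    visited.insert(next);
                    queue.push_back((next, distance + 1));
                }
            }
        }

        None
    }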

    This was easy! And that is not very usual in Rust once you want to pass around @@ -268,7 +268,7 @@ time complexity, because of the priority heap instead of the queue.

    You can implement a lot of traits if you want to. It is imperative to implement ordering on the packets. I had a typo, so I also implemented the Display trait for debugging purposes:

    -
    impl Display for Packet {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
    match self {
    Packet::Integer(x) => write!(f, "{x}"),
    Packet::List(lst) => write!(f, "[{}]", lst.iter().map(|p| format!("{p}")).join(",")),
    }
    }
    }
    +
    impl Display for Packet {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
    match self {
    Packet::Integer(x) => write!(f, "{x}"),
    Packet::List(lst) => write!(f, "[{}]", lst.iter().map(|p| format!("{p}")).join(",")),
    }
    }
    }
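    The ordering itself is not shown in this excerpt; below is a minimal sketch of what it could look like, assuming a Packet enum shaped like the one used in the Display implementation above (the inner integer type is a guess, and the author's actual implementation may differ).

    use std::cmp::Ordering;

    // Assumed shape of the enum; only the two variants visible above are modelled.
    #[derive(Debug, Clone)]
    enum Packet {
        Integer(u8),
        List(Vec<Packet>),
    }

    impl Ord for Packet {
        fn cmp(&self, other: &Self) -> Ordering {
            match (self, other) {
                // Two integers compare directly.
                (Packet::Integer(x), Packet::Integer(y)) => x.cmp(y),
                // Two lists compare element by element, i.e. lexicographically.
                (Packet::List(xs), Packet::List(ys)) => xs.cmp(ys),
                // A lone integer is promoted to a single-element list first.
                (Packet::Integer(x), Packet::List(_)) => {
                    Packet::List(vec![Packet::Integer(*x)]).cmp(other)
                }
                (Packet::List(_), Packet::Integer(y)) => {
                    self.cmp(&Packet::List(vec![Packet::Integer(*y)]))
                }
            }
        }
    }

    impl PartialOrd for Packet {
        fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
            Some(self.cmp(other))
        }
    }

    // Equality is defined through the comparison to stay consistent with `Ord`.
    impl PartialEq for Packet {
        fn eq(&self, other: &Self) -> bool {
            self.cmp(other) == Ordering::Equal
        }
    }

    impl Eq for Packet {}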

    Solution

    A lot of technical details… Parsing is nasty too…

    Day 14: Regolith Reservoir

    @@ -288,16 +288,16 @@ leave it be, so I tried to implement the Index and IndexMut… unsafe part are the two methods that are named *unchecked*. Anyway, I will be implementing the Index* traits for now, rather than the SliceIndex.

    It's relatively straightforward…

    -
    impl<I, C> Index<Vector2D<I>> for [C]
    where
    I: Copy + TryInto<usize>,
    <I as TryInto<usize>>::Error: Debug,
    C: Index<usize>,
    {
    type Output = C::Output;

    fn index(&self, index: Vector2D<I>) -> &Self::Output {
    let (x, y): (usize, usize) =
    (index.x.try_into().unwrap(), index.y.try_into().unwrap());
    &self[y][x]
    }
    }

    impl<I, C> IndexMut<Vector2D<I>> for [C]
    where
    I: Copy + TryInto<usize>,
    <I as TryInto<usize>>::Error: Debug,
    C: IndexMut<usize>,
    {
    fn index_mut(&mut self, index: Vector2D<I>) -> &mut Self::Output {
    let (x, y): (usize, usize) =
    (index.x.try_into().unwrap(), index.y.try_into().unwrap());
    &mut self[y][x]
    }
    }
    +
    impl<I, C> Index<Vector2D<I>> for [C]
    where
    I: Copy + TryInto<usize>,
    <I as TryInto<usize>>::Error: Debug,
    C: Index<usize>,
    {
    type Output = C::Output;

    fn index(&self, index: Vector2D<I>) -> &Self::Output {
    let (x, y): (usize, usize) =
    (index.x.try_into().unwrap(), index.y.try_into().unwrap());
    &self[y][x]
    }
    }

    impl<I, C> IndexMut<Vector2D<I>> for [C]
    where
    I: Copy + TryInto<usize>,
    <I as TryInto<usize>>::Error: Debug,
    C: IndexMut<usize>,
    {
    fn index_mut(&mut self, index: Vector2D<I>) -> &mut Self::Output {
    let (x, y): (usize, usize) =
    (index.x.try_into().unwrap(), index.y.try_into().unwrap());
    &mut self[y][x]
    }
    }

    We can see a lot of similarities to the implementations of the index and index_mut functions. In the end, they are 1:1, just wrapped in the trait that provides syntactic sugar for container[idx].

    note

    I have also switched from the TryFrom trait to TryInto, since it better matches what we are using: .try_into() rather than usize::try_from().

    Also, implementing TryFrom automatically provides you with the TryInto trait,

    -
    since it is relatively easy to implement. Just compare the following:

    pub trait TryFrom<T>: Sized {
    type Error;

    fn try_from(value: T) -> Result<Self, Self::Error>;
    }

    pub trait TryInto<T>: Sized {
    type Error;

    fn try_into(self) -> Result<T, Self::Error>;
    }
    +
    since it is relatively easy to implement. Just compare the following:

    pub trait TryFrom<T>: Sized {
    type Error;

    fn try_from(value: T) -> Result<Self, Self::Error>;
    }

    pub trait TryInto<T>: Sized {
    type Error;

    fn try_into(self) -> Result<T, Self::Error>;
    }
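    To illustrate the note above with a runnable sketch: implementing TryFrom is enough to call .try_into(), thanks to the blanket implementation in the standard library. The Meters and Centimeters types below are made up purely for this example.

    #[derive(Debug)]
    struct Meters(u32);

    struct Centimeters(u32);

    impl TryFrom<Centimeters> for Meters {
        type Error = String;

        fn try_from(value: Centimeters) -> Result<Self, Self::Error> {
            if value.0 % 100 == 0 {
                Ok(Meters(value.0 / 100))
            } else {
                Err(format!("{} cm is not a whole number of meters", value.0))
            }
        }
    }

    fn main() {
        // `.try_into()` comes for free from the blanket
        // `impl<T, U> TryInto<U> for T where U: TryFrom<T>` in the standard library.
        let metres: Result<Meters, _> = Centimeters(300).try_into();
        println!("{metres:?}");
    }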

    OK, so we have our trait implemented; we should be able to use container[index], right? Yes… but actually no 😦

    -
    error[E0277]: the type `[std::vec::Vec<i8>]` cannot be indexed by `aoc_2022::Vector2D<usize>`
    --> src/bin/day08.rs:26:18
    |
    26 | if trees[pos] > tallest {
    | ^^^ slice indices are of type `usize` or ranges of `usize`
    |
    = help: the trait `std::slice::SliceIndex<[std::vec::Vec<i8>]>` is not implemented for `aoc_2022::Vector2D<usize>`
    = note: required for `std::vec::Vec<std::vec::Vec<i8>>` to implement `std::ops::Index<aoc_2022::Vector2D<usize>>`

    error[E0277]: the type `[std::vec::Vec<i8>]` cannot be indexed by `aoc_2022::Vector2D<usize>`
    --> src/bin/day08.rs:30:28
    |
    30 | max(tallest, trees[pos])
    | ^^^ slice indices are of type `usize` or ranges of `usize`
    |
    = help: the trait `std::slice::SliceIndex<[std::vec::Vec<i8>]>` is not implemented for `aoc_2022::Vector2D<usize>`
    = note: required for `std::vec::Vec<std::vec::Vec<i8>>` to implement `std::ops::Index<aoc_2022::Vector2D<usize>>`

    error[E0277]: the type `[std::vec::Vec<i8>]` cannot be indexed by `aoc_2022::Vector2D<isize>`
    --> src/bin/day08.rs:52:28
    |
    52 | let max_height = trees[position];
    | ^^^^^^^^ slice indices are of type `usize` or ranges of `usize`
    |
    = help: the trait `std::slice::SliceIndex<[std::vec::Vec<i8>]>` is not implemented for `aoc_2022::Vector2D<isize>`
    = note: required for `std::vec::Vec<std::vec::Vec<i8>>` to implement `std::ops::Index<aoc_2022::Vector2D<isize>>`
    +
    error[E0277]: the type `[std::vec::Vec<i8>]` cannot be indexed by `aoc_2022::Vector2D<usize>`
    --> src/bin/day08.rs:26:18
    |
    26 | if trees[pos] > tallest {
    | ^^^ slice indices are of type `usize` or ranges of `usize`
    |
    = help: the trait `std::slice::SliceIndex<[std::vec::Vec<i8>]>` is not implemented for `aoc_2022::Vector2D<usize>`
    = note: required for `std::vec::Vec<std::vec::Vec<i8>>` to implement `std::ops::Index<aoc_2022::Vector2D<usize>>`

    error[E0277]: the type `[std::vec::Vec<i8>]` cannot be indexed by `aoc_2022::Vector2D<usize>`
    --> src/bin/day08.rs:30:28
    |
    30 | max(tallest, trees[pos])
    | ^^^ slice indices are of type `usize` or ranges of `usize`
    |
    = help: the trait `std::slice::SliceIndex<[std::vec::Vec<i8>]>` is not implemented for `aoc_2022::Vector2D<usize>`
    = note: required for `std::vec::Vec<std::vec::Vec<i8>>` to implement `std::ops::Index<aoc_2022::Vector2D<usize>>`

    error[E0277]: the type `[std::vec::Vec<i8>]` cannot be indexed by `aoc_2022::Vector2D<isize>`
    --> src/bin/day08.rs:52:28
    |
    52 | let max_height = trees[position];
    | ^^^^^^^^ slice indices are of type `usize` or ranges of `usize`
    |
    = help: the trait `std::slice::SliceIndex<[std::vec::Vec<i8>]>` is not implemented for `aoc_2022::Vector2D<isize>`
    = note: required for `std::vec::Vec<std::vec::Vec<i8>>` to implement `std::ops::Index<aoc_2022::Vector2D<isize>>`

    Why? We have it implemented for the slices ([C]), so why doesn't it work? Well, the fun part is that in the other place where we were using it, we were passing a &[Vec<T>], but this is coming from a helper function that @@ -307,9 +307,9 @@ those. Just for the slices. 🤯 What are we going to do abo… so let's implement a macro! The only difference across the implementations is the type of the outer container. The implementation itself doesn't differ at all!

    Implementing the macro can be done in the following way:

    -
    macro_rules! generate_indices {
    ($container:ty) => {
    impl<I, C> Index<Vector2D<I>> for $container
    where
    I: Copy + TryInto<usize>,
    <I as TryInto<usize>>::Error: Debug,
    C: Index<usize>,
    {
    type Output = C::Output;

    fn index(&self, index: Vector2D<I>) -> &Self::Output {
    let (x, y): (usize, usize) =
    (index.x.try_into().unwrap(), index.y.try_into().unwrap());
    &self[y][x]
    }
    }

    impl<I, C> IndexMut<Vector2D<I>> for $container
    where
    I: Copy + TryInto<usize>,
    <I as TryInto<usize>>::Error: Debug,
    C: IndexMut<usize>,
    {
    fn index_mut(&mut self, index: Vector2D<I>) -> &mut Self::Output {
    let (x, y): (usize, usize) =
    (index.x.try_into().unwrap(), index.y.try_into().unwrap());
    &mut self[y][x]
    }
    }
    };
    }
    +
    macro_rules! generate_indices {
    ($container:ty) => {
    impl<I, C> Index<Vector2D<I>> for $container
    where
    I: Copy + TryInto<usize>,
    <I as TryInto<usize>>::Error: Debug,
    C: Index<usize>,
    {
    type Output = C::Output;

    fn index(&self, index: Vector2D<I>) -> &Self::Output {
    let (x, y): (usize, usize) =
    (index.x.try_into().unwrap(), index.y.try_into().unwrap());
    &self[y][x]
    }
    }

    impl<I, C> IndexMut<Vector2D<I>> for $container
    where
    I: Copy + TryInto<usize>,
    <I as TryInto<usize>>::Error: Debug,
    C: IndexMut<usize>,
    {
    fn index_mut(&mut self, index: Vector2D<I>) -> &mut Self::Output {
    let (x, y): (usize, usize) =
    (index.x.try_into().unwrap(), index.y.try_into().unwrap());
    &mut self[y][x]
    }
    }
    };
    }

    And now we can simply do

    -
    generate_indices!(VecDeque<C>);
    generate_indices!([C]);
    generate_indices!(Vec<C>);
    // generate_indices!([C; N], const N: usize);
    +
    generate_indices!(VecDeque<C>);
    generate_indices!([C]);
    generate_indices!(Vec<C>);
    // generate_indices!([C; N], const N: usize);

    The last type (I took inspiration from the implementations of the Index and IndexMut traits) is a bit problematic because of the const N: usize part, which I haven't managed to parse. And that's how I got rid of the error.

[…] copy-paste, because the cost of this “monstrosity” outweighs the benefits of not copy-pasting.

This issue is relatively funny. If you don't use any type aliases, just the raw types, clippy will suggest certain changes to you. For example, if you consider the following piece of code

    -
    fn get_sum(nums: &Vec<i32>) -> i32 {
    nums.iter().sum()
    }

    fn main() {
    let nums = vec![1, 2, 3];
    println!("Sum: {}", get_sum(&nums));
    }

    and you run clippy on it, you will get

    -
    Checking playground v0.0.1 (/playground)
    warning: writing `&Vec` instead of `&[_]` involves a new object where a slice will do
    --> src/main.rs:1:18
    |
    1 | fn get_sum(nums: &Vec<i32>) -> i32 {
    | ^^^^^^^^^ help: change this to: `&[i32]`
    |
    = help: for further information visit https://rust-lang.github.io/rust-clippy/master/index.html#ptr_arg
    = note: `#[warn(clippy::ptr_arg)]` on by default

    warning: `playground` (bin "playground") generated 1 warning
    Finished dev [unoptimized + debuginfo] target(s) in 0.61s

    However, if you introduce a type alias, such as

    -
    type Numbers = Vec<i32>;

Then clippy won't say anything, because there is literally nothing to suggest. However, the outcome is not the same…
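For illustration, here is a minimal sketch of the aliased variant (assuming the same get_sum example as above); it is not from the original post, but it shows what the unchanged outcome looks like:

type Numbers = Vec<i32>;

// Clippy's `ptr_arg` lint stays silent on the aliased signature (as noted above),
// even though `Numbers` is just `Vec<i32>` underneath, so the function still
// takes `&Vec<i32>` instead of the more general `&[i32]`.
fn get_sum(nums: &Numbers) -> i32 {
    nums.iter().sum()
}

fn main() {
    let nums: Numbers = vec![1, 2, 3];
    println!("Sum: {}", get_sum(&nums));
}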

diff --git a/blog/aoc-2022/3rd-week/index.html b/blog/aoc-2022/3rd-week/index.html index 89babf5..12c8ed5 100644

    3rd week of Advent of Code '22 in Rust

    · 12 min read
    Matej Focko

    Let's go through the third week of Advent of Code in Rust.

[…] to implement the indexing in a graph, rather than explicitly accessing the underlying data structure.

    Here you can see a rather short snippet from the solution that allows you to “index” the graph:

    -
    impl Index<&str> for Graph {
    type Output = Vertex;

    fn index(&self, index: &str) -> &Self::Output {
    &self.g[index]
    }
    }

    Cartesian product

During the implementation I had to utilize the Floyd-Warshall algorithm for finding the shortest paths between pairs of vertices and used the iproduct! macro […] also makes it harder to evaluate algorithmically, since you need to check the different ways the work can be split.
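Just to illustrate (this is my own sketch, not the solution's code), the triple loop of Floyd-Warshall collapses nicely with iproduct!, because the macro iterates the left-most range in the outermost position:

use itertools::iproduct;

/// Minimal Floyd-Warshall sketch over an adjacency matrix `dist`,
/// where `dist[i][j]` starts as the direct edge weight (or a large value).
fn floyd_warshall(dist: &mut [Vec<u64>]) {
    let n = dist.len();

    // `k` varies slowest here, which is exactly the ordering the algorithm requires.
    for (k, i, j) in iproduct!(0..n, 0..n, 0..n) {
        let through_k = dist[i][k].saturating_add(dist[k][j]);
        if through_k < dist[i][j] {
            dist[i][j] = through_k;
        }
    }
}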

Being affected by functional programming brain damage™️, I have chosen to do this part with a function that returns an iterator over the possible ways:

    -
    fn pairings(
    valves: &BTreeSet<String>,
    ) -> impl Iterator<Item = (BTreeSet<String>, BTreeSet<String>)> + '_ {
    let mapping = valves.iter().collect_vec();

    let max_mask = 1 << (valves.len() - 1);

    (0..max_mask).map(move |mask| {
    let mut elephant = BTreeSet::new();
    let mut human = BTreeSet::new();

    for (i, &v) in mapping.iter().enumerate() {
    if (mask & (1 << i)) == 0 {
    human.insert(v.clone());
    } else {
    elephant.insert(v.clone());
    }
    }

    (human, elephant)
    })
    }

    Day 17: Pyroclastic Flow

    tl;dr

    Simulating an autonomous Tetris where pieces get affected by a series of jets of hot gas.

[…] iterate through the positions that can actually collide with the wall or another piece.

To get the desired behaviour, you can just compose a few smaller functions:

    -
    fn occupied(shape: &[Vec<char>]) -> impl Iterator<Item = Position> + '_ {
    shape.iter().enumerate().flat_map(|(y, row)| {
    row.iter().enumerate().filter_map(move |(x, c)| {
    if c == &'#' {
    Some(Vector2D::new(x as isize, y as isize))
    } else {
    None
    }
    })
    })
    }

In the end, we get relative positions which we can adjust later when given the specific positions from the iterator. You can see some interesting parts in this:

[…] and also unwraps the values from Some(…).

[…] jets that move our pieces around. Initially I have implemented my own infinite iterator that just yields the indices. It is a very simple, yet powerful, piece of code:

      -
      struct InfiniteIndex {
      size: usize,
      i: usize,
      }

      impl InfiniteIndex {
      fn new(size: usize) -> InfiniteIndex {
      InfiniteIndex { size, i: size - 1 }
      }
      }

      impl Iterator for InfiniteIndex {
      type Item = usize;

      fn next(&mut self) -> Option<Self::Item> {
      self.i = (self.i + 1) % self.size;
      Some(self.i)
      }
      }

However, when I'm looking at the code now, it doesn't really make much sense… Guess what, we can use a built-in function that is implemented on iterators for that! The function is called .cycle().
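A tiny sketch of what that replacement could look like (hypothetical jet values, not the solution's code):

// Instead of the hand-rolled InfiniteIndex, an endlessly repeating iterator
// over the jet indices can be obtained directly from the standard library:
let jets = vec!['<', '>', '>', '<'];
let mut jet_indices = (0..jets.len()).cycle();

// Each call to `.next()` now yields 0, 1, 2, 3, 0, 1, … forever.
assert_eq!(jet_indices.next(), Some(0));
assert_eq!(jet_indices.next(), Some(1));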

[…] cubes.

    Solution

This day is kinda interesting, because it shows how easily you can complicate the problem and also how much you can screw yourself over with the optimization and “smart” approach.

For the first part you need to find the surface area of an obsidian that is approximated by cubes. Now, that is a very easy thing to do: just keep track of the already added cubes, and check if the newly added cube touches any face of any […] the Rc<RefCell<T>>. In the end I failed on a wrong answer and a rather interesting issue with the .borrow_mut() method being used on Rc<RefCell<T>>.
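Coming back to the first part for a moment, a small sketch of the face-counting idea described above (my own illustration, not the post's code):

use std::collections::HashSet;

// Every new cube exposes 6 faces; each already-present neighbour hides one
// face on the new cube and one on the neighbour itself.
fn surface_area(cubes: &[(i32, i32, i32)]) -> i32 {
    let mut seen: HashSet<(i32, i32, i32)> = HashSet::new();
    let mut area = 0;

    for &(x, y, z) in cubes {
        area += 6;

        for (dx, dy, dz) in [
            (1, 0, 0),
            (-1, 0, 0),
            (0, 1, 0),
            (0, -1, 0),
            (0, 0, 1),
            (0, 0, -1),
        ] {
            if seen.contains(&(x + dx, y + dy, z + dz)) {
                area -= 2;
            }
        }

        seen.insert((x, y, z));
    }

    area
}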

    .borrow_mut()

    Consider the following snippet of the code (taken from the documentation):

    -
    use std::cell::{RefCell, RefMut};
    use std::collections::HashMap;
    use std::rc::Rc;
    // use std::borrow::BorrowMut;

    fn main() {
    let shared_map: Rc<RefCell<_>> = Rc::new(RefCell::new(HashMap::new()));
    // Create a new block to limit the scope of the dynamic borrow
    {
    let mut map: RefMut<_> = shared_map.borrow_mut();
    map.insert("africa", 92388);
    map.insert("kyoto", 11837);
    map.insert("piccadilly", 11826);
    map.insert("marbles", 38);
    }

    // Note that if we had not let the previous borrow of the cache fall out
    // of scope then the subsequent borrow would cause a dynamic thread panic.
    // This is the major hazard of using `RefCell`.
    let total: i32 = shared_map.borrow().values().sum();
    println!("{total}");
    }

    We allocate a hash map on the heap and then in the inner block, we borrow it as a mutable reference, so that we can use it.

    note

    It is a very primitive example for Rc<RefCell<T>> and mutable borrow.

    If you uncomment the 4th line with use std::borrow::BorrowMut;, you cannot compile the code anymore, because of

    -
       Compiling playground v0.0.1 (/playground)
    error[E0308]: mismatched types
    --> src/main.rs:10:34
    |
    10 | let mut map: RefMut<_> = shared_map.borrow_mut();
    | --------- ^^^^^^^^^^^^^^^^^^^^^^^ expected struct `RefMut`, found mutable reference
    | |
    | expected due to this
    |
    = note: expected struct `RefMut<'_, _>`
    found mutable reference `&mut Rc<RefCell<HashMap<_, _>>>`

    error[E0599]: no method named `insert` found for struct `RefMut<'_, _>` in the current scope
    --> src/main.rs:11:13
    |
    11 | map.insert("africa", 92388);
    | ^^^^^^ method not found in `RefMut<'_, _>`

    error[E0599]: no method named `insert` found for struct `RefMut<'_, _>` in the current scope
    --> src/main.rs:12:13
    |
    12 | map.insert("kyoto", 11837);
    | ^^^^^^ method not found in `RefMut<'_, _>`

    error[E0599]: no method named `insert` found for struct `RefMut<'_, _>` in the current scope
    --> src/main.rs:13:13
    |
    13 | map.insert("piccadilly", 11826);
    | ^^^^^^ method not found in `RefMut<'_, _>`

    error[E0599]: no method named `insert` found for struct `RefMut<'_, _>` in the current scope
    --> src/main.rs:14:13
    |
    14 | map.insert("marbles", 38);
    | ^^^^^^ method not found in `RefMut<'_, _>`

    Some errors have detailed explanations: E0308, E0599.
    For more information about an error, try `rustc --explain E0308`.
    error: could not compile `playground` due to 5 previous errors

It might seem a bit ridiculous. However, I got to a point where the compiler suggested use std::borrow::BorrowMut; and it resulted in breaking parts of the code that worked previously. I think it may be a good idea to go over what is […] method. OK, but how can we call it on the Rc<T>? Easily! I have not been able to find a lot on this trait. My guess is that it provides a method instead of the syntactic sugar (&mut x) for the mutable borrow. It also provides default implementations for these types:

    -
    impl BorrowMut<str> for String

    impl<T> BorrowMut<T> for &mut T
    where
    T: ?Sized,

    impl<T> BorrowMut<T> for T
    where
    T: ?Sized,

    impl<T, A> BorrowMut<[T]> for Vec<T, A>
    where
    A: Allocator,

    impl<T, A> BorrowMut<T> for Box<T, A>
    where
    A: Allocator,
    T: ?Sized,

    impl<T, const N: usize> BorrowMut<[T]> for [T; N]
    Conflict

Now the question is why it broke the code… My first take was that the type Rc<RefCell<T>> has some specialized implementation of .borrow_mut() and the use overrides it with the default, which is true in a sense. However, there is no specialized implementation. Let's have a look at the trait and the type signature on the RefCell<T>:

    -
    // trait
    pub trait BorrowMut<Borrowed>: Borrow<Borrowed>
    where
    Borrowed: ?Sized,
    {
    fn borrow_mut(&mut self) -> &mut Borrowed;
    }

    // ‹RefCell<T>.borrow_mut()› type signature
    pub fn borrow_mut(&self) -> RefMut<'_, T>

    I think that we can definitely agree on the fact that RefMut<'_, T> is not the RefCell<T>.

In my opinion, RefCell<T> implements a separate .borrow_mut() rather […]
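One way around such a clash (a sketch of my own, not something from the post) is to name the inherent method explicitly, which side-steps the imported trait method:

use std::borrow::BorrowMut; // the import that causes the clash
use std::cell::{RefCell, RefMut};
use std::collections::HashMap;
use std::rc::Rc;

fn main() {
    let shared_map: Rc<RefCell<HashMap<&str, i32>>> = Rc::new(RefCell::new(HashMap::new()));

    {
        // The fully qualified call picks `RefCell::borrow_mut`, so we still get
        // a `RefMut<_>` even with the `BorrowMut` trait in scope.
        let mut map: RefMut<'_, _> = RefCell::borrow_mut(&shared_map);
        map.insert("africa", 92388);
    }

    let total: i32 = shared_map.borrow().values().sum();
    println!("{total}");
}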

[…] that you can use the macro machinery to save yourself some typing. If you have an enumeration whose default value doesn't bear any parameter, you can just do2:

    -
    #[derive(Default)]
    enum Color {
    #[default]
    White,
    Gray,
    Black,
    }

    Abusing negation

If you want to use the unary minus operator on your own type, you can implement the Neg trait3. I was dealing with a binary tree and needed a way to look […]

diff --git a/blog/aoc-2022/4th-week/index.html b/blog/aoc-2022/4th-week/index.html index 3c69b45..7458212 100644

    4th week of Advent of Code '22 in Rust

    · 16 min read
    Matej Focko

    Let's go through the fourth week of Advent of Code in Rust.

[…] each row and column to determine the boundaries; it was very easy to do for the rows (because each row is a Vec element), but not for the columns, since they span multiple rows.

    For this use case I have implemented my own column iterator:

    -
    pub struct ColumnIterator<'a, T> {
    map: &'a [Vec<T>],
    column: usize,

    i: usize,
    }

    impl<'a, T> ColumnIterator<'a, T> {
    pub fn new(map: &'a [Vec<T>], column: usize) -> ColumnIterator<'a, T> {
    Self { map, column, i: 0 }
    }
    }

    impl<'a, T> Iterator for ColumnIterator<'a, T> {
    type Item = &'a T;

    fn next(&mut self) -> Option<Self::Item> {
    if self.i >= self.map.len() {
    return None;
    }

    self.i += 1;
    Some(&self.map[self.i - 1][self.column])
    }
    }

    Given this piece of an iterator, it is very easy to factor out the common functionality between the rows and columns into:

    -
    let mut find_boundaries = |constructor: fn(usize) -> Orientation,
    iterator: &mut dyn Iterator<Item = &char>,
    upper_bound,
    i| {
    let mut first_non_empty = iterator.enumerate().skip_while(|&(_, &c)| c == ' ');
    let start = first_non_empty.next().unwrap().0 as isize;

    let mut last_non_empty = first_non_empty.skip_while(|&(_, &c)| c != ' ');
    let end = last_non_empty.next().unwrap_or((upper_bound, &'_')).0 as isize;

    boundaries.insert(constructor(i), start..end);
    };

    And then use it as such:

    -
    // construct all horizontal boundaries
    (0..map.len()).for_each(|row| {
    find_boundaries(
    Orientation::horizontal,
    &mut map[row].iter(),
    map[row].len(),
    row,
    );
    });

    // construct all vertical boundaries
    (0..map[0].len()).for_each(|col| {
    find_boundaries(
    Orientation::vertical,
    &mut ColumnIterator::new(&map, col),
    map.len(),
    col,
    );
    });

    Walking around the map

Once the 2nd part got introduced, you start to think about a way to not copy-paste a lot of stuff (I haven't avoided it anyway…). In this problem, I've chosen to introduce a trait (i.e. an interface) for the 2D and 3D walkers.

    -
    trait Wrap: Clone {
    type State;

    // simulation
    fn is_blocked(&self) -> bool;
    fn step(&mut self, steps: isize);
    fn turn_left(&mut self);
    fn turn_right(&mut self);

    // movement
    fn next(&self) -> (Self::State, Direction);

    // final answer
    fn answer(&self) -> Output;
    }

    Each walker maintains its own state and also provides the functions that are used during the simulation. The “promised” methods are separated into:

[…] implementation-specific walker

    Both 2D and 3D versions borrow the original input and therefore you must annotate the lifetime of it:

    -
    struct Wrap2D<'a> {
    input: &'a Input,
    position: Position,
    direction: Direction,
    }
    impl<'a> Wrap2D<'a> {
    fn new(input: &'a Input) -> Wrap2D<'a> {
    // …

    Problems

I have used a lot of closures for this problem and once I introduced a parameter that was of an unknown type (apart from the fact that it implements a specific trait), I […] of rather smart suggestions.

[…] char was the .is_digit() function that takes a radix as a parameter. Clippy noticed that I use radix = 10 and suggested switching to .is_ascii_digit(), which does exactly the same thing:

    -
    -                .take_while(|c| c.is_digit(10))
    + .take_while(|c| c.is_ascii_digit())

Another useful suggestion appeared when working with the iterators, where I wanted to get the n-th element from one. You know the .skip(), you know the .next(), just “slap” them together and we're done for 😁 Well, clippy suggested using .nth(), which does exactly the combination of the two mentioned methods on iterators:

    -
    -            match it.clone().skip(skip).next().unwrap() {
    + match it.clone().nth(skip).unwrap() {

    Day 23: Unstable Diffusion

    tl;dr

    Simulating movement of elves around with a set of specific rules.

    Solution

[…] minimum that are, of course, exactly the same except for the initial values and comparators. It looks like a rather simple fix, but typing in Rust is something else, right? In the end I settled for a function that computes both boundaries without any duplication while using a closure:

    -
    fn get_bounds(positions: &Input) -> (Vector2D<isize>, Vector2D<isize>) {
    let f = |init, cmp: &dyn Fn(isize, isize) -> isize| {
    positions
    .iter()
    .fold(Vector2D::new(init, init), |acc, elf| {
    Vector2D::new(cmp(acc.x(), elf.x()), cmp(acc.y(), elf.y()))
    })
    };

    (f(isize::MAX, &min::<isize>), f(isize::MIN, &max::<isize>))
    }

    This function returns a pair of 2D vectors that represent opposite points of the bounding rectangle of all elves.

You might ask why we would need a closure, and the answer is that positions cannot be captured from within a nested function, only via a closure. One more fun fact on top of that is the type of the comparator

    -
    &dyn Fn(isize, isize) -> isize

Once we remove the dyn keyword, the compiler yells at us and also includes a way to get a more thorough explanation of the error by running

    $ rustc --explain E0782

    which shows us

    Trait objects must include the dyn keyword.

    Erroneous code example:

    -
    trait Foo {}
    fn test(arg: Box<Foo>) {} // error!

    Trait objects are a way to call methods on types that are not known until runtime but conform to some trait.

Trait objects should be formed with Box<dyn Foo>, but in the code above […]

This makes it harder to see that arg is a trait object and not simply a heap allocated type called Foo.

    To fix this issue, add dyn before the trait name.

    -
    trait Foo {}
    fn test(arg: Box<dyn Foo>) {} // ok!

    This used to be allowed before edition 2021, but is now an error.

    Rant

Not all of the explanations are helpful though; in some cases they might be even more confusing than helpful, since they address very simple use cases.

    As you can see, even in this case there are two sides to the explanations:

[…] cleaned it up a bit. The changed version is shown here, and the original was just more verbose.

I'll skip the boring parts of checking bounds and entry/exit of the basin 😉 We can easily calculate the positions of the blizzards using modular arithmetic:

    -
    impl Index<Position> for Basin {
    type Output = char;

    fn index(&self, index: Position) -> &Self::Output {
    // ‹skipped boring parts›

    // We need to account for the loops of the blizzards
    let width = self.cols - 2;
    let height = self.rows - 2;

    let blizzard_origin = |size, d, t, i| ((i - 1 + size + d * (t % size)) % size + 1) as usize;
    [
    (
    index.y() as usize,
    blizzard_origin(width, -1, index.z(), index.x()),
    '>',
    ),
    (
    index.y() as usize,
    blizzard_origin(width, 1, index.z(), index.x()),
    '<',
    ),
    (
    blizzard_origin(height, -1, index.z(), index.y()),
    index.x() as usize,
    'v',
    ),
    (
    blizzard_origin(height, 1, index.z(), index.y()),
    index.x() as usize,
    '^',
    ),
    ]
    .iter()
    .find_map(|&(y, x, direction)| {
    if self.map[y][x] == direction {
    Some(&self.map[y][x])
    } else {
    None
    }
    })
    .unwrap_or(&'.')
    }
    }

    As you can see, there is an expression for calculating the original position and it's used multiple times, so why not take it out to a lambda, right? 😉

I couldn't get rustfmt to format the for-loop nicely, so I've just […] algorithm, since it better reflects the cost function.

[…] a priority for the queue.

Whereas with the A*, I have chosen to use both the time and the Manhattan distance, which promotes vertices closer to the exit and with a minimum time taken.

    Cost function is, of course, a closure 😉

    -
    let cost = |p: Position| p.z() as usize + exit.y().abs_diff(p.y()) + exit.x().abs_diff(p.x());

And also for checking the possible moves from the current vertex, I have implemented yet another closure that yields an iterator with the next moves:

    -
    let next_positions = |p| {
    [(0, 0, 1), (0, -1, 1), (0, 1, 1), (-1, 0, 1), (1, 0, 1)]
    .iter()
    .filter_map(move |&(x, y, t)| {
    let next_p = p + Vector3D::new(x, y, t);

    if basin[next_p] == '.' {
    Some(next_p)
    } else {
    None
    }
    })
    };

    Min-heap

In this case I needed to use a priority queue taking the elements with the lowest cost as the prioritized ones. Rust only offers you the BinaryHeap and […] the BinaryHeap). However, the wrapping affects the type of the heap […] popping the most prioritized elements yields values wrapped in the Reverse.

    For this purpose I have just taken the max-heap and wrapped it as a whole in a separate structure providing just the desired methods:

    -
    use std::cmp::{Ord, Reverse};
    use std::collections::BinaryHeap;

    pub struct MinHeap<T> {
    heap: BinaryHeap<Reverse<T>>,
    }

    impl<T: Ord> MinHeap<T> {
    pub fn new() -> MinHeap<T> {
    MinHeap {
    heap: BinaryHeap::new(),
    }
    }

    pub fn push(&mut self, item: T) {
    self.heap.push(Reverse(item))
    }

    pub fn pop(&mut self) -> Option<T> {
    self.heap.pop().map(|Reverse(x)| x)
    }
    }

    impl<T: Ord> Default for MinHeap<T> {
    fn default() -> Self {
    Self::new()
    }
    }
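A quick usage sketch (not from the post) to show the behaviour we get out of it, assuming the MinHeap defined above:

let mut heap = MinHeap::new();
heap.push(3);
heap.push(1);
heap.push(2);

// Unlike the plain BinaryHeap, popping yields the smallest element first
// and the values come out unwrapped (no `Reverse` in sight).
assert_eq!(heap.pop(), Some(1));
assert_eq!(heap.pop(), Some(2));
assert_eq!(heap.pop(), Some(3));
assert_eq!(heap.pop(), None);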

The rest is just the algorithm implementation, which is not that interesting.

    Day 25: Full of Hot Air

    tl;dr

Playing around with numbers in a special base.

[…] with a rather easy solution, as the last day always seems to be.

[…] that sounds familiar, doesn't it? Let's introduce a structure for the SNAFU numbers and implement the traits that we need.

    Let's start with a structure:

    -
    #[derive(Debug, PartialEq, Eq, PartialOrd, Ord)]
    struct SNAFU {
    value: i64,
    }

    Converting from &str

We will start by implementing the FromStr trait that will help us parse our input. This is rather simple: I can just take the from_snafu function, copy-paste it […] trait for the SNAFU.
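A rough sketch of what that implementation boils down to (assuming the usual SNAFU digit values, i.e. '=' is -2 and '-' is -1; this is not a verbatim copy of the post's from_snafu):

use std::str::FromStr;

impl FromStr for SNAFU {
    type Err = String;

    fn from_str(s: &str) -> Result<Self, Self::Err> {
        let mut value = 0;

        // SNAFU is base 5 with digits 2, 1, 0, -1 ('-') and -2 ('=').
        for c in s.trim().chars() {
            let digit = match c {
                '2' => 2,
                '1' => 1,
                '0' => 0,
                '-' => -1,
                '=' => -2,
                _ => return Err(format!("invalid SNAFU digit: {c}")),
            };
            value = value * 5 + digit;
        }

        Ok(SNAFU { value })
    }
}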

    After those changes we need to adjust the code and tests.

Parsing of the input is very easy; before we used the lines, now we parse everything:

    -
         fn parse_input<P: AsRef<Path>>(pathname: P) -> Input {
    - file_to_lines(pathname)
    + file_to_structs(pathname)
    }

    Part 1 needs to be adjusted a bit too:

    -
         fn part_1(input: &Input) -> Output {
    - to_snafu(input.iter().map(|s| from_snafu(s)).sum())
    + SNAFU::from(input.iter().map(|s| s.value).sum::<i64>()).to_string()
    }

    You can also see that it simplifies the meaning a bit and it is more explicit than the previous versions.
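For completeness, the opposite direction could look roughly like this (a hedged sketch of the From<i64> and Display implementations, since the post's own versions are not visible in this diff):

use std::fmt;

impl From<i64> for SNAFU {
    fn from(value: i64) -> Self {
        SNAFU { value }
    }
}

impl fmt::Display for SNAFU {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let mut n = self.value;
        let mut digits = Vec::new();

        while n != 0 {
            // Remainders 3 and 4 are rendered as '=' (-2) and '-' (-1)
            // with a carry into the next place.
            let (c, carry) = match n.rem_euclid(5) {
                0 => ('0', 0),
                1 => ('1', 0),
                2 => ('2', 0),
                3 => ('=', 1),
                _ => ('-', 1),
            };
            digits.push(c);
            n = n / 5 + carry;
        }

        if digits.is_empty() {
            digits.push('0');
        }

        write!(f, "{}", digits.iter().rev().collect::<String>())
    }
}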

    And for the tests:

    -
         #[test]
    fn test_from() {
    - for (n, s) in EXAMPLES.iter() {
    - assert_eq!(from_snafu(s), *n);
    + for (&n, s) in EXAMPLES.iter() {
    + assert_eq!(s.parse::<SNAFU>().unwrap().value, n);
    }
    }

    #[test]
    fn test_to() {
    - for (n, s) in EXAMPLES.iter() {
    - assert_eq!(to_snafu(*n), s.to_string());
    + for (&n, s) in EXAMPLES.iter() {
    + assert_eq!(SNAFU::from(n).to_string(), s.to_string());
    }

    Summary

    Let's wrap the whole thing up! Keeping in mind both AoC and the Rust…

    Finished advent calendar :smile:

diff --git a/blog/aoc-2022/intro/index.html b/blog/aoc-2022/intro/index.html index 11e080e..7bcb70b 100644

    Advent of Code '22 in Rust

    · 9 min read
    Matej Focko

    Let's talk about the preparations for this year's Advent of Code.

[…] problems in it. However, the toolkit is questionable :/

[…] with rust-analyzer. Because of my choice of libraries, we will also introduce a .envrc file that can be used by direnv, which allows you to set specific environment variables when you enter a directory. In our case, we will use

    -
    # to show nice backtrace when using the color-eyre
    export RUST_BACKTRACE=1

    # to catch logs generated by tracing
    export RUST_LOG=trace

And for one of the most obnoxious things ever, we will use a script to download the inputs instead of “clicking, opening and copying to a file”1. There is no need to be fancy, so we will adjust the Python script by Martin2.

    -
    #!/usr/bin/env python3

    import datetime
    import yaml
    import requests
    import sys


    def load_config():
    with open("env.yaml", "r") as f:
    js = yaml.load(f, Loader=yaml.Loader)
    return js["session"], js["year"]


    def get_input(session, year, day):
    return requests.get(
    f"https://adventofcode.com/{year}/day/{day}/input",
    cookies={"session": session},
    headers={
    "User-Agent": "{repo} by {mail}".format(
    repo="gitlab.com/mfocko/advent-of-code-2022",
    mail="me@mfocko.xyz",
    )
    },
    ).content.decode("utf-8")


    def main():
    day = datetime.datetime.now().day
    if len(sys.argv) == 2:
    day = sys.argv[1]

    session, year = load_config()
    problem_input = get_input(session, year, day)

    with open(f"./inputs/day{day:>02}.txt", "w") as f:
    f.write(problem_input)


    if __name__ == "__main__":
    main()

    If the script is called without any arguments, it will deduce the day from the system, so we do not need to change the day every morning. It also requires a configuration file:

    -
    # env.yaml
    session: ‹your session cookie›
    year: 2022

    Libraries

    Looking at the list of the libraries, I have chosen “a lot” of them. Let's walk through each of them.

[…] also we can follow KISS. I have 2 modules that my “library” exports: one for parsing and one for a 2D vector (that gets used quite often during Advent of Code).

The key part is, of course, processing the input, and my library exports the following functions that get used a lot:

    -
    /// Reads file to the string.
    pub fn file_to_string<P: AsRef<Path>>(pathname: P) -> String;

    /// Reads file and returns it as a vector of characters.
    pub fn file_to_chars<P: AsRef<Path>>(pathname: P) -> Vec<char>;

    /// Reads file and returns a vector of parsed structures. Expects each structure
    /// on its own line in the file. And `T` needs to implement `FromStr` trait.
    pub fn file_to_structs<P: AsRef<Path>, T: FromStr>(pathname: P) -> Vec<T>
    where
    <T as FromStr>::Err: Debug;

    /// Converts iterator over strings to a vector of parsed structures. `T` needs
    /// to implement `FromStr` trait and its error must derive `Debug` trait.
    pub fn strings_to_structs<T: FromStr, U>(
    iter: impl Iterator<Item = U>
    ) -> Vec<T>
    where
    <T as std::str::FromStr>::Err: std::fmt::Debug,
    U: Deref<Target = str>;

    /// Reads file and returns it as a vector of its lines.
    pub fn file_to_lines<P: AsRef<Path>>(pathname: P) -> Vec<String>;

As for the vector, I went with a rather simple implementation that allows only addition of the vectors for now and accessing the elements via the functions x() and y(). Also the vector is generic, so we can use it with any numeric type we need.
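To give an idea of what that looks like (a minimal sketch with assumed field names, not the actual library code):

use std::ops::Add;

#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub struct Vector2D<T> {
    x: T,
    y: T,
}

impl<T: Copy> Vector2D<T> {
    pub fn new(x: T, y: T) -> Self {
        Self { x, y }
    }

    // Elements are accessed through functions rather than public fields.
    pub fn x(&self) -> T {
        self.x
    }

    pub fn y(&self) -> T {
        self.y
    }
}

// Only addition is supported for now, exactly as described above.
impl<T: Add<Output = T>> Add for Vector2D<T> {
    type Output = Vector2D<T>;

    fn add(self, rhs: Self) -> Self::Output {
        Vector2D {
            x: self.x + rhs.x,
            y: self.y + rhs.y,
        }
    }
}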

    We can also prepare a template to quickly bootstrap each of the days. We know that each puzzle has 2 parts, which means that we can start with 2 functions that will solve them.

    -
    fn part1(input: &Input) -> Output {
    todo!()
    }

    fn part2(input: &Input) -> Output {
    todo!()
    }

Both functions take a reference to the input and return some output (in the majority of puzzles, it is the same type). todo!() can be used as a nice placeholder; it also causes a panic when reached, and we could also provide some string with an explanation, e.g. todo!("part 1"). We have not given the functions specific types, and to avoid as much copy-paste as possible, we will introduce type aliases.

    -
    type Input = String;
    type Output = i32;
    tip

    This allows us to quickly adjust the types only in one place without the need to do regex-replace or replace them manually.

    For each day we get a personalized input that is provided as a text file. Almost all the time, we would like to get some structured type out of that input, and therefore it makes sense to introduce a new function that will provide the parsing of the input.

    -
    fn parse_input(path: &str) -> Input {
    todo!()
    }

    This “parser” will take a path to the file, just in case we would like to run the sample instead of input.

    OK, so now we can write a main function that will take all of the pieces and run them.

    -
    fn main() {
    let input = parse_input("inputs/dayXX.txt");

    println!("Part 1: {}", part_1(&input));
    println!("Part 2: {}", part_2(&input));
    }

    This would definitely do :) But we have installed a few libraries and we want to use them. In this part we are going to utilize tracing (for tracing, duh…) and color-eyre (for better error reporting, e.g. from parsing).

    -
    fn main() -> Result<()> {
    tracing_subscriber::fmt()
    .with_env_filter(EnvFilter::from_default_env())
    .with_target(false)
    .with_file(true)
    .with_line_number(true)
    .without_time()
    .compact()
    .init();
    color_eyre::install()?;

    let input = parse_input("inputs/dayXX.txt");

    info!("Part 1: {}", part_1(&input));
    info!("Part 2: {}", part_2(&input));

    Ok(())
    }

    The first statement will set up tracing and configure it to print out the logs to terminal, based on the environment variable. We also change the formatting a bit, since we do not need all the fancy features of the logger. Pure initialization would get us logs like this:

    -
    2022-12-11T19:53:19.975343Z  INFO day01: Part 1: 0

    However after running that command, we will get the following:

    -
     INFO src/bin/day01.rs:35: Part 1: 0

    And the color_eyre::install()? is quite straightforward. We just initialize the error reporting by color eyre.

    caution

Notice that we had to add Ok(()) to the end of the function and adjust the return type of the main to Result<()>. It is because […] can be installed only once and therefore it can fail; that is how we got the ? at the end of the ::install, which unwraps the »result« of the installation.

    Overall we will get to a template like this:

    -
    use aoc_2022::*;

    use color_eyre::eyre::Result;
    use tracing::info;
    use tracing_subscriber::EnvFilter;

    type Input = String;
    type Output = i32;

    fn parse_input(path: &str) -> Input {
    todo!()
    }

    fn part1(input: &Input) -> Output {
    todo!()
    }

    fn part2(input: &Input) -> Output {
    todo!()
    }

    fn main() -> Result<()> {
    tracing_subscriber::fmt()
    .with_env_filter(EnvFilter::from_default_env())
    .with_target(false)
    .with_file(true)
    .with_line_number(true)
    .without_time()
    .compact()
    .init();
    color_eyre::install()?;

    let input = parse_input("inputs/dayXX.txt");

    info!("Part 1: {}", part_1(&input));
    info!("Part 2: {}", part_2(&input));

    Ok(())
    }

    Footnotes

1. […]

diff --git a/blog/atom.xml b/blog/atom.xml index e7b9f53..9ecf98c 100644

[…] very close to installing the desired package.

    So in shell you would do

    -
    # dnf copr enable ‹copr-repository›
    # dnf install ‹package-from-the-repository›

And… that's it! Nothing else needed! Simple, right? And literally the same process as you would do for the PPA.

    AUR

On the other hand, if you are familiar with Arch Linux, you definitely know […]

    For this use case I have implemented my own column iterator:

    -
    pub struct ColumnIterator<'a, T> {
    map: &'a [Vec<T>],
    column: usize,

    i: usize,
    }

    impl<'a, T> ColumnIterator<'a, T> {
    pub fn new(map: &'a [Vec<T>], column: usize) -> ColumnIterator<'a, T> {
    Self { map, column, i: 0 }
    }
    }

    impl<'a, T> Iterator for ColumnIterator<'a, T> {
    type Item = &'a T;

    fn next(&mut self) -> Option<Self::Item> {
    if self.i >= self.map.len() {
    return None;
    }

    self.i += 1;
    Some(&self.map[self.i - 1][self.column])
    }
    }
    +
    pub struct ColumnIterator<'a, T> {
    map: &'a [Vec<T>],
    column: usize,

    i: usize,
    }

    impl<'a, T> ColumnIterator<'a, T> {
    pub fn new(map: &'a [Vec<T>], column: usize) -> ColumnIterator<'a, T> {
    Self { map, column, i: 0 }
    }
    }

    impl<'a, T> Iterator for ColumnIterator<'a, T> {
    type Item = &'a T;

    fn next(&mut self) -> Option<Self::Item> {
    if self.i >= self.map.len() {
    return None;
    }

    self.i += 1;
    Some(&self.map[self.i - 1][self.column])
    }
    }

    Given this piece of an iterator, it is very easy to factor out the common functionality between the rows and columns into:

    -
    let mut find_boundaries = |constructor: fn(usize) -> Orientation,
    iterator: &mut dyn Iterator<Item = &char>,
    upper_bound,
    i| {
    let mut first_non_empty = iterator.enumerate().skip_while(|&(_, &c)| c == ' ');
    let start = first_non_empty.next().unwrap().0 as isize;

    let mut last_non_empty = first_non_empty.skip_while(|&(_, &c)| c != ' ');
    let end = last_non_empty.next().unwrap_or((upper_bound, &'_')).0 as isize;

    boundaries.insert(constructor(i), start..end);
    };
    +
    let mut find_boundaries = |constructor: fn(usize) -> Orientation,
    iterator: &mut dyn Iterator<Item = &char>,
    upper_bound,
    i| {
    let mut first_non_empty = iterator.enumerate().skip_while(|&(_, &c)| c == ' ');
    let start = first_non_empty.next().unwrap().0 as isize;

    let mut last_non_empty = first_non_empty.skip_while(|&(_, &c)| c != ' ');
    let end = last_non_empty.next().unwrap_or((upper_bound, &'_')).0 as isize;

    boundaries.insert(constructor(i), start..end);
    };

    And then use it as such:

    -
    // construct all horizontal boundaries
    (0..map.len()).for_each(|row| {
    find_boundaries(
    Orientation::horizontal,
    &mut map[row].iter(),
    map[row].len(),
    row,
    );
    });

    // construct all vertical boundaries
    (0..map[0].len()).for_each(|col| {
    find_boundaries(
    Orientation::vertical,
    &mut ColumnIterator::new(&map, col),
    map.len(),
    col,
    );
    });
    +
    // construct all horizontal boundaries
    (0..map.len()).for_each(|row| {
    find_boundaries(
    Orientation::horizontal,
    &mut map[row].iter(),
    map[row].len(),
    row,
    );
    });

    // construct all vertical boundaries
    (0..map[0].len()).for_each(|col| {
    find_boundaries(
    Orientation::vertical,
    &mut ColumnIterator::new(&map, col),
    map.len(),
    col,
    );
    });

    Walking around the map

    Once the 2nd part got introduced, you start to think about a way how not to copy-paste a lot of stuff (I haven't avoided it anyways…). In this problem, I've chosen to introduce a trait (i.e. interface) for 2D and 3D walker.

    -
    trait Wrap: Clone {
    type State;

    // simulation
    fn is_blocked(&self) -> bool;
    fn step(&mut self, steps: isize);
    fn turn_left(&mut self);
    fn turn_right(&mut self);

    // movement
    fn next(&self) -> (Self::State, Direction);

    // final answer
    fn answer(&self) -> Output;
    }
    +
    trait Wrap: Clone {
    type State;

    // simulation
    fn is_blocked(&self) -> bool;
    fn step(&mut self, steps: isize);
    fn turn_left(&mut self);
    fn turn_right(&mut self);

    // movement
    fn next(&self) -> (Self::State, Direction);

    // final answer
    fn answer(&self) -> Output;
    }

    Each walker maintains its own state and also provides the functions that are used during the simulation. The “promised” methods are separated into:

      @@ -144,7 +144,7 @@ implementation-specific walker

    Both 2D and 3D versions borrow the original input and therefore you must annotate the lifetime of it:

    -
    struct Wrap2D<'a> {
    input: &'a Input,
    position: Position,
    direction: Direction,
    }
    impl<'a> Wrap2D<'a> {
    fn new(input: &'a Input) -> Wrap2D<'a> {
    // …
    +
    struct Wrap2D<'a> {
    input: &'a Input,
    position: Position,
    direction: Direction,
    }
    impl<'a> Wrap2D<'a> {
    fn new(input: &'a Input) -> Wrap2D<'a> {
    // …

    Problems

    I have used a lot of closures for this problem and once I introduced a parameter that was of unknown type (apart from the fact it implements a specific trait), I @@ -161,13 +161,13 @@ of rather smart suggestions.

    char was the .is_digit() function that takes a radix as a parameter. Clippy noticed that I use radix = 10 and suggested switching to .is_ascii_digit() that does exactly the same thing:

    -
    -                .take_while(|c| c.is_digit(10))
    + .take_while(|c| c.is_ascii_digit())
    +
    -                .take_while(|c| c.is_digit(10))
    + .take_while(|c| c.is_ascii_digit())

    Another useful suggestion appeared when working with the iterators and I wanted to get the nn-th element from it. You know the .skip(), you know the .next(), just “slap” them together and we're done for 😁 Well, I got suggested to use .nth() that does exactly the combination of the two mentioned methods on iterators:

    -
    -            match it.clone().skip(skip).next().unwrap() {
    + match it.clone().nth(skip).unwrap() {
    +
    -            match it.clone().skip(skip).next().unwrap() {
    + match it.clone().nth(skip).unwrap() {

    Day 23: Unstable Diffusion

    tl;dr

    Simulating movement of elves around with a set of specific rules.

    Solution

    @@ -180,20 +180,20 @@ minimum that are, of course, exactly the same except for initial values and comparators, it looks like a rather simple fix, but typing in Rust is something else, right? In the end I settled for a function that computes both boundaries without any duplication while using a closure:

    -
    fn get_bounds(positions: &Input) -> (Vector2D<isize>, Vector2D<isize>) {
    let f = |init, cmp: &dyn Fn(isize, isize) -> isize| {
    positions
    .iter()
    .fold(Vector2D::new(init, init), |acc, elf| {
    Vector2D::new(cmp(acc.x(), elf.x()), cmp(acc.y(), elf.y()))
    })
    };

    (f(isize::MAX, &min::<isize>), f(isize::MIN, &max::<isize>))
    }
    +
    fn get_bounds(positions: &Input) -> (Vector2D<isize>, Vector2D<isize>) {
    let f = |init, cmp: &dyn Fn(isize, isize) -> isize| {
    positions
    .iter()
    .fold(Vector2D::new(init, init), |acc, elf| {
    Vector2D::new(cmp(acc.x(), elf.x()), cmp(acc.y(), elf.y()))
    })
    };

    (f(isize::MAX, &min::<isize>), f(isize::MIN, &max::<isize>))
    }

    This function returns a pair of 2D vectors that represent opposite points of the bounding rectangle of all elves.

    You might ask why would we need a closure and the answer is that positions cannot be captured from within the nested function, only via closure. One more fun fact on top of that is the type of the comparator

    -
    &dyn Fn(isize, isize) -> isize
    +
    &dyn Fn(isize, isize) -> isize

    Once we remove the dyn keyword, compiler yells at us and also includes a way how to get a more thorough explanation of the error by running

    $ rustc --explain E0782

    which shows us

    Trait objects must include the dyn keyword.

    Erroneous code example:

    -
    trait Foo {}
    fn test(arg: Box<Foo>) {} // error!
    +
    trait Foo {}
    fn test(arg: Box<Foo>) {} // error!

    Trait objects are a way to call methods on types that are not known until runtime but conform to some trait.

    Trait objects should be formed with Box<dyn Foo>, but in the code above @@ -201,7 +201,7 @@ runtime but conform to some trait.

    This makes it harder to see that arg is a trait object and not a simply a heap allocated type called Foo.

    To fix this issue, add dyn before the trait name.

    -
    trait Foo {}
    fn test(arg: Box<dyn Foo>) {} // ok!
    +
    trait Foo {}
    fn test(arg: Box<dyn Foo>) {} // ok!

    This used to be allowed before edition 2021, but is now an error.

    Rant

    Not all of the explanations are helpful though, in some cases they might be even more confusing than helpful, since they address very simple use cases.

    As you can see, even in this case there are two sides to the explanations:

      @@ -244,7 +244,7 @@ cleaned it up a bit. The changed version is shown here and the original was just more verbose.

    I'll skip the boring parts of checking bounds and entry/exit of the basin 😉 We can easily calculate positions of the blizzards using a modular arithmetics:

    -
    impl Index<Position> for Basin {
    type Output = char;

    fn index(&self, index: Position) -> &Self::Output {
    // ‹skipped boring parts›

    // We need to account for the loops of the blizzards
    let width = self.cols - 2;
    let height = self.rows - 2;

    let blizzard_origin = |size, d, t, i| ((i - 1 + size + d * (t % size)) % size + 1) as usize;
    [
    (
    index.y() as usize,
    blizzard_origin(width, -1, index.z(), index.x()),
    '>',
    ),
    (
    index.y() as usize,
    blizzard_origin(width, 1, index.z(), index.x()),
    '<',
    ),
    (
    blizzard_origin(height, -1, index.z(), index.y()),
    index.x() as usize,
    'v',
    ),
    (
    blizzard_origin(height, 1, index.z(), index.y()),
    index.x() as usize,
    '^',
    ),
    ]
    .iter()
    .find_map(|&(y, x, direction)| {
    if self.map[y][x] == direction {
    Some(&self.map[y][x])
    } else {
    None
    }
    })
    .unwrap_or(&'.')
    }
    }
    +
    impl Index<Position> for Basin {
    type Output = char;

    fn index(&self, index: Position) -> &Self::Output {
    // ‹skipped boring parts›

    // We need to account for the loops of the blizzards
    let width = self.cols - 2;
    let height = self.rows - 2;

    let blizzard_origin = |size, d, t, i| ((i - 1 + size + d * (t % size)) % size + 1) as usize;
    [
    (
    index.y() as usize,
    blizzard_origin(width, -1, index.z(), index.x()),
    '>',
    ),
    (
    index.y() as usize,
    blizzard_origin(width, 1, index.z(), index.x()),
    '<',
    ),
    (
    blizzard_origin(height, -1, index.z(), index.y()),
    index.x() as usize,
    'v',
    ),
    (
    blizzard_origin(height, 1, index.z(), index.y()),
    index.x() as usize,
    '^',
    ),
    ]
    .iter()
    .find_map(|&(y, x, direction)| {
    if self.map[y][x] == direction {
    Some(&self.map[y][x])
    } else {
    None
    }
    })
    .unwrap_or(&'.')
    }
    }

    As you can see, there is an expression for calculating the original position and it's used multiple times, so why not take it out to a lambda, right? 😉

    I couldn't get the rustfmt to format the for-loop nicely, so I've just @@ -262,10 +262,10 @@ algorithm, since it better reflects the cost function.

    a priority for the queue.

    Whereas with the A*, I have chosen to use both time and Manhattan distance that promotes vertices closer to the exit and with a minimum time taken.

    Cost function is, of course, a closure 😉

let cost = |p: Position| p.z() as usize + exit.y().abs_diff(p.y()) + exit.x().abs_diff(p.x());

And also for checking the possible moves from the current vertex, I have implemented yet another closure that yields an iterator with the next moves:

let next_positions = |p| {
[(0, 0, 1), (0, -1, 1), (0, 1, 1), (-1, 0, 1), (1, 0, 1)]
.iter()
.filter_map(move |&(x, y, t)| {
let next_p = p + Vector3D::new(x, y, t);

if basin[next_p] == '.' {
Some(next_p)
} else {
None
}
})
};

    Min-heap

In this case I had a need to use the priority queue taking the elements with the lowest cost as the prioritized ones. Rust only offers you the BinaryHeap, which is a max-heap, so the usual trick is to wrap the values in Reverse (when pushing onto the BinaryHeap). However the wrapping affects the type of the heap, and popping the most prioritized elements yields values wrapped in the Reverse.

    For this purpose I have just taken the max-heap and wrapped it as a whole in a separate structure providing just the desired methods:

use std::cmp::{Ord, Reverse};
use std::collections::BinaryHeap;

pub struct MinHeap<T> {
heap: BinaryHeap<Reverse<T>>,
}

impl<T: Ord> MinHeap<T> {
pub fn new() -> MinHeap<T> {
MinHeap {
heap: BinaryHeap::new(),
}
}

pub fn push(&mut self, item: T) {
self.heap.push(Reverse(item))
}

pub fn pop(&mut self) -> Option<T> {
self.heap.pop().map(|Reverse(x)| x)
}
}

impl<T: Ord> Default for MinHeap<T> {
fn default() -> Self {
Self::new()
}
}
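
Just to illustrate the behaviour, here is a tiny usage sketch (not part of the original solution) showing that the smallest element comes out first:

fn main() {
    let mut heap = MinHeap::new();
    heap.push(3);
    heap.push(1);
    heap.push(2);

    // even though a max-heap is used underneath, the wrapper pops
    // the smallest element first
    assert_eq!(heap.pop(), Some(1));
    assert_eq!(heap.pop(), Some(2));
    assert_eq!(heap.pop(), Some(3));
    assert_eq!(heap.pop(), None);
}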

The rest is just the algorithm implementation, which is not that interesting.

    Day 25: Full of Hot Air

    tl;dr

Playing around with numbers in a special base.

…with a rather easy solution, as the last day always seems to be.

…that sounds familiar, doesn't it? Let's introduce a structure for the SNAFU numbers and implement the traits that we need.

    Let's start with a structure:

#[derive(Debug, PartialEq, Eq, PartialOrd, Ord)]
struct SNAFU {
value: i64,
}

    Converting from &str

We will start by implementing the FromStr trait that will help us parse our input. This is rather simple, I can just take the from_snafu function and copy-paste it into the implementation of the trait for the SNAFU.
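
To give an idea what that can look like, here is a minimal sketch of a FromStr implementation assuming the usual SNAFU digit mapping (2, 1, 0, -, = standing for 2, 1, 0, -1, -2); the error type here is made up and the original code may differ:

use std::str::FromStr;

impl FromStr for SNAFU {
    // hypothetical error type, chosen just for this sketch
    type Err = String;

    fn from_str(s: &str) -> Result<Self, Self::Err> {
        let mut value = 0;

        for c in s.chars() {
            // SNAFU is base 5, so shift the accumulated value and add the digit
            value = value * 5
                + match c {
                    '2' => 2,
                    '1' => 1,
                    '0' => 0,
                    '-' => -1,
                    '=' => -2,
                    _ => return Err(format!("invalid SNAFU digit: {c}")),
                };
        }

        Ok(SNAFU { value })
    }
}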

    After those changes we need to adjust the code and tests.

Parsing of the input is very easy; before we just used the lines, now we parse everything:

     fn parse_input<P: AsRef<Path>>(pathname: P) -> Input {
- file_to_lines(pathname)
+ file_to_structs(pathname)
}

    Part 1 needs to be adjusted a bit too:

     fn part_1(input: &Input) -> Output {
- to_snafu(input.iter().map(|s| from_snafu(s)).sum())
+ SNAFU::from(input.iter().map(|s| s.value).sum::<i64>()).to_string()
}

    You can also see that it simplifies the meaning a bit and it is more explicit than the previous versions.

    And for the tests:

     #[test]
fn test_from() {
- for (n, s) in EXAMPLES.iter() {
- assert_eq!(from_snafu(s), *n);
+ for (&n, s) in EXAMPLES.iter() {
+ assert_eq!(s.parse::<SNAFU>().unwrap().value, n);
}
}

#[test]
fn test_to() {
- for (n, s) in EXAMPLES.iter() {
- assert_eq!(to_snafu(*n), s.to_string());
+ for (&n, s) in EXAMPLES.iter() {
+ assert_eq!(SNAFU::from(n).to_string(), s.to_string());
}

    Summary

    Let's wrap the whole thing up! Keeping in mind both AoC and the Rust…

    Finished advent calendar :smile:

…to implement the indexing in a graph, rather than explicitly access the underlying data structure.

    Here you can see a rather short snippet from the solution that allows you to “index” the graph:

impl Index<&str> for Graph {
type Output = Vertex;

fn index(&self, index: &str) -> &Self::Output {
&self.g[index]
}
}

    Cartesian product

During the implementation I had to utilize the Floyd-Warshall algorithm for finding the shortest paths between pairs of vertices and utilized the iproduct! macro…

…also makes it harder to evaluate algorithmically, since you need to check the different ways the work can be split.

Being affected by functional programming brain damage™️, I have chosen to do this part with a function that returns an iterator over the possible ways:

fn pairings(
valves: &BTreeSet<String>,
) -> impl Iterator<Item = (BTreeSet<String>, BTreeSet<String>)> + '_ {
let mapping = valves.iter().collect_vec();

let max_mask = 1 << (valves.len() - 1);

(0..max_mask).map(move |mask| {
let mut elephant = BTreeSet::new();
let mut human = BTreeSet::new();

for (i, &v) in mapping.iter().enumerate() {
if (mask & (1 << i)) == 0 {
human.insert(v.clone());
} else {
elephant.insert(v.clone());
}
}

(human, elephant)
})
}
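
A tiny sketch of how it behaves, reusing the pairings function from above (the valve names are made up): for n valves there are 2^(n − 1) distinct splits, since swapping the two sets does not create new work.

use std::collections::BTreeSet;

fn main() {
    let valves: BTreeSet<String> = ["AA", "BB", "CC"].iter().map(|s| s.to_string()).collect();

    // 3 valves → 2^(3 - 1) = 4 different ways to split them
    // between the human and the elephant
    assert_eq!(pairings(&valves).count(), 4);
}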

    Day 17: Pyroclastic Flow

    tl;dr

    Simulating an autonomous Tetris where pieces get affected by a series of jets of hot gas.


…iterate through the positions that can actually collide with the wall or another piece.

To get the desired behaviour, you can just compose a few smaller functions:

fn occupied(shape: &[Vec<char>]) -> impl Iterator<Item = Position> + '_ {
shape.iter().enumerate().flat_map(|(y, row)| {
row.iter().enumerate().filter_map(move |(x, c)| {
if c == &'#' {
Some(Vector2D::new(x as isize, y as isize))
} else {
None
}
})
})
}

In the end, we get relative positions which we can adjust later when given the specific positions from the iterator. You can see some interesting parts in this:

…and also unwraps the values from Some(…).

…jets that move our pieces around. Initially I have implemented my own infinite iterator that just yields the indices. It is a very simple, yet powerful, piece of code:

struct InfiniteIndex {
size: usize,
i: usize,
}

impl InfiniteIndex {
fn new(size: usize) -> InfiniteIndex {
InfiniteIndex { size, i: size - 1 }
}
}

impl Iterator for InfiniteIndex {
type Item = usize;

fn next(&mut self) -> Option<Self::Item> {
self.i = (self.i + 1) % self.size;
Some(self.i)
}
}

However, when I'm looking at the code now, it doesn't really make much sense… Guess what, we can use a built-in function that is implemented on iterators for that! The function is called .cycle()
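
A rough sketch of the replacement (the jets vector here is made up, in the real solution it comes from the parsed input):

fn main() {
    // stand-in for the parsed sequence of jets
    let jets = vec!['<', '>', '>', '<'];

    // an infinite iterator over the indices into ‹jets›,
    // replacing the whole ‹InfiniteIndex› structure
    let mut jet_index = (0..jets.len()).cycle();

    assert_eq!(jet_index.next(), Some(0));
    assert_eq!(jet_index.next(), Some(1));
    assert_eq!(jet_index.next(), Some(2));
    assert_eq!(jet_index.next(), Some(3));
    // …and we wrap around to the beginning again
    assert_eq!(jet_index.next(), Some(0));
}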

…the Rc<RefCell<T>>. In the end I failed on a wrong answer and a rather interesting issue with the .borrow_mut() method being used on Rc<RefCell<T>>.

      .borrow_mut()

      Consider the following snippet of the code (taken from the documentation):

use std::cell::{RefCell, RefMut};
use std::collections::HashMap;
use std::rc::Rc;
// use std::borrow::BorrowMut;

fn main() {
let shared_map: Rc<RefCell<_>> = Rc::new(RefCell::new(HashMap::new()));
// Create a new block to limit the scope of the dynamic borrow
{
let mut map: RefMut<_> = shared_map.borrow_mut();
map.insert("africa", 92388);
map.insert("kyoto", 11837);
map.insert("piccadilly", 11826);
map.insert("marbles", 38);
}

// Note that if we had not let the previous borrow of the cache fall out
// of scope then the subsequent borrow would cause a dynamic thread panic.
// This is the major hazard of using `RefCell`.
let total: i32 = shared_map.borrow().values().sum();
println!("{total}");
}

      We allocate a hash map on the heap and then in the inner block, we borrow it as a mutable reference, so that we can use it.

      note

      It is a very primitive example for Rc<RefCell<T>> and mutable borrow.

      If you uncomment the 4th line with use std::borrow::BorrowMut;, you cannot compile the code anymore, because of

   Compiling playground v0.0.1 (/playground)
error[E0308]: mismatched types
--> src/main.rs:10:34
|
10 | let mut map: RefMut<_> = shared_map.borrow_mut();
| --------- ^^^^^^^^^^^^^^^^^^^^^^^ expected struct `RefMut`, found mutable reference
| |
| expected due to this
|
= note: expected struct `RefMut<'_, _>`
found mutable reference `&mut Rc<RefCell<HashMap<_, _>>>`

error[E0599]: no method named `insert` found for struct `RefMut<'_, _>` in the current scope
--> src/main.rs:11:13
|
11 | map.insert("africa", 92388);
| ^^^^^^ method not found in `RefMut<'_, _>`

error[E0599]: no method named `insert` found for struct `RefMut<'_, _>` in the current scope
--> src/main.rs:12:13
|
12 | map.insert("kyoto", 11837);
| ^^^^^^ method not found in `RefMut<'_, _>`

error[E0599]: no method named `insert` found for struct `RefMut<'_, _>` in the current scope
--> src/main.rs:13:13
|
13 | map.insert("piccadilly", 11826);
| ^^^^^^ method not found in `RefMut<'_, _>`

error[E0599]: no method named `insert` found for struct `RefMut<'_, _>` in the current scope
--> src/main.rs:14:13
|
14 | map.insert("marbles", 38);
| ^^^^^^ method not found in `RefMut<'_, _>`

Some errors have detailed explanations: E0308, E0599.
For more information about an error, try `rustc --explain E0308`.
error: could not compile `playground` due to 5 previous errors

It might seem a bit ridiculous. However, I got to a point where the compiler suggested use std::borrow::BorrowMut; and it resulted in breaking parts of the code that worked previously. I think it may be a good idea to go over what is…

…method. OK, but how can we call it on the Rc<T>? Easily! I have not been able to find a lot on this trait. My guess is that it provides a method instead of the syntactic sugar (&mut x) for the mutable borrow. And also it provides default implementations for the types:

impl BorrowMut<str> for String

impl<T> BorrowMut<T> for &mut T
where
T: ?Sized,

impl<T> BorrowMut<T> for T
where
T: ?Sized,

impl<T, A> BorrowMut<[T]> for Vec<T, A>
where
A: Allocator,

impl<T, A> BorrowMut<T> for Box<T, A>
where
A: Allocator,
T: ?Sized,

impl<T, const N: usize> BorrowMut<[T]> for [T; N]
      Conflict

      Now the question is why did it break the code… My first take was that the type Rc<RefCell<T>> has some specialized implementation of the .borrow_mut() and the use overrides it with the default, which is true in a sense. However there is no specialized implementation. Let's have a look at the trait and the type signature on the RefCell<T>:

// trait
pub trait BorrowMut<Borrowed>: Borrow<Borrowed>
where
Borrowed: ?Sized,
{
fn borrow_mut(&mut self) -> &mut Borrowed;
}

// ‹RefCell<T>.borrow_mut()› type signature
pub fn borrow_mut(&self) -> RefMut<'_, T>

      I think that we can definitely agree on the fact that RefMut<'_, T> is not the RefCell<T>.

In my opinion, RefCell<T> implements a separate .borrow_mut() rather…

…as: that you can use the macro machinery to save yourself some typing. If you have an enumeration of which the default value doesn't bear any parameter, you can just do2:

#[derive(Default)]
enum Color {
#[default]
White,
Gray,
Black,
}

      Abusing negation

If you want to use a unary minus operator on your own type, you can implement a Neg trait3. I was dealing with a binary tree and needed a way to look…
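
Just to illustrate the mechanics (the Depth type below is made up for the example, it is not the type from the original post):

use std::ops::Neg;

#[derive(Debug, Clone, Copy, PartialEq)]
struct Depth(i64);

impl Neg for Depth {
    type Output = Depth;

    fn neg(self) -> Self::Output {
        Depth(-self.0)
    }
}

fn main() {
    let d = Depth(3);

    // thanks to the ‹Neg› implementation we can use the unary minus
    assert_eq!(-d, Depth(-3));
}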

…order and return the resulting matrix.
      Image describing the problem

      Skeleton and initial adjustments

We are given the following skeleton for the C++ solution of the given challenge:

class Solution {
public:
vector<vector<int>> diagonalSort(vector<vector<int>>& mat) {

}
};

The task is to sort the passed matrix diagonally and then return it. First of all, I don't like to solve this in a web browser, so we'll need to adjust it accordingly for running it locally. We'll start by including the vector header, using fully-qualified namespaces1 and also adding a few tests:

#include <cassert>
#include <vector>

using matrix = std::vector<std::vector<int>>;

class Solution {
public:
matrix diagonalSort(matrix& mat)
{
}
};

static void test_case_1()
{
// Input: mat = [[3,3,1,1],[2,2,1,2],[1,1,1,2]]
// Output: [[1,1,1,1],[1,2,2,2],[1,2,3,3]]

Solution s;
assert((s.diagonalSort(std::vector { std::vector { 3, 3, 1, 1 },
std::vector { 2, 2, 1, 2 },
std::vector { 1, 1, 1, 2 } })
== std::vector { std::vector { 1, 1, 1, 1 },
std::vector { 1, 2, 2, 2 },
std::vector { 1, 2, 3, 3 } }));
}

static void test_case_2()
{
// Input: mat =
// [[11,25,66,1,69,7],[23,55,17,45,15,52],[75,31,36,44,58,8],[22,27,33,25,68,4],[84,28,14,11,5,50]]
// Output:
// [[5,17,4,1,52,7],[11,11,25,45,8,69],[14,23,25,44,58,15],[22,27,31,36,50,66],[84,28,75,33,55,68]]

Solution s;
assert((s.diagonalSort(std::vector { std::vector { 11, 25, 66, 1, 69, 7 },
std::vector { 23, 55, 17, 45, 15, 52 },
std::vector { 75, 31, 36, 44, 58, 8 },
std::vector { 22, 27, 33, 25, 68, 4 },
std::vector { 84, 28, 14, 11, 5, 50 } })
== std::vector { std::vector { 5, 17, 4, 1, 52, 7 },
std::vector { 11, 11, 25, 45, 8, 69 },
std::vector { 14, 23, 25, 44, 58, 15 },
std::vector { 22, 27, 31, 36, 50, 66 },
std::vector { 84, 28, 75, 33, 55, 68 } }));
}

int main()
{
test_case_1();
test_case_2();

return 0;
}

We need to return the matrix, but we're given a reference to the input matrix. We can easily abuse the C++ here and just switch the reference to a value; this way the matrix will be copied when passed to the function, so we can sort the copy and just return it back. And we also get yelled at by the compiler for the fact that the method doesn't return anything yet, so to make it “shut up” we will just return the input for now:

-    matrix diagonalSort(matrix& mat)
+ matrix diagonalSort(matrix mat)
{
+ return mat;
}

      Now, we get the copy and we're good to go.

      Naïve solution

      As you may know, C++ offers a plethora of functions that can be used to your advantage, given that you know how to “bend” the data structures accordingly.

      What does that mean for us? Well, we have an std::sort, we can use it, right? Let's have a look at it:

template< class RandomIt >
void sort( RandomIt first, RandomIt last );

This overload is more than we need. What does it do? It just sorts the elements in the range [first, last) using operator< on them. We can't sort the whole matrix using this, but… we can sort just »one« diagonal without doing much work…

…up, i.e. “compiler-assisted development”3. And that way we get:

matrix diagonalSort(matrix mat)
{
// we iterate over the diagonals
for (auto d : diagonals(mat)) {
// and we sort each diagonal
std::sort(d.begin(), d.end());
}

// we take the matrix by copy, so we can sort in-situ and return the copy
// that we sorted
return mat;
}

      This solution looks very simple, doesn't it? Well, cause it is. Let's try compiling it:

matrix-sort.cpp:11:23: error: use of undeclared identifier 'diagonals' [clang-diagnostic-error]
for (auto d : diagonals(mat)) {
^
Found compiler error(s).
make: *** [makefile:14: tidy] Error 1

      OK, seems about right. We haven't implemented the diagonals yet. And based on what we've written so far, we need a function or a class diagonals that will give us the diagonals we need.

…do such functionality for a matrix of any type, not just the int from the challenge.
    • get the beginning
    • get the end (the “sentinel”)
template <typename T>
class diagonals {
using matrix_t = std::vector<std::vector<T>>;

matrix_t& _matrix;

public:
diagonals(matrix_t& m)
: _matrix(m)
{
}
diagonals_iter begin()
{
/* TODO */
}
diagonals_iter end()
{
/* TODO */
}
};

    Now we have a diagonals that we can use to go through the diagonals. We haven't implemented the core of it yet. Let's go through what we have for now.

We have a templated class with templated T that is used as a placeholder for any…

…in the first row, followed by the rest of the diagonals in the first column.

…need to know which diagonal is next. For that purpose we will pass the indices of the first cell on the diagonal. That way we can always tell how to move forward.

    We will start by updating the begin and end to reflect our choice accordingly.

diagonals_iter begin() { return diagonals_iter { _matrix, 0, 0 }; }
diagonals_iter end() { return diagonals_iter { _matrix, 0, _matrix.size() }; }

For the begin we return the first diagonal that starts at (0, 0). And because we have decided to do the diagonals in the first column at the end, the first diagonal that is not a valid one is the one at (0, height). Apart from the…

…don't care about the fact they don't need to be sorted.

We can start with a simple skeleton based on the information that we pass from the diagonals. Also, to utilize the matrix_t and keep the implementation details hidden away, we will put this code into the diagonals class.

class diagonals_iter {
matrix_t& m;
std::size_t x;
std::size_t y;

public:
diagonals_iter(matrix_t& matrix, std::size_t x, std::size_t y)
: m(matrix)
, x(x)
, y(y)
{
}
};

    In this case we will be implementing a “simple” forward iterator, so we don't need to implement a lot. Notably it will be:

    • dereference operator (we need to be able to retrieve the objects we iterate over)
class diagonals_iter {
matrix_t& m;
std::size_t x;
std::size_t y;

public:
diagonals_iter(matrix_t& matrix, std::size_t x, std::size_t y)
: m(matrix)
, x(x)
, y(y)
{
}

bool operator!=(const diagonals_iter& rhs) const
{
// iterators are not equal if they reference different matrices, or
// their positions differ
return m != rhs.m || x != rhs.x || y != rhs.y;
}

diagonals_iter& operator++()
{
if (y != 0) {
// iterating through diagonals down the first column
y++;
return *this;
}

// iterating the diagonals along the first row
x++;
if (x == m.front().size()) {
// switching to diagonals in the first column
x = 0;
y++;
}

return *this;
}

diagonal<T> operator*() const { return diagonal { m, x, y }; }
};

Let's go one-by-one. The inequality operator is rather simple, just compare the iterator's attributes field by field. If you think about it, checking inequality of two 2D vectors may be a bit inefficient, therefore we can swap the comparisons around and check the matrices as the last thing.

-        return m != rhs.m || x != rhs.x || y != rhs.y;
+ return x != rhs.x || y != rhs.y || m != rhs.m;

Preincrementation is where the magic happens. If you take a closer look, you can see two branches of this operation:

…something else. In our case it will be a class called diagonal.

…a diagonal is the matrix itself and the “start” of the diagonal (row and column). And we also know that the diagonal must provide some iterators for the std::sort function. We can start with the following skeleton:

template <typename T>
class diagonal {
using matrix_t = std::vector<std::vector<T>>;

matrix_t& matrix;
std::size_t x;
std::size_t y;

public:
diagonal(matrix_t& matrix, std::size_t x, std::size_t y)
: matrix(matrix)
, x(x)
, y(y)
{
}

diagonal_iter begin() const { return diagonal_iter { matrix, x, y }; }

diagonal_iter end() const
{
auto max_x = matrix[y].size();
auto max_y = matrix.size();

// we need to find the distance in which we get out of bounds (either in
// column or row)
auto steps = std::min(max_x - x, max_y - y);

return diagonal_iter { matrix, x + steps, y + steps };
}
};

Initialization is rather simple, we just “keep” the stuff we get; begin is the simplest, we just delegate.

In case of the end, it gets more complicated. We need to know where the “end” is…

…be used in std::sort. We need the usual operations like:

      We will also add all the types that our iterator uses with the category of the iterator, i.e. what interface it supports:

class diagonal_iter {
// we need to keep reference to the matrix itself
matrix_t& m;

// we need to be able to tell our current position
std::size_t x;
std::size_t y;

public:
using difference_type = std::ptrdiff_t;
using value_type = T;
using pointer = T*;
using reference = T&;
using iterator_category = std::random_access_iterator_tag;

diagonal_iter(matrix_t& matrix,
std::size_t x,
std::size_t y)
: m(matrix)
, x(x)
, y(y)
{
}

bool operator==(const diagonal_iter& rhs) const
{
return x == rhs.x && y == rhs.y && m == rhs.m;
}

diagonal_iter& operator++()
{
// we are moving along the diagonal, so we increment both ‹x› and ‹y› at
// the same time
x++;
y++;
return *this;
}

reference operator*() const { return m[y][x]; }
};

      This is pretty similar to the previous iterator, but now we need to implement the remaining requirements of the random access iterator. Let's see what those are:

      • define an ordering on the iterators

      Let's fill them in:

class diagonal_iter {
// we need to keep reference to the matrix itself
matrix_t& m;

// we need to be able to tell our current position
std::size_t x;
std::size_t y;

public:
using difference_type = std::ptrdiff_t;
using value_type = T;
using pointer = T*;
using reference = T&;
using iterator_category = std::random_access_iterator_tag;

diagonal_iter(matrix_t& matrix,
std::size_t x,
std::size_t y)
: m(matrix)
, x(x)
, y(y)
{
}

bool operator==(const diagonal_iter& rhs) const
{
return x == rhs.x && y == rhs.y && m == rhs.m;
}

diagonal_iter& operator++()
{
// we are moving along the diagonal, so we increment both ‹x› and ‹y› at
// the same time
x++;
y++;
return *this;
}

reference operator*() const { return m[y][x]; }

// exactly opposite to the incrementation
diagonal_iter operator--()
{
x--;
y--;
return *this;
}

// moving ‹n› steps back is same as calling decrementation ‹n›-times, so we
// can just return a new iterator and subtract ‹n› from both coordinates in
// the matrix
diagonal_iter operator-(difference_type n) const
{
return diagonal_iter { m, x - n, y - n };
}

// here we assume that we are given two iterators on the same diagonal
difference_type operator-(const diagonal_iter& rhs) const
{
assert(m == rhs.m);
return x - rhs.x;
}

// counterpart of moving ‹n› steps backwards
diagonal_iter operator+(difference_type n) const
{
return diagonal_iter { m, x + n, y + n };
}

// we compare the coordinates, and also assume that those 2 iterators are
// lying on the same diagonal
bool operator<(const diagonal_iter& rhs) const
{
assert(m == rhs.m);
return x < rhs.x && y < rhs.y;
}
};

      At this point we could probably try and compile it, right? If we do so, we will get yelled at by a compiler for the following reasons:

/usr/bin/../lib/gcc/x86_64-redhat-linux/12/../../../../include/c++/12/bits/stl_algo.h:1792:11: error: object of type 'diagonal<int>::diagonal_iter' cannot be assigned because its copy assignment operator is implicitly deleted [clang-diagnostic-error]
__last = __next;
^
/usr/bin/../lib/gcc/x86_64-redhat-linux/12/../../../../include/c++/12/bits/stl_algo.h:1817:11: note: in instantiation of function template specialization 'std::__unguarded_linear_insert<diagonal<int>::diagonal_iter, __gnu_cxx::__ops::_Val_less_iter>' requested here
std::__unguarded_linear_insert(__i,
^
/usr/bin/../lib/gcc/x86_64-redhat-linux/12/../../../../include/c++/12/bits/stl_algo.h:1849:9: note: in instantiation of function template specialization 'std::__insertion_sort<diagonal<int>::diagonal_iter, __gnu_cxx::__ops::_Iter_less_iter>' requested here
std::__insertion_sort(__first, __first + int(_S_threshold), __comp);
^
/usr/bin/../lib/gcc/x86_64-redhat-linux/12/../../../../include/c++/12/bits/stl_algo.h:1940:9: note: in instantiation of function template specialization 'std::__final_insertion_sort<diagonal<int>::diagonal_iter, __gnu_cxx::__ops::_Iter_less_iter>' requested here
std::__final_insertion_sort(__first, __last, __comp);
^
/usr/bin/../lib/gcc/x86_64-redhat-linux/12/../../../../include/c++/12/bits/stl_algo.h:4820:12: note: in instantiation of function template specialization 'std::__sort<diagonal<int>::diagonal_iter, __gnu_cxx::__ops::_Iter_less_iter>' requested here
std::__sort(__first, __last, __gnu_cxx::__ops::__iter_less_iter());
^
matrix-sort.cpp:161:18: note: in instantiation of function template specialization 'std::sort<diagonal<int>::diagonal_iter>' requested here
std::sort(d.begin(), d.end());
^
matrix-sort.cpp:17:19: note: copy assignment operator of 'diagonal_iter' is implicitly deleted because field 'm' is of reference type 'diagonal<int>::matrix_t &' (aka 'vector<std::vector<int>> &')
matrix_t& m;
^
/usr/bin/../lib/gcc/x86_64-redhat-linux/12/../../../../include/c++/12/bits/stl_algo.h:1830:2: error: no matching function for call to '__unguarded_linear_insert' [clang-diagnostic-error]
std::__unguarded_linear_insert(__i,
^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
/usr/bin/../lib/gcc/x86_64-redhat-linux/12/../../../../include/c++/12/bits/stl_algo.h:1850:9: note: in instantiation of function template specialization 'std::__unguarded_insertion_sort<diagonal<int>::diagonal_iter, __gnu_cxx::__ops::_Iter_less_iter>' requested here
std::__unguarded_insertion_sort(__first + int(_S_threshold), __last,
^
/usr/bin/../lib/gcc/x86_64-redhat-linux/12/../../../../include/c++/12/bits/stl_algo.h:1940:9: note: in instantiation of function template specialization 'std::__final_insertion_sort<diagonal<int>::diagonal_iter, __gnu_cxx::__ops::_Iter_less_iter>' requested here
std::__final_insertion_sort(__first, __last, __comp);
^
/usr/bin/../lib/gcc/x86_64-redhat-linux/12/../../../../include/c++/12/bits/stl_algo.h:4820:12: note: in instantiation of function template specialization 'std::__sort<diagonal<int>::diagonal_iter, __gnu_cxx::__ops::_Iter_less_iter>' requested here
std::__sort(__first, __last, __gnu_cxx::__ops::__iter_less_iter());
^
matrix-sort.cpp:161:18: note: in instantiation of function template specialization 'std::sort<diagonal<int>::diagonal_iter>' requested here
std::sort(d.begin(), d.end());
^
/usr/bin/../lib/gcc/x86_64-redhat-linux/12/../../../../include/c++/12/bits/stl_algo.h:1782:5: note: candidate template ignored: substitution failure [with _RandomAccessIterator = diagonal<int>::diagonal_iter, _Compare = __gnu_cxx::__ops::_Val_less_iter]
__unguarded_linear_insert(_RandomAccessIterator __last,
^
/usr/bin/../lib/gcc/x86_64-redhat-linux/12/../../../../include/c++/12/bits/stl_algo.h:1923:11: error: object of type 'diagonal<int>::diagonal_iter' cannot be assigned because its copy assignment operator is implicitly deleted [clang-diagnostic-error]
__last = __cut;
^
/usr/bin/../lib/gcc/x86_64-redhat-linux/12/../../../../include/c++/12/bits/stl_algo.h:1937:9: note: in instantiation of function template specialization 'std::__introsort_loop<diagonal<int>::diagonal_iter, long, __gnu_cxx::__ops::_Iter_less_iter>' requested here
std::__introsort_loop(__first, __last,
^
/usr/bin/../lib/gcc/x86_64-redhat-linux/12/../../../../include/c++/12/bits/stl_algo.h:4820:12: note: in instantiation of function template specialization 'std::__sort<diagonal<int>::diagonal_iter, __gnu_cxx::__ops::_Iter_less_iter>' requested here
std::__sort(__first, __last, __gnu_cxx::__ops::__iter_less_iter());
^
matrix-sort.cpp:161:18: note: in instantiation of function template specialization 'std::sort<diagonal<int>::diagonal_iter>' requested here
std::sort(d.begin(), d.end());
^
matrix-sort.cpp:17:19: note: copy assignment operator of 'diagonal_iter' is implicitly deleted because field 'm' is of reference type 'diagonal<int>::matrix_t &' (aka 'vector<std::vector<int>> &')
matrix_t& m;
^

      That's a lot of noise, isn't it? Let's focus on the important parts:

/usr/bin/../lib/gcc/x86_64-redhat-linux/12/../../../../include/c++/12/bits/stl_algo.h:1792:11: error: object of type 'diagonal<int>::diagonal_iter' cannot be assigned because its copy assignment operator is implicitly deleted [clang-diagnostic-error]

matrix-sort.cpp:17:19: note: copy assignment operator of 'diagonal_iter' is implicitly deleted because field 'm' is of reference type 'diagonal<int>::matrix_t &' (aka 'vector<std::vector<int>> &')
matrix_t& m;
^

      Ah! We have a reference in our iterator, and this prevents us from having a copy assignment operator (that is used “somewhere” in the sorting algorithm). Well… Let's just wrap it!

# we need to keep a different type than reference
- matrix_t& m;
+ std::reference_wrapper<matrix_t> m;

# in comparison we need to get the reference out of the wrapper first
- return x == rhs.x && y == rhs.y && m == rhs.m;
+ return x == rhs.x && y == rhs.y && m.get() == rhs.m.get();

# same when we return a reference to the “cell” in the matrix
- reference operator*() const { return m[y][x]; }
+ reference operator*() const { return m.get()[y][x]; }

# and finally in the assertions that we set for the “distance” and “less than”
- assert(m == rhs.m);
+ assert(m.get() == rhs.m.get());

      We're done now! We have written an iterator over diagonals for a 2D vector. You can have a look at the final result here.

      Footnotes

…own “box of hell”.

    Swapping indices

Relatively simple implementation, just take the values, swap them and return a new vector.

impl<T: Copy> Vector2D<T> {
pub fn swap(&self) -> Self {
Self {
x: self.y,
y: self.x,
}
}
}

Pretty straight-forward implementation, but let's talk about the T: Copy. We need to use it, since we are returning a new vector with swapped values. If we had values that cannot be copied, the only thing we could do would be a…

…later on). This is pretty similar to the operations on sets from the first week…

    Indexing Vec

I will start with the indexing, 'cause bound-checking is a bit more… complicated than I would like it to be.

pub fn index<'a, T, U>(v: &'a [Vec<U>], idx: &Vector2D<T>) -> &'a U
where
usize: TryFrom<T>,
<usize as TryFrom<T>>::Error: Debug,
T: Copy,
{
let (x, y): (usize, usize) = (idx.x.try_into().unwrap(), idx.y.try_into().unwrap());
&v[y][x]
}

Let's talk about this mess… The body of the function is probably the easiest part and should not be hard to understand: we just take the x and y and convert them both to the usize type that can be used later on for indexing.
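
A small usage sketch (the values are made up, and it assumes the Vector2D::new constructor used earlier in the solutions):

fn main() {
    let grid = vec![vec![10, 20], vec![30, 40]];

    // ‹x› picks the column, ‹y› picks the row
    let idx = Vector2D::new(1isize, 0isize);
    assert_eq!(*index(&grid, &idx), 20);
}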

…taken by a reference, i.e. returned reference must live at least as long as the…

The first issue that our implementation has is the fact that we cannot get a mutable reference out of that function. This could be easily resolved by introducing a new function, e.g. index_mut, which I have actually done while writing this part:

pub fn index_mut<'a, T, U>(v: &'a mut [Vec<U>], idx: &Vector2D<T>) -> &'a mut U
where
usize: TryFrom<T>,
<usize as TryFrom<T>>::Error: Debug,
T: Copy,
{
let (x, y): (usize, usize) = (idx.x.try_into().unwrap(), idx.y.try_into().unwrap());
&mut v[y][x]
}
    «↯» Why can't we use one function?

When we consider a Vec<T>, we don't need to consider containers as T; Rust implements indexing as traits Index<T> and IndexMut<T> that do the dirty work behind the syntactic sugar of container[idx].

However, implementing a foreign trait for a foreign type is not allowed, i.e. you must have defined either the trait or the type yourself. This means that you can implement indexing over containers that you have implemented yourself, but you cannot use your own types for indexing “built-in” types.
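
As a small illustration of the first half of that statement (the Grid type is made up for the example):

use std::ops::Index;

// our own container type, local to the crate
struct Grid {
    cells: Vec<Vec<i64>>,
}

// this is allowed: the trait is foreign, but the type is ours
impl Index<(usize, usize)> for Grid {
    type Output = i64;

    fn index(&self, (x, y): (usize, usize)) -> &Self::Output {
        &self.cells[y][x]
    }
}

fn main() {
    let grid = Grid {
        cells: vec![vec![1, 2], vec![3, 4]],
    };

    assert_eq!(grid[(1, 0)], 2);
}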

Another part of this rabbit hole is trait SliceIndex<T> that is of relevance because of

impl<T, I> Index<I> for [T]
where
I: SliceIndex<[T]>

impl<T, I, A> Index<I> for Vec<T, A>
where
I: SliceIndex<[T]>,
A: Allocator

impl<T, I, const N: usize> Index<I> for [T; N]
where
[T]: Index<I>

In other words, if your type implements the SliceIndex<T> trait, it can be used for indexing. As of now, this trait has all of its required methods marked as experimental and is itself marked as unsafe.

Another problem is a requirement for indexing either [Vec<T>] or Vec<Vec<T>>. This requirement could be countered by removing the inner type Vec<T> and constraining it by a trait Index (or IndexMut respectively) in the following way:

pub fn index<'a, C, T>(v: &'a [C], idx: &Vector2D<T>) -> &'a C::Output
where
usize: TryFrom<T>,
<usize as TryFrom<T>>::Error: Debug,
T: Copy,
C: Index<usize>
{
let (x, y): (usize, usize) = (idx.x.try_into().unwrap(), idx.y.try_into().unwrap());
&v[y][x]
}

Given this, we can also give the indexing type a more meaningful name, such as I.

    Checking bounds

@@ -887,12 +887,12 @@ up with negative values which, unlike in C++, cause an error (instead of the underflow that you can use to your advantage; you can easily guess how).

So how can we approach this then? Well… we will convert the bounds instead of the indices, and that leads us to:

    -
    pub fn in_range<T, U>(v: &[Vec<U>], idx: &Vector2D<T>) -> bool
    where
    usize: TryInto<T>,
    <usize as TryInto<T>>::Error: Debug,
    T: PartialOrd + Copy,
    {
    idx.y >= 0.try_into().unwrap()
    && idx.y < v.len().try_into().unwrap()
    && idx.x >= 0.try_into().unwrap()
    && idx.x
    < v[TryInto::<usize>::try_into(idx.y).unwrap()]
    .len()
    .try_into()
    .unwrap()
    }
    +
    pub fn in_range<T, U>(v: &[Vec<U>], idx: &Vector2D<T>) -> bool
    where
    usize: TryInto<T>,
    <usize as TryInto<T>>::Error: Debug,
    T: PartialOrd + Copy,
    {
    idx.y >= 0.try_into().unwrap()
    && idx.y < v.len().try_into().unwrap()
    && idx.x >= 0.try_into().unwrap()
    && idx.x
    < v[TryInto::<usize>::try_into(idx.y).unwrap()]
    .len()
    .try_into()
    .unwrap()
    }

You can tell that it's definitely shitty code. Let's improve it now! We will get back to the original idea, but do it better. We know that we cannot convert negative values into usize, but we also know that such a conversion returns a Result<T, E>, which we can use to our advantage.

    -
    pub fn in_range<T, U>(v: &[Vec<U>], idx: &Vector2D<T>) -> bool
    where
    T: Copy,
    usize: TryFrom<T>,
    {
    usize::try_from(idx.y)
    .and_then(|y| usize::try_from(idx.x).map(|x| y < v.len() && x < v[y].len()))
    .unwrap_or(false)
    }
    +
    pub fn in_range<T, U>(v: &[Vec<U>], idx: &Vector2D<T>) -> bool
    where
    T: Copy,
    usize: TryFrom<T>,
    {
    usize::try_from(idx.y)
    .and_then(|y| usize::try_from(idx.x).map(|x| y < v.len() && x < v[y].len()))
    .unwrap_or(false)
    }

    Result<T, E> is a type similar to Either in Haskell and it allows us to chain multiple operations on correct results or propagate the original error without doing anything. Let's dissect it one-by-one.

@@ -901,7 +901,7 @@ types and either successfully convert them or fail (with a reasonable error). The method returns Result<T, E>.

We call and_then on that result; let's have a look at the type signature of and_then, which IMO explains more than enough:

    -
    pub fn and_then<U, F>(self, op: F) -> Result<U, E>
    where
    F: FnOnce(T) -> Result<U, E>
    +
    pub fn and_then<U, F>(self, op: F) -> Result<U, E>
    where
    F: FnOnce(T) -> Result<U, E>

OK… So it takes the result and a function and returns another result with a different value and a different error. However, we can see that the function, which represents an operation on a result, takes just the value, i.e. it doesn't care about any previous error. To make it short:

    We parsed a y index and now we try to convert the x index with try_from again, but on that result we use map rather than and_then, why would that be?

    -
    pub fn map<U, F>(self, op: F) -> Result<U, E>
    where
    F: FnOnce(T) -> U
    +
    pub fn map<U, F>(self, op: F) -> Result<U, E>
    where
    F: FnOnce(T) -> U

Huh… map performs an operation that cannot fail. And finally, we use unwrap_or, which takes the value from the result, or, in case of an error, returns the default that we define.
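To see the whole chain in action, here is a standalone sketch (mine, with the generic T replaced by a concrete i64 for brevity) that mirrors the in_range logic above:

fn in_range(v: &[Vec<u8>], x: i64, y: i64) -> bool {
    usize::try_from(y)
        .and_then(|y| usize::try_from(x).map(|x| y < v.len() && x < v[y].len()))
        .unwrap_or(false)
}

fn main() {
    let grid = vec![vec![0u8; 3]; 2]; // 2 rows, 3 columns

    assert!(in_range(&grid, 2, 1)); // both convert and are within the bounds
    assert!(!in_range(&grid, -1, 0)); // try_from(-1) fails, unwrap_or kicks in
    assert!(!in_range(&grid, 0, 5)); // converts fine, but the row is out of bounds
}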

    @@ -940,13 +940,13 @@ preparations for the AoC. Let's sum up our requirements:

    cannot do anything about it. However running and testing can be simplified!

    Let's introduce and export a new module solution that will take care of all of this. We will start by introducing a trait for each day.

    -
    pub trait Solution<Input, Output: Display> {
    fn parse_input<P: AsRef<Path>>(pathname: P) -> Input;

    fn part_1(input: &Input) -> Output;
    fn part_2(input: &Input) -> Output;
    }
    +
    pub trait Solution<Input, Output: Display> {
    fn parse_input<P: AsRef<Path>>(pathname: P) -> Input;

    fn part_1(input: &Input) -> Output;
    fn part_2(input: &Input) -> Output;
    }

This does a lot of work for us already: we have defined a trait, and for each day we will create a structure representing that specific day. That structure will also implement the Solution trait.
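For illustration, an implementation for a hypothetical day could look roughly like this (my sketch; it assumes the trait, std::path::Path, and the file_to_lines helper from the library are in scope, and the real solutions are obviously more involved):

struct Day01;

impl Solution<Vec<u32>, u32> for Day01 {
    fn parse_input<P: AsRef<Path>>(pathname: P) -> Vec<u32> {
        // one number per line
        file_to_lines(pathname)
            .iter()
            .map(|line| line.parse().unwrap())
            .collect()
    }

    fn part_1(input: &Vec<u32>) -> u32 {
        input.iter().sum()
    }

    fn part_2(input: &Vec<u32>) -> u32 {
        *input.iter().max().unwrap_or(&0)
    }
}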

    Now we need to get rid of the boilerplate, we can't get rid of the main function, but we can at least move out the functionality.

    -
    fn run(type_of_input: &str) -> Result<()>
    where
    Self: Sized,
    {
    tracing_subscriber::fmt()
    .with_env_filter(EnvFilter::from_default_env())
    .with_target(false)
    .with_file(true)
    .with_line_number(true)
    .without_time()
    .compact()
    .init();
    color_eyre::install()?;

    let input = Self::parse_input(format!("{}s/{}.txt", type_of_input, Self::day()));

    info!("Part 1: {}", Self::part_1(&input));
    info!("Part 2: {}", Self::part_2(&input));

    Ok(())
    }

    fn main() -> Result<()>
    where
    Self: Sized,
    {
    Self::run("input")
    }
    +
    fn run(type_of_input: &str) -> Result<()>
    where
    Self: Sized,
    {
    tracing_subscriber::fmt()
    .with_env_filter(EnvFilter::from_default_env())
    .with_target(false)
    .with_file(true)
    .with_line_number(true)
    .without_time()
    .compact()
    .init();
    color_eyre::install()?;

    let input = Self::parse_input(format!("{}s/{}.txt", type_of_input, Self::day()));

    info!("Part 1: {}", Self::part_1(&input));
    info!("Part 2: {}", Self::part_2(&input));

    Ok(())
    }

    fn main() -> Result<()>
    where
    Self: Sized,
    {
    Self::run("input")
    }

This is all part of the Solution trait, which can implement methods while being dependent on what is provided by the implementing types. In this case, we just need to bound the Output type to implement Display, which is necessary for printing the results. There is also the day() method that you can see being used when constructing the path to the input file. That method will generate the name of the file, e.g. day01, and we know that we can somehow deduce it from the structure name, given we name it reasonably.

    -
    fn day() -> String {
    let mut day = String::from(type_name::<Self>().split("::").next().unwrap());
    day.make_ascii_lowercase();

    day.to_string()
    }
    +
    fn day() -> String {
    let mut day = String::from(type_name::<Self>().split("::").next().unwrap());
    day.make_ascii_lowercase();

    day.to_string()
    }
    type_name

This feature is still experimental and considered to be internal; it is not advised to use it in any production code.

    And now we can get to the nastiest stuff 😩 We will generate the tests!

    We want to be able to generate tests for sample input in a following way:

    -
    test_sample!(day_01, Day01, 42, 69);
    +
    test_sample!(day_01, Day01, 42, 69);

    There's not much we can do, so we will write a macro to generate the tests for us.

    -
    #[macro_export]
    macro_rules! test_sample {
    ($mod_name:ident, $day_struct:tt, $part_1:expr, $part_2:expr) => {
    #[cfg(test)]
    mod $mod_name {
    use super::*;

    #[test]
    fn test_part_1() {
    let sample =
    $day_struct::parse_input(&format!("samples/{}.txt", $day_struct::day()));
    assert_eq!($day_struct::part_1(&sample), $part_1);
    }

    #[test]
    fn test_part_2() {
    let sample =
    $day_struct::parse_input(&format!("samples/{}.txt", $day_struct::day()));
    assert_eq!($day_struct::part_2(&sample), $part_2);
    }
    }
    };
    }
    +
    #[macro_export]
    macro_rules! test_sample {
    ($mod_name:ident, $day_struct:tt, $part_1:expr, $part_2:expr) => {
    #[cfg(test)]
    mod $mod_name {
    use super::*;

    #[test]
    fn test_part_1() {
    let sample =
    $day_struct::parse_input(&format!("samples/{}.txt", $day_struct::day()));
    assert_eq!($day_struct::part_1(&sample), $part_1);
    }

    #[test]
    fn test_part_2() {
    let sample =
    $day_struct::parse_input(&format!("samples/{}.txt", $day_struct::day()));
    assert_eq!($day_struct::part_2(&sample), $part_2);
    }
    }
    };
    }

We have used it in a similar way as macros in C/C++. One of the things that we can use to our advantage is defining the “type” of the parameters for the macro. All parameters have their name prefixed with the $ sign and you can define various “forms” (fragment specifiers) they must match, e.g. expr, which literally means an expression.
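A tiny standalone example (mine, not from the post) of those fragment specifiers: $name:ident has to be an identifier, while $value:expr accepts any expression.

macro_rules! make_getter {
    ($name:ident, $value:expr) => {
        fn $name() -> i32 {
            $value
        }
    };
}

make_getter!(answer, 40 + 2);

fn main() {
    assert_eq!(answer(), 42);
}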

    Apart from that we need to use #[macro_export] to mark the macro as exported for usage outside of the module. Now our skeleton looks like:

    -
    use aoc_2022::*;

    type Input = String;
    type Output = String;

    struct DayXX;
    impl Solution<Input, Output> for DayXX {
    fn parse_input<P: AsRef<Path>>(pathname: P) -> Input {
    file_to_string(pathname)
    }

    fn part_1(input: &Input) -> Output {
    todo!()
    }

    fn part_2(input: &Input) -> Output {
    todo!()
    }
    }

    fn main() -> Result<()> {
    // DayXX::run("sample")
    DayXX::main()
    }

    // test_sample!(day_XX, DayXX, , );
    +
    use aoc_2022::*;

    type Input = String;
    type Output = String;

    struct DayXX;
    impl Solution<Input, Output> for DayXX {
    fn parse_input<P: AsRef<Path>>(pathname: P) -> Input {
    file_to_string(pathname)
    }

    fn part_1(input: &Input) -> Output {
    todo!()
    }

    fn part_2(input: &Input) -> Output {
    todo!()
    }
    }

    fn main() -> Result<()> {
    // DayXX::run("sample")
    DayXX::main()
    }

    // test_sample!(day_XX, DayXX, , );

    Solution

    Not much to talk about, it is relatively easy to simulate.

    Day 10: Cathode-Ray Tube

    @@ -988,7 +988,7 @@ CPU's accumulator.

    And the issue is caused by different types of Output for the part 1 and part 2.

The problem is relatively simple and consists of simulating a CPU; I have approached it in the following way:

    -
    fn evaluate_instructions(instructions: &[Instruction], mut out: Output) -> Output {
    instructions
    .iter()
    .fold(State::new(), |state, instruction| {
    state.execute(instruction, &mut out)
    });

    out
    }
    +
    fn evaluate_instructions(instructions: &[Instruction], mut out: Output) -> Output {
    instructions
    .iter()
    .fold(State::new(), |state, instruction| {
    state.execute(instruction, &mut out)
    });

    out
    }

We just take the instructions, keep some state of the CPU, and execute the instructions one-by-one. A perfect usage of fold (or reduce, as you may know it from other languages).
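If you haven't met fold yet, a minimal sketch of the idea (mine): the accumulator is threaded through the iteration, exactly like the CPU state above.

let deltas = [1, -2, 3];
// start from 0 and apply every delta to the accumulated value
let final_value = deltas.iter().fold(0, |acc, delta| acc + delta);
assert_eq!(final_value, 2);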

    @@ -996,11 +996,11 @@ it from other languages).

    that problem. And the answer is very simple and functional. Rust allows you to have an enumeration that can bear some other values apart from the type itself.

    tip

We could've seen something like this with the Result<T, E> type that can be defined as

    enum Result<T, E> {
    Ok(T),
    Err(E)
    }
    What does that mean though?


When we have an Ok value, it has the result itself, and when we get an Err value, it has the error. This also allows us to handle results in a rather pretty way:

    match do_something(x) {
    Ok(y) => {
    println!("SUCCESS: {}", y);
    },
    Err(y) => {
    eprintln!("ERROR: {}", y);
    }
    }

    My solution has a following outline:

    -
    fn execute(&self, i: &Instruction, output: &mut Output) -> State {
    // execute the instruction

    // collect results if necessary
    match output {
    Output::Part1(x) => self.execute_part_1(y, x),
    Output::Part2(x) => self.execute_part_2(y, x),
    }

    // return the obtained state
    new_state
    }
    +
    fn execute(&self, i: &Instruction, output: &mut Output) -> State {
    // execute the instruction

    // collect results if necessary
    match output {
    Output::Part1(x) => self.execute_part_1(y, x),
    Output::Part2(x) => self.execute_part_2(y, x),
    }

    // return the obtained state
    new_state
    }

    You might think that it's a perfectly reasonable thing to do. Yes, but notice that the match statement doesn't collect the changes in any way and also we pass output by &mut, so it is shared across each iteration of the fold.
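For context, the Output in question is an enum along these lines (my guess at its shape based on the snippet, not the post's exact definition): part 1 accumulates a number, part 2 builds the rendered screen.

enum Output {
    Part1(i32),
    Part2(String),
}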

    @@ -1027,7 +1027,7 @@ also rolling down the hill…

    As I have said in the tl;dr, we are looking for the shortest path, but the start and goal differ for the part 1 and 2. So I have decided to refactor my solution to a BFS algorithm that takes necessary parameters via functions:

    -
    fn bfs<F, G>(
    graph: &[Vec<char>], start: &Position, has_edge: F, is_target: G
    ) -> Option<usize>
    where
    F: Fn(&[Vec<char>], &Position, &Position) -> bool,
    G: Fn(&[Vec<char>], &Position) -> bool
    +
    fn bfs<F, G>(
    graph: &[Vec<char>], start: &Position, has_edge: F, is_target: G
    ) -> Option<usize>
    where
    F: Fn(&[Vec<char>], &Position, &Position) -> bool,
    G: Fn(&[Vec<char>], &Position) -> bool

    We pass the initial vertex from the caller and everything else is left to the BFS algorithm, based on the has_edge and is_target functions.
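To show what that buys us, here is a small self-contained sketch (mine, with the post's Position replaced by plain (usize, usize) tuples): the traversal itself never changes, only the two closures do.

use std::collections::{HashSet, VecDeque};

fn bfs<F, G>(graph: &[Vec<char>], start: (usize, usize), has_edge: F, is_target: G) -> Option<usize>
where
    F: Fn(&[Vec<char>], (usize, usize), (usize, usize)) -> bool,
    G: Fn(&[Vec<char>], (usize, usize)) -> bool,
{
    let mut visited: HashSet<(usize, usize)> = HashSet::new();
    let mut queue = VecDeque::from([(start, 0)]);

    while let Some((position, distance)) = queue.pop_front() {
        if is_target(graph, position) {
            return Some(distance);
        }
        if !visited.insert(position) {
            continue; // already expanded
        }

        let (x, y) = position;
        let mut neighbours = Vec::new();
        if x > 0 { neighbours.push((x - 1, y)); }
        if y > 0 { neighbours.push((x, y - 1)); }
        if x + 1 < graph[y].len() { neighbours.push((x + 1, y)); }
        if y + 1 < graph.len() { neighbours.push((x, y + 1)); }

        for next in neighbours {
            if has_edge(graph, position, next) {
                queue.push_back((next, distance + 1));
            }
        }
    }

    None
}

fn main() {
    let graph = vec!["ab".chars().collect::<Vec<_>>(), "cE".chars().collect()];

    // part-1-like query: every neighbour is walkable, stop on the 'E' cell
    let steps = bfs(&graph, (0, 0), |_, _, _| true, |g, (x, y)| g[y][x] == 'E');
    assert_eq!(steps, Some(2));
}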

    This was easy! And that is not very usual in Rust once you want to pass around @@ -1044,7 +1044,7 @@ time complexity, because of the priority heap instead of the queue.

    You can implement a lot of traits if you want to. It is imperative to implement ordering on the packets. I had a typo, so I also proceeded to implement a Display trait for debugging purposes:

    -
    impl Display for Packet {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
    match self {
    Packet::Integer(x) => write!(f, "{x}"),
    Packet::List(lst) => write!(f, "[{}]", lst.iter().map(|p| format!("{p}")).join(",")),
    }
    }
    }
    +
    impl Display for Packet {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
    match self {
    Packet::Integer(x) => write!(f, "{x}"),
    Packet::List(lst) => write!(f, "[{}]", lst.iter().map(|p| format!("{p}")).join(",")),
    }
    }
    }

    Solution

    A lot of technical details… Parsing is nasty too…

    Day 14: Regolith Reservoir

    @@ -1064,16 +1064,16 @@ leave it be, so I tried to implement the Index and IndexMutunsafe
    part are the 2 methods that are named *unchecked*. Anyways, I will be implementing the Index* traits for now, rather than the SliceIndex.

    It's relatively straightforward…

    -
    impl<I, C> Index<Vector2D<I>> for [C]
    where
    I: Copy + TryInto<usize>,
    <I as TryInto<usize>>::Error: Debug,
    C: Index<usize>,
    {
    type Output = C::Output;

    fn index(&self, index: Vector2D<I>) -> &Self::Output {
    let (x, y): (usize, usize) =
    (index.x.try_into().unwrap(), index.y.try_into().unwrap());
    &self[y][x]
    }
    }

    impl<I, C> IndexMut<Vector2D<I>> for [C]
    where
    I: Copy + TryInto<usize>,
    <I as TryInto<usize>>::Error: Debug,
    C: IndexMut<usize>,
    {
    fn index_mut(&mut self, index: Vector2D<I>) -> &mut Self::Output {
    let (x, y): (usize, usize) =
    (index.x.try_into().unwrap(), index.y.try_into().unwrap());
    &mut self[y][x]
    }
    }
    +
    impl<I, C> Index<Vector2D<I>> for [C]
    where
    I: Copy + TryInto<usize>,
    <I as TryInto<usize>>::Error: Debug,
    C: Index<usize>,
    {
    type Output = C::Output;

    fn index(&self, index: Vector2D<I>) -> &Self::Output {
    let (x, y): (usize, usize) =
    (index.x.try_into().unwrap(), index.y.try_into().unwrap());
    &self[y][x]
    }
    }

    impl<I, C> IndexMut<Vector2D<I>> for [C]
    where
    I: Copy + TryInto<usize>,
    <I as TryInto<usize>>::Error: Debug,
    C: IndexMut<usize>,
    {
    fn index_mut(&mut self, index: Vector2D<I>) -> &mut Self::Output {
    let (x, y): (usize, usize) =
    (index.x.try_into().unwrap(), index.y.try_into().unwrap());
    &mut self[y][x]
    }
    }

    We can see a lot of similarities to the implementation of index and index_mut functions. In the end, they are 1:1, just wrapped in the trait that provides a syntax sugar for container[idx].

    note

    I have also switched from using the TryFrom to TryInto trait, since it better matches what we are using, the .try_into rather than usize::try_from.

Also implementing TryFrom automatically provides you with a TryInto trait, since it is relatively easy to implement. Just compare the following:

    pub trait TryFrom<T>: Sized {
    type Error;

    fn try_from(value: T) -> Result<Self, Self::Error>;
    }

    pub trait TryInto<T>: Sized {
    type Error;

    fn try_into(self) -> Result<T, Self::Error>;
    }

    OK, so we have our trait implemented, we should be able to use container[index], right? Yes… but actually no 😦

    -
    error[E0277]: the type `[std::vec::Vec<i8>]` cannot be indexed by `aoc_2022::Vector2D<usize>`
    --> src/bin/day08.rs:26:18
    |
    26 | if trees[pos] > tallest {
    | ^^^ slice indices are of type `usize` or ranges of `usize`
    |
    = help: the trait `std::slice::SliceIndex<[std::vec::Vec<i8>]>` is not implemented for `aoc_2022::Vector2D<usize>`
    = note: required for `std::vec::Vec<std::vec::Vec<i8>>` to implement `std::ops::Index<aoc_2022::Vector2D<usize>>`

    error[E0277]: the type `[std::vec::Vec<i8>]` cannot be indexed by `aoc_2022::Vector2D<usize>`
    --> src/bin/day08.rs:30:28
    |
    30 | max(tallest, trees[pos])
    | ^^^ slice indices are of type `usize` or ranges of `usize`
    |
    = help: the trait `std::slice::SliceIndex<[std::vec::Vec<i8>]>` is not implemented for `aoc_2022::Vector2D<usize>`
    = note: required for `std::vec::Vec<std::vec::Vec<i8>>` to implement `std::ops::Index<aoc_2022::Vector2D<usize>>`

    error[E0277]: the type `[std::vec::Vec<i8>]` cannot be indexed by `aoc_2022::Vector2D<isize>`
    --> src/bin/day08.rs:52:28
    |
    52 | let max_height = trees[position];
    | ^^^^^^^^ slice indices are of type `usize` or ranges of `usize`
    |
    = help: the trait `std::slice::SliceIndex<[std::vec::Vec<i8>]>` is not implemented for `aoc_2022::Vector2D<isize>`
    = note: required for `std::vec::Vec<std::vec::Vec<i8>>` to implement `std::ops::Index<aoc_2022::Vector2D<isize>>`
    +
    error[E0277]: the type `[std::vec::Vec<i8>]` cannot be indexed by `aoc_2022::Vector2D<usize>`
    --> src/bin/day08.rs:26:18
    |
    26 | if trees[pos] > tallest {
    | ^^^ slice indices are of type `usize` or ranges of `usize`
    |
    = help: the trait `std::slice::SliceIndex<[std::vec::Vec<i8>]>` is not implemented for `aoc_2022::Vector2D<usize>`
    = note: required for `std::vec::Vec<std::vec::Vec<i8>>` to implement `std::ops::Index<aoc_2022::Vector2D<usize>>`

    error[E0277]: the type `[std::vec::Vec<i8>]` cannot be indexed by `aoc_2022::Vector2D<usize>`
    --> src/bin/day08.rs:30:28
    |
    30 | max(tallest, trees[pos])
    | ^^^ slice indices are of type `usize` or ranges of `usize`
    |
    = help: the trait `std::slice::SliceIndex<[std::vec::Vec<i8>]>` is not implemented for `aoc_2022::Vector2D<usize>`
    = note: required for `std::vec::Vec<std::vec::Vec<i8>>` to implement `std::ops::Index<aoc_2022::Vector2D<usize>>`

    error[E0277]: the type `[std::vec::Vec<i8>]` cannot be indexed by `aoc_2022::Vector2D<isize>`
    --> src/bin/day08.rs:52:28
    |
    52 | let max_height = trees[position];
    | ^^^^^^^^ slice indices are of type `usize` or ranges of `usize`
    |
    = help: the trait `std::slice::SliceIndex<[std::vec::Vec<i8>]>` is not implemented for `aoc_2022::Vector2D<isize>`
    = note: required for `std::vec::Vec<std::vec::Vec<i8>>` to implement `std::ops::Index<aoc_2022::Vector2D<isize>>`

    Why? We have it implemented for the slices ([C]), why doesn't it work? Well, the fun part consists of the fact that in other place, where we were using it, we were passing the &[Vec<T>], but this is coming from a helper functions that @@ -1083,9 +1083,9 @@ those. Just for the slices. 🤯 What are we going to do abo so let's implement a macro! The only difference across the implementations are the types of the outer containers. Implementation doesn't differ at all!

Implementing the macro can be done in the following way:

    -
    macro_rules! generate_indices {
    ($container:ty) => {
    impl<I, C> Index<Vector2D<I>> for $container
    where
    I: Copy + TryInto<usize>,
    <I as TryInto<usize>>::Error: Debug,
    C: Index<usize>,
    {
    type Output = C::Output;

    fn index(&self, index: Vector2D<I>) -> &Self::Output {
    let (x, y): (usize, usize) =
    (index.x.try_into().unwrap(), index.y.try_into().unwrap());
    &self[y][x]
    }
    }

    impl<I, C> IndexMut<Vector2D<I>> for $container
    where
    I: Copy + TryInto<usize>,
    <I as TryInto<usize>>::Error: Debug,
    C: IndexMut<usize>,
    {
    fn index_mut(&mut self, index: Vector2D<I>) -> &mut Self::Output {
    let (x, y): (usize, usize) =
    (index.x.try_into().unwrap(), index.y.try_into().unwrap());
    &mut self[y][x]
    }
    }
    };
    }
    +
    macro_rules! generate_indices {
    ($container:ty) => {
    impl<I, C> Index<Vector2D<I>> for $container
    where
    I: Copy + TryInto<usize>,
    <I as TryInto<usize>>::Error: Debug,
    C: Index<usize>,
    {
    type Output = C::Output;

    fn index(&self, index: Vector2D<I>) -> &Self::Output {
    let (x, y): (usize, usize) =
    (index.x.try_into().unwrap(), index.y.try_into().unwrap());
    &self[y][x]
    }
    }

    impl<I, C> IndexMut<Vector2D<I>> for $container
    where
    I: Copy + TryInto<usize>,
    <I as TryInto<usize>>::Error: Debug,
    C: IndexMut<usize>,
    {
    fn index_mut(&mut self, index: Vector2D<I>) -> &mut Self::Output {
    let (x, y): (usize, usize) =
    (index.x.try_into().unwrap(), index.y.try_into().unwrap());
    &mut self[y][x]
    }
    }
    };
    }

    And now we can simply do

    -
    generate_indices!(VecDeque<C>);
    generate_indices!([C]);
    generate_indices!(Vec<C>);
    // generate_indices!([C; N], const N: usize);
    +
    generate_indices!(VecDeque<C>);
    generate_indices!([C]);
    generate_indices!(Vec<C>);
    // generate_indices!([C; N], const N: usize);

The last type (I took the inspiration from the implementations of the Index and IndexMut traits) is a bit problematic because of the const N: usize part, which I haven't managed to parse. And that's how I got rid of the error.

    @@ -1095,11 +1095,11 @@ copy-paste, cause the cost of this “monstrosity” outweighs the benefits of n

This issue is relatively funny. If you don't use any type aliases, just the raw types, clippy will suggest certain changes to you. For example, if you consider the following piece of code

    -
    fn get_sum(nums: &Vec<i32>) -> i32 {
    nums.iter().sum()
    }

    fn main() {
    let nums = vec![1, 2, 3];
    println!("Sum: {}", get_sum(&nums));
    }
    +
    fn get_sum(nums: &Vec<i32>) -> i32 {
    nums.iter().sum()
    }

    fn main() {
    let nums = vec![1, 2, 3];
    println!("Sum: {}", get_sum(&nums));
    }

    and you run clippy on it, you will get

    -
    Checking playground v0.0.1 (/playground)
    warning: writing `&Vec` instead of `&[_]` involves a new object where a slice will do
    --> src/main.rs:1:18
    |
    1 | fn get_sum(nums: &Vec<i32>) -> i32 {
    | ^^^^^^^^^ help: change this to: `&[i32]`
    |
    = help: for further information visit https://rust-lang.github.io/rust-clippy/master/index.html#ptr_arg
    = note: `#[warn(clippy::ptr_arg)]` on by default

    warning: `playground` (bin "playground") generated 1 warning
    Finished dev [unoptimized + debuginfo] target(s) in 0.61s
    +
    Checking playground v0.0.1 (/playground)
    warning: writing `&Vec` instead of `&[_]` involves a new object where a slice will do
    --> src/main.rs:1:18
    |
    1 | fn get_sum(nums: &Vec<i32>) -> i32 {
    | ^^^^^^^^^ help: change this to: `&[i32]`
    |
    = help: for further information visit https://rust-lang.github.io/rust-clippy/master/index.html#ptr_arg
    = note: `#[warn(clippy::ptr_arg)]` on by default

    warning: `playground` (bin "playground") generated 1 warning
    Finished dev [unoptimized + debuginfo] target(s) in 0.61s

    However, if you introduce a type alias, such as

    -
    type Numbers = Vec<i32>;
    +
    type Numbers = Vec<i32>;

    Then clippy won't say anything, cause there is literally nothing to suggest. However the outcome is not the same…
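The aliased variant that clippy stays silent about (a sketch mirroring the snippet above) looks like this; the signature is textually different, yet callers are still forced to hand over a &Vec<i32>:

type Numbers = Vec<i32>;

fn get_sum(nums: &Numbers) -> i32 {
    nums.iter().sum()
}

fn main() {
    let nums: Numbers = vec![1, 2, 3];
    println!("Sum: {}", get_sum(&nums));
}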

    ]]> @@ -1134,10 +1134,10 @@ backpacks and we want to choose the elf that has the most food ;)

    At first I've decided to put asserts into my main, something like

    -
    assert_eq!(part_1(&sample), 24000);
    info!("Part 1: {}", part_1(&input));

    assert_eq!(part_2(&sample), 45000);
    info!("Part 2: {}", part_2(&input));
    +
    assert_eq!(part_1(&sample), 24000);
    info!("Part 1: {}", part_1(&input));

    assert_eq!(part_2(&sample), 45000);
    info!("Part 2: {}", part_2(&input));

    However, once you get further, the sample input may take some time to run itself. So in the end, I have decided to turn them into unit tests:

    -
    #[cfg(test)]
    mod tests {
    use super::*;

    #[test]
    fn test_part_1() {
    let sample = parse_input("samples/day01.txt");
    assert_eq!(part_1(&sample), 24000);
    }

    #[test]
    fn test_part_2() {
    let sample = parse_input("samples/day01.txt");
    assert_eq!(part_2(&sample), 45000);
    }
    }
    +
    #[cfg(test)]
    mod tests {
    use super::*;

    #[test]
    fn test_part_1() {
    let sample = parse_input("samples/day01.txt");
    assert_eq!(part_1(&sample), 24000);
    }

    #[test]
    fn test_part_2() {
    let sample = parse_input("samples/day01.txt");
    assert_eq!(part_2(&sample), 45000);
    }
    }

And later on I noticed that it's hard to tell the days apart, so I further renamed the mod from the generic tests to reflect the specific day.

    Also after finishing the first day puzzle, I have installed an sccache to @@ -1166,16 +1166,16 @@ to give up. Let's dive into it \o/

    Fun fact

    Fighting the compiler took me 30 minutes.

    We need to find a common item among 2 collections, that's an easy task, right? We can construct 2 sets and find an intersection:

    -
    let top: HashSet<i32> = [1, 2, 3].iter().collect();
    let bottom: HashSet<i32> = [3, 4, 5].iter().collect();
    +
    let top: HashSet<i32> = [1, 2, 3].iter().collect();
    let bottom: HashSet<i32> = [3, 4, 5].iter().collect();

Now, the first issue that we encounter is caused by the fact that we are using a slice (the […]) whose iterator returns references to the numbers. And we get immediately yelled at by the compiler, because the numbers are discarded after running the .collect. To fix this, we can use .into_iter:

    -
    let top: HashSet<i32> = [1, 2, 3].into_iter().collect();
    let bottom: HashSet<i32> = [3, 4, 5].into_iter().collect();
    +
    let top: HashSet<i32> = [1, 2, 3].into_iter().collect();
    let bottom: HashSet<i32> = [3, 4, 5].into_iter().collect();

    This way the numbers will get copied instead of referenced. OK, let's find the intersection of those 2 collections:

    -
    println!("Common elements: {:?}", top.intersection(&bottom));
    -
    Common elements: [3]
    +
    println!("Common elements: {:?}", top.intersection(&bottom));
    +
    Common elements: [3]
    caution

    Notice that we need to do &bottom. It explicitly specifies that .intersection borrows the bottom, i.e. takes an immutable reference to it.

    That's what we want, right? Looks like it! \o/

    @@ -1183,16 +1183,16 @@ intersection of those 2 collections:

    that should be fairly easy, we have an intersection and we want to find intersection over all of them.

    Let's have a look at the type of the .intersection

    -
    pub fn intersection<'a>(
        &'a self,
        other: &'a HashSet<T, S>
    ) -> Intersection<'a, T, S>
    +
    pub fn intersection<'a>(
        &'a self,
        other: &'a HashSet<T, S>
    ) -> Intersection<'a, T, S>

    OK… Huh… But we have an example there!

    -
    let intersection: HashSet<_> = a.intersection(&b).collect();
    +
    let intersection: HashSet<_> = a.intersection(&b).collect();

    Cool, that's all we need.

    -
    let top: HashSet<i32> = [1, 2, 3, 4].into_iter().collect();
    let bottom: HashSet<i32> = [3, 4, 5, 6].into_iter().collect();
    let top_2: HashSet<i32> = [2, 3, 4, 5, 6].into_iter().collect();
    let bottom_2: HashSet<i32> = [4, 5, 6].into_iter().collect();

    let intersection: HashSet<_> = top.intersection(&bottom).collect();
    println!("Intersection: {:?}", intersection);
    -
    Intersection: {3, 4}
    +
    let top: HashSet<i32> = [1, 2, 3, 4].into_iter().collect();
    let bottom: HashSet<i32> = [3, 4, 5, 6].into_iter().collect();
    let top_2: HashSet<i32> = [2, 3, 4, 5, 6].into_iter().collect();
    let bottom_2: HashSet<i32> = [4, 5, 6].into_iter().collect();

    let intersection: HashSet<_> = top.intersection(&bottom).collect();
    println!("Intersection: {:?}", intersection);
    +
    Intersection: {3, 4}

    Cool, so let's do the intersection with the top_2:

    -
    let top: HashSet<i32> = [1, 2, 3, 4].into_iter().collect();
    let bottom: HashSet<i32> = [3, 4, 5, 6].into_iter().collect();
    let top_2: HashSet<i32> = [2, 3, 4, 5, 6].into_iter().collect();
    let bottom_2: HashSet<i32> = [4, 5, 6].into_iter().collect();

    let intersection: HashSet<_> = top.intersection(&bottom).collect();
    let intersection: HashSet<_> = intersection.intersection(&top_2).collect();
    println!("Intersection: {:?}", intersection);
    +
    let top: HashSet<i32> = [1, 2, 3, 4].into_iter().collect();
    let bottom: HashSet<i32> = [3, 4, 5, 6].into_iter().collect();
    let top_2: HashSet<i32> = [2, 3, 4, 5, 6].into_iter().collect();
    let bottom_2: HashSet<i32> = [4, 5, 6].into_iter().collect();

    let intersection: HashSet<_> = top.intersection(&bottom).collect();
    let intersection: HashSet<_> = intersection.intersection(&top_2).collect();
    println!("Intersection: {:?}", intersection);

    And we get yelled at by the compiler:

    -
    error[E0308]: mismatched types
    --> src/main.rs:10:58
    |
    10 | let intersection: HashSet<_> = intersection.intersection(&top_2).collect();
    | ------------ ^^^^^^ expected `&i32`, found `i32`
    | |
    | arguments to this function are incorrect
    |
    = note: expected reference `&HashSet<&i32>`
    found reference `&HashSet<i32>`
    +
    error[E0308]: mismatched types
    --> src/main.rs:10:58
    |
    10 | let intersection: HashSet<_> = intersection.intersection(&top_2).collect();
    | ------------ ^^^^^^ expected `&i32`, found `i32`
    | |
    | arguments to this function are incorrect
    |
    = note: expected reference `&HashSet<&i32>`
    found reference `&HashSet<i32>`

/o\ What the hell is going on here? Well, the funny thing is that this operation doesn't return the elements themselves, but references to them, and when we pass the third set, it has just the values themselves, without any references.

@@ -1202,8 +1202,8 @@ a “tax” for having a borrow checker having your back, making sure you're not doing something naughty that may cause undefined behavior.

    To resolve this we need to get an iterator that clones the elements:

    -
    let top: HashSet<i32> = [1, 2, 3, 4].into_iter().collect();
    let bottom: HashSet<i32> = [3, 4, 5, 6].into_iter().collect();
    let top_2: HashSet<i32> = [2, 3, 4, 5, 6].into_iter().collect();
    let bottom_2: HashSet<i32> = [4, 5, 6].into_iter().collect();

    let intersection: HashSet<_> = top.intersection(&bottom).cloned().collect();
    let intersection: HashSet<_> = intersection.intersection(&top_2).cloned().collect();
    let intersection: HashSet<_> = intersection.intersection(&bottom_2).cloned().collect();
    println!("Intersection: {:?}", intersection);
    -
    Intersection: {4}
    +
    let top: HashSet<i32> = [1, 2, 3, 4].into_iter().collect();
    let bottom: HashSet<i32> = [3, 4, 5, 6].into_iter().collect();
    let top_2: HashSet<i32> = [2, 3, 4, 5, 6].into_iter().collect();
    let bottom_2: HashSet<i32> = [4, 5, 6].into_iter().collect();

    let intersection: HashSet<_> = top.intersection(&bottom).cloned().collect();
    let intersection: HashSet<_> = intersection.intersection(&top_2).cloned().collect();
    let intersection: HashSet<_> = intersection.intersection(&bottom_2).cloned().collect();
    println!("Intersection: {:?}", intersection);
    +
    Intersection: {4}

    Solution

    The approach is pretty simple, if you omit the 1on1 with the compiler. You just have some fun with the set operations :)

    @@ -1217,7 +1217,7 @@ Find how many overlap and can take the day off.

    Day 5: Supply Stacks

    tl;dr

    Let's play with stacks of crates.

    Very easy problem with very annoying input. You can judge yourself:

    -
        [D]    
    [N] [C]
    [Z] [M] [P]
    1 2 3

    move 1 from 2 to 1
    move 3 from 1 to 3
    move 2 from 2 to 1
    move 1 from 1 to 2
    +
        [D]    
    [N] [C]
    [Z] [M] [P]
    1 2 3

    move 1 from 2 to 1
    move 3 from 1 to 3
    move 2 from 2 to 1
    move 1 from 1 to 2

    Good luck transforming that into something reasonable :)

    Fun fact

    Took me 40 minutes to parse this reasonably, including fighting the compiler.

    Solution

    @@ -1225,7 +1225,7 @@ Find how many overlap and can take the day off.

    the work. Later on I have decided to explore the std and interface of the std::vec::Vec and found split_off which takes an index and splits (duh) the vector:

    -
    let mut vec = vec![1, 2, 3];
    let vec2 = vec.split_off(1);
    assert_eq!(vec, [1]);
    assert_eq!(vec2, [2, 3]);
    +
    let mut vec = vec![1, 2, 3];
    let vec2 = vec.split_off(1);
    assert_eq!(vec, [1]);
    assert_eq!(vec2, [2, 3]);

    This helped me simplify my solution a lot and also get rid of some edge cases.

    Day 6: Tuning Trouble

    tl;dr

    Finding start of the message in a very weird protocol. Start of the message is @@ -1250,7 +1250,7 @@ directories that take a lot of space and should be deleted.

    Solution

We need to “build” a file system from the input that is given in the following form:

    -
    $ cd /
    $ ls
    dir a
    14848514 b.txt
    8504156 c.dat
    dir d
    $ cd a
    $ ls
    dir e
    29116 f
    2557 g
    62596 h.lst
    $ cd e
    $ ls
    584 i
    $ cd ..
    $ cd ..
    $ cd d
    $ ls
    4060174 j
    8033020 d.log
    5626152 d.ext
    7214296 k
    +
    $ cd /
    $ ls
    dir a
    14848514 b.txt
    8504156 c.dat
    dir d
    $ cd a
    $ ls
    dir e
    29116 f
    2557 g
    62596 h.lst
    $ cd e
    $ ls
    584 i
    $ cd ..
    $ cd ..
    $ cd d
    $ ls
    4060174 j
    8033020 d.log
    5626152 d.ext
    7214296 k

There are a few ways in which you can achieve this, and you can also assume some preconditions, but why would we do that, right? :)

    You can “slap” this in either HashMap or BTreeMap and call it a day. @@ -1270,7 +1270,7 @@ references are present) are checked dynamically.

So in the end, if you want shared ownership with mutability, you end up with Rc<RefCell<T>>.
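A minimal sketch (mine) of what that combination gives us: multiple owners of the same value, with the borrow rules enforced at runtime instead of at compile time.

use std::cell::RefCell;
use std::rc::Rc;

fn main() {
    // two handles pointing to the same, mutable value
    let entries: Rc<RefCell<Vec<String>>> = Rc::new(RefCell::new(Vec::new()));
    let alias = Rc::clone(&entries);

    alias.borrow_mut().push(String::from("b.txt"));
    assert_eq!(entries.borrow().len(), 1);
}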

    So, how are we going to represent the file system then? We will use an enumeration, hehe, which is an algebraic data type that can store some stuff in itself 😩

    -
    type FileHandle = Rc<RefCell<AocFile>>;

    #[derive(Debug)]
    enum AocFile {
    File(usize),
    Directory(BTreeMap<String, FileHandle>),
    }
    +
    type FileHandle = Rc<RefCell<AocFile>>;

    #[derive(Debug)]
    enum AocFile {
    File(usize),
    Directory(BTreeMap<String, FileHandle>),
    }

Let's go over it! FileHandle represents a dynamically allocated AocFile, not much to discuss. What does the #[derive(Debug)] do though? It lets us print out the value of that enumeration; it's derived, so it's not as good as if we had @@ -1372,15 +1372,15 @@ problems in it. However the toolkit is questionable :/

    with rust-analyzer. Because of my choice of libraries, we will also introduce a .envrc file that can be used by direnv, which allows you to set specific environment variables when you enter a directory. In our case, we will use

    -
    # to show nice backtrace when using the color-eyre
    export RUST_BACKTRACE=1

    # to catch logs generated by tracing
    export RUST_LOG=trace
    +
    # to show nice backtrace when using the color-eyre
    export RUST_BACKTRACE=1

    # to catch logs generated by tracing
    export RUST_LOG=trace

And for one of the most obnoxious things ever, we will use a script to download the inputs instead of “clicking, opening and copying to a file”1. There is no need to be fancy, so we will adjust the Python script by Martin2.

    -
    #!/usr/bin/env python3

    import datetime
    import yaml
    import requests
    import sys


    def load_config():
    with open("env.yaml", "r") as f:
    js = yaml.load(f, Loader=yaml.Loader)
    return js["session"], js["year"]


    def get_input(session, year, day):
    return requests.get(
    f"https://adventofcode.com/{year}/day/{day}/input",
    cookies={"session": session},
    headers={
    "User-Agent": "{repo} by {mail}".format(
    repo="gitlab.com/mfocko/advent-of-code-2022",
    mail="me@mfocko.xyz",
    )
    },
    ).content.decode("utf-8")


    def main():
    day = datetime.datetime.now().day
    if len(sys.argv) == 2:
    day = sys.argv[1]

    session, year = load_config()
    problem_input = get_input(session, year, day)

    with open(f"./inputs/day{day:>02}.txt", "w") as f:
    f.write(problem_input)


    if __name__ == "__main__":
    main()
    +
    #!/usr/bin/env python3

    import datetime
    import yaml
    import requests
    import sys


    def load_config():
    with open("env.yaml", "r") as f:
    js = yaml.load(f, Loader=yaml.Loader)
    return js["session"], js["year"]


    def get_input(session, year, day):
    return requests.get(
    f"https://adventofcode.com/{year}/day/{day}/input",
    cookies={"session": session},
    headers={
    "User-Agent": "{repo} by {mail}".format(
    repo="gitlab.com/mfocko/advent-of-code-2022",
    mail="me@mfocko.xyz",
    )
    },
    ).content.decode("utf-8")


    def main():
    day = datetime.datetime.now().day
    if len(sys.argv) == 2:
    day = sys.argv[1]

    session, year = load_config()
    problem_input = get_input(session, year, day)

    with open(f"./inputs/day{day:>02}.txt", "w") as f:
    f.write(problem_input)


    if __name__ == "__main__":
    main()

    If the script is called without any arguments, it will deduce the day from the system, so we do not need to change the day every morning. It also requires a configuration file:

    -
    # env.yaml
    session: ‹your session cookie›
    year: 2022
    +
    # env.yaml
    session: ‹your session cookie›
    year: 2022

    Libraries

    Looking at the list of the libraries, I have chosen “a lot” of them. Let's walk through each of them.

@@ -1409,7 +1409,7 @@ also we can follow KISS. I have 2 modules that my “library” exports: one for parsing and one for a 2D vector (that gets used quite often during Advent of Code).

The key part is, of course, processing the input, and my library exports the following functions that get used a lot:

    -
    /// Reads file to the string.
    pub fn file_to_string<P: AsRef<Path>>(pathname: P) -> String;

    /// Reads file and returns it as a vector of characters.
    pub fn file_to_chars<P: AsRef<Path>>(pathname: P) -> Vec<char>;

    /// Reads file and returns a vector of parsed structures. Expects each structure
    /// on its own line in the file. And `T` needs to implement `FromStr` trait.
    pub fn file_to_structs<P: AsRef<Path>, T: FromStr>(pathname: P) -> Vec<T>
    where
    <T as FromStr>::Err: Debug;

    /// Converts iterator over strings to a vector of parsed structures. `T` needs
    /// to implement `FromStr` trait and its error must derive `Debug` trait.
    pub fn strings_to_structs<T: FromStr, U>(
    iter: impl Iterator<Item = U>
    ) -> Vec<T>
    where
    <T as std::str::FromStr>::Err: std::fmt::Debug,
    U: Deref<Target = str>;

    /// Reads file and returns it as a vector of its lines.
    pub fn file_to_lines<P: AsRef<Path>>(pathname: P) -> Vec<String>;
    +
    /// Reads file to the string.
    pub fn file_to_string<P: AsRef<Path>>(pathname: P) -> String;

    /// Reads file and returns it as a vector of characters.
    pub fn file_to_chars<P: AsRef<Path>>(pathname: P) -> Vec<char>;

    /// Reads file and returns a vector of parsed structures. Expects each structure
    /// on its own line in the file. And `T` needs to implement `FromStr` trait.
    pub fn file_to_structs<P: AsRef<Path>, T: FromStr>(pathname: P) -> Vec<T>
    where
    <T as FromStr>::Err: Debug;

    /// Converts iterator over strings to a vector of parsed structures. `T` needs
    /// to implement `FromStr` trait and its error must derive `Debug` trait.
    pub fn strings_to_structs<T: FromStr, U>(
    iter: impl Iterator<Item = U>
    ) -> Vec<T>
    where
    <T as std::str::FromStr>::Err: std::fmt::Debug,
    U: Deref<Target = str>;

    /// Reads file and returns it as a vector of its lines.
    pub fn file_to_lines<P: AsRef<Path>>(pathname: P) -> Vec<String>;

As for the vector, I went with a rather simple implementation that allows only addition of the vectors for now and accessing the elements via the functions x() and y(). Also the vector is generic, so we can use it with any numeric type we need.
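A rough sketch of what such a type could look like (my guess at the shape, not the author's exact code):

use std::ops::Add;

// Generic 2D vector with `x()`/`y()` accessors and addition as the only
// implemented operator for now.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub struct Vector2D<T> {
    x: T,
    y: T,
}

impl<T: Copy> Vector2D<T> {
    pub fn new(x: T, y: T) -> Self {
        Self { x, y }
    }

    pub fn x(&self) -> T {
        self.x
    }

    pub fn y(&self) -> T {
        self.y
    }
}

impl<T: Add<Output = T>> Add for Vector2D<T> {
    type Output = Self;

    fn add(self, rhs: Self) -> Self {
        Self {
            x: self.x + rhs.x,
            y: self.y + rhs.y,
        }
    }
}

fn main() {
    let moved = Vector2D::new(1, 2) + Vector2D::new(3, -1);
    assert_eq!(moved, Vector2D::new(4, 1));
}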

    We can also prepare a template to quickly bootstrap each of the days. We know that each puzzle has 2 parts, which means that we can start with 2 functions that will solve them.

    -
    fn part1(input: &Input) -> Output {
    todo!()
    }

    fn part2(input: &Input) -> Output {
    todo!()
    }
    +
    fn part1(input: &Input) -> Output {
    todo!()
    }

    fn part2(input: &Input) -> Output {
    todo!()
    }

Both functions take a reference to the input and return some output (in the majority of puzzles, it is the same type). todo!() can be used as a nice placeholder; it causes a panic when reached, and we could also provide a string with an explanation, e.g. todo!("part 1"). We have not given the functions specific types, and to avoid as much copy-paste as possible, we will introduce type aliases.

    -
    type Input = String;
    type Output = i32;
    +
    type Input = String;
    type Output = i32;
    tip

    This allows us to quickly adjust the types only in one place without the need to do regex-replace or replace them manually.

    For each day we get a personalized input that is provided as a text file. Almost all the time, we would like to get some structured type out of that input, and therefore it makes sense to introduce a new function that will provide the parsing of the input.

    -
    fn parse_input(path: &str) -> Input {
    todo!()
    }
    +
    fn parse_input(path: &str) -> Input {
    todo!()
    }

    This “parser” will take a path to the file, just in case we would like to run the sample instead of input.

    OK, so now we can write a main function that will take all of the pieces and run them.

    -
    fn main() {
    let input = parse_input("inputs/dayXX.txt");

    println!("Part 1: {}", part_1(&input));
    println!("Part 2: {}", part_2(&input));
    }
    +
    fn main() {
    let input = parse_input("inputs/dayXX.txt");

    println!("Part 1: {}", part_1(&input));
    println!("Part 2: {}", part_2(&input));
    }

    This would definitely do :) But we have installed a few libraries and we want to use them. In this part we are going to utilize tracing (for tracing, duh…) and color-eyre (for better error reporting, e.g. from parsing).

    -
    fn main() -> Result<()> {
    tracing_subscriber::fmt()
    .with_env_filter(EnvFilter::from_default_env())
    .with_target(false)
    .with_file(true)
    .with_line_number(true)
    .without_time()
    .compact()
    .init();
    color_eyre::install()?;

    let input = parse_input("inputs/dayXX.txt");

    info!("Part 1: {}", part_1(&input));
    info!("Part 2: {}", part_2(&input));

    Ok(())
    }
    +
    fn main() -> Result<()> {
    tracing_subscriber::fmt()
    .with_env_filter(EnvFilter::from_default_env())
    .with_target(false)
    .with_file(true)
    .with_line_number(true)
    .without_time()
    .compact()
    .init();
    color_eyre::install()?;

    let input = parse_input("inputs/dayXX.txt");

    info!("Part 1: {}", part_1(&input));
    info!("Part 2: {}", part_2(&input));

    Ok(())
    }

    The first statement will set up tracing and configure it to print out the logs to terminal, based on the environment variable. We also change the formatting a bit, since we do not need all the fancy features of the logger. Pure initialization would get us logs like this:

    -
    2022-12-11T19:53:19.975343Z  INFO day01: Part 1: 0
    +
    2022-12-11T19:53:19.975343Z  INFO day01: Part 1: 0

    However after running that command, we will get the following:

    -
     INFO src/bin/day01.rs:35: Part 1: 0
    +
     INFO src/bin/day01.rs:35: Part 1: 0

    And the color_eyre::install()? is quite straightforward. We just initialize the error reporting by color eyre.

    caution

Notice that we had to add Ok(()) to the end of the function and adjust the return type of main to Result<()>. It is caused by the fact that the handler can be installed only once and therefore it can fail; that is how we got the ? at the end of the ::install, which unwraps the »result« of the installation.

    Overall we will get to a template like this:

    -
    use aoc_2022::*;

    use color_eyre::eyre::Result;
    use tracing::info;
    use tracing_subscriber::EnvFilter;

    type Input = String;
    type Output = i32;

    fn parse_input(path: &str) -> Input {
    todo!()
    }

    fn part1(input: &Input) -> Output {
    todo!()
    }

    fn part2(input: &Input) -> Output {
    todo!()
    }

    fn main() -> Result<()> {
    tracing_subscriber::fmt()
    .with_env_filter(EnvFilter::from_default_env())
    .with_target(false)
    .with_file(true)
    .with_line_number(true)
    .without_time()
    .compact()
    .init();
    color_eyre::install()?;

    let input = parse_input("inputs/dayXX.txt");

    info!("Part 1: {}", part_1(&input));
    info!("Part 2: {}", part_2(&input));

    Ok(())
    }
    +
    use aoc_2022::*;

    use color_eyre::eyre::Result;
    use tracing::info;
    use tracing_subscriber::EnvFilter;

    type Input = String;
    type Output = i32;

    fn parse_input(path: &str) -> Input {
    todo!()
    }

    fn part1(input: &Input) -> Output {
    todo!()
    }

    fn part2(input: &Input) -> Output {
    todo!()
    }

    fn main() -> Result<()> {
    tracing_subscriber::fmt()
    .with_env_filter(EnvFilter::from_default_env())
    .with_target(false)
    .with_file(true)
    .with_line_number(true)
    .without_time()
    .compact()
    .init();
    color_eyre::install()?;

    let input = parse_input("inputs/dayXX.txt");

    info!("Part 1: {}", part_1(&input));
    info!("Part 2: {}", part_2(&input));

    Ok(())
    }

    Footnotes

    1. diff --git a/blog/feed.json b/blog/feed.json index 06f8f08..a6f59dc 100644 --- a/blog/feed.json +++ b/blog/feed.json @@ -6,7 +6,7 @@ "items": [ { "id": "https://blog.mfocko.xyz/blog/2023/08/02/copr", - "content_html": "

      When you decide to run Fedora on your VPS, you might get screwed over by using\nrandom repositories…

      \n

      When I “reserved” my VPS1 back in June '20, I slapped Fedora on it without\nthinking. I bet 99% of people would say that I'm crazy for doing such thing2,\nBUT I've been using Fedora on my PCs for some time already and it felt very\nstable and natural to just use, even for VPS.

      \n

      One of the first things I've done was setting up a mail server. You may guess\nwhat's the fun part about having a mail server… Yes, it's all the spam you\nreceive and only then you realize how much “crap” gets filtered on free mail\nservices. To battle this problem I chose to use\nrspamd that had CentOS support, but someone\nhad a Copr repository that I used to\ninstall it.

      \n

      How does Copr repositories work?

      \n

      If you have ever used Ubuntu, you might be familiar with the concept since it is\nvery close to PPAs.

      \n

      tl;dr of the whole process consists of

      \n
        \n
      1. enabling the Copr repository, and
      2. \n
      3. installing the desired package.
      4. \n
      \n

      So in shell you would do

      \n
      # dnf copr enable ‹copr-repository›
      # dnf install ‹package-from-the-repository›
      \n

      And… that's it! Nothing else needed! Simple, right? And literally same process\nas you would do for the PPA.

      \n
      AUR

      On the other hand, if you are familiar with the archLinux, you definitely know\nAUR and what it can do for you. Copr repository is pretty similar, but the\npackages are prebuilt in Copr and Copr repositories can carry the required\ndependencies for said packages, which simplifies the distribution, and can even\nhelp with installing singular packages (when you just need the dependency, not\neverything).

      \n

      My issue

      \n

      Now you might wonder how would I use it on my VPS. It's rather simple, once in\n6 months a new Fedora release comes out. And you need to upgrade to newer\nrelease… You don't need to do it right away and for such setup it probably isn't\neven recommended.

      \n
      tip

      Fedora releases are supported for a year, i.e. they live 6 months till the next\nrelease and then another 6 months till another release.

      Some people prefer to run one version “behind”. If you ever decide to run it on\nyour home server or in a similar setup, it might be a pretty good idea to\nfollow. I'm using the “latest greatest”, cause why not 😄

      One way or another, you still need to bump the release every six months, unless\nyou'd bump 2 releases at once every year, which would be a decision, since, at\nleast I, cannot see any benefits in it… You don't go for “stability”, cause once\na year you switch to the latest release and then, before you bump, you use one\nyear old software, so you're not even using the latest.

      \n

      Fast-forward 2 years in the future, new Fedora release came out (October '22)\nand I was doing an upgrade. Dependencies of the rspamd have been updated and\nrspamd builds in Copr have failed and no one fixed it. Cool, so now I can\nupgrade, but can either ignore the dependencies or uninstall the rspamd…

      \n

      How can Copr help?

      \n

      I have managed to find\nspecfile for the\nrspamd package that they use for CentOS. There were some files apart from the\nspecfile, so I had to make an SRPM locally and then… I just uploaded the SRPM\nto the Copr to\nbuild\nan RPM.

      \n

      I have switched the previous Copr repository for rspamd with my own and happily\nproceeded with the upgrade.

      \n

      Conclusion

      \n

      Copr is heavily used for testing builds on the upstream with\nPackit. However, as you can see, it is possible to use it\nvery well for packaging your own stuff and avoiding issues (such as the one\nI have described above), if need be.

      \n

      Footnotes

      \n
        \n
      1. \n

        vpsFree.cz

        \n
      2. \n
      3. \n

        Even though I've been running archLinux on some Raspberry Pi's and also\non one of my “home servers”, before getting the VPS. You could say I like\nto live on the edge…

        \n
      4. \n
      \n
      ", + "content_html": "

      When you decide to run Fedora on your VPS, you might get screwed over by using\nrandom repositories…

      \n

      When I “reserved” my VPS1 back in June '20, I slapped Fedora on it without\nthinking. I bet 99% of people would say that I'm crazy for doing such thing2,\nBUT I've been using Fedora on my PCs for some time already and it felt very\nstable and natural to just use, even for VPS.

      \n

      One of the first things I've done was setting up a mail server. You may guess\nwhat's the fun part about having a mail server… Yes, it's all the spam you\nreceive and only then you realize how much “crap” gets filtered on free mail\nservices. To battle this problem I chose to use\nrspamd that had CentOS support, but someone\nhad a Copr repository that I used to\ninstall it.

      \n

      How does Copr repositories work?

      \n

      If you have ever used Ubuntu, you might be familiar with the concept since it is\nvery close to PPAs.

      \n

      tl;dr of the whole process consists of

      \n
        \n
      1. enabling the Copr repository, and
      2. \n
      3. installing the desired package.
      4. \n
      \n

      So in shell you would do

      \n
      # dnf copr enable ‹copr-repository›
      # dnf install ‹package-from-the-repository›
      \n

      And… that's it! Nothing else needed! Simple, right? And literally same process\nas you would do for the PPA.

      \n
      AUR

      On the other hand, if you are familiar with the archLinux, you definitely know\nAUR and what it can do for you. Copr repository is pretty similar, but the\npackages are prebuilt in Copr and Copr repositories can carry the required\ndependencies for said packages, which simplifies the distribution, and can even\nhelp with installing singular packages (when you just need the dependency, not\neverything).

      \n

      My issue

      \n

      Now you might wonder how would I use it on my VPS. It's rather simple, once in\n6 months a new Fedora release comes out. And you need to upgrade to newer\nrelease… You don't need to do it right away and for such setup it probably isn't\neven recommended.

      \n
      tip

      Fedora releases are supported for a year, i.e. they live 6 months till the next\nrelease and then another 6 months till another release.

      Some people prefer to run one version “behind”. If you ever decide to run it on\nyour home server or in a similar setup, it might be a pretty good idea to\nfollow. I'm using the “latest greatest”, cause why not 😄

      One way or another, you still need to bump the release every six months, unless\nyou'd bump 2 releases at once every year, which would be a decision, since, at\nleast I, cannot see any benefits in it… You don't go for “stability”, cause once\na year you switch to the latest release and then, before you bump, you use one\nyear old software, so you're not even using the latest.

      \n

      Fast-forward 2 years in the future, new Fedora release came out (October '22)\nand I was doing an upgrade. Dependencies of the rspamd have been updated and\nrspamd builds in Copr have failed and no one fixed it. Cool, so now I can\nupgrade, but can either ignore the dependencies or uninstall the rspamd…

      \n

      How can Copr help?

      \n

      I have managed to find\nspecfile for the\nrspamd package that they use for CentOS. There were some files apart from the\nspecfile, so I had to make an SRPM locally and then… I just uploaded the SRPM\nto the Copr to\nbuild\nan RPM.

      \n

      I have switched the previous Copr repository for rspamd with my own and happily\nproceeded with the upgrade.

      \n

      Conclusion

      \n

      Copr is heavily used for testing builds on the upstream with\nPackit. However, as you can see, it is possible to use it\nvery well for packaging your own stuff and avoiding issues (such as the one\nI have described above), if need be.

Footnotes

1. vpsFree.cz
2. Even though I've been running Arch Linux on some Raspberry Pi's and also on one of my “home servers” before getting the VPS. You could say I like to live on the edge…

(Post: How can Copr help with broken dependencies, https://blog.mfocko.xyz/blog/2023/08/02/copr. Summary: “Copr comes to save you when maintainer doesn't care.”)

      Let's go through the fourth week of Advent of Code in Rust.

      \n

      Day 22: Monkey Map

      \n
tl;dr

Simulating movement on a 2D map with given instructions. The map becomes a cube in the 2nd part…

Rant

This was the most obnoxious problem of this year… and a lot of Rust issues have been hit.

Solution

It seems like a very simple problem to solve, but it comes with very obnoxious changes in the 2nd part, and it's also relatively hard to decompose »properly«.

Column iterator

In the first part of the problem I needed to know the boundaries of each row and column, since I stored the map in a Vec<Vec<char>> padded with spaces to ensure I have a rectangular 2D “array”. However, when you want to go through each row and column to determine the boundaries, it is very easy to do for the rows (cause each row is a Vec element), but not for the columns, since they span multiple rows.

For this use case I have implemented my own column iterator:

pub struct ColumnIterator<'a, T> {
    map: &'a [Vec<T>],
    column: usize,

    i: usize,
}

impl<'a, T> ColumnIterator<'a, T> {
    pub fn new(map: &'a [Vec<T>], column: usize) -> ColumnIterator<'a, T> {
        Self { map, column, i: 0 }
    }
}

impl<'a, T> Iterator for ColumnIterator<'a, T> {
    type Item = &'a T;

    fn next(&mut self) -> Option<Self::Item> {
        if self.i >= self.map.len() {
            return None;
        }

        self.i += 1;
        Some(&self.map[self.i - 1][self.column])
    }
}

Given this piece of an iterator, it is very easy to factor out the common functionality between the rows and columns into:

let mut find_boundaries = |constructor: fn(usize) -> Orientation,
                           iterator: &mut dyn Iterator<Item = &char>,
                           upper_bound,
                           i| {
    let mut first_non_empty = iterator.enumerate().skip_while(|&(_, &c)| c == ' ');
    let start = first_non_empty.next().unwrap().0 as isize;

    let mut last_non_empty = first_non_empty.skip_while(|&(_, &c)| c != ' ');
    let end = last_non_empty.next().unwrap_or((upper_bound, &'_')).0 as isize;

    boundaries.insert(constructor(i), start..end);
};

And then use it as such:

// construct all horizontal boundaries
(0..map.len()).for_each(|row| {
    find_boundaries(
        Orientation::horizontal,
        &mut map[row].iter(),
        map[row].len(),
        row,
    );
});

// construct all vertical boundaries
(0..map[0].len()).for_each(|col| {
    find_boundaries(
        Orientation::vertical,
        &mut ColumnIterator::new(&map, col),
        map.len(),
        col,
    );
});

Walking around the map

Once the 2nd part got introduced, you start to think about a way how not to copy-paste a lot of stuff (I haven't avoided it anyways…). For this problem, I've chosen to introduce a trait (i.e. an interface) for the 2D and 3D walkers.

trait Wrap: Clone {
    type State;

    // simulation
    fn is_blocked(&self) -> bool;
    fn step(&mut self, steps: isize);
    fn turn_left(&mut self);
    fn turn_right(&mut self);

    // movement
    fn next(&self) -> (Self::State, Direction);

    // final answer
    fn answer(&self) -> Output;
}

Each walker maintains its own state and also provides the functions that are used during the simulation. The “promised” methods are separated into:

• simulation-related: those are used during the simulation from the .fold()
• movement-related: just one method that holds most of the logic differences between 2D and 3D
• final answer: which extracts the proof of solution from the implementation-specific walker

Both the 2D and 3D versions borrow the original input and therefore you must annotate its lifetime:

struct Wrap2D<'a> {
    input: &'a Input,
    position: Position,
    direction: Direction,
}
impl<'a> Wrap2D<'a> {
    fn new(input: &'a Input) -> Wrap2D<'a> {
        // …

Problems

I have used a lot of closures for this problem, and once I introduced a parameter of an unknown type (apart from the fact that it implements a specific trait), I got suggested a “fix” for the compilation error that resulted in something that was not possible to parse, cause it, more than likely, violated the grammar.

In a similar fashion, I have been suggested changes that led to code that didn't make sense by just looking at it (there was no need to even try the changes); for example, one suggested change in a closure parameter caused the disappearance of the parameter name. 😄

      \n

Clippy

I have to admit that Clippy was rather helpful here; I'll include two examples of rather smart suggestions.

When writing the parsing for this problem, the first thing I spotted on the char was the .is_digit() function that takes a radix as a parameter. Clippy noticed that I use radix = 10 and suggested switching to .is_ascii_digit() that does exactly the same thing:

-                .take_while(|c| c.is_digit(10))
+                .take_while(|c| c.is_ascii_digit())

Another useful suggestion appeared when working with the iterators: I wanted to get the n-th element from one. You know the .skip(), you know the .next(), just “slap” them together and we're done for 😁 Well, I got suggested to use .nth() that does exactly the combination of the two mentioned methods on iterators:

-            match it.clone().skip(skip).next().unwrap() {
+            match it.clone().nth(skip).unwrap() {

      Day 23: Unstable Diffusion

      \n
      tl;dr

      Simulating movement of elves around with a set of specific rules.

      \n

Solution

There's not much to mention, since it's just a cellular automaton simulation (even though the AoC rules for cellular automata usually get out of hand 😉).

Although I had a need to determine the boundaries of the elves' positions and ended up with a nasty DRY violation. Knowing that you're looking for a maximum and a minimum that are computed, of course, exactly the same way except for the initial values and comparators, it looks like a rather simple fix, but typing in Rust is something else, right? In the end I settled for a function that computes both boundaries without any duplication by using a closure:

fn get_bounds(positions: &Input) -> (Vector2D<isize>, Vector2D<isize>) {
    let f = |init, cmp: &dyn Fn(isize, isize) -> isize| {
        positions
            .iter()
            .fold(Vector2D::new(init, init), |acc, elf| {
                Vector2D::new(cmp(acc.x(), elf.x()), cmp(acc.y(), elf.y()))
            })
    };

    (f(isize::MAX, &min::<isize>), f(isize::MIN, &max::<isize>))
}

This function returns a pair of 2D vectors that represent opposite corners of the bounding rectangle of all elves.

You might ask why we need a closure, and the answer is that positions cannot be captured from within a nested function, only via a closure. One more fun fact on top of that is the type of the comparator:

      \n
      &dyn Fn(isize, isize) -> isize
      \n

Once we remove the dyn keyword, the compiler yells at us and also includes a way to get a more thorough explanation of the error by running:

$ rustc --explain E0782

      \n

      which shows us

      \n

      Trait objects must include the dyn keyword.

      \n

      Erroneous code example:

      \n
      trait Foo {}
      fn test(arg: Box<Foo>) {} // error!
      \n

      Trait objects are a way to call methods on types that are not known until\nruntime but conform to some trait.

      \n

      Trait objects should be formed with Box<dyn Foo>, but in the code above\ndyn is left off.

      \n

This makes it harder to see that arg is a trait object and not simply a heap allocated type called Foo.

      \n

      To fix this issue, add dyn before the trait name.

      \n
      trait Foo {}
      fn test(arg: Box<dyn Foo>) {} // ok!
      \n

      This used to be allowed before edition 2021, but is now an error.

      \n
Rant

Not all of the explanations are as helpful though; in some cases they might be even more confusing than helpful, since they address very simple use cases.

As you can see, even in this case there are two sides to the explanation:

• it explains why you need to use dyn, but
• it still mentions that trait objects need to be heap-allocated via Box<T>, which, as you can see in my snippet, does not apply here 😄 IMO it's caused by the fact that we are borrowing it and therefore we don't need to care about its size or whereabouts.
C++ parallel

If you dive into the explanation above, you can notice that the Box<dyn Trait> pattern is very helpful for using types that are not known during compile time. You would use a very similar approach in C++ when parsing some data structure from input (let's say JSON, for example).

On the other hand, in this case it doesn't really make much sense, cause you can clearly see that the types are known at compile time, which in C++ could be easily resolved by templating the helper function (a Rust sketch of the equivalent idea follows).
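Just for illustration (a sketch, not code from the solution): the Rust counterpart of that C++ template is a generic comparator parameter, which gets resolved statically; fold_bound and xs are made-up names here:

fn fold_bound<F>(init: isize, cmp: F, xs: &[isize]) -> isize
where
    F: Fn(isize, isize) -> isize,
{
    // Same folding idea as in get_bounds, but the comparator is monomorphized
    // for each call site, so no trait object is needed.
    xs.iter().fold(init, |acc, &x| cmp(acc, x))
}

// fold_bound(isize::MAX, std::cmp::min::<isize>, &xs)
// fold_bound(isize::MIN, std::cmp::max::<isize>, &xs)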

      \n

Day 24: Blizzard Basin

tl;dr

Navigating your way through a basin with a series of blizzards that move around you as you move.

caution

It's the second to last day and I went “bonkers” on the Rust 😄 Proceed to read the Solution part at your own risk.

      \n

Solution

You are given a map with blizzards all over the place and you're supposed to find the minimum time it takes you to walk through the basin without getting caught in any of the blizzards.

      \n

Breakdown

A relatively simple, yet a bit annoying, approach can be taken. It's technically a shortest-path algorithm implementation with some relaxation restrictions and the ability to stay in one position for some time, so each vertex of the graph is determined by the position on the map and the timestamp. I have chosen to use Vector3D<usize>, since the x and y attributes can be used for the position and, well, let's use z for the timestamp, cause why not, right? 😉

      \n

Evaluating the blizzards

caution

I think that this is the most perverted abuse of traits in the whole 4 weeks of AoC in Rust…

The blizzards move along their respective directions in time and loop around within their respective row/column. Each vertex holds a position and a time, so we can just index the basin with the vertex itself, right? Yes, we can 😈

Fun fact

While writing this part, I've recognized unnecessary verbosity in the code and cleaned it up a bit. The changed version is shown here; the original was just more verbose.

I'll skip the boring parts of checking bounds and the entry/exit of the basin 😉 We can easily calculate the positions of the blizzards using modular arithmetic:

impl Index<Position> for Basin {
    type Output = char;

    fn index(&self, index: Position) -> &Self::Output {
        // ‹skipped boring parts›

        // We need to account for the loops of the blizzards
        let width = self.cols - 2;
        let height = self.rows - 2;

        let blizzard_origin = |size, d, t, i| ((i - 1 + size + d * (t % size)) % size + 1) as usize;
        [
            (
                index.y() as usize,
                blizzard_origin(width, -1, index.z(), index.x()),
                '>',
            ),
            (
                index.y() as usize,
                blizzard_origin(width, 1, index.z(), index.x()),
                '<',
            ),
            (
                blizzard_origin(height, -1, index.z(), index.y()),
                index.x() as usize,
                'v',
            ),
            (
                blizzard_origin(height, 1, index.z(), index.y()),
                index.x() as usize,
                '^',
            ),
        ]
        .iter()
        .find_map(|&(y, x, direction)| {
            if self.map[y][x] == direction {
                Some(&self.map[y][x])
            } else {
                None
            }
        })
        .unwrap_or(&'.')
    }
}

As you can see, there is an expression for calculating the original position and it's used multiple times, so why not pull it out into a lambda, right? 😉

I couldn't get rustfmt to format the for-loop nicely, so I've just decided to go with iterating over the elements of a slice. I have used, once again, a combination of two functions (find_map in this case) to do 2 things at once, and at the end, if we haven't found any blizzard, we just return the empty space.

I think it's a very nice (and naughty) way to use the Index trait, don't you think?

      \n

Shortest-path algorithm

For the shortest path you can choose and adjust any of the common shortest-path algorithms; in my case, I have decided to use A* instead of Dijkstra's algorithm, since it better reflects the cost function.

Comparison of costs

With Dijkstra's algorithm I would proceed with the time attribute used as a priority for the queue.

Whereas with A*, I have chosen to use both the time and the Manhattan distance, which promotes vertices closer to the exit and with a minimum time taken.

      \n

The cost function is, of course, a closure 😉

let cost = |p: Position| p.z() as usize + exit.y().abs_diff(p.y()) + exit.x().abs_diff(p.x());

And also for checking the possible moves from the current vertex, I have implemented yet another closure that yields an iterator with the next moves:

let next_positions = |p| {
    [(0, 0, 1), (0, -1, 1), (0, 1, 1), (-1, 0, 1), (1, 0, 1)]
        .iter()
        .filter_map(move |&(x, y, t)| {
            let next_p = p + Vector3D::new(x, y, t);

            if basin[next_p] == '.' {
                Some(next_p)
            } else {
                None
            }
        })
};

Min-heap

In this case I needed a priority queue that treats the elements with the lowest cost as the most prioritized ones. Rust only offers you the BinaryHeap and that is a max-heap. One of the ways to achieve a min-heap is to wrap the elements in a Reverse (as is even shown in the linked docs of the BinaryHeap). However the wrapping affects the type of the heap, and popping the most prioritized elements also yields values wrapped in the Reverse.

For this purpose I have just taken the max-heap and wrapped it as a whole in a separate structure providing just the desired methods:

use std::cmp::{Ord, Reverse};
use std::collections::BinaryHeap;

pub struct MinHeap<T> {
    heap: BinaryHeap<Reverse<T>>,
}

impl<T: Ord> MinHeap<T> {
    pub fn new() -> MinHeap<T> {
        MinHeap {
            heap: BinaryHeap::new(),
        }
    }

    pub fn push(&mut self, item: T) {
        self.heap.push(Reverse(item))
    }

    pub fn pop(&mut self) -> Option<T> {
        self.heap.pop().map(|Reverse(x)| x)
    }
}

impl<T: Ord> Default for MinHeap<T> {
    fn default() -> Self {
        Self::new()
    }
}
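Not from the original solution, but a quick usage sketch to show the wrapper in action; the element with the lowest cost comes out first:

let mut heap = MinHeap::new();
heap.push(3);
heap.push(1);
heap.push(2);

assert_eq!(heap.pop(), Some(1));
assert_eq!(heap.pop(), Some(2));
assert_eq!(heap.pop(), Some(3));
assert_eq!(heap.pop(), None);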

The rest is just the algorithm implementation, which is not that interesting.

      \n

Day 25: Full of Hot Air

tl;dr

Playing around with numbers in a special base.

Getting flashbacks to IB111 Foundations of Programming… A very nice “problem” with a rather easy solution, as the last day always seems to be.

      \n

Solution

Implementing 2 functions: converting from the SNAFU base and back to the SNAFU base representation. Let's do a bit more though! I have implemented two functions:

• from_snafu
• to_snafu

Now it is apparent that all I do is number-to-string and string-to-number conversion. Hmm… that sounds familiar, doesn't it? Let's introduce a structure for the SNAFU numbers and implement the traits that we need.

      \n

Let's start with a structure:

#[derive(Debug, PartialEq, Eq, PartialOrd, Ord)]
struct SNAFU {
    value: i64,
}

Converting from &str

We will start by implementing the FromStr trait that will help us parse our input. This is rather simple: I can just take the from_snafu function, copy-paste it into the from_str method, and the number I get will be wrapped in a Result and the SNAFU structure.
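The resulting impl isn't shown here, so this is only a sketch of what it might look like, assuming the usual SNAFU digit values (2, 1, 0, - for minus one, = for minus two):

use std::str::FromStr;

impl FromStr for SNAFU {
    type Err = String;

    fn from_str(s: &str) -> Result<Self, Self::Err> {
        // Fold the digits most-significant first, just like from_snafu would.
        let value = s.chars().try_fold(0i64, |acc, c| {
            let digit = match c {
                '2' => 2,
                '1' => 1,
                '0' => 0,
                '-' => -1,
                '=' => -2,
                _ => return Err(format!("invalid SNAFU digit: {c}")),
            };
            Ok(acc * 5 + digit)
        })?;

        Ok(SNAFU { value })
    }
}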

      \n

Converting to String

This is more fun. In some cases you need to implement only one trait and others are automatically implemented using that one trait. In our case, if you look in the documentation, you can see that the ToString trait is automatically implemented for any type that implements the Display trait.

Let's implement the Display trait then. We should be able to use the to_snafu function and just take the self.value from the SNAFU structure.
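Again only a sketch, assuming a to_snafu(i64) -> String helper as described above:

use std::fmt;

impl fmt::Display for SNAFU {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // Delegate to the existing conversion function.
        write!(f, "{}", to_snafu(self.value))
    }
}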

      \n

And for the convenience of tests, we can also implement a rather simple From<i64> trait for the SNAFU.
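A possible shape of that impl (a sketch, not necessarily the exact code from the solution):

impl From<i64> for SNAFU {
    fn from(value: i64) -> Self {
        SNAFU { value }
    }
}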

      \n

Adjusting the code

After those changes we need to adjust the code and tests.

Parsing of the input is very easy: before, we just read the lines; now we parse everything into the structs:

      \n
           fn parse_input<P: AsRef<Path>>(pathname: P) -> Input {
      - file_to_lines(pathname)
      + file_to_structs(pathname)
      }
      \n

      Part 1 needs to be adjusted a bit too:

      \n
           fn part_1(input: &Input) -> Output {
      - to_snafu(input.iter().map(|s| from_snafu(s)).sum())
      + SNAFU::from(input.iter().map(|s| s.value).sum::<i64>()).to_string()
      }
      \n

You can also see that it simplifies the meaning a bit and is more explicit than the previous version.

      \n

      And for the tests:

      \n
           #[test]
      fn test_from() {
      - for (n, s) in EXAMPLES.iter() {
      - assert_eq!(from_snafu(s), *n);
      + for (&n, s) in EXAMPLES.iter() {
      + assert_eq!(s.parse::<SNAFU>().unwrap().value, n);
      }
      }

      #[test]
      fn test_to() {
      - for (n, s) in EXAMPLES.iter() {
      - assert_eq!(to_snafu(*n), s.to_string());
      + for (&n, s) in EXAMPLES.iter() {
      + assert_eq!(SNAFU::from(n).to_string(), s.to_string());
      }
      \n

      Summary

      \n

      Let's wrap the whole thing up! Keeping in mind both AoC and the Rust…

      \n

[Image: “Finished …”]

      \n

Advent of Code

This year was quite fun, even though most of the solutions and posts came in later on (cough in '23 cough). Day 22 was the most obnoxious one… And it also feels like I used priority queues and tree data structures a lot 👀

      \n

with Rust

I must admit that a lot of compiler warnings and errors were very useful. Even though I still found some instances where they didn't help at all, or even caused worse issues than I already had. Compilation times have been addressed with caching.

Building my first tree data structure in Rust has been a very “interesting” journey. Being able to write a more generic BFS algorithm that allows you to not duplicate code while still maintaining the desired functionality contributes to very readable code.

I am definitely much more aware of the basic things that bloated Python is missing, yet Rust has them…

Using explicit types and writing down placeholder functions with todo!() macros is very pleasant, since it allows you to easily navigate the type system during development, even when you're not yet sure how you are going to put the smaller pieces together.

I have used a plethora of traits and also implemented some of them, either to be idiomatic or to exploit the syntactic sugar they offer. Deriving the default trait implementations is also very helpful in a lot of cases, e.g. debugging output, copying, equality comparison, etc.

I confess to touching the more “cursed” parts of Rust, such as macros to declutter the copy-paste in tests, or writing my own structures that need to carry a lifetime for their own fields.

tl;dr Relatively pleasant language until you hit a brick wall 😉

      See you next year! Maybe in Rust, maybe not 🙃
(Post: 4th week of Advent of Code '22 in Rust, https://blog.mfocko.xyz/blog/aoc-2022/4th-week. Summary: “Surviving fourth week in Rust.”)

(Next entry: https://blog.mfocko.xyz/blog/aoc-2022/3rd-week)

      Let's go through the third week of Advent of Code in Rust.

      \n

      Day 15: Beacon Exclusion Zone

      \n
      tl;dr

      Triangulating a distress beacon based on the information from the sensors.

      \n

      Solution

      \n

      Relatively easy thing to implement, no major Rust issues hit.

      \n

      Day 16: Proboscidea Volcanium

      \n
      tl;dr

      Finding a max flow in a graph given some time constraints.

      \n

      Solution

      \n

      I have used some interesting things to implement this and make it easier for me.

      \n

Indexing in graph

I have come across a situation where I needed to keep more information regarding the graph… In that case you can, of course, create a structure and keep it in there, but once you have multiple members in the structure it gets harder to work with, since you need to address the fields in the structure. When you work with a graph, you frequently need to access the vertices, and in this case it felt a lot easier to implement indexing into the graph, rather than explicitly accessing the underlying data structure.

Here you can see a rather short snippet from the solution that allows you to “index” the graph:

impl Index<&str> for Graph {
    type Output = Vertex;

    fn index(&self, index: &str) -> &Self::Output {
        &self.g[index]
    }
}

Cartesian product

During the implementation I had to utilize the Floyd-Warshall algorithm for finding the shortest paths between pairs of vertices and utilized the iproduct! macro from itertools. It is a very useful helper that allows you to keep the nesting of the loops at a minimum level while still maintaining the same functionality; a small sketch of that follows.
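A sketch of how the triple loop collapses (not the solution's code; the dist matrix and u64 distances with saturating addition are assumptions made for the example):

use itertools::iproduct;

// `dist` is an n×n matrix of shortest distances, with `u64::MAX` as “infinity”.
fn floyd_warshall(dist: &mut Vec<Vec<u64>>) {
    let n = dist.len();

    // One iproduct! instead of three nested for-loops; the first range is the
    // outermost loop, so `k` still varies the slowest.
    for (k, i, j) in iproduct!(0..n, 0..n, 0..n) {
        let relaxed = dist[i][k].saturating_add(dist[k][j]);
        if relaxed < dist[i][j] {
            dist[i][j] = relaxed;
        }
    }
}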

      \n

“Implementing” an iterator

For the second part, you get to split the work between 2 actors. That way you can achieve higher efficiency of the whole process that you're planning, but it also makes it harder to evaluate algorithmically, since you need to check the different ways the work can be split.

Being affected by functional programming brain damage™️, I have chosen to do this part with a function that returns an iterator over the possible ways:

fn pairings(
    valves: &BTreeSet<String>,
) -> impl Iterator<Item = (BTreeSet<String>, BTreeSet<String>)> + '_ {
    let mapping = valves.iter().collect_vec();

    let max_mask = 1 << (valves.len() - 1);

    (0..max_mask).map(move |mask| {
        let mut elephant = BTreeSet::new();
        let mut human = BTreeSet::new();

        for (i, &v) in mapping.iter().enumerate() {
            if (mask & (1 << i)) == 0 {
                human.insert(v.clone());
            } else {
                elephant.insert(v.clone());
            }
        }

        (human, elephant)
    })
}

      Day 17: Pyroclastic Flow

      \n
      tl;dr

      Simulating an autonomous Tetris where pieces get affected by a series of jets of\nhot gas.

      \n

      Solution

      \n

      Similarly to the previous day I have created some iterators 😄

      \n

Collision detection

Once you need to check for collisions, it is very helpful to be able to just iterate through the positions that can actually collide with the wall or another piece.

To get the desired behaviour, you can just compose a few smaller functions:

fn occupied(shape: &[Vec<char>]) -> impl Iterator<Item = Position> + '_ {
    shape.iter().enumerate().flat_map(|(y, row)| {
        row.iter().enumerate().filter_map(move |(x, c)| {
            if c == &'#' {
                Some(Vector2D::new(x as isize, y as isize))
            } else {
                None
            }
        })
    })
}

In the end, we get relative positions which we can adjust later, once given the specific position from the iterator. You can see some interesting parts in this:

• .enumerate() allows us to get both the indices (coordinates) and the line or, later on, the character itself,
• .flat_map() flattens the iterator, i.e. when we return another iterator, they just get chained instead of iterating over iterators (which sounds pretty disturbing, doesn't it?),
• and finally .filter_map(), which is pretty similar to the “basic” .map() with one key difference: it expects the items of an iterator to be mapped to an Option<T>, from which it ignores nothing (as in None 😉) and also unwraps the values from Some(…).

Infinite iterator

In the solution we cycle through both the Tetris-like shapes that fall down and the jets that move our pieces around. Initially I have implemented my own infinite iterator that just yields the indices. It is a very simple, yet powerful, piece of code:

struct InfiniteIndex {
    size: usize,
    i: usize,
}

impl InfiniteIndex {
    fn new(size: usize) -> InfiniteIndex {
        InfiniteIndex { size, i: size - 1 }
    }
}

impl Iterator for InfiniteIndex {
    type Item = usize;

    fn next(&mut self) -> Option<Self::Item> {
        self.i = (self.i + 1) % self.size;
        Some(self.i)
    }
}

However, when I'm looking at the code now, it doesn't really make much sense… Guess what, we can use a built-in function that is implemented on iterators for that! The function is called .cycle() (see the sketch below).
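A minimal illustration (not from the solution) of what .cycle() gives you:

// Yields 0, 1, 2, 0, 1, 2, … without any custom iterator.
let mut indices = (0..3).cycle();

assert_eq!(indices.next(), Some(0));
assert_eq!(indices.next(), Some(1));
assert_eq!(indices.next(), Some(2));
assert_eq!(indices.next(), Some(0));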

      \n

On the other hand, I am not going to switch to that function, since it would introduce another myriad of issues, caused by the fact that I create the iterators right away in the constructor of my structure and the iterators would borrow both the jets and the shapes, which would introduce a lifetime dependency into the structure.

      \n

      Day 18: Boiling Boulders

      \n
      tl;dr

      Let's compute a surface area of some obsidian approximated via coordinates of\ncubes.

      \n

Solution

This day is kinda interesting, because it shows how easily you can complicate the problem and also how much you can screw yourself over with optimization and a “smart” approach.

For the first part you need to find the surface area of an obsidian blob that is approximated by cubes. Now, that is a very easy thing to do: just keep track of the already added cubes and check whether a newly added cube touches any face of any other cube. Simple, and with a BTreeSet a relatively efficient way to do it.

However, the second part lets you in on a secret: there may be some surface area on the “inside” too, and you want to know only the one on the outside of the obsidian. I have seen some solutions later, but if you check your data, you might notice that the bounding box of all the cubes isn't that big at all. Therefore I chose to pre-construct the box beforehand, fill in the cubes and then just run a BFS turning all the lava on the outside into the air. Now you just need to check the cubes and count how many of their faces touch the air. A sketch of that flood fill follows.
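The following is only a sketch of that approach (not the solution's code), assuming the cubes are kept in a HashSet of (i32, i32, i32) triples with non-negative coordinates:

use std::collections::{HashSet, VecDeque};

type Cube = (i32, i32, i32);

// Flood-fill the padded bounding box from a corner and count the cube faces
// that touch a cell reachable from the outside.
fn outside_surface(cubes: &HashSet<Cube>) -> usize {
    // Pad the box by one cell on each side so the BFS can wrap around the droplet.
    let min = -1;
    let max = cubes.iter().flat_map(|&(x, y, z)| [x, y, z]).max().unwrap() + 1;

    let neighbours = |(x, y, z): Cube| {
        [
            (x + 1, y, z),
            (x - 1, y, z),
            (x, y + 1, z),
            (x, y - 1, z),
            (x, y, z + 1),
            (x, y, z - 1),
        ]
    };

    let start = (min, min, min);
    let mut outside = HashSet::from([start]);
    let mut queue = VecDeque::from([start]);

    while let Some(cell) = queue.pop_front() {
        for n in neighbours(cell) {
            let in_bounds = [n.0, n.1, n.2].iter().all(|c| (min..=max).contains(c));
            if in_bounds && !cubes.contains(&n) && outside.insert(n) {
                queue.push_back(n);
            }
        }
    }

    cubes
        .iter()
        .flat_map(|&c| neighbours(c))
        .filter(|n| outside.contains(n))
        .count()
}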

      \n

      Day 19: Not Enough Minerals

      \n
      tl;dr

      Finding out the best strategy for building robots to collect geodes.

      \n

      Solution

      \n

Not much interesting stuff to mention, apart from the suggestion to never believe that the default implementation given by a derive macro is what you want; it doesn't have to be. 😄

      \n

      Day 20: Grove Positioning System

      \n
      tl;dr

      Shuffling around the circular linked list to find the coordinates.

      \n

Now, a small rant for this day is in place. They've never mentioned that the coordinates can repeat and therefore the values are non-unique. This is something that did not happen in the given sample, but was present in the user input. It took »a lot« to realize that this was the issue.

      \n

Solution

I have tried implementing a circular linked list for this… and I have failed miserably. To be fair, I still have no clue why. It was “fun” to play around with the Rc<RefCell<T>>. In the end I failed on a wrong answer. I have also encountered a rather interesting issue with the .borrow_mut() method being used on Rc<RefCell<T>>.

      \n

.borrow_mut()

Consider the following snippet of code (taken from the documentation):

use std::cell::{RefCell, RefMut};
use std::collections::HashMap;
use std::rc::Rc;
// use std::borrow::BorrowMut;

fn main() {
    let shared_map: Rc<RefCell<_>> = Rc::new(RefCell::new(HashMap::new()));
    // Create a new block to limit the scope of the dynamic borrow
    {
        let mut map: RefMut<_> = shared_map.borrow_mut();
        map.insert("africa", 92388);
        map.insert("kyoto", 11837);
        map.insert("piccadilly", 11826);
        map.insert("marbles", 38);
    }

    // Note that if we had not let the previous borrow of the cache fall out
    // of scope then the subsequent borrow would cause a dynamic thread panic.
    // This is the major hazard of using `RefCell`.
    let total: i32 = shared_map.borrow().values().sum();
    println!("{total}");
}

We allocate a hash map on the heap and then, in the inner block, we borrow it as a mutable reference, so that we can use it.

note

It is a very primitive example of Rc<RefCell<T>> and a mutable borrow.

If you uncomment the 4th line with use std::borrow::BorrowMut;, you cannot compile the code anymore, because of:

      \n
         Compiling playground v0.0.1 (/playground)
      error[E0308]: mismatched types
      --> src/main.rs:10:34
      |
      10 | let mut map: RefMut<_> = shared_map.borrow_mut();
      | --------- ^^^^^^^^^^^^^^^^^^^^^^^ expected struct `RefMut`, found mutable reference
      | |
      | expected due to this
      |
      = note: expected struct `RefMut<'_, _>`
      found mutable reference `&mut Rc<RefCell<HashMap<_, _>>>`

      error[E0599]: no method named `insert` found for struct `RefMut<'_, _>` in the current scope
      --> src/main.rs:11:13
      |
      11 | map.insert(\"africa\", 92388);
      | ^^^^^^ method not found in `RefMut<'_, _>`

      error[E0599]: no method named `insert` found for struct `RefMut<'_, _>` in the current scope
      --> src/main.rs:12:13
      |
      12 | map.insert(\"kyoto\", 11837);
      | ^^^^^^ method not found in `RefMut<'_, _>`

      error[E0599]: no method named `insert` found for struct `RefMut<'_, _>` in the current scope
      --> src/main.rs:13:13
      |
      13 | map.insert(\"piccadilly\", 11826);
      | ^^^^^^ method not found in `RefMut<'_, _>`

      error[E0599]: no method named `insert` found for struct `RefMut<'_, _>` in the current scope
      --> src/main.rs:14:13
      |
      14 | map.insert(\"marbles\", 38);
      | ^^^^^^ method not found in `RefMut<'_, _>`

      Some errors have detailed explanations: E0308, E0599.
      For more information about an error, try `rustc --explain E0308`.
      error: could not compile `playground` due to 5 previous errors
      \n

It might seem a bit ridiculous. However, I got to a point where the compiler suggested use std::borrow::BorrowMut; and it resulted in breaking parts of the code that worked previously. I think it may be a good idea to go over what is happening here.

.borrow_mut() on Rc<RefCell<T>>

Let's consider a variable x of type Rc<RefCell<T>>. What happens when you call .borrow_mut() on it? We can look at the Rc type, and… hang on! There is neither a .borrow_mut() method nor a BorrowMut trait implemented. How can we call it then?

Let's go further and we can see that RefCell<T> implements a .borrow_mut() method. OK, but how can we call it on the Rc<T>? Easily! Rc<T> implements Deref<Target = T> and therefore you can call methods on Rc<T> objects as if they were T objects. If we read up on Deref coercion, we can see the following:

If T implements Deref<Target = U>, …:

• T implicitly implements all the (immutable) methods of the type U.

What is the requirement for the .borrow_mut() on RefCell<T>? Well, it needs only &self, so, through Deref, the .borrow_mut() is available on Rc<RefCell<T>> as well.

BorrowMut trait

I have not been able to find a lot on this trait. My guess is that it provides a method instead of the syntactic sugar (&mut x) for the mutable borrow. And it also provides default implementations for these types:

      \n
      impl BorrowMut<str> for String

      impl<T> BorrowMut<T> for &mut T
      where
      T: ?Sized,

      impl<T> BorrowMut<T> for T
      where
      T: ?Sized,

      impl<T, A> BorrowMut<[T]> for Vec<T, A>
      where
      A: Allocator,

      impl<T, A> BorrowMut<T> for Box<T, A>
      where
      A: Allocator,
      T: ?Sized,

      impl<T, const N: usize> BorrowMut<[T]> for [T; N]
      \n
      Conflict
      \n

      Now the question is why did it break the code… My first take was that the type\nRc<RefCell<T>> has some specialized implementation of the .borrow_mut() and\nthe use overrides it with the default, which is true in a sense. However\nthere is no specialized implementation. Let's have a look at the trait and the\ntype signature on the RefCell<T>:

      \n
      // trait
      pub trait BorrowMut<Borrowed>: Borrow<Borrowed>
      where
      Borrowed: ?Sized,
      {
      fn borrow_mut(&mut self) -> &mut Borrowed;
      }

      // ‹RefCell<T>.borrow_mut()› type signature
      pub fn borrow_mut(&self) -> RefMut<'_, T>
      \n

      I think that we can definitely agree on the fact that RefMut<'_, T> is not the\nRefCell<T>.

      \n

      In my opinion, RefCell<T> implements a separate .borrow_mut() rather\nthan implementing the interface, because it cannot satisfy the type requirements\nof the trait.

      \n
      caution

      I wonder how are we expected to deal with this conflict, if and when, we need\nboth the .borrow_mut() of the trait and .borrow_mut() of the RefCell<T>.

      \n
      Fun fact

      I was suggested by the compiler to do use std::borrow::BorrowMut; and break the\ncode.

      So much for the almighty and helpful compiler…

      \n

Day 21: Monkey Math

tl;dr

Computing an expression tree and then also finding the ideal value for a node.

Solution

Relatively simple, until you get to the 2nd part, where you start to practice a lot of copy-paste. I have managed to sneak some perverted stuff in there though :) Let's go through the details.

      \n

      Default trait

      \n

      For the first time and twice I had a need to have a default value for my types,\nenumerations in this case. Rust offers a very nice trait1 that is described\nas:

      \n
      \n

      A trait for giving a type a useful default value.

      \n
      \n

      I guess it sums it up nicely. The more interesting part about this is the fact\nthat you can use the macro machinery to save yourself some typing. If you have\nenumeration of which the default value doesn't bear any parameter, you can just\ndo2:

      \n
      #[derive(Default)]
      enum Color {
      #[default]
      White,
      Gray,
      Black,
      }
      \n

      Abusing negation

      \n

      If you want to use a unary minus operator on your own type, you can implement\na Neg trait3. I was dealing with a binary tree and needed a way how to look\nat the other side, so I have just implemented the negation for flipping between\nleft and right 😄

      \n

      Footnotes

      \n
        \n
      1. \n

        Default docs

        \n
      2. \n
      3. \n

        Pardon my example from the graph algorithms ;)

        \n
      4. \n
      5. \n

        Neg docs

        \n
      6. \n
      \n
      ", + "content_html": "

      Let's go through the third week of Advent of Code in Rust.

      \n

      Day 15: Beacon Exclusion Zone

      \n
      tl;dr

      Triangulating a distress beacon based on the information from the sensors.

      \n

      Solution

      \n

      Relatively easy thing to implement, no major Rust issues hit.

      \n

      Day 16: Proboscidea Volcanium

      \n
      tl;dr

      Finding a max flow in a graph given some time constraints.

      \n

      Solution

      \n

      I have used some interesting things to implement this and make it easier for me.

      \n

      Indexing in graph

      \n

I have come across a situation where I needed to keep more information regarding the graph… You can, of course, create a structure for it, but once that structure has multiple members it gets more tedious to work with, since you have to address the fields explicitly. When you work with a graph, you frequently need to access the vertices, and in this case it felt a lot easier to implement indexing on the graph itself rather than explicitly accessing the underlying data structure.

      \n

      Here you can see a rather short snippet from the solution that allows you to\n“index” the graph:

      \n
impl Index<&str> for Graph {
    type Output = Vertex;

    fn index(&self, index: &str) -> &Self::Output {
        &self.g[index]
    }
}
      \n
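To make the idea a bit more tangible, here is a minimal, self-contained sketch of the same trick; the Vertex and Graph fields (flow_rate, g) are made up for the illustration and are not the ones from the actual solution, and the mutable counterpart via IndexMut is added on top:

use std::collections::HashMap;
use std::ops::{Index, IndexMut};

// hypothetical vertex payload, just for the illustration
struct Vertex {
    flow_rate: u32,
}

// hypothetical graph wrapper; the real solution keeps more members around,
// which is exactly why the indexing pays off
#[derive(Default)]
struct Graph {
    g: HashMap<String, Vertex>,
}

impl Index<&str> for Graph {
    type Output = Vertex;

    fn index(&self, index: &str) -> &Self::Output {
        &self.g[index]
    }
}

// the mutable counterpart cannot reuse ‹HashMap›'s ‹Index› (it is read-only),
// so it goes through ‹get_mut› instead
impl IndexMut<&str> for Graph {
    fn index_mut(&mut self, index: &str) -> &mut Self::Output {
        self.g.get_mut(index).expect("unknown vertex")
    }
}

fn main() {
    let mut graph = Graph::default();
    graph.g.insert("AA".to_string(), Vertex { flow_rate: 0 });

    // both reads and writes now go through the short ‹graph["AA"]› syntax
    graph["AA"].flow_rate = 20;
    assert_eq!(graph["AA"].flow_rate, 20);
}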

      Cartesian product

      \n

During the implementation I had to use the Floyd-Warshall algorithm for finding the shortest paths between all pairs of vertices, and for that I reached for the iproduct! macro from itertools. It is a very useful helper that yields the Cartesian product of the given ranges, which lets you keep the nesting of loops to a minimum while maintaining the same functionality.

      \n
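As a small sketch of what that looks like (the distance matrix below is made up for the example; the only assumption is that the itertools crate is available), the three nested Floyd-Warshall loops collapse into a single loop over all (k, i, j) triples:

use itertools::iproduct;

fn main() {
    // a tiny made-up distance matrix; INF stands for “no direct edge” and is
    // kept at half of the maximum so the addition below cannot overflow
    const INF: u32 = u32::MAX / 2;
    let mut dist = vec![
        vec![0, 3, INF],
        vec![INF, 0, 1],
        vec![2, INF, 0],
    ];
    let n = dist.len();

    // ‹k› comes from the first range, so it is the outermost loop, exactly as
    // Floyd-Warshall requires
    for (k, i, j) in iproduct!(0..n, 0..n, 0..n) {
        let via = dist[i][k] + dist[k][j];
        if via < dist[i][j] {
            dist[i][j] = via;
        }
    }

    assert_eq!(dist[1][0], 3); // 1 → 2 → 0
}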

      “Implementing” an iterator

      \n

      For the second part, you get to split the work between 2 actors. That way you\ncan achieve higher efficiency of the whole process that you're planning, but it\nalso makes it harder to evaluate algorithmically, since you need to check the\ndifferent ways the work can be split.

      \n

Being affected by functional programming brain damage™️, I have chosen to do this part with a function that returns an iterator over the possible ways:

      \n
fn pairings(
    valves: &BTreeSet<String>,
) -> impl Iterator<Item = (BTreeSet<String>, BTreeSet<String>)> + '_ {
    let mapping = valves.iter().collect_vec();

    let max_mask = 1 << (valves.len() - 1);

    (0..max_mask).map(move |mask| {
        let mut elephant = BTreeSet::new();
        let mut human = BTreeSet::new();

        for (i, &v) in mapping.iter().enumerate() {
            if (mask & (1 << i)) == 0 {
                human.insert(v.clone());
            } else {
                elephant.insert(v.clone());
            }
        }

        (human, elephant)
    })
}
      \n

      Day 17: Pyroclastic Flow

      \n
      tl;dr

      Simulating an autonomous Tetris where pieces get affected by a series of jets of\nhot gas.

      \n

      Solution

      \n

      Similarly to the previous day I have created some iterators 😄

      \n

      Collision detection

      \n

      Once you need to check for collisions it is very helpful to be able to just\niterate through the positions that can actually collide with the wall or other\npiece.

      \n

To get the desired behaviour, you can just compose a few smaller functions:

      \n
fn occupied(shape: &[Vec<char>]) -> impl Iterator<Item = Position> + '_ {
    shape.iter().enumerate().flat_map(|(y, row)| {
        row.iter().enumerate().filter_map(move |(x, c)| {
            if c == &'#' {
                Some(Vector2D::new(x as isize, y as isize))
            } else {
                None
            }
        })
    })
}
      \n

In the end, we get relative positions which we can adjust later, when given the specific positions from the iterator. You can see some interesting parts in this:

      \n
        \n
• .enumerate() allows us to get both the indices (coordinates) and the line or, later on, the character itself,
• .flat_map() flattens the iterator, i.e. when we return another iterator, they just get chained instead of iterating over iterators (which sounds pretty disturbing, doesn't it?),
• and finally .filter_map(), which is pretty similar to the “basic” .map() with one key difference: it expects the items of the iterator to be mapped to an Option<T>, of which it ignores nothing (as in the Nones 😉) and unwraps the values from the Some(…)s (see the small example right after this list).
      \n
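To make it concrete, here is a self-contained variant of the same composition; Position is simplified to a plain tuple here, since the original snippet uses a Vector2D type that is not defined above:

type Position = (isize, isize);

fn occupied(shape: &[Vec<char>]) -> impl Iterator<Item = Position> + '_ {
    shape.iter().enumerate().flat_map(|(y, row)| {
        row.iter().enumerate().filter_map(move |(x, c)| {
            if c == &'#' {
                Some((x as isize, y as isize))
            } else {
                None
            }
        })
    })
}

fn main() {
    let plus = vec![
        vec!['.', '#', '.'],
        vec!['#', '#', '#'],
        vec!['.', '#', '.'],
    ];

    // relative coordinates of the five ‹#› cells, row by row
    let cells: Vec<Position> = occupied(&plus).collect();
    assert_eq!(cells, vec![(1, 0), (0, 1), (1, 1), (2, 1), (1, 2)]);
}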

      Infinite iterator

      \n

      In the solution we cycle through both Tetris-like shapes that fall down and the\njets that move our pieces around. Initially I have implemented my own infinite\niterator that just yields the indices. It is a very simple, yet powerful, piece\nof code:

      \n
struct InfiniteIndex {
    size: usize,
    i: usize,
}

impl InfiniteIndex {
    fn new(size: usize) -> InfiniteIndex {
        InfiniteIndex { size, i: size - 1 }
    }
}

impl Iterator for InfiniteIndex {
    type Item = usize;

    fn next(&mut self) -> Option<Self::Item> {
        self.i = (self.i + 1) % self.size;
        Some(self.i)
    }
}
      \n

However, when I look at the code now, it doesn't really make much sense… Guess what, there is a built-in function on iterators for exactly that! The function is called .cycle().

      \n
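A tiny sketch of what using it would look like (the jet characters below are made up for the example):

fn main() {
    let jets = ['<', '>', '>'];

    // ‹.cycle()› turns any cloneable iterator into an endless, repeating one
    let first_seven: Vec<char> = jets.iter().cycle().take(7).copied().collect();

    assert_eq!(first_seven, vec!['<', '>', '>', '<', '>', '>', '<']);
}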

On the other hand, I am not going to switch to that function, since it would introduce another myriad of issues: I create the iterators right away in the constructor of my structure, and they would borrow both the jets and the shapes, which would introduce a lifetime dependency into the structure.

      \n

      Day 18: Boiling Boulders

      \n
      tl;dr

      Let's compute a surface area of some obsidian approximated via coordinates of\ncubes.

      \n

      Solution

      \n

This day is kinda interesting, because it shows how easily you can complicate the problem and how much you can screw yourself over with optimization and a “smart” approach.

      \n

For the first part you need to find the surface area of the obsidian that is approximated by cubes. That is a very easy thing to do: just keep track of the already added cubes and check whether a newly added cube touches a face of any other cube. Simple, and with a BTreeSet also a relatively efficient way to do it.

      \n

However, the second part lets you in on a secret: there may be some surface area on the “inside” too, and you want to count only the one on the outside of the obsidian. I have seen some solutions later, but if you check your data, you might notice that the bounding box of all the cubes isn't that big at all. Therefore I chose to pre-construct the box beforehand, fill in the cubes and then just run a BFS from the outside, flooding everything it can reach with “air”. Now you just need to go through the cubes and count how many of their faces touch that air.

      \n
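A minimal sketch of that idea, assuming the cubes are kept in a BTreeSet of (x, y, z) triples and that the droplet fits into a small bounding box; the bound and the sample cubes below are made up and are not from the real input:

use std::collections::{BTreeSet, VecDeque};

type Cube = (i32, i32, i32);

fn exterior_surface(cubes: &BTreeSet<Cube>, bound: i32) -> usize {
    // the six face-neighbours of a cube
    let neighbours = |(x, y, z): Cube| {
        [
            (x + 1, y, z), (x - 1, y, z),
            (x, y + 1, z), (x, y - 1, z),
            (x, y, z + 1), (x, y, z - 1),
        ]
    };

    // BFS from a corner that is guaranteed to lie outside of the droplet,
    // flooding every reachable cell of the (slightly enlarged) bounding box
    // with “outside air”
    let mut outside = BTreeSet::new();
    let mut queue = VecDeque::new();
    queue.push_back((-1, -1, -1));
    while let Some(cube) = queue.pop_front() {
        for n in neighbours(cube) {
            let in_box = [n.0, n.1, n.2].iter().all(|c| (-1..=bound + 1).contains(c));
            if in_box && !cubes.contains(&n) && outside.insert(n) {
                queue.push_back(n);
            }
        }
    }

    // count the faces of the lava cubes that touch the outside air
    cubes
        .iter()
        .map(|&c| neighbours(c).iter().filter(|&n| outside.contains(n)).count())
        .sum()
}

fn main() {
    // two unit cubes glued together expose 10 faces
    let cubes: BTreeSet<Cube> = [(1, 1, 1), (2, 1, 1)].into_iter().collect();
    assert_eq!(exterior_surface(&cubes, 3), 10);
}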

      Day 19: Not Enough Minerals

      \n
      tl;dr

      Finding out the best strategy for building robots to collect geodes.

      \n

      Solution

      \n

Not much interesting stuff to mention, apart from the suggestion to never believe that the default implementation given by a derive macro is what you want; it doesn't have to be. 😄

      \n

      Day 20: Grove Positioning System

      \n
      tl;dr

      Shuffling around the circular linked list to find the coordinates.

      \n

Now, a small rant for this day is in order. They never mentioned that the coordinates can repeat, and therefore the values are non-unique. This did not happen in the given sample, but it was present in the user input. It took »a lot« to realize that this was the issue.

      \n

      Solution

      \n

I have tried implementing a circular linked list for this… and I have failed miserably. To be fair, I still have no clue why. It was “fun” to play around with the Rc<RefCell<T>>. In the end I failed with a wrong answer. I have also encountered a rather interesting issue with the .borrow_mut() method being used on Rc<RefCell<T>>.

      \n

      .borrow_mut()

      \n

      Consider the following snippet of the code (taken from the documentation):

      \n
use std::cell::{RefCell, RefMut};
use std::collections::HashMap;
use std::rc::Rc;
// use std::borrow::BorrowMut;

fn main() {
    let shared_map: Rc<RefCell<_>> = Rc::new(RefCell::new(HashMap::new()));
    // Create a new block to limit the scope of the dynamic borrow
    {
        let mut map: RefMut<_> = shared_map.borrow_mut();
        map.insert("africa", 92388);
        map.insert("kyoto", 11837);
        map.insert("piccadilly", 11826);
        map.insert("marbles", 38);
    }

    // Note that if we had not let the previous borrow of the cache fall out
    // of scope then the subsequent borrow would cause a dynamic thread panic.
    // This is the major hazard of using `RefCell`.
    let total: i32 = shared_map.borrow().values().sum();
    println!("{total}");
}
      \n

      We allocate a hash map on the heap and then in the inner block, we borrow it as\na mutable reference, so that we can use it.

      \n
      note

      It is a very primitive example for Rc<RefCell<T>> and mutable borrow.

      \n

If you uncomment the 4th line with use std::borrow::BorrowMut;, the code does not compile anymore and fails with:

      \n
         Compiling playground v0.0.1 (/playground)
      error[E0308]: mismatched types
      --> src/main.rs:10:34
      |
      10 | let mut map: RefMut<_> = shared_map.borrow_mut();
      | --------- ^^^^^^^^^^^^^^^^^^^^^^^ expected struct `RefMut`, found mutable reference
      | |
      | expected due to this
      |
      = note: expected struct `RefMut<'_, _>`
      found mutable reference `&mut Rc<RefCell<HashMap<_, _>>>`

      error[E0599]: no method named `insert` found for struct `RefMut<'_, _>` in the current scope
      --> src/main.rs:11:13
      |
11 | map.insert("africa", 92388);
      | ^^^^^^ method not found in `RefMut<'_, _>`

      error[E0599]: no method named `insert` found for struct `RefMut<'_, _>` in the current scope
      --> src/main.rs:12:13
      |
12 | map.insert("kyoto", 11837);
      | ^^^^^^ method not found in `RefMut<'_, _>`

      error[E0599]: no method named `insert` found for struct `RefMut<'_, _>` in the current scope
      --> src/main.rs:13:13
      |
13 | map.insert("piccadilly", 11826);
      | ^^^^^^ method not found in `RefMut<'_, _>`

      error[E0599]: no method named `insert` found for struct `RefMut<'_, _>` in the current scope
      --> src/main.rs:14:13
      |
14 | map.insert("marbles", 38);
      | ^^^^^^ method not found in `RefMut<'_, _>`

      Some errors have detailed explanations: E0308, E0599.
      For more information about an error, try `rustc --explain E0308`.
      error: could not compile `playground` due to 5 previous errors
      \n

      It might seem a bit ridiculous. However, I got to a point where the compiler\nsuggested use std::borrow::BorrowMut; and it resulted in breaking parts of the\ncode that worked previously. I think it may be a good idea to go over what is\nhappening here.

      \n
      .borrow_mut() on Rc<RefCell<T>>
      \n

Let's consider a variable x of type Rc<RefCell<T>>. What happens when you call .borrow_mut() on it? We can look at the Rc type, and… hang on! There is neither a .borrow_mut() method nor a BorrowMut trait implemented on it. How can we do it then?

      \n

Let's go further and we can see that RefCell<T> implements a .borrow_mut() method. OK, but how can we call it on the Rc<T>? Easily! Rc<T> implements Deref<Target = T> and therefore you can call methods on Rc<T> objects as if they were T objects. If we read up on Deref coercion, we can see the following:

      \n
      \n

      If T implements Deref<Target = U>, …:

      \n
        \n
• T implicitly implements all the (immutable) methods of the type U.
      \n
      \n

What is the requirement for the .borrow_mut() on RefCell<T>? Well, it needs only &self, so thanks to the Deref coercion we can call RefCell<T>'s .borrow_mut() on the Rc<RefCell<T>>.
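A small sketch of that in action; with no BorrowMut import anywhere, the call below resolves through Deref to RefCell::borrow_mut just fine:

use std::cell::RefCell;
use std::rc::Rc;

fn main() {
    let x = Rc::new(RefCell::new(vec![1, 2, 3]));

    // resolves through ‹Deref› to ‹RefCell::borrow_mut›, which needs only ‹&self›
    x.borrow_mut().push(4);

    assert_eq!(*x.borrow(), vec![1, 2, 3, 4]);
}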

      \n
      BorrowMut trait
      \n

I have not been able to find a lot on this trait. My guess is that it provides a method as an alternative to the syntactic sugar (&mut x) for the mutable borrow. It also comes with implementations for the following types:

      \n
      impl BorrowMut<str> for String

      impl<T> BorrowMut<T> for &mut T
      where
      T: ?Sized,

      impl<T> BorrowMut<T> for T
      where
      T: ?Sized,

      impl<T, A> BorrowMut<[T]> for Vec<T, A>
      where
      A: Allocator,

      impl<T, A> BorrowMut<T> for Box<T, A>
      where
      A: Allocator,
      T: ?Sized,

      impl<T, const N: usize> BorrowMut<[T]> for [T; N]
      \n
      Conflict
      \n

Now the question is why it broke the code… My first take was that the type Rc<RefCell<T>> has some specialized implementation of .borrow_mut() and the use overrides it with the default, which is true in a sense. However, there is no specialized implementation. Let's have a look at the trait and the type signature on the RefCell<T>:

      \n
// trait
pub trait BorrowMut<Borrowed>: Borrow<Borrowed>
where
    Borrowed: ?Sized,
{
    fn borrow_mut(&mut self) -> &mut Borrowed;
}

// ‹RefCell<T>.borrow_mut()› type signature
pub fn borrow_mut(&self) -> RefMut<'_, T>
      \n

I think we can definitely agree that RefMut<'_, T> is not the &mut RefCell<T> (i.e. the &mut Borrowed) that the trait's signature would give us.

      \n

      In my opinion, RefCell<T> implements a separate .borrow_mut() rather\nthan implementing the interface, because it cannot satisfy the type requirements\nof the trait.

      \n
      caution

I wonder how we are expected to deal with this conflict, if and when we need both the .borrow_mut() of the trait and the .borrow_mut() of the RefCell<T>.

      \n
      Fun fact

The compiler itself suggested that I add use std::borrow::BorrowMut; and, by doing so, break the code.

      So much for the almighty and helpful compiler…

      \n

      Day 21: Monkey Math

      \n
      tl;dr

      Computing an expression tree and then also finding ideal value for a node.

      \n

      Solution

      \n

Relatively simple, until you get to the 2nd part where you start to practice a lot of copy-paste. I have managed to sneak some perverted stuff in there though :) Let's go through the details.

      \n

      Default trait

      \n

For the first time, and twice in one day, I needed to have a default value for my types, enumerations in this case. Rust offers a very nice trait1 that is described as:

      \n
      \n

      A trait for giving a type a useful default value.

      \n
      \n

I guess that sums it up nicely. The more interesting part is the fact that you can use the macro machinery to save yourself some typing. If you have an enumeration whose default variant doesn't carry any parameters, you can just do2:

      \n
#[derive(Default)]
enum Color {
    #[default]
    White,
    Gray,
    Black,
}
      \n
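With that in place, the enum can simply be asked for its default value; here is a self-contained usage sketch (repeating the enum, with Debug and PartialEq derived only so the assertions compile):

#[derive(Debug, PartialEq, Default)]
enum Color {
    #[default]
    White,
    Gray,
    Black,
}

fn main() {
    // both forms give back the ‹#[default]› variant
    let implicit: Color = Default::default();
    assert_eq!(implicit, Color::White);
    assert_eq!(Color::default(), Color::White);
}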

      Abusing negation

      \n

If you want to use the unary minus operator on your own type, you can implement the Neg trait3. I was dealing with a binary tree and needed a way to look at the other side, so I have just implemented the negation for flipping between left and right 😄

      \n
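A minimal sketch of that trick; the Side type below is a made-up stand-in for whatever the solution actually uses to address the children of a node:

use std::ops::Neg;

// a hypothetical “which child of a binary tree node” marker
#[derive(Debug, PartialEq, Clone, Copy)]
enum Side {
    Left,
    Right,
}

impl Neg for Side {
    type Output = Side;

    // unary minus flips to the other side
    fn neg(self) -> Self::Output {
        match self {
            Side::Left => Side::Right,
            Side::Right => Side::Left,
        }
    }
}

fn main() {
    let side = Side::Left;
    assert_eq!(-side, Side::Right);
}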

      Footnotes

1. Default docs
2. Pardon my example from the graph algorithms ;)
3. Neg docs
      ", "url": "https://blog.mfocko.xyz/blog/aoc-2022/3rd-week", "title": "3rd week of Advent of Code '22 in Rust", "summary": "Surviving third week in Rust.", @@ -59,7 +59,7 @@ }, { "id": "https://blog.mfocko.xyz/blog/leetcode/sort-diagonally", - "content_html": "

      Let's try to solve one of the LeetCode challenges in easy and hard mode at the\nsame time.

      \n\n

      Problem description

      \n

      A matrix diagonal is a diagonal line of cells starting from some cell in\neither the topmost row or leftmost column and going in the bottom-right direction\nuntil reaching the matrix's end. For example, the matrix diagonal starting\nfrom mat[2][0], where mat is a 6 x 3 matrix, includes cells mat[2][0],\nmat[3][1], and mat[4][2].

      \n

      Given an m x n matrix mat of integers, sort each matrix diagonal in ascending\norder and return the resulting matrix.

      \n

      Example

      \n

      \"Image

      \n

      Skeleton and initial adjustments

      \n

      We are given the following skeleton for the C++ and the given challenge:

      \n
class Solution {
public:
    vector<vector<int>> diagonalSort(vector<vector<int>>& mat) {

    }
};
      \n

The task is to sort the passed matrix diagonally and then return it. First of all, I don't like to solve this in a web browser, so we'll need to adjust it accordingly to run it locally. We'll start by including the vector header, using fully-qualified namespaces1 and also adding a few tests:

      \n
      #include <cassert>
      #include <vector>

      using matrix = std::vector<std::vector<int>>;

      class Solution {
      public:
      matrix diagonalSort(matrix& mat)
      {
      }
      };

      static void test_case_1()
      {
      // Input: mat = [[3,3,1,1],[2,2,1,2],[1,1,1,2]]
      // Output: [[1,1,1,1],[1,2,2,2],[1,2,3,3]]

      Solution s;
      assert((s.diagonalSort(std::vector { std::vector { 3, 3, 1, 1 },
      std::vector { 2, 2, 1, 2 },
      std::vector { 1, 1, 1, 2 } })
      == std::vector { std::vector { 1, 1, 1, 1 },
      std::vector { 1, 2, 2, 2 },
      std::vector { 1, 2, 3, 3 } }));
      }

      static void test_case_2()
      {
      // Input: mat =
      // [[11,25,66,1,69,7],[23,55,17,45,15,52],[75,31,36,44,58,8],[22,27,33,25,68,4],[84,28,14,11,5,50]]
      // Output:
      // [[5,17,4,1,52,7],[11,11,25,45,8,69],[14,23,25,44,58,15],[22,27,31,36,50,66],[84,28,75,33,55,68]]

      Solution s;
      assert((s.diagonalSort(std::vector { std::vector { 11, 25, 66, 1, 69, 7 },
      std::vector { 23, 55, 17, 45, 15, 52 },
      std::vector { 75, 31, 36, 44, 58, 8 },
      std::vector { 22, 27, 33, 25, 68, 4 },
      std::vector { 84, 28, 14, 11, 5, 50 } })
      == std::vector { std::vector { 5, 17, 4, 1, 52, 7 },
      std::vector { 11, 11, 25, 45, 8, 69 },
      std::vector { 14, 23, 25, 44, 58, 15 },
      std::vector { 22, 27, 31, 36, 50, 66 },
      std::vector { 84, 28, 75, 33, 55, 68 } }));
      }

      int main()
      {
      test_case_1();
      test_case_2();

      return 0;
      }
      \n

We need to return the matrix, but we're given a reference to the input matrix. We can easily abuse C++ here and just switch the reference to a value; this way the matrix will be copied when passed to the function, so we can sort the copy and just return it back. We also get yelled at by the compiler for the fact that the method doesn't return anything yet, so to make it “shut up” we will just return the input for now:

      \n
      -    matrix diagonalSort(matrix& mat)
      + matrix diagonalSort(matrix mat)
      {
      + return mat;
      }
      \n

      Now, we get the copy and we're good to go.

      \n

      Naïve solution

      \n

      As you may know, C++ offers a plethora of functions that can be used to your\nadvantage, given that you know how to “bend” the data structures accordingly.

      \n

      What does that mean for us? Well, we have an std::sort, we can use it, right?\nLet's have a look at it:

      \n
      template< class RandomIt >
      void sort( RandomIt first, RandomIt last );
      \n

This overload is more than enough for our needs. What does it do? It just sorts the elements in the range [first, last) using operator< on them. We can't sort the whole matrix using this, but… we can sort just »one« diagonal without doing much work on our end.

      \n

      What is the RandomIt type though? If we look more into the documentation, we\ncan easily find the requirements for it and also learn that it's a random access\niterator and allows swapping its values at the same time.

      \n
      Random access iterator

      What is the random access iterator though? We can find it in a documentation\nand see the following description:

      \n

      A LegacyRandomAccessIterator is a LegacyBidirectionalIterator\nthat can be moved to point to any element in constant time.

      \n

After that we can see all the requirements for it listed. I don't feel like reading them right now, so we will just use it and see where the compilation blows up, i.e. “compiler-assisted development”2, if you will ;)

      \n

      Now we know that we can use std::sort to sort the diagonal itself, but we also\nneed to get the diagonals somehow. I'm rather lazy, so I'll just delegate it to\nsomeone else3. And that way we get

      \n
matrix diagonalSort(matrix mat)
{
    // we iterate over the diagonals
    for (auto d : diagonals(mat)) {
        // and we sort each diagonal
        std::sort(d.begin(), d.end());
    }

    // we take the matrix by copy, so we can sort in-situ and return the copy
    // that we sorted
    return mat;
}
      \n

      This solution looks very simple, doesn't it? Well, cause it is.\nLet's try compiling it:

      \n
      matrix-sort.cpp:11:23: error: use of undeclared identifier 'diagonals' [clang-diagnostic-error]
      for (auto d : diagonals(mat)) {
      ^
      Found compiler error(s).
      make: *** [makefile:14: tidy] Error 1
      \n

      OK, seems about right. We haven't implemented the diagonals yet. And based on\nwhat we've written so far, we need a function or a class diagonals that will\ngive us the diagonals we need.

      \n

      Implementing the diagonals

      \n

      Cool, so we need the function that will let us go through each and every diagonal\nin our matrix. We use the for-range loop, so whatever we get back from the\ndiagonals must support .begin() and .end(). Since I am a masochist, we will\ndo such functionality for a matrix of any type, not just the int from the challenge.

      \n

      As I said, we need to be able to

      \n
        \n
• construct the object
• get the beginning
• get the end (the “sentinel”)
      \n
      template <typename T>
      class diagonals {
      using matrix_t = std::vector<std::vector<T>>;

      matrix_t& _matrix;

      public:
      diagonals(matrix_t& m)
      : _matrix(m)
      {
      }
      diagonals_iter begin()
      {
      /* TODO */
      }
      diagonals_iter end()
      {
      /* TODO */
      }
      };
      \n

      Now we have a diagonals that we can use to go through the diagonals. We haven't\nimplemented the core of it yet. Let's go through what we have for now.

      \n

      We have a templated class with templated T that is used as a placeholder for any\ntype we would store in the matrix. Because I'm lazy, I have defined the matrix_t\ntype that is a “shortcut” for std::vector<std::vector<T>>, so I don't have to\ntype it out all the time. Of course, we need to store the matrix, we are given,\nas a private attribute. And then just have the constructor and the 2 methods we\nneed for the for-range.

      \n

      Iterating over diagonals

      \n

      Now that we have an object that will allow us to iterate through the diagonals,\nwe need to implement the iterating itself. We need to go through all of them, so\nwe have multiple options how to do so. I have decided to start from the “main”\ndiagonal that starts at (0, 0) index and then proceed with the diagonals starting\nin the first row, followed by the rest of the diagonals in the first column.

      \n

      We need to be able to tell that we've iterated through all of them, and also we\nneed to know which diagonal is next. For that purpose we will pass the indices\nof the first cell on the diagonal. That way we can always tell how to move forward.

      \n

      We will start by updating the begin and end to reflect our choice accordingly.

      \n
      diagonals_iter begin() { return diagonals_iter { _matrix, 0, 0 }; }
      diagonals_iter end() { return diagonals_iter { _matrix, 0, _matrix.size() }; }
      \n

      For the begin we return the first diagonal that starts at (0, 0). And because\nwe have decided to do the diagonals in the first column at the end, the first\ndiagonal that is not a valid one is the one at (0, height). Apart from the\nindices, we also need to pass reference to the matrix itself.

      \n
      note

      You may have noticed that we also include the diagonals that have length 1,\nspecifically the ones at (0, height - 1) and (width - 1, 0). We are implementing\nan iterator that should not care about the way it's being used. Therefore, we\ndon't care about the fact they don't need to be sorted.

      \n

      Cool, let's leave the iterator itself to someone else, right?4

      \n

      Implementing the iterator over diagonals

      \n

      We can start with a simple skeleton based on the information that we pass from\nthe diagonals. Also to utilize the matrix_t and also contain implementation\ndetails hidden away, we will put this code into the diagonals class.

      \n
      class diagonals_iter {
      matrix_t& m;
      std::size_t x;
      std::size_t y;

      public:
      diagonals_iter(matrix_t& matrix, std::size_t x, std::size_t y)
      : m(matrix)
      , x(x)
      , y(y)
      {
      }
      };
      \n

      In this case we will be implementing a “simple” forward iterator, so we don't\nneed to implement a lot. Notably it will be:

      \n
        \n
• inequality operator (we need to know when we reach the end and have nothing to iterate over)
• preincrementation operator (we need to be able to move around the iterable)
• dereference operator (we need to be able to retrieve the objects we iterate over)
      \n
      class diagonals_iter {
      matrix_t& m;
      std::size_t x;
      std::size_t y;

      public:
      diagonals_iter(matrix_t& matrix, std::size_t x, std::size_t y)
      : m(matrix)
      , x(x)
      , y(y)
      {
      }

      bool operator!=(const diagonals_iter& rhs) const
      {
      // iterators are not equal if they reference different matrices, or
      // their positions differ
      return m != rhs.m || x != rhs.x || y != rhs.y;
      }

      diagonals_iter& operator++()
      {
      if (y != 0) {
      // iterating through diagonals down the first column
      y++;
      return *this;
      }

      // iterating the diagonals along the first row
      x++;
      if (x == m.front().size()) {
      // switching to diagonals in the first column
      x = 0;
      y++;
      }

      return *this;
      }

      diagonal<T> operator*() const { return diagonal { m, x, y }; }
      };
      \n

Let's go one by one. The inequality operator is rather simple: just compare the iterators' attributes field by field. If you think about it, checking inequality of two 2D vectors may be a bit inefficient, therefore we can swap the order around and check it as the last thing.

      \n
      -        return m != rhs.m || x != rhs.x || y != rhs.y;
      + return x != rhs.x || y != rhs.y || m != rhs.m;
      \n

Preincrementation is where the magic happens. If you take a closer look, you can see two branches of this operation:

      \n
        \n
1. When y != 0 (we're iterating over the diagonals in the first column): in this case, we just bump the row and we're done.
2. When y == 0 (we're iterating over the diagonals in the first row): in this case, we bump the column and check if we haven't gotten out of bounds, i.e. past the end of the first row. If we have, we continue with the second diagonal in the first column.
      \n

Dereferencing the iterator must “yield” something. In our case it will be the diagonal that we want to sort. For sorting we need just the iterators that can move around said diagonal. The simplest thing we can do is to delegate it to something else. In our case it will be a class called diagonal.

      \n

      Implementing the diagonal itself

      \n

      After implementing the iterator over diagonals, we know that all we need to describe\na diagonal is the matrix itself and the “start” of the diagonal (row and column).\nAnd we also know that the diagonal must provide some iterators for the std::sort\nfunction. We can start with the following skeleton:

      \n
      template <typename T>
      class diagonal {
      using matrix_t = std::vector<std::vector<T>>;

      matrix_t& matrix;
      std::size_t x;
      std::size_t y;

      public:
      diagonal(matrix_t& matrix, std::size_t x, std::size_t y)
      : matrix(matrix)
      , x(x)
      , y(y)
      {
      }

      diagonal_iter begin() const { return diagonal_iter { matrix, x, y }; }

      diagonal_iter end() const
      {
      auto max_x = matrix[y].size();
      auto max_y = matrix.size();

      // we need to find the distance in which we get out of bounds (either in
      // column or row)
      auto steps = std::min(max_x - x, max_y - y);

      return diagonal_iter { matrix, x + steps, y + steps };
      }
      };
      \n

Initialization is rather simple: we just “keep” the stuff we get. begin is the simplest: we just delegate.

      \n

In case of the end, it gets more complicated. We need to know where the “end” of the diagonal is. Since end should point to the first element “after” the iterable, we know that it's the first position of the iterator where either y becomes matrix.size() or x becomes matrix[y].size(). Also, we are moving along a diagonal, duh, therefore we can deduce the first “position” afterwards by the minimal number of steps needed to run out of either the column or the row, hence std::min(max_x - x, max_y - y). For example, for a diagonal starting at (x, y) = (2, 0) in a matrix with 3 rows and 6 columns we get steps = min(6 - 2, 3 - 0) = 3, so the first position past the diagonal is (5, 3). The final position is then computed simply by adding the steps to the beginning of the diagonal.

      \n

      Now we just need to finish the iterator for the diagonal itself and we're done.

      \n

      Implementing diagonal_iter

      \n

This part is the hardest of all we need to do, because std::sort requires us to implement a random access iterator. I have briefly described it above; “in a nutshell” it means that we need to implement an iterator that can move along the diagonal by any number of steps in constant time.

      \n

      Let's go through all of the functionality that our iterator needs to support to\nbe used in std::sort. We need the usual operations like:

      \n
        \n
• equality/inequality
• incrementation
• dereferencing
      \n

We will also add all the type aliases that our iterator uses, together with the category of the iterator, i.e. what interface it supports:

      \n
      class diagonal_iter {
      // we need to keep reference to the matrix itself
      matrix_t& m;

      // we need to be able to tell our current position
      std::size_t x;
      std::size_t y;

      public:
      using difference_type = std::ptrdiff_t;
      using value_type = T;
      using pointer = T*;
      using reference = T&;
      using iterator_category = std::random_access_iterator_tag;

      diagonal_iter(matrix_t& matrix,
      std::size_t x,
      std::size_t y)
      : m(matrix)
      , x(x)
      , y(y)
      {
      }

      bool operator==(const diagonal_iter& rhs) const
      {
      return x == rhs.x && y == rhs.y && m == rhs.m;
      }

      diagonal_iter& operator++()
      {
      // we are moving along the diagonal, so we increment both ‹x› and ‹y› at
      // the same time
      x++;
      y++;
      return *this;
      }

      reference operator*() const { return m[y][x]; }
      };
      \n

      This is pretty similar to the previous iterator, but now we need to implement the\nremaining requirements of the random access iterator. Let's see what those are:

      \n
        \n
• decrementation, because we need to be able to move backwards too, since the random access iterator extends the interface of the bidirectional iterator
• moving the iterator in either direction by a number of steps given as an integer
• being able to tell the distance between two iterators
• defining an ordering on the iterators
      \n

      Let's fill them in:

      \n
      class diagonal_iter {
      // we need to keep reference to the matrix itself
      matrix_t& m;

      // we need to be able to tell our current position
      std::size_t x;
      std::size_t y;

      public:
      using difference_type = std::ptrdiff_t;
      using value_type = T;
      using pointer = T*;
      using reference = T&;
      using iterator_category = std::random_access_iterator_tag;

      diagonal_iter(matrix_t& matrix,
      std::size_t x,
      std::size_t y)
      : m(matrix)
      , x(x)
      , y(y)
      {
      }

      bool operator==(const diagonal_iter& rhs) const
      {
      return x == rhs.x && y == rhs.y && m == rhs.m;
      }

      diagonal_iter& operator++()
      {
      // we are moving along the diagonal, so we increment both ‹x› and ‹y› at
      // the same time
      x++;
      y++;
      return *this;
      }

      reference operator*() const { return m[y][x]; }

      // exactly opposite to the incrementation
      diagonal_iter operator--()
      {
      x--;
      y--;
      return *this;
      }

      // moving ‹n› steps back is same as calling decrementation ‹n›-times, so we
      // can just return a new iterator and subtract ‹n› from both coordinates in
      // the matrix
      diagonal_iter operator-(difference_type n) const
      {
      return diagonal_iter { m, x - n, y - n };
      }

      // here we assume that we are given two iterators on the same diagonal
      difference_type operator-(const diagonal_iter& rhs) const
      {
      assert(m == rhs.m);
      return x - rhs.x;
      }

      // counterpart of moving ‹n› steps backwards
      diagonal_iter operator+(difference_type n) const
      {
      return diagonal_iter { m, x + n, y + n };
      }

      // we compare the coordinates, and also assume that those 2 iterators are
      // lying on the same diagonal
      bool operator<(const diagonal_iter& rhs) const
      {
      assert(m == rhs.m);
      return x < rhs.x && y < rhs.y;
      }
      };
      \n

      At this point we could probably try and compile it, right? If we do so, we will\nget yelled at by a compiler for the following reasons:

      \n
      /usr/bin/../lib/gcc/x86_64-redhat-linux/12/../../../../include/c++/12/bits/stl_algo.h:1792:11: error: object of type 'diagonal<int>::diagonal_iter' cannot be assigned because its copy assignment operator is implicitly deleted [clang-diagnostic-error]
      __last = __next;
      ^
      /usr/bin/../lib/gcc/x86_64-redhat-linux/12/../../../../include/c++/12/bits/stl_algo.h:1817:11: note: in instantiation of function template specialization 'std::__unguarded_linear_insert<diagonal<int>::diagonal_iter, __gnu_cxx::__ops::_Val_less_iter>' requested here
      std::__unguarded_linear_insert(__i,
      ^
      /usr/bin/../lib/gcc/x86_64-redhat-linux/12/../../../../include/c++/12/bits/stl_algo.h:1849:9: note: in instantiation of function template specialization 'std::__insertion_sort<diagonal<int>::diagonal_iter, __gnu_cxx::__ops::_Iter_less_iter>' requested here
      std::__insertion_sort(__first, __first + int(_S_threshold), __comp);
      ^
      /usr/bin/../lib/gcc/x86_64-redhat-linux/12/../../../../include/c++/12/bits/stl_algo.h:1940:9: note: in instantiation of function template specialization 'std::__final_insertion_sort<diagonal<int>::diagonal_iter, __gnu_cxx::__ops::_Iter_less_iter>' requested here
      std::__final_insertion_sort(__first, __last, __comp);
      ^
      /usr/bin/../lib/gcc/x86_64-redhat-linux/12/../../../../include/c++/12/bits/stl_algo.h:4820:12: note: in instantiation of function template specialization 'std::__sort<diagonal<int>::diagonal_iter, __gnu_cxx::__ops::_Iter_less_iter>' requested here
      std::__sort(__first, __last, __gnu_cxx::__ops::__iter_less_iter());
      ^
      matrix-sort.cpp:161:18: note: in instantiation of function template specialization 'std::sort<diagonal<int>::diagonal_iter>' requested here
      std::sort(d.begin(), d.end());
      ^
      matrix-sort.cpp:17:19: note: copy assignment operator of 'diagonal_iter' is implicitly deleted because field 'm' is of reference type 'diagonal<int>::matrix_t &' (aka 'vector<std::vector<int>> &')
      matrix_t& m;
      ^
      /usr/bin/../lib/gcc/x86_64-redhat-linux/12/../../../../include/c++/12/bits/stl_algo.h:1830:2: error: no matching function for call to '__unguarded_linear_insert' [clang-diagnostic-error]
      std::__unguarded_linear_insert(__i,
      ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
      /usr/bin/../lib/gcc/x86_64-redhat-linux/12/../../../../include/c++/12/bits/stl_algo.h:1850:9: note: in instantiation of function template specialization 'std::__unguarded_insertion_sort<diagonal<int>::diagonal_iter, __gnu_cxx::__ops::_Iter_less_iter>' requested here
      std::__unguarded_insertion_sort(__first + int(_S_threshold), __last,
      ^
      /usr/bin/../lib/gcc/x86_64-redhat-linux/12/../../../../include/c++/12/bits/stl_algo.h:1940:9: note: in instantiation of function template specialization 'std::__final_insertion_sort<diagonal<int>::diagonal_iter, __gnu_cxx::__ops::_Iter_less_iter>' requested here
      std::__final_insertion_sort(__first, __last, __comp);
      ^
      /usr/bin/../lib/gcc/x86_64-redhat-linux/12/../../../../include/c++/12/bits/stl_algo.h:4820:12: note: in instantiation of function template specialization 'std::__sort<diagonal<int>::diagonal_iter, __gnu_cxx::__ops::_Iter_less_iter>' requested here
      std::__sort(__first, __last, __gnu_cxx::__ops::__iter_less_iter());
      ^
      matrix-sort.cpp:161:18: note: in instantiation of function template specialization 'std::sort<diagonal<int>::diagonal_iter>' requested here
      std::sort(d.begin(), d.end());
      ^
      /usr/bin/../lib/gcc/x86_64-redhat-linux/12/../../../../include/c++/12/bits/stl_algo.h:1782:5: note: candidate template ignored: substitution failure [with _RandomAccessIterator = diagonal<int>::diagonal_iter, _Compare = __gnu_cxx::__ops::_Val_less_iter]
      __unguarded_linear_insert(_RandomAccessIterator __last,
      ^
      /usr/bin/../lib/gcc/x86_64-redhat-linux/12/../../../../include/c++/12/bits/stl_algo.h:1923:11: error: object of type 'diagonal<int>::diagonal_iter' cannot be assigned because its copy assignment operator is implicitly deleted [clang-diagnostic-error]
      __last = __cut;
      ^
      /usr/bin/../lib/gcc/x86_64-redhat-linux/12/../../../../include/c++/12/bits/stl_algo.h:1937:9: note: in instantiation of function template specialization 'std::__introsort_loop<diagonal<int>::diagonal_iter, long, __gnu_cxx::__ops::_Iter_less_iter>' requested here
      std::__introsort_loop(__first, __last,
      ^
      /usr/bin/../lib/gcc/x86_64-redhat-linux/12/../../../../include/c++/12/bits/stl_algo.h:4820:12: note: in instantiation of function template specialization 'std::__sort<diagonal<int>::diagonal_iter, __gnu_cxx::__ops::_Iter_less_iter>' requested here
      std::__sort(__first, __last, __gnu_cxx::__ops::__iter_less_iter());
      ^
      matrix-sort.cpp:161:18: note: in instantiation of function template specialization 'std::sort<diagonal<int>::diagonal_iter>' requested here
      std::sort(d.begin(), d.end());
      ^
      matrix-sort.cpp:17:19: note: copy assignment operator of 'diagonal_iter' is implicitly deleted because field 'm' is of reference type 'diagonal<int>::matrix_t &' (aka 'vector<std::vector<int>> &')
      matrix_t& m;
      ^
      \n

      That's a lot of noise, isn't it? Let's focus on the important parts:

      \n
      /usr/bin/../lib/gcc/x86_64-redhat-linux/12/../../../../include/c++/12/bits/stl_algo.h:1792:11: error: object of type 'diagonal<int>::diagonal_iter' cannot be assigned because its copy assignment operator is implicitly deleted [clang-diagnostic-error]

      matrix-sort.cpp:17:19: note: copy assignment operator of 'diagonal_iter' is implicitly deleted because field 'm' is of reference type 'diagonal<int>::matrix_t &' (aka 'vector<std::vector<int>> &')
      matrix_t& m;
      ^
      \n

      Ah! We have a reference in our iterator, and this prevents us from having a copy\nassignment operator (that is used “somewhere” in the sorting algorithm). Well…\nLet's just wrap it!

      \n
      # we need to keep a different type than reference
      - matrix_t& m;
      + std::reference_wrapper<matrix_t> m;

      # in comparison we need to get the reference out of the wrapper first
      - return x == rhs.x && y == rhs.y && m == rhs.m;
      + return x == rhs.x && y == rhs.y && m.get() == rhs.m.get();

      # same when we return a reference to the “cell” in the matrix
      - reference operator*() const { return m[y][x]; }
      + reference operator*() const { return m.get()[y][x]; }

      # and finally in the assertions that we set for the “distance” and “less than”
      - assert(m == rhs.m);
      + assert(m.get() == rhs.m.get());
      \n

      We're done now! We have written an iterator over diagonals for a 2D vector. You can have a look at the final result here.

      \n

      Footnotes

1. just because I'm used to it and don't care about your opinion ;)
2. exercise at your own risk
3. me in 5 minutes in fact, but don't make me scared
4. me in the next section…
      ", + "content_html": "

      Let's try to solve one of the LeetCode challenges in easy and hard mode at the\nsame time.

      \n\n

      Problem description

      \n

      A matrix diagonal is a diagonal line of cells starting from some cell in\neither the topmost row or leftmost column and going in the bottom-right direction\nuntil reaching the matrix's end. For example, the matrix diagonal starting\nfrom mat[2][0], where mat is a 6 x 3 matrix, includes cells mat[2][0],\nmat[3][1], and mat[4][2].

      \n

      Given an m x n matrix mat of integers, sort each matrix diagonal in ascending\norder and return the resulting matrix.

      \n

      Example

      \n

      \"Image

      \n

      Skeleton and initial adjustments

      \n

      We are given the following skeleton for the C++ and the given challenge:

      \n
      class Solution {
      public:
      vector<vector<int>> diagonalSort(vector<vector<int>>& mat) {

      }
      };
      \n

      The task is to sort the passed matrix diagonally and then return it. First of all,\nI don't like to solve this in a web browser, so we'll need to adjust it accordingly\nfor running it locally. We'll start by including the vector header and using\nfully-qualified namespaces1 and also adding few tests:

      \n
      #include <cassert>
      #include <vector>

      using matrix = std::vector<std::vector<int>>;

      class Solution {
      public:
      matrix diagonalSort(matrix& mat)
      {
      }
      };

      static void test_case_1()
      {
      // Input: mat = [[3,3,1,1],[2,2,1,2],[1,1,1,2]]
      // Output: [[1,1,1,1],[1,2,2,2],[1,2,3,3]]

      Solution s;
      assert((s.diagonalSort(std::vector { std::vector { 3, 3, 1, 1 },
      std::vector { 2, 2, 1, 2 },
      std::vector { 1, 1, 1, 2 } })
      == std::vector { std::vector { 1, 1, 1, 1 },
      std::vector { 1, 2, 2, 2 },
      std::vector { 1, 2, 3, 3 } }));
      }

      static void test_case_2()
      {
      // Input: mat =
      // [[11,25,66,1,69,7],[23,55,17,45,15,52],[75,31,36,44,58,8],[22,27,33,25,68,4],[84,28,14,11,5,50]]
      // Output:
      // [[5,17,4,1,52,7],[11,11,25,45,8,69],[14,23,25,44,58,15],[22,27,31,36,50,66],[84,28,75,33,55,68]]

      Solution s;
      assert((s.diagonalSort(std::vector { std::vector { 11, 25, 66, 1, 69, 7 },
      std::vector { 23, 55, 17, 45, 15, 52 },
      std::vector { 75, 31, 36, 44, 58, 8 },
      std::vector { 22, 27, 33, 25, 68, 4 },
      std::vector { 84, 28, 14, 11, 5, 50 } })
      == std::vector { std::vector { 5, 17, 4, 1, 52, 7 },
      std::vector { 11, 11, 25, 45, 8, 69 },
      std::vector { 14, 23, 25, 44, 58, 15 },
      std::vector { 22, 27, 31, 36, 50, 66 },
      std::vector { 84, 28, 75, 33, 55, 68 } }));
      }

      int main()
      {
      test_case_1();
      test_case_2();

      return 0;
      }
      \n

      We need to return the matrix, but we're given a reference to the input matrix. We\ncan easily abuse the C++ here and just switch the reference to value, this way\nthe matrix will be copied when passed to the function, we can sort the copy and\njust return it back. And we also get yelled by the compiler for the fact that the\nmethod doesn't return anything yet, so to make it “shut up” we will just return\nthe input for now:

      \n
      -    matrix diagonalSort(matrix& mat)
      + matrix diagonalSort(matrix mat)
      {
      + return mat;
      }
      \n

      Now, we get the copy and we're good to go.

      \n

      Naïve solution

      \n

      As you may know, C++ offers a plethora of functions that can be used to your\nadvantage, given that you know how to “bend” the data structures accordingly.

      \n

      What does that mean for us? Well, we have an std::sort, we can use it, right?\nLet's have a look at it:

      \n
      template< class RandomIt >
      void sort( RandomIt first, RandomIt last );
      \n

      This overload is more than we need. What does it do? It just sorts the elements\nin the range [first, last) using operator< on them. We can't sort the whole\nmatrix using this, but… we can sort just »one« diagonal without doing much work\non our end.

      \n

      What is the RandomIt type though? If we look more into the documentation, we\ncan easily find the requirements for it and also learn that it's a random access\niterator and allows swapping its values at the same time.

      \n
      Random access iterator

      What is the random access iterator though? We can find it in a documentation\nand see the following description:

      \n

      A LegacyRandomAccessIterator is a LegacyBidirectionalIterator\nthat can be moved to point to any element in constant time.

      \n

      After that we can see all the requirements for it being listed. I don't feel like\nreading them right now, so we will just use it and see where the compilation blows\nup, i.e. “compiler-assisted development2 if you will ;)

      \n

      Now we know that we can use std::sort to sort the diagonal itself, but we also\nneed to get the diagonals somehow. I'm rather lazy, so I'll just delegate it to\nsomeone else3. And that way we get

      \n
      matrix diagonalSort(matrix mat)
      {
      // we iterate over the diagonals
      for (auto d : diagonals(mat)) {
      // and we sort each diagonal
      std::sort(d.begin(), d.end());
      }

      // we take the matrix by copy, so we can sort in-situ and return the copy
      // that we sorted
      return mat;
      }
      \n

      This solution looks very simple, doesn't it? Well, cause it is.\nLet's try compiling it:

      \n
      matrix-sort.cpp:11:23: error: use of undeclared identifier 'diagonals' [clang-diagnostic-error]
      for (auto d : diagonals(mat)) {
      ^
      Found compiler error(s).
      make: *** [makefile:14: tidy] Error 1
      \n

      OK, seems about right. We haven't implemented the diagonals yet. And based on\nwhat we've written so far, we need a function or a class diagonals that will\ngive us the diagonals we need.

      \n

      Implementing the diagonals

      \n

      Cool, so we need the function that will let us go through each and every diagonal\nin our matrix. We use the for-range loop, so whatever we get back from the\ndiagonals must support .begin() and .end(). Since I am a masochist, we will\ndo such functionality for a matrix of any type, not just the int from the challenge.

      \n

      As I said, we need to be able to

      \n
        \n
      • construct the object
      • \n
      • get the beginning
      • \n
      • get the end (the “sentinel”)
      • \n
      \n
      template <typename T>
      class diagonals {
      using matrix_t = std::vector<std::vector<T>>;

      matrix_t& _matrix;

      public:
      diagonals(matrix_t& m)
      : _matrix(m)
      {
      }
      diagonals_iter begin()
      {
      /* TODO */
      }
      diagonals_iter end()
      {
      /* TODO */
      }
      };
      \n

      Now we have a diagonals that we can use to go through the diagonals. We haven't\nimplemented the core of it yet. Let's go through what we have for now.

      \n

      We have a templated class with templated T that is used as a placeholder for any\ntype we would store in the matrix. Because I'm lazy, I have defined the matrix_t\ntype that is a “shortcut” for std::vector<std::vector<T>>, so I don't have to\ntype it out all the time. Of course, we need to store the matrix, we are given,\nas a private attribute. And then just have the constructor and the 2 methods we\nneed for the for-range.

      \n

      Iterating over diagonals

      \n

      Now that we have an object that will allow us to iterate through the diagonals,\nwe need to implement the iterating itself. We need to go through all of them, so\nwe have multiple options how to do so. I have decided to start from the “main”\ndiagonal that starts at (0, 0) index and then proceed with the diagonals starting\nin the first row, followed by the rest of the diagonals in the first column.

      \n

      We need to be able to tell that we've iterated through all of them, and also we\nneed to know which diagonal is next. For that purpose we will pass the indices\nof the first cell on the diagonal. That way we can always tell how to move forward.

      \n

      We will start by updating the begin and end to reflect our choice accordingly.

      \n
      diagonals_iter begin() { return diagonals_iter { _matrix, 0, 0 }; }
      diagonals_iter end() { return diagonals_iter { _matrix, 0, _matrix.size() }; }
      \n

      For the begin we return the first diagonal that starts at (0, 0). And because\nwe have decided to do the diagonals in the first column at the end, the first\ndiagonal that is not a valid one is the one at (0, height). Apart from the\nindices, we also need to pass reference to the matrix itself.

      \n
      note

      You may have noticed that we also include the diagonals that have length 1,\nspecifically the ones at (0, height - 1) and (width - 1, 0). We are implementing\nan iterator that should not care about the way it's being used. Therefore, we\ndon't care about the fact they don't need to be sorted.

      \n

      Cool, let's leave the iterator itself to someone else, right?4

      \n

      Implementing the iterator over diagonals

      \n

      We can start with a simple skeleton based on the information that we pass from\nthe diagonals. Also to utilize the matrix_t and also contain implementation\ndetails hidden away, we will put this code into the diagonals class.

      \n
      class diagonals_iter {
      matrix_t& m;
      std::size_t x;
      std::size_t y;

      public:
      diagonals_iter(matrix_t& matrix, std::size_t x, std::size_t y)
      : m(matrix)
      , x(x)
      , y(y)
      {
      }
      };
      \n

      In this case we will be implementing a “simple” forward iterator, so we don't\nneed to implement a lot. Notably it will be:

      \n
        \n
      • inequality operator (we need to know when we reach the end and have nothing to\niterate over)
      • \n
      • preincrementation operator (we need to be able to move around the iterable)
      • \n
      • dereference operator (we need to be able to retrieve the objects we iterate\nover)
      • \n
      \n
      class diagonals_iter {
      matrix_t& m;
      std::size_t x;
      std::size_t y;

      public:
      diagonals_iter(matrix_t& matrix, std::size_t x, std::size_t y)
      : m(matrix)
      , x(x)
      , y(y)
      {
      }

      bool operator!=(const diagonals_iter& rhs) const
      {
      // iterators are not equal if they reference different matrices, or
      // their positions differ
      return m != rhs.m || x != rhs.x || y != rhs.y;
      }

      diagonals_iter& operator++()
      {
      if (y != 0) {
      // iterating through diagonals down the first column
      y++;
      return *this;
      }

      // iterating the diagonals along the first row
      x++;
      if (x == m.front().size()) {
      // switching to diagonals in the first column
      x = 0;
      y++;
      }

      return *this;
      }

      diagonal<T> operator*() const { return diagonal { m, x, y }; }
      };
      \n

      Let's go one-by-one. Inequality operator is rather simple, just compare iterator's\nattributes field-by-field. If you think about it, checking inequality of two 2D\nvectors may be a bit inefficient, therefore, we can swap around and check it as\na last thing.

      \n
      -        return m != rhs.m || x != rhs.x || y != rhs.y;
      + return x != rhs.x || y != rhs.y || m != rhs.m;
      \n

      Preincrementation is where the magic happens. If you have a better look, you can\nsee two branches of this operation:

      \n
        \n
      1. When y != 0 (we're iterating over the diagonals in the first column)\nIn this case, we just bump the row and we're done.
      2. \n
      3. When y == 0 (we're iterating over the diagonals in the first row)\nIn this case, we bump the column and check if we haven't gotten out of bounds,\ni.e. the end of the first row. If we get out of the bounds, we're continuing\nwith the second diagonal in the first column.
      4. \n
      \n

      Dereferencing the iterator must “yield” something. In our case it will be the\ndiagonal that we want to sort. For sorting we need just the iterators that can\nmove around said diagonal. The simplest thing, we can do, is to delegate it to\nsomething else. In our case it will be a class called diagonal.

      \n

      Implementing the diagonal itself

      \n

      After implementing the iterator over diagonals, we know that all we need to describe\na diagonal is the matrix itself and the “start” of the diagonal (row and column).\nAnd we also know that the diagonal must provide some iterators for the std::sort\nfunction. We can start with the following skeleton:

      \n
      template <typename T>
      class diagonal {
      using matrix_t = std::vector<std::vector<T>>;

      matrix_t& matrix;
      std::size_t x;
      std::size_t y;

      public:
      diagonal(matrix_t& matrix, std::size_t x, std::size_t y)
      : matrix(matrix)
      , x(x)
      , y(y)
      {
      }

      diagonal_iter begin() const { return diagonal_iter { matrix, x, y }; }

      diagonal_iter end() const
      {
      auto max_x = matrix[y].size();
      auto max_y = matrix.size();

      // we need to find the distance in which we get out of bounds (either in
      // column or row)
      auto steps = std::min(max_x - x, max_y - y);

      return diagonal_iter { matrix, x + steps, y + steps };
      }
      };
      \n

Initialization is rather simple, we just “keep” what we are given. begin is the simplest: we just delegate to the diagonal_iter constructor.

      \n

In case of end, it gets more complicated. We need to know where the “end” of the diagonal is. Since end should point to the first element “after” the iterable, we know that it's the first position of the iterator where either y becomes matrix.size() or x becomes matrix[y].size(). We are also moving along a diagonal, so we can deduce the first position “after” it as the minimal number of steps after which we run out of either the row or the column, hence std::min(max_x - x, max_y - y). For example, in a matrix with 3 rows and 4 columns, the diagonal starting at x = 2, y = 0 gives steps = min(4 - 2, 3 - 0) = 2, so its end sits at x = 4, y = 2. The final position is then computed simply by adding the steps to the beginning of the diagonal.

      \n

      Now we just need to finish the iterator for the diagonal itself and we're done.

      \n

      Implementing diagonal_iter

      \n

This part is the hardest of all we need to do. It's because std::sort requires us to implement a random access iterator. I have briefly described it above; “in a nutshell” it means that we need to implement an iterator that can move along the diagonal by any number of steps in constant time.

      \n

      Let's go through all of the functionality that our iterator needs to support to\nbe used in std::sort. We need the usual operations like:

      \n
        \n
      • equality/inequality
      • \n
      • incrementation
      • \n
      • dereferencing
      • \n
      \n

We will also add all the types that our iterator uses, together with the category of the iterator, i.e. what interface it supports:

      \n
      class diagonal_iter {
      // we need to keep reference to the matrix itself
      matrix_t& m;

      // we need to be able to tell our current position
      std::size_t x;
      std::size_t y;

      public:
      using difference_type = std::ptrdiff_t;
      using value_type = T;
      using pointer = T*;
      using reference = T&;
      using iterator_category = std::random_access_iterator_tag;

      diagonal_iter(matrix_t& matrix,
      std::size_t x,
      std::size_t y)
      : m(matrix)
      , x(x)
      , y(y)
      {
      }

      bool operator==(const diagonal_iter& rhs) const
      {
      return x == rhs.x && y == rhs.y && m == rhs.m;
      }

      diagonal_iter& operator++()
      {
      // we are moving along the diagonal, so we increment both ‹x› and ‹y› at
      // the same time
      x++;
      y++;
      return *this;
      }

      reference operator*() const { return m[y][x]; }
      };
      \n

      This is pretty similar to the previous iterator, but now we need to implement the\nremaining requirements of the random access iterator. Let's see what those are:

      \n
        \n
• decrementation - cause we need to be able to move backwards too, since random access iterator extends the interface of bidirectional iterator
• moving the iterator in either direction by steps given as an integer
• being able to tell the distance between two iterators
• define an ordering on the iterators
      \n

      Let's fill them in:

      \n
      class diagonal_iter {
      // we need to keep reference to the matrix itself
      matrix_t& m;

      // we need to be able to tell our current position
      std::size_t x;
      std::size_t y;

      public:
      using difference_type = std::ptrdiff_t;
      using value_type = T;
      using pointer = T*;
      using reference = T&;
      using iterator_category = std::random_access_iterator_tag;

      diagonal_iter(matrix_t& matrix,
      std::size_t x,
      std::size_t y)
      : m(matrix)
      , x(x)
      , y(y)
      {
      }

      bool operator==(const diagonal_iter& rhs) const
      {
      return x == rhs.x && y == rhs.y && m == rhs.m;
      }

      diagonal_iter& operator++()
      {
      // we are moving along the diagonal, so we increment both ‹x› and ‹y› at
      // the same time
      x++;
      y++;
      return *this;
      }

      reference operator*() const { return m[y][x]; }

      // exactly opposite to the incrementation
      diagonal_iter operator--()
      {
      x--;
      y--;
      return *this;
      }

      // moving ‹n› steps back is same as calling decrementation ‹n›-times, so we
      // can just return a new iterator and subtract ‹n› from both coordinates in
      // the matrix
      diagonal_iter operator-(difference_type n) const
      {
      return diagonal_iter { m, x - n, y - n };
      }

      // here we assume that we are given two iterators on the same diagonal
      difference_type operator-(const diagonal_iter& rhs) const
      {
      assert(m == rhs.m);
      return x - rhs.x;
      }

      // counterpart of moving ‹n› steps backwards
      diagonal_iter operator+(difference_type n) const
      {
      return diagonal_iter { m, x + n, y + n };
      }

      // we compare the coordinates, and also assume that those 2 iterators are
      // lying on the same diagonal
      bool operator<(const diagonal_iter& rhs) const
      {
      assert(m == rhs.m);
      return x < rhs.x && y < rhs.y;
      }
      };
      \n

At this point we could probably try to compile it, right? If we do so, the compiler will yell at us for the following reasons:

      \n
      /usr/bin/../lib/gcc/x86_64-redhat-linux/12/../../../../include/c++/12/bits/stl_algo.h:1792:11: error: object of type 'diagonal<int>::diagonal_iter' cannot be assigned because its copy assignment operator is implicitly deleted [clang-diagnostic-error]
      __last = __next;
      ^
      /usr/bin/../lib/gcc/x86_64-redhat-linux/12/../../../../include/c++/12/bits/stl_algo.h:1817:11: note: in instantiation of function template specialization 'std::__unguarded_linear_insert<diagonal<int>::diagonal_iter, __gnu_cxx::__ops::_Val_less_iter>' requested here
      std::__unguarded_linear_insert(__i,
      ^
      /usr/bin/../lib/gcc/x86_64-redhat-linux/12/../../../../include/c++/12/bits/stl_algo.h:1849:9: note: in instantiation of function template specialization 'std::__insertion_sort<diagonal<int>::diagonal_iter, __gnu_cxx::__ops::_Iter_less_iter>' requested here
      std::__insertion_sort(__first, __first + int(_S_threshold), __comp);
      ^
      /usr/bin/../lib/gcc/x86_64-redhat-linux/12/../../../../include/c++/12/bits/stl_algo.h:1940:9: note: in instantiation of function template specialization 'std::__final_insertion_sort<diagonal<int>::diagonal_iter, __gnu_cxx::__ops::_Iter_less_iter>' requested here
      std::__final_insertion_sort(__first, __last, __comp);
      ^
      /usr/bin/../lib/gcc/x86_64-redhat-linux/12/../../../../include/c++/12/bits/stl_algo.h:4820:12: note: in instantiation of function template specialization 'std::__sort<diagonal<int>::diagonal_iter, __gnu_cxx::__ops::_Iter_less_iter>' requested here
      std::__sort(__first, __last, __gnu_cxx::__ops::__iter_less_iter());
      ^
      matrix-sort.cpp:161:18: note: in instantiation of function template specialization 'std::sort<diagonal<int>::diagonal_iter>' requested here
      std::sort(d.begin(), d.end());
      ^
      matrix-sort.cpp:17:19: note: copy assignment operator of 'diagonal_iter' is implicitly deleted because field 'm' is of reference type 'diagonal<int>::matrix_t &' (aka 'vector<std::vector<int>> &')
      matrix_t& m;
      ^
      /usr/bin/../lib/gcc/x86_64-redhat-linux/12/../../../../include/c++/12/bits/stl_algo.h:1830:2: error: no matching function for call to '__unguarded_linear_insert' [clang-diagnostic-error]
      std::__unguarded_linear_insert(__i,
      ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
      /usr/bin/../lib/gcc/x86_64-redhat-linux/12/../../../../include/c++/12/bits/stl_algo.h:1850:9: note: in instantiation of function template specialization 'std::__unguarded_insertion_sort<diagonal<int>::diagonal_iter, __gnu_cxx::__ops::_Iter_less_iter>' requested here
      std::__unguarded_insertion_sort(__first + int(_S_threshold), __last,
      ^
      /usr/bin/../lib/gcc/x86_64-redhat-linux/12/../../../../include/c++/12/bits/stl_algo.h:1940:9: note: in instantiation of function template specialization 'std::__final_insertion_sort<diagonal<int>::diagonal_iter, __gnu_cxx::__ops::_Iter_less_iter>' requested here
      std::__final_insertion_sort(__first, __last, __comp);
      ^
      /usr/bin/../lib/gcc/x86_64-redhat-linux/12/../../../../include/c++/12/bits/stl_algo.h:4820:12: note: in instantiation of function template specialization 'std::__sort<diagonal<int>::diagonal_iter, __gnu_cxx::__ops::_Iter_less_iter>' requested here
      std::__sort(__first, __last, __gnu_cxx::__ops::__iter_less_iter());
      ^
      matrix-sort.cpp:161:18: note: in instantiation of function template specialization 'std::sort<diagonal<int>::diagonal_iter>' requested here
      std::sort(d.begin(), d.end());
      ^
      /usr/bin/../lib/gcc/x86_64-redhat-linux/12/../../../../include/c++/12/bits/stl_algo.h:1782:5: note: candidate template ignored: substitution failure [with _RandomAccessIterator = diagonal<int>::diagonal_iter, _Compare = __gnu_cxx::__ops::_Val_less_iter]
      __unguarded_linear_insert(_RandomAccessIterator __last,
      ^
      /usr/bin/../lib/gcc/x86_64-redhat-linux/12/../../../../include/c++/12/bits/stl_algo.h:1923:11: error: object of type 'diagonal<int>::diagonal_iter' cannot be assigned because its copy assignment operator is implicitly deleted [clang-diagnostic-error]
      __last = __cut;
      ^
      /usr/bin/../lib/gcc/x86_64-redhat-linux/12/../../../../include/c++/12/bits/stl_algo.h:1937:9: note: in instantiation of function template specialization 'std::__introsort_loop<diagonal<int>::diagonal_iter, long, __gnu_cxx::__ops::_Iter_less_iter>' requested here
      std::__introsort_loop(__first, __last,
      ^
      /usr/bin/../lib/gcc/x86_64-redhat-linux/12/../../../../include/c++/12/bits/stl_algo.h:4820:12: note: in instantiation of function template specialization 'std::__sort<diagonal<int>::diagonal_iter, __gnu_cxx::__ops::_Iter_less_iter>' requested here
      std::__sort(__first, __last, __gnu_cxx::__ops::__iter_less_iter());
      ^
      matrix-sort.cpp:161:18: note: in instantiation of function template specialization 'std::sort<diagonal<int>::diagonal_iter>' requested here
      std::sort(d.begin(), d.end());
      ^
      matrix-sort.cpp:17:19: note: copy assignment operator of 'diagonal_iter' is implicitly deleted because field 'm' is of reference type 'diagonal<int>::matrix_t &' (aka 'vector<std::vector<int>> &')
      matrix_t& m;
      ^
      \n

      That's a lot of noise, isn't it? Let's focus on the important parts:

      \n
      /usr/bin/../lib/gcc/x86_64-redhat-linux/12/../../../../include/c++/12/bits/stl_algo.h:1792:11: error: object of type 'diagonal<int>::diagonal_iter' cannot be assigned because its copy assignment operator is implicitly deleted [clang-diagnostic-error]

      matrix-sort.cpp:17:19: note: copy assignment operator of 'diagonal_iter' is implicitly deleted because field 'm' is of reference type 'diagonal<int>::matrix_t &' (aka 'vector<std::vector<int>> &')
      matrix_t& m;
      ^
      \n

      Ah! We have a reference in our iterator, and this prevents us from having a copy\nassignment operator (that is used “somewhere” in the sorting algorithm). Well…\nLet's just wrap it!

      \n
      # we need to keep a different type than reference
      - matrix_t& m;
      + std::reference_wrapper<matrix_t> m;

      # in comparison we need to get the reference out of the wrapper first
      - return x == rhs.x && y == rhs.y && m == rhs.m;
      + return x == rhs.x && y == rhs.y && m.get() == rhs.m.get();

      # same when we return a reference to the “cell” in the matrix
      - reference operator*() const { return m[y][x]; }
      + reference operator*() const { return m.get()[y][x]; }

      # and finally in the assertions that we set for the “distance” and “less than”
      - assert(m == rhs.m);
      + assert(m.get() == rhs.m.get());
      \n

      We're done now! We have written an iterator over diagonals for a 2D vector. You can have a look at the final result here.

      \n

      Footnotes

      \n
        \n
1. just because I'm used to it and don't care about your opinion ;)
2. exercise at your own risk
3. me in 5 minutes in fact, but don't make me scared
4. me in the next section…
      \n
      ", "url": "https://blog.mfocko.xyz/blog/leetcode/sort-diagonally", "title": "Sort the matrix diagonally", "summary": "Compiler assisted development.", @@ -76,7 +76,7 @@ }, { "id": "https://blog.mfocko.xyz/blog/aoc-2022/2nd-week", - "content_html": "

      Let's go through the second week of Advent of Code in Rust.

      \n

      Day 8: Treetop Tree House

      \n
      tl;dr

      We get a forest and we want to know how many trees are visible from the outside.\nApart from that we want to find the best view.

      \n

Nothing interesting. We are moving around a 2D map though. And indexing can get a bit painful when doing so, so let's refactor it a bit ;) During the preparation for the AoC, I have written Vector2D and now it's time to extend it with indexing of Vec of Vecs. In my solution I was manipulating the indices in the following ways:

      \n
        \n
      • swapping them
      • \n
      • checking whether they are correct indices for the Vec<Vec<T>>
      • \n
      • indexing Vec<Vec<T>> with them
      • \n
      \n
      caution

      I'm getting familiar with Rust and starting to “abuse” it… While doing so, I'm\nalso uncovering some “features” that I don't really like. Therefore I will mark\nall of my rants with thicc «↯» mark and will try to “lock” them into their\nown “box of hell”.

      \n

      Swapping indices

      \n

Relatively simple implementation: just take the values, swap them and return a new vector.

      \n
      impl<T: Copy> Vector2D<T> {
      pub fn swap(&self) -> Self {
      Self {
      x: self.y,
      y: self.x,
      }
      }
      }
      \n

Pretty straight-forward implementation, but let's talk about the T: Copy. We need it since we are returning a new vector with the swapped values. If we had values that cannot be copied, the only thing we could do would be a vector of references (which would also introduce a lifetime, to which we'll get later on). This is pretty similar to the operations on sets from the first week.

      \n
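To make this a bit more tangible, here's a minimal, self-contained sketch of such a type with the swap (the real Vector2D from my preparations has more to it, so take this as an illustration only):

#[derive(Clone, Copy, Debug, PartialEq, Eq)]
struct Vector2D<T> {
    x: T,
    y: T,
}

impl<T: Copy> Vector2D<T> {
    // returns a new vector with the coordinates swapped
    pub fn swap(&self) -> Self {
        Self {
            x: self.y,
            y: self.x,
        }
    }
}

fn main() {
    let idx = Vector2D { x: 1, y: 2 };
    // handy when flipping between (x, y) and (row, column) order
    println!("{:?}", idx.swap()); // Vector2D { x: 2, y: 1 }
}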

      Indexing Vec

      \n

I will start with the indexing, cause the bound-checking is a bit more… complicated than I would like it to be.

      \n
      pub fn index<'a, T, U>(v: &'a [Vec<U>], idx: &Vector2D<T>) -> &'a U
      where
      usize: TryFrom<T>,
      <usize as TryFrom<T>>::Error: Debug,
      T: Copy,
      {
      let (x, y): (usize, usize) = (idx.x.try_into().unwrap(), idx.y.try_into().unwrap());
      &v[y][x]
      }
      \n

Let's talk about this mess… The body of the function is probably the easiest part and should not be hard to understand: we just take the x and y and convert them both to the usize type that can be used later on for indexing.

      \n

      The type signature of the function is where the fun is at 😉 We are trying\nto convert unknown type to usize, so we must bound the T as a type that can\nbe converted to usize, that's how we got usize: TryFrom<T> which basically\nsays that usize must implement TryFrom<T> trait and therefore allows us to\nconvert the indices to actual usize indices. Using .unwrap() also forces us\nto bound the error that can occur when converting T into usize, that's how\nwe get <usize as TryFrom<T>>::Error: Debug which loosely means

      \n
      \n

      error during conversion of T into usize must implement Debug,\ni.e. can be printed in some way or other

      \n
      \n

      T: Copy is required by .try_into() which takes T by-value.

      \n

      And now we are left only with the first line of the definition.

      \n
      note

      Skilled Rustaceans might notice that this implementation is rather flaky and can\nbreak in multiple places at once. I'll get back to it…

      \n

      Let's split it in multiple parts:

      \n
        \n
• v: &'a [Vec<U>] represents the 2D Vec we are indexing; Vec implements the Slice trait and clippy recommends using &[T] over &Vec<T>, the exact details are unknown to me
      • \n
      • idx: &Vector2D<T> represents the indices which we use, we take them by\nreference to avoid an unnecessary copy
      • \n
      • -> &'a U means that we are returning a reference to some value of type U.\nNow the question is what does the 'a mean, we can also see it as a generic\ntype declared along T and U. And the answer is relatively simple, 'a\nrepresents a lifetime. We take the v by a reference and return a reference,\nborrow checker validates all of the borrows (or references), so we need to\nspecify that our returned value has the same lifetime as the vector we have\ntaken by a reference, i.e. returned reference must live at least as long as the\nv. This way we can “be sure” that the returned reference is valid.
      • \n
      \n
      Issues
      \n

The first issue our implementation has is the fact that we cannot get a mutable reference out of that function. This could be easily resolved by introducing a new function, e.g. index_mut, which I have actually done while writing this part:

      \n
      pub fn index_mut<'a, T, U>(v: &'a mut [Vec<U>], idx: &Vector2D<T>) -> &'a mut U
      where
      usize: TryFrom<T>,
      <usize as TryFrom<T>>::Error: Debug,
      T: Copy,
      {
      let (x, y): (usize, usize) = (idx.x.try_into().unwrap(), idx.y.try_into().unwrap());
      &mut v[y][x]
      }
      \n
      «↯» Why can't we use one function?

      When we consider a Vec<T>, we don't need to consider containers as T, Rust\nimplements indexing as traits Index<T> and IndexMut<T> that do the dirty work\nbehind syntactic sugar of container[idx].

However, implementing traits is not allowed for external types, i.e. types that you haven't defined yourself. This means that you can implement indexing over containers that you have implemented yourself, but you cannot use your own types for indexing “built-in” types.

      Another part of this rabbit hole is trait SliceIndex<T> that is of a relevance\nbecause of

      impl<T, I> Index<I> for [T]
      where
      I: SliceIndex<[T]>

      impl<T, I, A> Index<I> for Vec<T, A>
      where
      I: SliceIndex<[T]>,
      A: Allocator

      impl<T, I, const N: usize> Index<I> for [T; N]
      where
      [T]: Index<I>

      In other words, if your type implements SliceIndex<T> trait, it can be used\nfor indexing. As of now, this trait has all of its required methods experimental\nand is marked as unsafe.

      \n

Another problem is the requirement of indexing either [Vec<T>] or Vec<Vec<T>>. This requirement could be countered by removing the inner Vec<T> type and constraining it with the Index trait (or IndexMut respectively) in the following way

      \n
      pub fn index<'a, C, T>(v: &'a [C], idx: &Vector2D<T>) -> &'a C::Output
      where
      usize: TryFrom<T>,
      <usize as TryFrom<T>>::Error: Debug,
      T: Copy,
      C: Index<usize>
      {
      let (x, y): (usize, usize) = (idx.x.try_into().unwrap(), idx.y.try_into().unwrap());
      &v[y][x]
      }
      \n

      Given this, we can also give a more meaningful typename for indexing type, such\nas I.

      \n

      Checking bounds

      \n

Now we can get to the boundary checks; it is very similar, but a bit more… dirty. The first approach that came up was to convert the indices in Vector2D to usize, but when you add indices up, e.g. when checking the neighbors, you can end up with negative values which, unlike in C++, cause an error (instead of an underflow that you can use to your advantage; you can easily guess how).

      \n

So how can we approach this then? Well… we will convert the bounds instead of the indices, and that leads us to:

      \n
      pub fn in_range<T, U>(v: &[Vec<U>], idx: &Vector2D<T>) -> bool
      where
      usize: TryInto<T>,
      <usize as TryInto<T>>::Error: Debug,
      T: PartialOrd + Copy,
      {
      idx.y >= 0.try_into().unwrap()
      && idx.y < v.len().try_into().unwrap()
      && idx.x >= 0.try_into().unwrap()
      && idx.x
      < v[TryInto::<usize>::try_into(idx.y).unwrap()]
      .len()
      .try_into()
      .unwrap()
      }
      \n

You can tell that it's definitely shitty code. Let's improve it now! We will get back to the original idea, but do it better. We know that we cannot convert negative values into usize, but we also know that such a conversion returns a Result<T, E> which we can use to our advantage.

      \n
      pub fn in_range<T, U>(v: &[Vec<U>], idx: &Vector2D<T>) -> bool
      where
      T: Copy,
      usize: TryFrom<T>,
      {
      usize::try_from(idx.y)
      .and_then(|y| usize::try_from(idx.x).map(|x| y < v.len() && x < v[y].len()))
      .unwrap_or(false)
      }
      \n

      Result<T, E> is a type similar to Either in Haskell and it allows us to chain\nmultiple operations on correct results or propagate the original error without\ndoing anything. Let's dissect it one-by-one.

      \n

      try_from is a method implemented in TryFrom trait, that allows you to convert\ntypes and either successfully convert them or fail (with a reasonable error). This\nmethod returns Result<T, E>.

      \n

      We call and_then on that result, let's have a look at the type signature of\nand_then, IMO it explains more than enough:

      \n
      pub fn and_then<U, F>(self, op: F) -> Result<U, E>
      where
      F: FnOnce(T) -> Result<U, E>
      \n

      OK… So it takes the result and a function and returns another result with\ndifferent value and different error. However we can see that the function, which\nrepresents an operation on a result, takes just the value, i.e. it doesn't care\nabout any previous error. To make it short:

      \n
      \n

      and_then allows us to run an operation, which can fail, on the correct result

      \n
      \n

      We parsed a y index and now we try to convert the x index with try_from\nagain, but on that result we use map rather than and_then, why would that be?

      \n
      pub fn map<U, F>(self, op: F) -> Result<U, E>
      where
      F: FnOnce(T) -> U
      \n

      Huh… map performs an operation that cannot fail. And finally we use\nunwrap_or which takes the value from result, or in case of an error returns the\ndefault that we define.

      \n

      How does this work then? If y is negative, the conversion fails and the error\npropagates all the way to unwrap_or, if y can be a correct usize value, then\nwe do the same with x. If x is negative, we propagate the error as with y,\nand if it's not, then we check whether it exceeds the higher bounds or not.

      \n
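As a quick sanity check, here is the same function in a self-contained form with a few illustrative calls (Vector2D is stripped down to the bare minimum and the calls in main are made up for the demonstration):

use std::convert::TryFrom;

struct Vector2D<T> {
    x: T,
    y: T,
}

pub fn in_range<T, U>(v: &[Vec<U>], idx: &Vector2D<T>) -> bool
where
    T: Copy,
    usize: TryFrom<T>,
{
    usize::try_from(idx.y)
        .and_then(|y| usize::try_from(idx.x).map(|x| y < v.len() && x < v[y].len()))
        .unwrap_or(false)
}

fn main() {
    let grid = vec![vec![1, 2], vec![3, 4]];

    // inside the grid
    assert!(in_range(&grid, &Vector2D { x: 1_i64, y: 0 }));
    // negative index → conversion to usize fails → false
    assert!(!in_range(&grid, &Vector2D { x: -1_i64, y: 0 }));
    // too big → fails the bounds comparison → false
    assert!(!in_range(&grid, &Vector2D { x: 0_i64, y: 2 }));
}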

      Solution

      \n

Relatively simple, you just need to follow the rules and not get too smart, otherwise it will come back to bite you.

      \n

      Day 9: Rope Bridge

      \n
      tl;dr

      We get a rope with knots and we want to track how many different positions are\nvisited with the rope's tail.

      \n

By this day, I have come to the conclusion that the current skeleton for each day generates a lot of boilerplate. And even though it can be easily copied, it's just a waste of space and unnecessary code. Let's “simplify” this (on one end, while creating a monster on the other). I've gone through what we need in the preparations for the AoC. Let's sum up our requirements:

      \n
        \n
      • parsing
      • \n
      • part 1 & 2
      • \n
      • running on sample / input
      • \n
      • tests
      • \n
      \n

      Parsing and implementation of both parts is code that changes each day and we\ncannot do anything about it. However running and testing can be simplified!

      \n

      Let's introduce and export a new module solution that will take care of all of\nthis. We will start by introducing a trait for each day.

      \n
      pub trait Solution<Input, Output: Display> {
      fn parse_input<P: AsRef<Path>>(pathname: P) -> Input;

      fn part_1(input: &Input) -> Output;
      fn part_2(input: &Input) -> Output;
      }
      \n

      This does a lot of work for us already, we have defined a trait and for each day\nwe will create a structure representing a specific day. That structure will also\nimplement the Solution trait.

      \n

      Now we need to get rid of the boilerplate, we can't get rid of the main function,\nbut we can at least move out the functionality.

      \n
      fn run(type_of_input: &str) -> Result<()>
      where
      Self: Sized,
      {
      tracing_subscriber::fmt()
      .with_env_filter(EnvFilter::from_default_env())
      .with_target(false)
      .with_file(true)
      .with_line_number(true)
      .without_time()
      .compact()
      .init();
      color_eyre::install()?;

      let input = Self::parse_input(format!(\"{}s/{}.txt\", type_of_input, Self::day()));

      info!(\"Part 1: {}\", Self::part_1(&input));
      info!(\"Part 2: {}\", Self::part_2(&input));

      Ok(())
      }

      fn main() -> Result<()>
      where
      Self: Sized,
      {
      Self::run(\"input\")
      }
      \n

      This is all part of the Solution trait, which can implement methods while being\ndependent on what is provided by the implementing types. In this case, we just\nneed to bound the Output type to implement Display that is necessary for the\ninfo! and format string there.

      \n
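If you haven't met traits with provided methods before, here is a stripped-down sketch of the same idea (simplified signatures, parsing from a &str instead of a file, so this is not the actual trait from my repository):

use std::fmt::Display;

trait Solution<Input, Output: Display> {
    // required methods, different for each day
    fn parse_input(raw: &str) -> Input;
    fn part_1(input: &Input) -> Output;
    fn part_2(input: &Input) -> Output;

    // provided method, built only on top of the required ones
    fn run(raw: &str) {
        let input = Self::parse_input(raw);
        println!("Part 1: {}", Self::part_1(&input));
        println!("Part 2: {}", Self::part_2(&input));
    }
}

struct Day01;
impl Solution<Vec<i32>, i32> for Day01 {
    fn parse_input(raw: &str) -> Vec<i32> {
        raw.lines().map(|line| line.parse().unwrap()).collect()
    }

    fn part_1(input: &Vec<i32>) -> i32 {
        input.iter().sum()
    }

    fn part_2(input: &Vec<i32>) -> i32 {
        input.iter().product()
    }
}

fn main() {
    // Day01 gets run() for free
    Day01::run("1\n2\n3");
}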

Now we can get to the first of the nasty things we are going to do… And it is the day() method that you can see being used when constructing the path to the input file. That method will generate the stem of the file name, e.g. day01, and we know that we can somehow deduce it from the structure name, given we name it reasonably.

      \n
      fn day() -> String {
      let mut day = String::from(type_name::<Self>().split(\"::\").next().unwrap());
      day.make_ascii_lowercase();

      day.to_string()
      }
      \n
      type_name

This feature is still experimental and considered to be internal; it is not advised to use it in any production code.

      \n
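To make the trick more tangible, here's a tiny standalone sketch of the same idea. Beware that it's not the exact code above: it takes the last segment of the type name (the struct) instead of the first one (the binary's name), since outside of my repository the binary isn't guaranteed to be called dayXX, and the exact output format of type_name isn't guaranteed either.

use std::any::type_name;

struct Day01;

// hypothetical variant of day(): derive the file stem from the struct's name
fn day_of<T>() -> String {
    // type_name::<Day01>() yields something like "my_crate::Day01"
    let mut day = String::from(type_name::<T>().split("::").last().unwrap());
    day.make_ascii_lowercase();
    day
}

fn main() {
    println!("{}", day_of::<Day01>()); // day01
    println!("samples/{}.txt", day_of::<Day01>()); // samples/day01.txt
}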

      And now we can get to the nastiest stuff 😩 We will generate the tests!

      \n

We want to be able to generate tests for the sample input in the following way:

      \n
      test_sample!(day_01, Day01, 42, 69);
      \n

      There's not much we can do, so we will write a macro to generate the tests for us.

      \n
      #[macro_export]
      macro_rules! test_sample {
      ($mod_name:ident, $day_struct:tt, $part_1:expr, $part_2:expr) => {
      #[cfg(test)]
      mod $mod_name {
      use super::*;

      #[test]
      fn test_part_1() {
      let sample =
      $day_struct::parse_input(&format!(\"samples/{}.txt\", $day_struct::day()));
      assert_eq!($day_struct::part_1(&sample), $part_1);
      }

      #[test]
      fn test_part_2() {
      let sample =
      $day_struct::parse_input(&format!(\"samples/{}.txt\", $day_struct::day()));
      assert_eq!($day_struct::part_2(&sample), $part_2);
      }
      }
      };
      }
      \n

We have used it in a similar way to macros in C/C++. One of the things we can use to our advantage is defining the “type” of the parameters for the macro. All parameters have their names prefixed with the $ sign and you can define various “forms” of your macro. Let's go through it!

      \n

      We have following parameters:

      \n
        \n
      • $mod_name which represents the name for the module with tests, it is typed\nwith ident which means that we want a valid identifier to be passed in.
      • \n
      • $day_struct represents the structure that will be used for tests, it is typed\nwith tt which represents a token tree, in our case it is a type.
      • \n
      • $part_X represents the expected output for the Xth part and is of type expr\nwhich literally means an expression.
      • \n
      \n

      Apart from that we need to use #[macro_export] to mark the macro as exported\nfor usage outside of the module. Now our skeleton looks like:

      \n
      use aoc_2022::*;

      type Input = String;
      type Output = String;

      struct DayXX;
      impl Solution<Input, Output> for DayXX {
      fn parse_input<P: AsRef<Path>>(pathname: P) -> Input {
      file_to_string(pathname)
      }

      fn part_1(input: &Input) -> Output {
      todo!()
      }

      fn part_2(input: &Input) -> Output {
      todo!()
      }
      }

      fn main() -> Result<()> {
      // DayXX::run(\"sample\")
      DayXX::main()
      }

      // test_sample!(day_XX, DayXX, , );
      \n

      Solution

      \n

      Not much to talk about, it is relatively easy to simulate.

      \n

      Day 10: Cathode-Ray Tube

      \n
      tl;dr

      Emulating basic arithmetic operations on a CPU and drawing on CRT based on the\nCPU's accumulator.

      \n

On this day I discovered an issue with my design of the Solution trait. The issue is caused by the different types of Output for part 1 and part 2.

      \n

The problem is relatively simple and consists of simulating a CPU. I have approached it in the following way:

      \n
      fn evaluate_instructions(instructions: &[Instruction], mut out: Output) -> Output {
      instructions
      .iter()
      .fold(State::new(), |state, instruction| {
      state.execute(instruction, &mut out)
      });

      out
      }
      \n

      We just take the instructions, we have some state of the CPU and we execute the\ninstructions one-by-one. Perfect usage of the fold (or reduce as you may know\nit from other languages).

      \n

You can also see that we have an Output type, so the question is how we can fix that problem. And the answer is very simple and functional: Rust allows you to have an enumeration whose variants can carry other values apart from the variant itself.

      \n
      tip

      We could've seen something like this with the Result<T, E> type that can be\ndefined as

      enum Result<T, E> {
      Ok(T),
      Err(E)
      }
      What does that mean though?

      When we have an Ok value, it has the result itself, and when we get an Err\nvalue, it has the error. This also allows us to handle results in a rather\npretty way:

      match do_something(x) {
      Ok(y) => {
      println!(\"SUCCESS: {}\", y);
      },
      Err(y) => {
      eprintln!(\"ERROR: {}\", y);
      }
      }
      \n

      My solution has a following outline:

      \n
      fn execute(&self, i: &Instruction, output: &mut Output) -> State {
      // execute the instruction

      // collect results if necessary
      match output {
      Output::Part1(x) => self.execute_part_1(y, x),
      Output::Part2(x) => self.execute_part_2(y, x),
      }

      // return the obtained state
      new_state
      }
      \n

      You might think that it's a perfectly reasonable thing to do. Yes, but notice\nthat the match statement doesn't collect the changes in any way and also we\npass output by &mut, so it is shared across each iteration of the fold.

      \n

      The dirty and ingenious thing is that xs are passed by &mut too and therefore\nthey are directly modified by the helper functions. To sum it up and let it sit

      \n
      \n

      We are collecting the result into an enumeration that is shared\nacross all iterations of fold.

      \n
      \n
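Roughly, the pattern looks like this; a made-up, self-contained sketch (these are not my actual Day 10 types, and the “instructions” are just plain numbers here):

enum Output {
    Part1(i32),       // e.g. accumulated signal strength
    Part2(Vec<char>), // e.g. pixels drawn on the CRT
}

fn execute(cycle: i32, output: &mut Output) {
    // the inner values are bound as &mut, so we modify them in place
    match output {
        Output::Part1(sum) => *sum += cycle,
        Output::Part2(screen) => screen.push(if cycle % 2 == 0 { '#' } else { '.' }),
    }
}

fn main() {
    let cycles = [1, 2, 3, 4];

    let mut out = Output::Part1(0);
    for c in cycles {
        execute(c, &mut out); // out is shared across all iterations
    }
    if let Output::Part1(sum) = out {
        println!("part 1: {sum}"); // part 1: 10
    }

    let mut out = Output::Part2(Vec::new());
    for c in cycles {
        execute(c, &mut out);
    }
    if let Output::Part2(screen) = out {
        println!("part 2: {}", screen.iter().collect::<String>()); // part 2: .#.#
    }
}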

      Solution

      \n

      Similar to Day 9, but there are some technical details that can get you.

      \n

      Day 11: Monkey in the Middle

      \n
      tl;dr

      Simulation of monkeys throwing stuff around and measuring your stress levels\nwhile your stuff is being passed around.

      \n

      I think I decided to use regular expressions here for the first time, cause\nparsing the input was a pain.

      \n

      Also I didn't expect to implement Euclidean algorithm in Rust…

      \n

      Solution

      \n

Again, we're just running a simulation. Though I must admit it was very easy to make small technical mistakes that could affect the final results very late.

      \n

      Day 12: Hill Climbing Algorithm

      \n
      tl;dr

      Finding shortest path up the hill and also shortest path down to the ground while\nalso rolling down the hill…

      \n

      As I have said in the tl;dr, we are looking for the shortest path, but the start\nand goal differ for the part 1 and 2. So I have decided to refactor my solution\nto a BFS algorithm that takes necessary parameters via functions:

      \n
      fn bfs<F, G>(
      graph: &[Vec<char>], start: &Position, has_edge: F, is_target: G
      ) -> Option<usize>
      where
      F: Fn(&[Vec<char>], &Position, &Position) -> bool,
      G: Fn(&[Vec<char>], &Position) -> bool
      \n

      We pass the initial vertex from the caller and everything else is left to the BFS\nalgorithm, based on the has_edge and is_target functions.

      \n

      This was easy! And that is not very usual in Rust once you want to pass around\nfunctions. 👀

      \n
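For illustration, here is a compact, self-contained BFS of the same shape (not my actual implementation), just to show how the closures slot in:

use std::collections::{HashSet, VecDeque};

type Position = (usize, usize);

// has_edge decides whether we may step from one cell to a neighbouring one,
// is_target decides when we are done
fn bfs<F, G>(graph: &[Vec<char>], start: &Position, has_edge: F, is_target: G) -> Option<usize>
where
    F: Fn(&[Vec<char>], &Position, &Position) -> bool,
    G: Fn(&[Vec<char>], &Position) -> bool,
{
    let mut visited: HashSet<Position> = HashSet::from([*start]);
    let mut queue: VecDeque<(Position, usize)> = VecDeque::from([(*start, 0)]);

    while let Some((pos, dist)) = queue.pop_front() {
        if is_target(graph, &pos) {
            return Some(dist);
        }

        let (y, x) = pos;
        let neighbours = [
            (y.wrapping_sub(1), x),
            (y + 1, x),
            (y, x.wrapping_sub(1)),
            (y, x + 1),
        ];

        for next in neighbours {
            let (ny, nx) = next;
            if ny < graph.len()
                && nx < graph[ny].len()
                && !visited.contains(&next)
                && has_edge(graph, &pos, &next)
            {
                visited.insert(next);
                queue.push_back((next, dist + 1));
            }
        }
    }

    None
}

fn main() {
    let graph: Vec<Vec<char>> = vec!["ab".chars().collect(), "cd".chars().collect()];

    // start in the top-left corner, any step is allowed, stop on 'd'
    let steps = bfs(&graph, &(0, 0), |_, _, _| true, |g, &(y, x)| g[y][x] == 'd');
    println!("{steps:?}"); // Some(2)
}

Part 1 and part 2 then differ only in the closures (and the starting point) that you pass in.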

      Solution

      \n

Looking for the shortest path… Must be Dijkstra, right? Nope! Half of the Reddit got jebaited though. In all fairness, nothing stops you from implementing Dijkstra's algorithm for finding the shortest path, but if you know that all connected vertices are at a unit distance (d = 1) from each other, then you know that running Dijkstra is equivalent to running BFS, only with a worse time complexity, because of the priority heap instead of the queue.

      \n

      Day 13: Distress Signal

      \n
      tl;dr

      Processing packets with structured data from the distress signal.

      \n

      You can implement a lot of traits if you want to. It is imperative to implement\nordering on the packets. I had a typo, so I also proceeded to implement a Display\ntrait for debugging purposes:

      \n
      impl Display for Packet {
      fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
      match self {
      Packet::Integer(x) => write!(f, \"{x}\"),
      Packet::List(lst) => write!(f, \"[{}]\", lst.iter().map(|p| format!(\"{p}\")).join(\",\")),
      }
      }
      }
      \n
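For completeness, here's a self-contained sketch of such a recursive packet type together with the Display implementation, using only the standard library instead of itertools' join (my actual type differs in the details):

use std::fmt::{self, Display};

enum Packet {
    Integer(u32),
    List(Vec<Packet>),
}

impl Display for Packet {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            Packet::Integer(x) => write!(f, "{x}"),
            Packet::List(lst) => {
                // to_string() comes for free once Display is implemented
                let inner: Vec<String> = lst.iter().map(|p| p.to_string()).collect();
                write!(f, "[{}]", inner.join(","))
            }
        }
    }
}

fn main() {
    let packet = Packet::List(vec![
        Packet::Integer(1),
        Packet::List(vec![Packet::Integer(2), Packet::Integer(3)]),
    ]);
    println!("{packet}"); // [1,[2,3]]
}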

      Solution

      \n

      A lot of technical details… Parsing is nasty too…

      \n

      Day 14: Regolith Reservoir

      \n
      tl;dr

      Let's simulate falling sand grain-by-grain.

      \n

      Again, both parts are relatively similar with minimal changes, so it is a good\nidea to refactor it a bit. Similar approach to the BFS above. Also this is the\nfirst day where I ran into efficiency issues and had to redo my solution to speed\nit up just a bit.

      \n

      Solution

      \n

      Tedious.

      \n

      Post Mortem

      \n

      Indexing

      \n

I was asked about the indexing after publishing this post. And truly, it is a rather complicated topic, especially given the SliceIndex<I> trait. I couldn't leave it be, so I tried to implement the Index and IndexMut traits.

      \n
      note

I have also mentioned that the SliceIndex trait is unsafe, but truth be told, the only unsafe parts are the 2 methods that are named *unchecked*. Anyways, I will be implementing the Index* traits for now, rather than SliceIndex.

      \n

      It's relatively straightforward…

      \n
      impl<I, C> Index<Vector2D<I>> for [C]
      where
      I: Copy + TryInto<usize>,
      <I as TryInto<usize>>::Error: Debug,
      C: Index<usize>,
      {
      type Output = C::Output;

      fn index(&self, index: Vector2D<I>) -> &Self::Output {
      let (x, y): (usize, usize) =
      (index.x.try_into().unwrap(), index.y.try_into().unwrap());
      &self[y][x]
      }
      }

      impl<I, C> IndexMut<Vector2D<I>> for [C]
      where
      I: Copy + TryInto<usize>,
      <I as TryInto<usize>>::Error: Debug,
      C: IndexMut<usize>,
      {
      fn index_mut(&mut self, index: Vector2D<I>) -> &mut Self::Output {
      let (x, y): (usize, usize) =
      (index.x.try_into().unwrap(), index.y.try_into().unwrap());
      &mut self[y][x]
      }
      }
      \n

We can see a lot of similarities to the implementation of the index and index_mut functions. In the end, they are 1:1, just wrapped in the trait that provides syntactic sugar for container[idx].

      \n
      note

      I have also switched from using the TryFrom to TryInto trait, since it better\nmatches what we are using, the .try_into rather than usize::try_from.

      Also implementing TryFrom automatically provides you with a TryInto trait,\nsince it is relatively easy to implement. Just compare the following:

      pub trait TryFrom<T>: Sized {
      type Error;

      fn try_from(value: T) -> Result<Self, Self::Error>;
      }

      pub trait TryInto<T>: Sized {
      type Error;

      fn try_into(self) -> Result<T, Self::Error>;
      }
      \n
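It's easy to see in practice; a toy example (Even is a type made up just for this demonstration):

use std::convert::{TryFrom, TryInto};

#[derive(Debug)]
struct Even(i32);

impl TryFrom<i32> for Even {
    type Error = String;

    fn try_from(value: i32) -> Result<Self, Self::Error> {
        if value % 2 == 0 {
            Ok(Even(value))
        } else {
            Err(format!("{value} is odd"))
        }
    }
}

fn main() {
    // .try_into() is provided for free by the blanket impl of TryInto
    let even: Result<Even, _> = 4_i32.try_into();
    println!("{even:?}"); // Ok(Even(4))
    println!("{:?}", Even::try_from(3)); // Err("3 is odd")
}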

      OK, so we have our trait implemented, we should be able to use container[index],\nright? Yes… but actually no 😦

      \n
      error[E0277]: the type `[std::vec::Vec<i8>]` cannot be indexed by `aoc_2022::Vector2D<usize>`
      --> src/bin/day08.rs:26:18
      |
      26 | if trees[pos] > tallest {
      | ^^^ slice indices are of type `usize` or ranges of `usize`
      |
      = help: the trait `std::slice::SliceIndex<[std::vec::Vec<i8>]>` is not implemented for `aoc_2022::Vector2D<usize>`
      = note: required for `std::vec::Vec<std::vec::Vec<i8>>` to implement `std::ops::Index<aoc_2022::Vector2D<usize>>`

      error[E0277]: the type `[std::vec::Vec<i8>]` cannot be indexed by `aoc_2022::Vector2D<usize>`
      --> src/bin/day08.rs:30:28
      |
      30 | max(tallest, trees[pos])
      | ^^^ slice indices are of type `usize` or ranges of `usize`
      |
      = help: the trait `std::slice::SliceIndex<[std::vec::Vec<i8>]>` is not implemented for `aoc_2022::Vector2D<usize>`
      = note: required for `std::vec::Vec<std::vec::Vec<i8>>` to implement `std::ops::Index<aoc_2022::Vector2D<usize>>`

      error[E0277]: the type `[std::vec::Vec<i8>]` cannot be indexed by `aoc_2022::Vector2D<isize>`
      --> src/bin/day08.rs:52:28
      |
      52 | let max_height = trees[position];
      | ^^^^^^^^ slice indices are of type `usize` or ranges of `usize`
      |
      = help: the trait `std::slice::SliceIndex<[std::vec::Vec<i8>]>` is not implemented for `aoc_2022::Vector2D<isize>`
      = note: required for `std::vec::Vec<std::vec::Vec<i8>>` to implement `std::ops::Index<aoc_2022::Vector2D<isize>>`
      \n

Why? We have it implemented for slices ([C]), why doesn't it work? Well, the fun part is that in the other place where we were using it, we were passing a &[Vec<T>], but this call comes from helper functions that take &Vec<Vec<T>> instead. And… we don't implement Index and IndexMut for those, just for the slices. 🤯 What are we going to do about it?

      \n

We can either start copy-pasting or be smarter about it… I choose to be smarter, so let's implement a macro! The only difference across the implementations is the type of the outer container; the implementation itself doesn't differ at all!

      \n

Implementing the macro can be done in the following way:

      \n
      macro_rules! generate_indices {
      ($container:ty) => {
      impl<I, C> Index<Vector2D<I>> for $container
      where
      I: Copy + TryInto<usize>,
      <I as TryInto<usize>>::Error: Debug,
      C: Index<usize>,
      {
      type Output = C::Output;

      fn index(&self, index: Vector2D<I>) -> &Self::Output {
      let (x, y): (usize, usize) =
      (index.x.try_into().unwrap(), index.y.try_into().unwrap());
      &self[y][x]
      }
      }

      impl<I, C> IndexMut<Vector2D<I>> for $container
      where
      I: Copy + TryInto<usize>,
      <I as TryInto<usize>>::Error: Debug,
      C: IndexMut<usize>,
      {
      fn index_mut(&mut self, index: Vector2D<I>) -> &mut Self::Output {
      let (x, y): (usize, usize) =
      (index.x.try_into().unwrap(), index.y.try_into().unwrap());
      &mut self[y][x]
      }
      }
      };
      }
      \n

      And now we can simply do

      \n
      generate_indices!(VecDeque<C>);
      generate_indices!([C]);
      generate_indices!(Vec<C>);
      // generate_indices!([C; N], const N: usize);
      \n

The last type (I took the inspiration from the implementations of the Index and IndexMut traits) is a bit problematic, because of the const N: usize part, which I haven't managed to parse. And that's how I got rid of the error.

      \n
      note

If I were to use 2D-indexing over [C; N] arrays, I'd probably just go with the copy-paste, cause the cost of this “monstrosity” outweighs the benefit of staying DRY.

      \n

      Cause of the problem

      \n

This issue is relatively funny. If you don't use any type aliases, just the raw types, clippy will suggest certain changes. For example, if you consider the following piece of code

      \n
      fn get_sum(nums: &Vec<i32>) -> i32 {
      nums.iter().sum()
      }

      fn main() {
      let nums = vec![1, 2, 3];
      println!(\"Sum: {}\", get_sum(&nums));
      }
      \n

      and you run clippy on it, you will get

      \n
      Checking playground v0.0.1 (/playground)
      warning: writing `&Vec` instead of `&[_]` involves a new object where a slice will do
      --> src/main.rs:1:18
      |
      1 | fn get_sum(nums: &Vec<i32>) -> i32 {
      | ^^^^^^^^^ help: change this to: `&[i32]`
      |
      = help: for further information visit https://rust-lang.github.io/rust-clippy/master/index.html#ptr_arg
      = note: `#[warn(clippy::ptr_arg)]` on by default

      warning: `playground` (bin \"playground\") generated 1 warning
      Finished dev [unoptimized + debuginfo] target(s) in 0.61s
      \n

      However, if you introduce a type alias, such as

      \n
      type Numbers = Vec<i32>;
      \n

      Then clippy won't say anything, cause there is literally nothing to suggest.\nHowever the outcome is not the same…
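To make the difference concrete, here is a small sketch; get_sum_slice is a made-up counterpart added just for the comparison:

type Numbers = Vec<i32>;

// clippy stays silent here, but the function still demands a Vec under the hood
fn get_sum(nums: &Numbers) -> i32 {
    nums.iter().sum()
}

// the slice version that clippy originally suggested
fn get_sum_slice(nums: &[i32]) -> i32 {
    nums.iter().sum()
}

fn main() {
    let nums = vec![1, 2, 3];
    println!("Sum: {}", get_sum(&nums));
    println!("Sum: {}", get_sum_slice(&nums));

    // the slice version also accepts arrays and sub-slices…
    let array = [1, 2, 3];
    println!("Sum: {}", get_sum_slice(&array));
    println!("Sum: {}", get_sum_slice(&nums[1..]));
    // …while the alias-based one does not:
    // println!("Sum: {}", get_sum(&array)); // error: mismatched types, expected &Vec<i32>
}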

      ", + "content_html": "

      Let's go through the second week of Advent of Code in Rust.

      \n

      Day 8: Treetop Tree House

      \n
      tl;dr

      We get a forest and we want to know how many trees are visible from the outside.\nApart from that we want to find the best view.

      \n

      Nothing interesting. We are moving around 2D map though. And indexing can get a\nbit painful when doing so, let's refactor it a bit ;) During the preparation for\nthe AoC, I have written Vector2D and now it's time to extend it with indexing\nof Vec of Vecs. In my solution I was manipulating with indices in the following\nway:

      \n
        \n
      • swapping them
      • \n
      • checking whether they are correct indices for the Vec<Vec<T>>
      • \n
      • indexing Vec<Vec<T>> with them
      • \n
      \n
      caution

      I'm getting familiar with Rust and starting to “abuse” it… While doing so, I'm\nalso uncovering some “features” that I don't really like. Therefore I will mark\nall of my rants with thicc «↯» mark and will try to “lock” them into their\nown “box of hell”.

      \n

      Swapping indices

      \n

      Relatively simple implementation, just take the values, swap them and return new\nvector.

      \n
      impl<T: Copy> Vector2D<T> {
      pub fn swap(&self) -> Self {
      Self {
      x: self.y,
      y: self.x,
      }
      }
      }
      \n

      Pretty straight-forward implementation, but let's talk about the T: Copy. We\nneed to use it, since we are returning a new vector, with swapped values.\nIf we had values that cannot be copied, the only thing we could do, would be a\nvector of references (and it would also introduce a lifetime, to which we'll get\nlater on). This is pretty similar with the operations on sets from the first week.

      \n

      Indexing Vec

      \n

      I will start with the indexing, cause bound-checking is a bit more… complicated\nthan I would like to.

      \n
      pub fn index<'a, T, U>(v: &'a [Vec<U>], idx: &Vector2D<T>) -> &'a U
      where
      usize: TryFrom<T>,
      <usize as TryFrom<T>>::Error: Debug,
      T: Copy,
      {
      let (x, y): (usize, usize) = (idx.x.try_into().unwrap(), idx.y.try_into().unwrap());
      &v[y][x]
      }
      \n

      Let's talk about this mess… Body of the function is probably the most easy part\nand should not be hard to understand, we just take the x and y and convert\nthem both to usize type that can be used later on for indexing.

      \n

      The type signature of the function is where the fun is at 😉 We are trying\nto convert unknown type to usize, so we must bound the T as a type that can\nbe converted to usize, that's how we got usize: TryFrom<T> which basically\nsays that usize must implement TryFrom<T> trait and therefore allows us to\nconvert the indices to actual usize indices. Using .unwrap() also forces us\nto bound the error that can occur when converting T into usize, that's how\nwe get <usize as TryFrom<T>>::Error: Debug which loosely means

      \n
      \n

      error during conversion of T into usize must implement Debug,\ni.e. can be printed in some way or other

      \n
      \n

      T: Copy is required by .try_into() which takes T by-value.

      \n

      And now we are left only with the first line of the definition.

      \n
      note

      Skilled Rustaceans might notice that this implementation is rather flaky and can\nbreak in multiple places at once. I'll get back to it…

      \n

      Let's split it in multiple parts:

      \n
        \n
      • v: &'a [Vec<U>] represents the 2D Vec, we are indexing, Vec implements\nSlice trait and clippy recommends using &[T] to &Vec<T>, exact details\nare unknown to me
      • \n
      • idx: &Vector2D<T> represents the indices which we use, we take them by\nreference to avoid an unnecessary copy
      • \n
      • -> &'a U means that we are returning a reference to some value of type U.\nNow the question is what does the 'a mean, we can also see it as a generic\ntype declared along T and U. And the answer is relatively simple, 'a\nrepresents a lifetime. We take the v by a reference and return a reference,\nborrow checker validates all of the borrows (or references), so we need to\nspecify that our returned value has the same lifetime as the vector we have\ntaken by a reference, i.e. returned reference must live at least as long as the\nv. This way we can “be sure” that the returned reference is valid.
      • \n
      \n
      Issues
      \n

      First issue that our implementation has is the fact that we cannot get a mutable\nreference out of that function. This could be easily resolved by introducing new\nfunction, e.g. index_mut. Which I have actually done while writing this part:

      \n
      pub fn index_mut<'a, T, U>(v: &'a mut [Vec<U>], idx: &Vector2D<T>) -> &'a mut U
      where
      usize: TryFrom<T>,
      <usize as TryFrom<T>>::Error: Debug,
      T: Copy,
      {
      let (x, y): (usize, usize) = (idx.x.try_into().unwrap(), idx.y.try_into().unwrap());
      &mut v[y][x]
      }
      \n
      «↯» Why can't we use one function?

      When we consider a Vec<T>, we don't need to consider containers as T, Rust\nimplements indexing as traits Index<T> and IndexMut<T> that do the dirty work\nbehind syntactic sugar of container[idx].

      However, implementing of traits is not allowed for external types, i.e. types\nthat you haven't defined yourself. This means that you can implement indexing\nover containers that you have implemented yourself, but you cannot use your own\ntypes for indexing “built-in” types.

      Another part of this rabbit hole is trait SliceIndex<T> that is of a relevance\nbecause of

      impl<T, I> Index<I> for [T]
      where
      I: SliceIndex<[T]>

      impl<T, I, A> Index<I> for Vec<T, A>
      where
      I: SliceIndex<[T]>,
      A: Allocator

      impl<T, I, const N: usize> Index<I> for [T; N]
      where
      [T]: Index<I>

      In other words, if your type implements SliceIndex<T> trait, it can be used\nfor indexing. As of now, this trait has all of its required methods experimental\nand is marked as unsafe.

      \n

      Another problem is a requirement for indexing either [Vec<T>] or Vec<Vec<T>>.\nThis requirement could be countered by removing inner type Vec<T> and constraining\nit by a trait Index (or IndexMut respectively) in a following way

      \n
      pub fn index<'a, C, T>(v: &'a [C], idx: &Vector2D<T>) -> &'a C::Output
      where
      usize: TryFrom<T>,
      <usize as TryFrom<T>>::Error: Debug,
      T: Copy,
      C: Index<usize>
      {
      let (x, y): (usize, usize) = (idx.x.try_into().unwrap(), idx.y.try_into().unwrap());
      &v[y][x]
      }
      \n

      Given this, we can also give a more meaningful typename for indexing type, such\nas I.

      \n

      Checking bounds

      \n

      Now we can get to the boundary checks, it is very similar, but a more… dirty.\nFirst approach that came up was to convert the indices in Vector2D to usize,\nbut when you add the indices up, e.g. when checking the neighbors, you can end\nup with negative values which, unlike in C++, causes an error (instead of underflow\nthat you can use to your advantage; you can easily guess how).

      \n

      So how can we approach this then? Well… we will convert the bounds instead of\nthe indices and that lead us to:

      \n
      pub fn in_range<T, U>(v: &[Vec<U>], idx: &Vector2D<T>) -> bool
      where
      usize: TryInto<T>,
      <usize as TryInto<T>>::Error: Debug,
      T: PartialOrd + Copy,
      {
      idx.y >= 0.try_into().unwrap()
      && idx.y < v.len().try_into().unwrap()
      && idx.x >= 0.try_into().unwrap()
      && idx.x
      < v[TryInto::<usize>::try_into(idx.y).unwrap()]
      .len()
      .try_into()
      .unwrap()
      }
      \n

      You can tell that it's definitely a shitty code. Let's improve it now! We will\nget back to the original idea, but do it better. We know that we cannot convert\nnegative values into usize, but we also know that conversion like that\nreturns a Result<T, E> which we can use to our advantage.

      \n
      pub fn in_range<T, U>(v: &[Vec<U>], idx: &Vector2D<T>) -> bool
      where
      T: Copy,
      usize: TryFrom<T>,
      {
      usize::try_from(idx.y)
      .and_then(|y| usize::try_from(idx.x).map(|x| y < v.len() && x < v[y].len()))
      .unwrap_or(false)
      }
      \n

      Result<T, E> is a type similar to Either in Haskell and it allows us to chain\nmultiple operations on correct results or propagate the original error without\ndoing anything. Let's dissect it one-by-one.

      \n

      try_from is a method implemented in TryFrom trait, that allows you to convert\ntypes and either successfully convert them or fail (with a reasonable error). This\nmethod returns Result<T, E>.

      \n

      We call and_then on that result, let's have a look at the type signature of\nand_then, IMO it explains more than enough:

      \n
      pub fn and_then<U, F>(self, op: F) -> Result<U, E>
      where
      F: FnOnce(T) -> Result<U, E>
      \n

      OK… So it takes the result and a function and returns another result with\ndifferent value and different error. However we can see that the function, which\nrepresents an operation on a result, takes just the value, i.e. it doesn't care\nabout any previous error. To make it short:

      \n
      \n

      and_then allows us to run an operation, which can fail, on the correct result

      \n
      \n

      We parsed a y index and now we try to convert the x index with try_from\nagain, but on that result we use map rather than and_then, why would that be?

      \n
      pub fn map<U, F>(self, op: F) -> Result<U, E>
      where
      F: FnOnce(T) -> U
      \n

      Huh… map performs an operation that cannot fail. And finally we use\nunwrap_or which takes the value from result, or in case of an error returns the\ndefault that we define.

      \n

      How does this work then? If y is negative, the conversion fails and the error\npropagates all the way to unwrap_or, if y can be a correct usize value, then\nwe do the same with x. If x is negative, we propagate the error as with y,\nand if it's not, then we check whether it exceeds the higher bounds or not.

      \n

      Solution

      \n

      Relatively simple, you just need follow the rules and not get too smart, otherwise\nit will get back at you.

      \n

      Day 9: Rope Bridge

      \n
      tl;dr

      We get a rope with knots and we want to track how many different positions are\nvisited with the rope's tail.

      \n

      By this day, I have come to a conclusion that current skeleton for each day\ngenerates a lot of boilerplate. And even though it can be easily copied, it's\njust a waste of space and unnecessary code. Let's “simplify” this (on one end\nwhile creating monster on the other end). I've gone through what we need in the\npreparations for the AoC. Let's sum up our requirements:

      \n
        \n
      • parsing
      • \n
      • part 1 & 2
      • \n
      • running on sample / input
      • \n
      • tests
      • \n
      \n

      Parsing and implementation of both parts is code that changes each day and we\ncannot do anything about it. However running and testing can be simplified!

      \n

      Let's introduce and export a new module solution that will take care of all of\nthis. We will start by introducing a trait for each day.

      \n
      pub trait Solution<Input, Output: Display> {
      fn parse_input<P: AsRef<Path>>(pathname: P) -> Input;

      fn part_1(input: &Input) -> Output;
      fn part_2(input: &Input) -> Output;
      }
      \n

      This does a lot of work for us already, we have defined a trait and for each day\nwe will create a structure representing a specific day. That structure will also\nimplement the Solution trait.

      \n

      Now we need to get rid of the boilerplate, we can't get rid of the main function,\nbut we can at least move out the functionality.

      \n
      fn run(type_of_input: &str) -> Result<()>
      where
      Self: Sized,
      {
      tracing_subscriber::fmt()
      .with_env_filter(EnvFilter::from_default_env())
      .with_target(false)
      .with_file(true)
      .with_line_number(true)
      .without_time()
      .compact()
      .init();
      color_eyre::install()?;

      let input = Self::parse_input(format!(\"{}s/{}.txt\", type_of_input, Self::day()));

      info!(\"Part 1: {}\", Self::part_1(&input));
      info!(\"Part 2: {}\", Self::part_2(&input));

      Ok(())
      }

      fn main() -> Result<()>
      where
      Self: Sized,
      {
      Self::run(\"input\")
      }
      \n

      This is all part of the Solution trait, which can implement methods while being\ndependent on what is provided by the implementing types. In this case, we just\nneed to bound the Output type to implement Display that is necessary for the\ninfo! and format string there.

      \n

      Now we can get to first of the nasty things we are going to do… And it is the\nday() method that you can see being used when constructing path to the input\nfile. That method will generate a name of the file, e.g. day01 and we know that\nwe can somehow deduce it from the structure name, given we name it reasonably.

      \n
      fn day() -> String {
      let mut day = String::from(type_name::<Self>().split(\"::\").next().unwrap());
      day.make_ascii_lowercase();

      day.to_string()
      }
      \n
      type_name

      This feature is still experimental and considered to be internal, it is not\nadvised to use it any production code.

      \n

      And now we can get to the nastiest stuff 😩 We will generate the tests!

      \n

We want to be able to generate tests for the sample input in the following way:

      \n
      test_sample!(day_01, Day01, 42, 69);
      \n

      There's not much we can do, so we will write a macro to generate the tests for us.

      \n
      #[macro_export]
      macro_rules! test_sample {
      ($mod_name:ident, $day_struct:tt, $part_1:expr, $part_2:expr) => {
      #[cfg(test)]
      mod $mod_name {
      use super::*;

      #[test]
      fn test_part_1() {
      let sample =
      $day_struct::parse_input(&format!(\"samples/{}.txt\", $day_struct::day()));
      assert_eq!($day_struct::part_1(&sample), $part_1);
      }

      #[test]
      fn test_part_2() {
      let sample =
      $day_struct::parse_input(&format!(\"samples/{}.txt\", $day_struct::day()));
      assert_eq!($day_struct::part_2(&sample), $part_2);
      }
      }
      };
      }
      \n

We have used it in a similar way to macros in C/C++. One of the things that we can use to our advantage is defining the “type” of the parameters for the macro. All parameters have their names prefixed with the $ sign and you can define various “forms” of your macro. Let's go through it!

      \n

      We have following parameters:

      \n
        \n
• $mod_name represents the name for the module with the tests; it is typed as ident, which means that we want a valid identifier to be passed in.
      • \n
• $day_struct represents the structure that will be used for the tests; it is typed as tt, which represents a token tree (in our case, it is a type).
      • \n
      • $part_X represents the expected output for the Xth part and is of type expr\nwhich literally means an expression.
      • \n
      \n

      Apart from that we need to use #[macro_export] to mark the macro as exported\nfor usage outside of the module. Now our skeleton looks like:

      \n
      use aoc_2022::*;

      type Input = String;
      type Output = String;

      struct DayXX;
      impl Solution<Input, Output> for DayXX {
      fn parse_input<P: AsRef<Path>>(pathname: P) -> Input {
      file_to_string(pathname)
      }

      fn part_1(input: &Input) -> Output {
      todo!()
      }

      fn part_2(input: &Input) -> Output {
      todo!()
      }
      }

      fn main() -> Result<()> {
      // DayXX::run(\"sample\")
      DayXX::main()
      }

      // test_sample!(day_XX, DayXX, , );
      \n

      Solution

      \n

      Not much to talk about, it is relatively easy to simulate.

      \n

      Day 10: Cathode-Ray Tube

      \n
      tl;dr

      Emulating basic arithmetic operations on a CPU and drawing on CRT based on the\nCPU's accumulator.

      \n

On this day I have discovered an issue with my design of the Solution trait. And the issue is caused by the different types of Output for part 1 and part 2.

      \n

The problem is relatively simple and consists of simulating a CPU; I have approached it in the following way:

      \n
      fn evaluate_instructions(instructions: &[Instruction], mut out: Output) -> Output {
      instructions
      .iter()
      .fold(State::new(), |state, instruction| {
      state.execute(instruction, &mut out)
      });

      out
      }
      \n

      We just take the instructions, we have some state of the CPU and we execute the\ninstructions one-by-one. Perfect usage of the fold (or reduce as you may know\nit from other languages).

      \n

You can also see that we have an Output type, so the question is how we can fix that problem. And the answer is very simple and functional: Rust allows you to have an enumeration whose variants can carry other values on top of the variant itself.

      \n
      tip

      We could've seen something like this with the Result<T, E> type that can be\ndefined as

      enum Result<T, E> {
      Ok(T),
      Err(E)
      }
      What does that mean though?

      When we have an Ok value, it has the result itself, and when we get an Err\nvalue, it has the error. This also allows us to handle results in a rather\npretty way:

      match do_something(x) {
      Ok(y) => {
      println!(\"SUCCESS: {}\", y);
      },
      Err(y) => {
      eprintln!(\"ERROR: {}\", y);
      }
      }
      \n
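
In our case, the shared Output can be exactly such an enumeration; the payload types below are only a sketch of its shape, not necessarily the exact ones:

\n
enum Output {
    // Part 1 accumulates a number (the sum of the signal strengths).
    Part1(i32),
    // Part 2 builds up the rendered CRT screen.
    Part2(String),
}
\n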

My solution has the following outline:

      \n
      fn execute(&self, i: &Instruction, output: &mut Output) -> State {
      // execute the instruction

      // collect results if necessary
      match output {
      Output::Part1(x) => self.execute_part_1(y, x),
      Output::Part2(x) => self.execute_part_2(y, x),
      }

      // return the obtained state
      new_state
      }
      \n

You might think that it's a perfectly reasonable thing to do. Yes, but notice that the match statement doesn't collect the results in any way, and also that we pass output by &mut, so it is shared across each iteration of the fold.

      \n

The dirty and ingenious thing is that the xs are passed by &mut too, and therefore they are directly modified by the helper functions. To sum it up and let it sit:

      \n
      \n

      We are collecting the result into an enumeration that is shared\nacross all iterations of fold.

      \n
      \n

      Solution

      \n

      Similar to Day 9, but there are some technical details that can get you.

      \n

      Day 11: Monkey in the Middle

      \n
      tl;dr

      Simulation of monkeys throwing stuff around and measuring your stress levels\nwhile your stuff is being passed around.

      \n

      I think I decided to use regular expressions here for the first time, cause\nparsing the input was a pain.

      \n

Also I didn't expect to implement the Euclidean algorithm in Rust…
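
For reference, the Euclidean algorithm itself fits in a few lines; gcd and lcm here are just illustrative helpers, not necessarily the exact code from my solution:

\n
fn gcd(a: u64, b: u64) -> u64 {
    if b == 0 {
        a
    } else {
        gcd(b, a % b)
    }
}

fn lcm(a: u64, b: u64) -> u64 {
    a / gcd(a, b) * b
}
\n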

      \n

      Solution

      \n

Again, we're just running a simulation. Though I must admit it was very easy to make small technical mistakes that could affect the final results only very late.

      \n

      Day 12: Hill Climbing Algorithm

      \n
      tl;dr

      Finding shortest path up the hill and also shortest path down to the ground while\nalso rolling down the hill…

      \n

As I have said in the tl;dr, we are looking for the shortest path, but the start and the goal differ for parts 1 and 2. So I have decided to refactor my solution into a BFS algorithm that takes the necessary parameters via functions:

      \n
      fn bfs<F, G>(
      graph: &[Vec<char>], start: &Position, has_edge: F, is_target: G
      ) -> Option<usize>
      where
      F: Fn(&[Vec<char>], &Position, &Position) -> bool,
      G: Fn(&[Vec<char>], &Position) -> bool
      \n

We pass the initial vertex from the caller and everything else is left to the BFS algorithm, based on the has_edge and is_target functions.
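
The body then follows the textbook BFS; a rough sketch could look like this (it assumes Position is a copyable, hashable pair of coordinates with a hypothetical neighbours() helper, and that has_edge also rejects out-of-bounds moves):

\n
use std::collections::{HashSet, VecDeque};

fn bfs<F, G>(graph: &[Vec<char>], start: &Position, has_edge: F, is_target: G) -> Option<usize>
where
    F: Fn(&[Vec<char>], &Position, &Position) -> bool,
    G: Fn(&[Vec<char>], &Position) -> bool,
{
    let mut visited = HashSet::new();
    visited.insert(*start);

    let mut queue = VecDeque::new();
    queue.push_back((*start, 0));

    while let Some((vertex, distance)) = queue.pop_front() {
        if is_target(graph, &vertex) {
            return Some(distance);
        }

        // neighbours() is assumed to yield the four adjacent positions;
        // has_edge decides whether we are allowed to move there.
        for next in vertex.neighbours() {
            if !visited.contains(&next) && has_edge(graph, &vertex, &next) {
                visited.insert(next);
                queue.push_back((next, distance + 1));
            }
        }
    }

    None
}
\n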

      \n

      This was easy! And that is not very usual in Rust once you want to pass around\nfunctions. 👀

      \n

      Solution

      \n

Looking for the shortest path… Must be Dijkstra, right? Nope! Half of the Reddit got jebaited though. In all fairness, nothing stops you from implementing Dijkstra's algorithm for finding the shortest path, but if you know that all connected vertices are in a unit (d = 1) distance from each other, then you know that running Dijkstra is equivalent to running BFS, only with a worse time complexity, because of the priority heap instead of the queue.

      \n

      Day 13: Distress Signal

      \n
      tl;dr

      Processing packets with structured data from the distress signal.

      \n

You can implement a lot of traits if you want to. It is imperative to implement ordering on the packets. I had a typo, so I also proceeded to implement the Display trait for debugging purposes:

      \n
      impl Display for Packet {
      fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
      match self {
      Packet::Integer(x) => write!(f, \"{x}\"),
      Packet::List(lst) => write!(f, \"[{}]\", lst.iter().map(|p| format!(\"{p}\")).join(\",\")),
      }
      }
      }
      \n
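
The ordering itself then encodes the comparison rules from the puzzle; a rough sketch (assuming the Packet enum also derives or implements Clone, Eq, PartialEq and PartialOrd consistently) might be:

\n
use std::cmp::Ordering;

impl Ord for Packet {
    fn cmp(&self, other: &Self) -> Ordering {
        match (self, other) {
            // Two integers compare directly.
            (Packet::Integer(a), Packet::Integer(b)) => a.cmp(b),
            // Two lists compare lexicographically, element by element.
            (Packet::List(a), Packet::List(b)) => a.cmp(b),
            // Mixed cases: wrap the integer into a one-element list and retry.
            (Packet::Integer(_), Packet::List(_)) => Packet::List(vec![self.clone()]).cmp(other),
            (Packet::List(_), Packet::Integer(_)) => self.cmp(&Packet::List(vec![other.clone()])),
        }
    }
}
\n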

      Solution

      \n

      A lot of technical details… Parsing is nasty too…

      \n

      Day 14: Regolith Reservoir

      \n
      tl;dr

      Let's simulate falling sand grain-by-grain.

      \n

      Again, both parts are relatively similar with minimal changes, so it is a good\nidea to refactor it a bit. Similar approach to the BFS above. Also this is the\nfirst day where I ran into efficiency issues and had to redo my solution to speed\nit up just a bit.

      \n

      Solution

      \n

      Tedious.

      \n

      Post Mortem

      \n

      Indexing

      \n

I was asked about the indexing after publishing the blog. And it truly is a rather complicated topic, especially once the SliceIndex<I> trait comes into the picture. I couldn't leave it be, so I tried to implement the Index and IndexMut traits.

      \n
      note

I have also mentioned that the SliceIndex trait is unsafe, but truth be told, the only unsafe parts are the 2 methods that are named *unchecked*. Anyways, I will be implementing the Index* traits for now, rather than the SliceIndex.

      \n

      It's relatively straightforward…

      \n
      impl<I, C> Index<Vector2D<I>> for [C]
      where
      I: Copy + TryInto<usize>,
      <I as TryInto<usize>>::Error: Debug,
      C: Index<usize>,
      {
      type Output = C::Output;

      fn index(&self, index: Vector2D<I>) -> &Self::Output {
      let (x, y): (usize, usize) =
      (index.x.try_into().unwrap(), index.y.try_into().unwrap());
      &self[y][x]
      }
      }

      impl<I, C> IndexMut<Vector2D<I>> for [C]
      where
      I: Copy + TryInto<usize>,
      <I as TryInto<usize>>::Error: Debug,
      C: IndexMut<usize>,
      {
      fn index_mut(&mut self, index: Vector2D<I>) -> &mut Self::Output {
      let (x, y): (usize, usize) =
      (index.x.try_into().unwrap(), index.y.try_into().unwrap());
      &mut self[y][x]
      }
      }
      \n

We can see a lot of similarities to the implementation of the index and index_mut functions. In the end, they are 1:1, just wrapped in the trait that provides the syntax sugar for container[idx].

      \n
      note

      I have also switched from using the TryFrom to TryInto trait, since it better\nmatches what we are using, the .try_into rather than usize::try_from.

Also, implementing TryFrom automatically provides you with a TryInto implementation, since it is relatively easy to derive one from the other. Just compare the following:

      pub trait TryFrom<T>: Sized {
      type Error;

      fn try_from(value: T) -> Result<Self, Self::Error>;
      }

      pub trait TryInto<T>: Sized {
      type Error;

      fn try_into(self) -> Result<T, Self::Error>;
      }
      \n

      OK, so we have our trait implemented, we should be able to use container[index],\nright? Yes… but actually no 😦

      \n
      error[E0277]: the type `[std::vec::Vec<i8>]` cannot be indexed by `aoc_2022::Vector2D<usize>`
      --> src/bin/day08.rs:26:18
      |
      26 | if trees[pos] > tallest {
      | ^^^ slice indices are of type `usize` or ranges of `usize`
      |
      = help: the trait `std::slice::SliceIndex<[std::vec::Vec<i8>]>` is not implemented for `aoc_2022::Vector2D<usize>`
      = note: required for `std::vec::Vec<std::vec::Vec<i8>>` to implement `std::ops::Index<aoc_2022::Vector2D<usize>>`

      error[E0277]: the type `[std::vec::Vec<i8>]` cannot be indexed by `aoc_2022::Vector2D<usize>`
      --> src/bin/day08.rs:30:28
      |
      30 | max(tallest, trees[pos])
      | ^^^ slice indices are of type `usize` or ranges of `usize`
      |
      = help: the trait `std::slice::SliceIndex<[std::vec::Vec<i8>]>` is not implemented for `aoc_2022::Vector2D<usize>`
      = note: required for `std::vec::Vec<std::vec::Vec<i8>>` to implement `std::ops::Index<aoc_2022::Vector2D<usize>>`

      error[E0277]: the type `[std::vec::Vec<i8>]` cannot be indexed by `aoc_2022::Vector2D<isize>`
      --> src/bin/day08.rs:52:28
      |
      52 | let max_height = trees[position];
      | ^^^^^^^^ slice indices are of type `usize` or ranges of `usize`
      |
      = help: the trait `std::slice::SliceIndex<[std::vec::Vec<i8>]>` is not implemented for `aoc_2022::Vector2D<isize>`
      = note: required for `std::vec::Vec<std::vec::Vec<i8>>` to implement `std::ops::Index<aoc_2022::Vector2D<isize>>`
      \n

Why? We have it implemented for the slices ([C]), so why doesn't it work? Well, the fun part is that in the other place where we were using it, we were passing &[Vec<T>], but this is coming from helper functions that take &Vec<Vec<T>> instead. And… we don't implement Index and IndexMut for those. Just for the slices. 🤯 What are we going to do about it?

      \n

We can either start copy-pasting or be smarter about it… I choose to be smarter, so let's implement a macro! The only difference across the implementations is the type of the outer container. The implementation itself doesn't differ at all!

      \n

Implementing the macro can be done in the following way:

      \n
      macro_rules! generate_indices {
      ($container:ty) => {
      impl<I, C> Index<Vector2D<I>> for $container
      where
      I: Copy + TryInto<usize>,
      <I as TryInto<usize>>::Error: Debug,
      C: Index<usize>,
      {
      type Output = C::Output;

      fn index(&self, index: Vector2D<I>) -> &Self::Output {
      let (x, y): (usize, usize) =
      (index.x.try_into().unwrap(), index.y.try_into().unwrap());
      &self[y][x]
      }
      }

      impl<I, C> IndexMut<Vector2D<I>> for $container
      where
      I: Copy + TryInto<usize>,
      <I as TryInto<usize>>::Error: Debug,
      C: IndexMut<usize>,
      {
      fn index_mut(&mut self, index: Vector2D<I>) -> &mut Self::Output {
      let (x, y): (usize, usize) =
      (index.x.try_into().unwrap(), index.y.try_into().unwrap());
      &mut self[y][x]
      }
      }
      };
      }
      \n

      And now we can simply do

      \n
      generate_indices!(VecDeque<C>);
      generate_indices!([C]);
      generate_indices!(Vec<C>);
      // generate_indices!([C; N], const N: usize);
      \n

The last type (I took the inspiration from the implementations of the Index and IndexMut traits) is a bit problematic, because of the const N: usize part, which I haven't managed to parse. And that's how I got rid of the error.

      \n
      note

If I were to use 2D-indexing over [C; N] slices, I'd probably just go with the copy-paste, cause the cost of this “monstrosity” outweighs the benefit of keeping it DRY.

      \n
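
With those implementations generated, 2D indexing works the same on vectors and slices; for instance (assuming a Vector2D::new constructor):

\n
let grid: Vec<Vec<i8>> = vec![vec![1, 2], vec![3, 4]];
let position = Vector2D::new(1, 0); // x = 1, y = 0

assert_eq!(grid[position], 2);
\n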

      Cause of the problem

      \n

This issue is relatively funny. If you don't use any type aliases, just the raw types, clippy will suggest certain changes to you. For example, if you consider the following piece of code

      \n
      fn get_sum(nums: &Vec<i32>) -> i32 {
      nums.iter().sum()
      }

      fn main() {
      let nums = vec![1, 2, 3];
      println!(\"Sum: {}\", get_sum(&nums));
      }
      \n

      and you run clippy on it, you will get

      \n
      Checking playground v0.0.1 (/playground)
      warning: writing `&Vec` instead of `&[_]` involves a new object where a slice will do
      --> src/main.rs:1:18
      |
      1 | fn get_sum(nums: &Vec<i32>) -> i32 {
      | ^^^^^^^^^ help: change this to: `&[i32]`
      |
      = help: for further information visit https://rust-lang.github.io/rust-clippy/master/index.html#ptr_arg
      = note: `#[warn(clippy::ptr_arg)]` on by default

      warning: `playground` (bin \"playground\") generated 1 warning
      Finished dev [unoptimized + debuginfo] target(s) in 0.61s
      \n

      However, if you introduce a type alias, such as

      \n
      type Numbers = Vec<i32>;
      \n

Then clippy won't say anything, cause there is literally nothing to suggest. However, the outcome is not the same…
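
For example, with the alias the lint stays quiet, but the parameter is still a &Vec<i32> underneath, so a caller holding only a slice still has to allocate a Vec first:

\n
type Numbers = Vec<i32>;

// No clippy::ptr_arg warning here, yet this cannot be called with a plain &[i32].
fn get_sum(nums: &Numbers) -> i32 {
    nums.iter().sum()
}
\n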

      ", "url": "https://blog.mfocko.xyz/blog/aoc-2022/2nd-week", "title": "2nd week of Advent of Code '22 in Rust", "summary": "Surviving second week in Rust.", @@ -93,7 +93,7 @@ }, { "id": "https://blog.mfocko.xyz/blog/aoc-2022/1st-week", - "content_html": "

      Let's go through the first week of Advent of Code in Rust.

      \n
      note

      If you wish to have a look at the solutions, you can follow them on my GitLab.\nMore specifically in the /src/bin/.

      \n

I will try to summarize my experience with using Rust for the AoC. Having tried it out ages ago, I believe it will be pain and suffering, but we will see. For each day I will also try to give a tl;dr of the problem, so that you can better imagine the relation to my woes or 👍 moments.

      \n

      Day 1: Calorie Counting

      \n
      tl;dr

      As the name suggests, we get the calories of the food contained in the elves\nbackpacks and we want to choose the elf that has the most food ;)

      \n
      \n

      Wakey wakey!

      \n
      \n

      Programming in Rust at 6am definitely hits. I've also forgotten to mention how I\nhandle samples. With each puzzle you usually get a sample input and expected\noutput. You can use them to verify that your solution works, or usually doesn't.

      \n

      At first I've decided to put asserts into my main, something like

      \n
      assert_eq!(part_1(&sample), 24000);
      info!(\"Part 1: {}\", part_1(&input));

      assert_eq!(part_2(&sample), 45000);
      info!(\"Part 2: {}\", part_2(&input));
      \n

      However, once you get further, the sample input may take some time to run itself.\nSo in the end, I have decided to turn them into unit tests:

      \n
      #[cfg(test)]
      mod tests {
      use super::*;

      #[test]
      fn test_part_1() {
      let sample = parse_input(\"samples/day01.txt\");
      assert_eq!(part_1(&sample), 24000);
      }

      #[test]
      fn test_part_2() {
      let sample = parse_input(\"samples/day01.txt\");
      assert_eq!(part_2(&sample), 45000);
      }
      }
      \n

And later on I have noticed it's hard to tell the difference between the days, so I further renamed the mod from the generic tests to reflect the specific day.

      \n

Also, after finishing the first day's puzzle, I have installed sccache to cache the builds, so that the build time is lower, cause it was kinda unbearable.

      \n

      Solution

      \n

Well, it's a pretty simple problem. You just take the input, sum the calories and find the biggest one. However, if we try to generalize to more than the biggest one, the fun appears. We have a few options:

      \n
        \n
      • keep all the calories, sort them, take what we need
      • \n
      • keep all the calories and use max heap
      • \n
• use a min heap and maintain at most the N calories that we need (sketched below)
      • \n
      \n
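
A sketch of that last option, using the standard BinaryHeap with Reverse to get a min heap (the function and parameter names are just illustrative):

\n
use std::cmp::Reverse;
use std::collections::BinaryHeap;

fn top_n_sum(calories_per_elf: &[u32], n: usize) -> u32 {
    let mut heap = BinaryHeap::new();

    for &calories in calories_per_elf {
        heap.push(Reverse(calories));

        // Keep only the n largest totals seen so far.
        if heap.len() > n {
            heap.pop();
        }
    }

    heap.into_iter().map(|Reverse(calories)| calories).sum()
}
\n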

      Day 2: Rock Paper Scissors

      \n
      tl;dr

      You want to know what score did you achieve while playing Rock Paper Scissors.\nAnd then you want to be strategic about it.

      \n

      Apart from the technical details of the puzzle, it went relatively smooth.

      \n

      Solution

      \n

I took a relatively naïve approach and then tried to simplify it.

      \n

      Day 3: Rucksack Reorganization

      \n
      tl;dr

Let's go reorganize the elves' backpacks! Each backpack has 2 compartments and you want to find the common item among those compartments. Each item has a priority and you care only about the sum.

      \n

      This is the day where I started to fight the compiler and neither of us decided\nto give up. Let's dive into it \\o/

      \n
      Fun fact

      Fighting the compiler took me 30 minutes.

      \n

      We need to find a common item among 2 collections, that's an easy task, right?\nWe can construct 2 sets and find an intersection:

      \n
      let top: HashSet<i32> = [1, 2, 3].iter().collect();
      let bottom: HashSet<i32> = [3, 4, 5].iter().collect();
      \n

Now, the first issue that we encounter is caused by the fact that we are using a slice (the […]), whose iterator returns references to the numbers. And we get immediately yelled at by the compiler, because the numbers are discarded after running the .collect. To fix this, we can use .into_iter:

      \n
      let top: HashSet<i32> = [1, 2, 3].into_iter().collect();
      let bottom: HashSet<i32> = [3, 4, 5].into_iter().collect();
      \n

      This way the numbers will get copied instead of referenced. OK, let's find the\nintersection of those 2 collections:

      \n
      println!(\"Common elements: {:?}\", top.intersection(&bottom));
      \n
      Common elements: [3]
      \n
      caution

      Notice that we need to do &bottom. It explicitly specifies that .intersection\nborrows the bottom, i.e. takes an immutable reference to it.

      \n

      That's what we want, right? Looks like it! \\o/

      \n

The next part wants us to find the common element among all of the backpacks. OK, so that should be fairly easy, we have an intersection and we want to find the intersection over all of them.

      \n

      Let's have a look at the type of the .intersection

      \n
      pub fn intersection<'a>(
          &'a self,
          other: &'a HashSet<T, S>
      ) -> Intersection<'a, T, S>
      \n

      OK… Huh… But we have an example there!

      \n
      let intersection: HashSet<_> = a.intersection(&b).collect();
      \n

      Cool, that's all we need.

      \n
      let top: HashSet<i32> = [1, 2, 3, 4].into_iter().collect();
      let bottom: HashSet<i32> = [3, 4, 5, 6].into_iter().collect();
      let top_2: HashSet<i32> = [2, 3, 4, 5, 6].into_iter().collect();
      let bottom_2: HashSet<i32> = [4, 5, 6].into_iter().collect();

      let intersection: HashSet<_> = top.intersection(&bottom).collect();
      println!(\"Intersection: {:?}\", intersection);
      \n
      Intersection: {3, 4}
      \n

      Cool, so let's do the intersection with the top_2:

      \n
      let top: HashSet<i32> = [1, 2, 3, 4].into_iter().collect();
      let bottom: HashSet<i32> = [3, 4, 5, 6].into_iter().collect();
      let top_2: HashSet<i32> = [2, 3, 4, 5, 6].into_iter().collect();
      let bottom_2: HashSet<i32> = [4, 5, 6].into_iter().collect();

      let intersection: HashSet<_> = top.intersection(&bottom).collect();
      let intersection: HashSet<_> = intersection.intersection(&top_2).collect();
      println!(\"Intersection: {:?}\", intersection);
      \n

      And we get yelled at by the compiler:

      \n
      error[E0308]: mismatched types
      --> src/main.rs:10:58
      |
      10 | let intersection: HashSet<_> = intersection.intersection(&top_2).collect();
      | ------------ ^^^^^^ expected `&i32`, found `i32`
      | |
      | arguments to this function are incorrect
      |
      = note: expected reference `&HashSet<&i32>`
      found reference `&HashSet<i32>`
      \n

/o\\ What the hell is going on here? Well, the funny thing is that this operation doesn't return the elements themselves, but references to them, and when we pass in the third set, it holds just the values themselves, without any references.

      \n
      tip

It may seem like a very weird decision, but in fact it makes some sense… It allows you to do an intersection of items that may not be possible to copy. Overall this is a “tax” for having a borrow checker ~~drilling your ass~~ having your back and making sure you're not doing something naughty that may cause an undefined behavior.

      \n

      To resolve this we need to get an iterator that clones the elements:

      \n
      let top: HashSet<i32> = [1, 2, 3, 4].into_iter().collect();
      let bottom: HashSet<i32> = [3, 4, 5, 6].into_iter().collect();
      let top_2: HashSet<i32> = [2, 3, 4, 5, 6].into_iter().collect();
      let bottom_2: HashSet<i32> = [4, 5, 6].into_iter().collect();

      let intersection: HashSet<_> = top.intersection(&bottom).cloned().collect();
      let intersection: HashSet<_> = intersection.intersection(&top_2).cloned().collect();
      let intersection: HashSet<_> = intersection.intersection(&bottom_2).cloned().collect();
      println!(\"Intersection: {:?}\", intersection);
      \n
      Intersection: {4}
      \n

      Solution

      \n

      The approach is pretty simple, if you omit the 1on1 with the compiler. You just\nhave some fun with the set operations :)

      \n

      Day 4: Camp Cleanup

      \n
      tl;dr

      Elves are cleaning up the camp and they got overlapping sections to clean up.\nFind how many overlap and can take the day off.

      \n

      RangeInclusive is your friend not an enemy :)

      \n

      Solution

      \n

      Relatively easy, you just need to parse the input and know what you want. Rust's\nRangeInclusive type helped a lot, cause it took care of all abstractions.

      \n

      Day 5: Supply Stacks

      \n
      tl;dr

      Let's play with stacks of crates.

      \n

      Very easy problem with very annoying input. You can judge yourself:

      \n
          [D]    
      [N] [C]
      [Z] [M] [P]
      1 2 3

      move 1 from 2 to 1
      move 3 from 1 to 3
      move 2 from 2 to 1
      move 1 from 1 to 2
      \n

      Good luck transforming that into something reasonable :)

      \n
      Fun fact

      Took me 40 minutes to parse this reasonably, including fighting the compiler.

      \n

      Solution

      \n

For the initial solution I went with a manual approach (as in, I have done all the work). Later on I have decided to explore the std and the interface of std::vec::Vec and found split_off, which takes an index and splits (duh) the vector:

      \n
      let mut vec = vec![1, 2, 3];
      let vec2 = vec.split_off(1);
      assert_eq!(vec, [1]);
      assert_eq!(vec2, [2, 3]);
      \n

      This helped me simplify my solution a lot and also get rid of some edge cases.

      \n

      Day 6: Tuning Trouble

      \n
      tl;dr

Finding the start of the message in a very weird protocol. The start of the message is denoted by N unique consecutive characters.

      \n

      Solution

      \n

A lot of different approaches are possible here; knowing that we are dealing with input consisting solely of ASCII letters, I bit the bullet and went with a sliding window and constructing sets from that window, checking if the set is as big as the window.
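
In code, that idea fits in a few lines; find_marker and window_size are illustrative names (the window size being 4 and 14 for the two parts):

\n
use std::collections::HashSet;

fn find_marker(data: &[char], window_size: usize) -> Option<usize> {
    data.windows(window_size)
        .position(|window| window.iter().collect::<HashSet<_>>().len() == window_size)
        .map(|start| start + window_size)
}
\n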

      \n

One possible optimization could consist of keeping a bit-vector (i.e. a usize variable) of encountered characters and updating it as we go. However, this has a different issue, and that is the removal of characters from the left side of the window. We don't know whether the same character isn't also included later on in the window.

      \n

The other option is to do a similar thing, but keep the frequencies of the letters; again, knowing we have only ASCII letters, we can optimize by having a vector of 26 elements that keeps the count for each lowercase letter.

      \n

      Day 7: No Space Left On Device

      \n
      tl;dr

      Let's simulate du to get some stats about our file system and then pinpoint\ndirectories that take a lot of space and should be deleted.

      \n
      \n

      I was waiting for this moment, and yet it got me!\nimagine me swearing for hours

      \n
      \n

      Solution

      \n

      We need to “build” a file system from the input that is given in a following form:

      \n
      $ cd /
      $ ls
      dir a
      14848514 b.txt
      8504156 c.dat
      dir d
      $ cd a
      $ ls
      dir e
      29116 f
      2557 g
      62596 h.lst
      $ cd e
      $ ls
      584 i
      $ cd ..
      $ cd ..
      $ cd d
      $ ls
      4060174 j
      8033020 d.log
      5626152 d.ext
      7214296 k
      \n

      There are few ways in which you can achieve this and also you can assume some\npreconditions, but why would we do that, right? :)

      \n

      You can “slap” this in either HashMap or BTreeMap and call it a day.\nAnd that would be boring…

      \n
      tip

      BTreeMap is quite fitting for this, don't you think?

      \n

      I always wanted to try allocation on heap in Rust, so I chose to implement a tree.\nI fought with the Box<T> for some time and was losing…

      \n

      Then I looked up some implementations of trees or linked lists and decided to try\nRc<Cell<T>>. And I got my ass whopped by the compiler once again. /o\\

      \n
      tip

      Box<T> represents a dynamically allocated memory on heap. It is a single pointer,\nyou can imagine this as std::unique_ptr<T> in C++.

      Rc<T> represents a dynamically allocated memory on heap. On top of that it is\nreference counted (that's what the Rc stands for). You can imagine this as\nstd::shared_ptr<T> in C++.

      Now the fun stuff. Neither of them lets you mutate the contents of the memory.

Cell<T> allows you to mutate the memory. It can be used reasonably with types that can be copied, because you never get a reference to the contents; values are copied in and out of the cell instead, which is what guarantees the memory safety.

      RefCell<T> is similar to the Cell<T>, but the borrowing rules (how many mutable\nreferences are present) are checked dynamically.

      So in the end, if you want something like std::shared_ptr<T> in Rust, you want\nto have Rc<RefCell<T>>.

      \n

      So, how are we going to represent the file system then? We will use an enumeration,\nhehe, which is an algebraic data type that can store some stuff in itself 😩

      \n
      type FileHandle = Rc<RefCell<AocFile>>;

      #[derive(Debug)]
      enum AocFile {
      File(usize),
      Directory(BTreeMap<String, FileHandle>),
      }
      \n

Let's go over it! FileHandle represents a dynamically allocated AocFile, not much to discuss. What does the #[derive(Debug)] do though? It lets us print out the value of that enumeration; it's derived, so it's not as good as if we had implemented it ourselves, but it's good enough for debugging, hence the name.

      \n

      Now to the fun part! AocFile value can be represented in two ways:

      \n
        \n
      • File(usize), e.g. AocFile::File(123) and we can pattern match it, if we\nneed to
      • \n
      • Directory(BTreeMap<String, FileHandle>) will represent the directory and will\ncontain map matching the name of the files (or directories) within to their\nrespective file handles
      • \n
      \n

      I will omit the details about constructing this file system, cause there are a lot\nof technicalities introduced by the nature of the input. However if you are\ninterested, you can have a look at my solution.

      \n

      We need to find small enough directories and also find the smallest directory that\nwill free enough space. Now the question is, how could we do that. And there are\nmultiple ways I will describe.

      \n

I have chosen to implement tree catamorphism 😩. It is basically a fold over a tree data structure. We descend down into the leaves and propagate the computed results all the way to the root. You can also notice that this approach is very similar to dynamic programming: we find overlapping sections of the computation and try to minimize the additional work (in this case: we need to know the sizes of our descendants, but we have already been there).
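
To make the fold more concrete, a sketch of it over the AocFile tree from above might look like this (total_size and sizes are illustrative names, roughly the idea rather than the exact code from my solution):

\n
fn total_size(file: &FileHandle, sizes: &mut Vec<usize>) -> usize {
    match &*file.borrow() {
        // A plain file contributes its own size.
        AocFile::File(size) => *size,
        // A directory is the sum of its children; remember every directory's
        // size on the way up, so that both parts can filter them later.
        AocFile::Directory(entries) => {
            let size = entries.values().map(|f| total_size(f, sizes)).sum();
            sizes.push(size);
            size
        }
    }
}
\n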

      \n

Another approach that has been suggested to me a few days later is running DFS on the graph. And, funnily enough, we would still need to combine what we found in the branches into which we descend. So in the end, it would work very similarly to my solution.

      \n

      One of the more exotic options would be precomputing the required information at\nthe same time as parsing. That could be done by adding additional fields to the\nnodes which would allow storing such information and updating it as we construct\nthe file system.

      \n

      Post Mortem

      \n

      Things that have been brought up in the discussion later on.

      \n

      Rc<T> vs Rc<RefCell<T>>

      \n

      It has been brought up that I have a contradicting statement regarding the\ndynamically allocated memory. Specifically:

      \n
        \n
      • You can imagine Rc<T> as an std::shared_ptr<T> (in C++)
      • \n
      • When you want an equivalent of std::shared_ptr<T>, you want to use\nRc<RefCell<T>>
      • \n
      \n

Now, in Rust it is a bit more complicated, because the type that represents the “shared pointer” is Rc<T>. What RefCell<T> does is make sure that there is only one “owner” of a mutable reference at a time (checked dynamically, as opposed to the Cell<T>).

      \n

      Therefore to be precise and correct about the equivalents of std::shared_ptr<T>\nin Rust, we can say that

      \n
        \n
      • Rc<T> is an equivalent of a const std::shared_ptr<T>,
      • \n
      • and Rc<RefCell<T>> is an equivalent of a std::shared_ptr<T>.
      • \n
      \n

      You can easily see that they only differ in the mutability. (And even that is not\nas simple as it seems, because there is also Cell<T>)

      ", + "content_html": "

      ", "url": "https://blog.mfocko.xyz/blog/aoc-2022/1st-week", "title": "1st week of Advent of Code '22 in Rust", "summary": "Surviving first week in Rust.", @@ -110,7 +110,7 @@ }, { "id": "https://blog.mfocko.xyz/blog/aoc-2022/intro", - "content_html": "

      Let's talk about the preparations for this year's Advent of Code.

      \n

      Choosing a language

      \n

When choosing a language for AoC, you usually want a language that gives you quick feedback and allows you to iterate quickly towards the solution of the puzzle. One of the most common choices is Python; many people also use JavaScript or Ruby.

      \n

Given the competitive nature of the AoC and its popularity among competitive programmers, C++ might also be a very good choice. Only if you are familiar with it, I guess…

      \n

      If you want a challenge, you might also choose to rotate the languages each day.\nThough I prefer to use only one language.

      \n

      For this year I have been deciding between Rust, C++ and Pascal or Ada.

      \n

I have tried Rust last year and survived with it for 3 days, then gave up and switched to Kotlin, which was pretty good given it is “Java undercover”. I pretty much like the ideas behind Rust, I am just not sure about the whole cult and the implementation of those ideas. After some years with C/C++, I would say that Rust feels too safe for my taste and tries to “punish me” even for the most trivial things.

      \n

C++ is very robust, but it also comes with a wide variety of options providing you with the ability to shoot yourself in the leg. I have tried to solve a few days of previous Advent of Code events; it was relatively easy to solve the problems in C++, given that I do not admit to writing my own iterator for enumerate…

      \n

      Pascal or Ada were meme choices :) Ada is heavily inspired by Pascal and has a\npretty nice standard library that offers enough to be able to quickly solve some\nproblems in it. However the toolkit is questionable :/

      \n

      Choosing libraries

      \n

      Preparations for Rust

      \n

      All of the sources, later on including solutions, can be found at my\nGitLab.

      \n

      Toolkit

      \n

Since we are using Rust, we are going to use Cargo and more than likely VSCode with rust-analyzer. Because of my choice of libraries, we will also introduce a .envrc file that can be used by direnv, which allows you to set specific environment variables when you enter a directory. In our case, we will use

      \n
      # to show nice backtrace when using the color-eyre
      export RUST_BACKTRACE=1

      # to catch logs generated by tracing
      export RUST_LOG=trace
      \n

And for one of the most obnoxious things ever, we will use a script to download the inputs instead of “clicking, opening and copying to a file”.1 There is no need to be fancy, so we will adjust a Python script by Martin.2

      \n
      #!/usr/bin/env python3

      import datetime
      import yaml
      import requests
      import sys


      def load_config():
      with open(\"env.yaml\", \"r\") as f:
      js = yaml.load(f, Loader=yaml.Loader)
      return js[\"session\"], js[\"year\"]


      def get_input(session, year, day):
      return requests.get(
      f\"https://adventofcode.com/{year}/day/{day}/input\",
      cookies={\"session\": session},
      headers={
      \"User-Agent\": \"{repo} by {mail}\".format(
      repo=\"gitlab.com/mfocko/advent-of-code-2022\",
      mail=\"me@mfocko.xyz\",
      )
      },
      ).content.decode(\"utf-8\")


      def main():
      day = datetime.datetime.now().day
      if len(sys.argv) == 2:
      day = sys.argv[1]

      session, year = load_config()
      problem_input = get_input(session, year, day)

      with open(f\"./inputs/day{day:>02}.txt\", \"w\") as f:
      f.write(problem_input)


      if __name__ == \"__main__\":
      main()
      \n

      If the script is called without any arguments, it will deduce the day from the\nsystem, so we do not need to change the day every morning. It also requires a\nconfiguration file:

      \n
      # env.yaml
      session: ‹your session cookie›
      year: 2022
      \n

      Libraries

      \n

      Looking at the list of the libraries, I have chosen “a lot” of them. Let's walk\nthrough each of them.

      \n

tracing and tracing-subscriber are the crates that can be used for tracing and logging of your Rust programs; there are also other crates that can help you with providing backtraces to Sentry, in case you have deployed your application somewhere and you want to watch over it. In our use case we will just utilize the macros for debugging in the terminal.

      \n

thiserror, anyhow and color-eyre are used for error reporting. thiserror is a very good choice for libraries, cause it extends the Error from the std and allows you to create more convenient error types. Next is anyhow, which kinda builds on top of thiserror and provides you with simpler error handling in binaries.3 And finally we have color-eyre which, as I found out later, is a colorful (wink wink) extension of eyre, which is a fork of anyhow that supports customized reports.

      \n

In the end I have decided to remove thiserror and anyhow, since the first one is more suitable for libraries and the latter was basically fully replaced by {color-,}eyre.

      \n

      regex and lazy_static are a very good and also, I hope, self-explanatory\ncombination. lazy_static allows you to have static variables that must be initialized\nduring runtime.
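
A typical use of that combination looks like this (the MOVE_RE name and the pattern are only an illustration):

\n
use lazy_static::lazy_static;
use regex::Regex;

lazy_static! {
    // Compiled once, on first access, instead of on every call.
    static ref MOVE_RE: Regex = Regex::new(r"^move (\d+) from (\d+) to (\d+)$").unwrap();
}
\n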

      \n

      itertools provides some nice extensions to the iterators from the std.

      \n

      My own “library”

      \n

When creating the crate for this year's Advent of Code, I have chosen the library crate type. Even though the standard library is huge, some things might not be included, and also we can follow KISS. I have 2 modules that my “library” exports, one for parsing and one for a 2D vector (which gets used quite often during Advent of Code).

      \n

The key part is, of course, processing the input, and my library exports the following functions that get used a lot:

      \n
      /// Reads file to the string.
      pub fn file_to_string<P: AsRef<Path>>(pathname: P) -> String;

      /// Reads file and returns it as a vector of characters.
      pub fn file_to_chars<P: AsRef<Path>>(pathname: P) -> Vec<char>;

      /// Reads file and returns a vector of parsed structures. Expects each structure
      /// on its own line in the file. And `T` needs to implement `FromStr` trait.
      pub fn file_to_structs<P: AsRef<Path>, T: FromStr>(pathname: P) -> Vec<T>
      where
      <T as FromStr>::Err: Debug;

      /// Converts iterator over strings to a vector of parsed structures. `T` needs
      /// to implement `FromStr` trait and its error must derive `Debug` trait.
      pub fn strings_to_structs<T: FromStr, U>(
      iter: impl Iterator<Item = U>
      ) -> Vec<T>
      where
      <T as std::str::FromStr>::Err: std::fmt::Debug,
      U: Deref<Target = str>;

      /// Reads file and returns it as a vector of its lines.
      pub fn file_to_lines<P: AsRef<Path>>(pathname: P) -> Vec<String>;
      \n
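For illustration, this is how those helpers are meant to be used: implement FromStr for a puzzle-specific structure and let file_to_structs handle the rest. The structure below is made up and is not part of the library:

use std::str::FromStr;

// hypothetical structure for one line of an input such as `2-4`
struct Assignment {
    from: u32,
    to: u32,
}

impl FromStr for Assignment {
    type Err = String;

    fn from_str(s: &str) -> Result<Self, Self::Err> {
        let (from, to) = s
            .split_once('-')
            .ok_or_else(|| format!("invalid range: {s}"))?;
        Ok(Assignment {
            from: from.parse().map_err(|e| format!("{e}"))?,
            to: to.parse().map_err(|e| format!("{e}"))?,
        })
    }
}

// `file_to_structs` is the helper from the listing above (in scope via `use aoc_2022::*;`)
fn parse_input(path: &str) -> Vec<Assignment> {
    file_to_structs(path)
}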

As for the vector, I went with a rather simple implementation that, for now, allows only addition of the vectors and accessing the elements via the functions x() and y(). The vector is also generic, so we can use it with any numeric type we need.

      \n
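The actual implementation lives in the repository, but a minimal sketch of such a vector could look like this (the names and derives are illustrative, not necessarily the ones used in the crate):

use std::ops::Add;

#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub struct Vector2D<T> {
    x: T,
    y: T,
}

impl<T: Copy> Vector2D<T> {
    pub fn new(x: T, y: T) -> Self {
        Self { x, y }
    }

    pub fn x(&self) -> T {
        self.x
    }

    pub fn y(&self) -> T {
        self.y
    }
}

// for now only addition is supported, as mentioned above
impl<T: Add<Output = T>> Add for Vector2D<T> {
    type Output = Self;

    fn add(self, rhs: Self) -> Self {
        Self {
            x: self.x + rhs.x,
            y: self.y + rhs.y,
        }
    }
}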

      Skeleton

      \n

      We can also prepare a template to quickly bootstrap each of the days. We know\nthat each puzzle has 2 parts, which means that we can start with 2 functions that\nwill solve them.

      \n
fn part1(input: &Input) -> Output {
    todo!()
}

fn part2(input: &Input) -> Output {
    todo!()
}
      \n

Both functions take a reference to the input and return some output (in the majority of puzzles, it is the same type). todo!() can be used as a nice placeholder; it causes a panic when reached, and we can also provide a string with an explanation, e.g. todo!("part 1"). We have not given the functions specific types yet, and to avoid as much copy-paste as possible, we will introduce type aliases.

      \n
      type Input = String;
      type Output = i32;
      \n
      tip

      This allows us to quickly adjust the types only in one place without the need to\ndo regex-replace or replace them manually.

      \n
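For example, for a day whose input parses into a list of numbers, only these two lines would change (illustrative only):

type Input = Vec<i64>;
type Output = i64;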

      For each day we get a personalized input that is provided as a text file. Almost\nall the time, we would like to get some structured type out of that input, and\ntherefore it makes sense to introduce a new function that will provide the parsing\nof the input.

      \n
fn parse_input(path: &str) -> Input {
    todo!()
}
      \n

This “parser” will take a path to the file, just in case we would like to run the sample instead of the input.

      \n

      OK, so now we can write a main function that will take all of the pieces and\nrun them.

      \n
fn main() {
    let input = parse_input("inputs/dayXX.txt");

    println!("Part 1: {}", part1(&input));
    println!("Part 2: {}", part2(&input));
}
      \n

      This would definitely do :) But we have installed a few libraries and we want to\nuse them. In this part we are going to utilize tracing (for tracing, duh…)\nand color-eyre (for better error reporting, e.g. from parsing).

      \n
fn main() -> Result<()> {
    tracing_subscriber::fmt()
        .with_env_filter(EnvFilter::from_default_env())
        .with_target(false)
        .with_file(true)
        .with_line_number(true)
        .without_time()
        .compact()
        .init();
    color_eyre::install()?;

    let input = parse_input("inputs/dayXX.txt");

    info!("Part 1: {}", part1(&input));
    info!("Part 2: {}", part2(&input));

    Ok(())
}
      \n

The first statement will set up tracing and configure it to print out the logs to the terminal, based on the environment variable. We also change the formatting a bit, since we do not need all the fancy features of the logger. Pure initialization would get us logs like this:

      \n
      2022-12-11T19:53:19.975343Z  INFO day01: Part 1: 0
      \n

      However after running that command, we will get the following:

      \n
       INFO src/bin/day01.rs:35: Part 1: 0
      \n

And the color_eyre::install()? is quite straightforward: we just initialize the error reporting by color-eyre.

      \n
      caution

Notice that we had to add Ok(()) to the end of the function and adjust the return type of main to Result<()>. This is caused by color-eyre, which can be installed only once and therefore can fail; that is how we got the ? at the end of ::install, which unwraps the »result« of the installation.

      \n

      Overall we will get to a template like this:

      \n
use aoc_2022::*;

use color_eyre::eyre::Result;
use tracing::info;
use tracing_subscriber::EnvFilter;

type Input = String;
type Output = i32;

fn parse_input(path: &str) -> Input {
    todo!()
}

fn part1(input: &Input) -> Output {
    todo!()
}

fn part2(input: &Input) -> Output {
    todo!()
}

fn main() -> Result<()> {
    tracing_subscriber::fmt()
        .with_env_filter(EnvFilter::from_default_env())
        .with_target(false)
        .with_file(true)
        .with_line_number(true)
        .without_time()
        .compact()
        .init();
    color_eyre::install()?;

    let input = parse_input("inputs/dayXX.txt");

    info!("Part 1: {}", part1(&input));
    info!("Part 2: {}", part2(&input));

    Ok(())
}
      \n

      Footnotes

      \n
1. Copy-pasting might be a relaxing thing to do, but you can also discover nasty stuff about your PC. See this Reddit post and the comment.
2. GitHub profile
3. Even though you can use it for libraries too, handling errors from libraries using anyhow is nasty… You will be the stinky one ;)
      ", + "content_html": "

      Let's talk about the preparations for this year's Advent of Code.

      \n

      Choosing a language

      \n

      When choosing a language for AoC, you usually want a language that gives you a\nquick feedback which allows you to iterate quickly to the solution of the puzzle.\nOne of the most common choices is Python, many people also use JavaScript or Ruby.

      \n

      Given the competitive nature of the AoC and popularity among competitive programming,\nC++ might be also a very good choice. Only if you are familiar with it, I guess…

      \n

      If you want a challenge, you might also choose to rotate the languages each day.\nThough I prefer to use only one language.

      \n

      For this year I have been deciding between Rust, C++ and Pascal or Ada.

      \n

      I have tried Rust last year and have survived with it for 3 days and then gave\nup and switched to Kotlin, which was pretty good given it is “Java undercover”.\nI pretty much like the ideas behind Rust, I am not sure about the whole cult and\nimplementation of those ideas though. After some years with C/C++, I would say\nthat Rust feels too safe for my taste and tries to “punish me” even for the\nmost trivial things.

      \n

      C++ is a very robust, but also comes with a wide variety of options providing you\nthe ability to shoot yourself in the leg. I have tried to solve few days of previous\nAdvent of Code events, it was relatively easy to solve the problems in C++, given\nthat I do not admit writing my own iterator for enumerate

      \n

      Pascal or Ada were meme choices :) Ada is heavily inspired by Pascal and has a\npretty nice standard library that offers enough to be able to quickly solve some\nproblems in it. However the toolkit is questionable :/

      \n

      Choosing libraries

      \n

      Preparations for Rust

      \n

      All of the sources, later on including solutions, can be found at my\nGitLab.

      \n

      Toolkit

      \n

      Since we are using Rust, we are going to use a Cargo and more than likely VSCode\nwith rust-analyzer. Because of my choice of libraries, we will also introduce\na .envrc file that can be used by direnv, which allows you to set specific\nenvironment variables when you enter a directory. In our case, we will use

      \n
      # to show nice backtrace when using the color-eyre
      export RUST_BACKTRACE=1

      # to catch logs generated by tracing
      export RUST_LOG=trace
      \n

      And for the one of the most obnoxious things ever, we will use a script to download\nthe inputs instead of “clicking, opening and copying to a file1. There is\nno need to be fancy, so we will adjust Python script by Martin2.

      \n
      #!/usr/bin/env python3

      import datetime
      import yaml
      import requests
      import sys


      def load_config():
      with open(\"env.yaml\", \"r\") as f:
      js = yaml.load(f, Loader=yaml.Loader)
      return js[\"session\"], js[\"year\"]


      def get_input(session, year, day):
      return requests.get(
      f\"https://adventofcode.com/{year}/day/{day}/input\",
      cookies={\"session\": session},
      headers={
      \"User-Agent\": \"{repo} by {mail}\".format(
      repo=\"gitlab.com/mfocko/advent-of-code-2022\",
      mail=\"me@mfocko.xyz\",
      )
      },
      ).content.decode(\"utf-8\")


      def main():
      day = datetime.datetime.now().day
      if len(sys.argv) == 2:
      day = sys.argv[1]

      session, year = load_config()
      problem_input = get_input(session, year, day)

      with open(f\"./inputs/day{day:>02}.txt\", \"w\") as f:
      f.write(problem_input)


      if __name__ == \"__main__\":
      main()
      \n

      If the script is called without any arguments, it will deduce the day from the\nsystem, so we do not need to change the day every morning. It also requires a\nconfiguration file:

      \n
      # env.yaml
      session: ‹your session cookie›
      year: 2022
      \n

      Libraries

      \n

      Looking at the list of the libraries, I have chosen “a lot” of them. Let's walk\nthrough each of them.

      \n

      tracing and tracing-subscriber are the crates that can be used for tracing\nand logging of your Rust programs, there are also other crates that can help you\nwith providing backtrace to the Sentry in case you have deployed your application\nsomewhere and you want to watch over it. In our use case we will just utilize the\nmacros for debugging in the terminal.

      \n

      thiserror, anyhow and color-eyre are used for error reporting.\nthiserror is a very good choice for libraries, cause it extends the Error\nfrom the std and allows you to create more convenient error types. Next is\nanyhow which kinda builds on top of the thiserror and provides you with simpler\nerror handling in binaries3. And finally we have color-eyre which, as I found\nout later, is a colorful (wink wink) extension of eyre which is fork of anyhow\nwhile supporting customized reports.

      \n

      In the end I have decided to remove thiserror and anyhow, since first one is\nsuitable for libraries and the latter was basically fully replaced by {color-,}eyre.

      \n

      regex and lazy_static are a very good and also, I hope, self-explanatory\ncombination. lazy_static allows you to have static variables that must be initialized\nduring runtime.

      \n

      itertools provides some nice extensions to the iterators from the std.

      \n

      My own “library”

      \n

      When creating the crate for this year's Advent of Code, I have chosen a library\ntype. Even though standard library is huge, some things might not be included and\nalso we can follow KISS. I have 2 modules that my “library” exports, one for\nparsing and one for 2D vector (that gets used quite often during Advent of Code).

      \n

      Key part is, of course, processing the input and my library exports following\nfunctions that get used a lot:

      \n
      /// Reads file to the string.
      pub fn file_to_string<P: AsRef<Path>>(pathname: P) -> String;

      /// Reads file and returns it as a vector of characters.
      pub fn file_to_chars<P: AsRef<Path>>(pathname: P) -> Vec<char>;

      /// Reads file and returns a vector of parsed structures. Expects each structure
      /// on its own line in the file. And `T` needs to implement `FromStr` trait.
      pub fn file_to_structs<P: AsRef<Path>, T: FromStr>(pathname: P) -> Vec<T>
      where
      <T as FromStr>::Err: Debug;

      /// Converts iterator over strings to a vector of parsed structures. `T` needs
      /// to implement `FromStr` trait and its error must derive `Debug` trait.
      pub fn strings_to_structs<T: FromStr, U>(
      iter: impl Iterator<Item = U>
      ) -> Vec<T>
      where
      <T as std::str::FromStr>::Err: std::fmt::Debug,
      U: Deref<Target = str>;

      /// Reads file and returns it as a vector of its lines.
      pub fn file_to_lines<P: AsRef<Path>>(pathname: P) -> Vec<String>;
      \n

      As for the vector, I went with a rather simple implementation that allows only\naddition of the vectors for now and accessing the elements via functions x()\nand y(). Also the vector is generic, so we can use it with any numeric type we\nneed.

      \n

      Skeleton

      \n

      We can also prepare a template to quickly bootstrap each of the days. We know\nthat each puzzle has 2 parts, which means that we can start with 2 functions that\nwill solve them.

      \n
      fn part1(input: &Input) -> Output {
      todo!()
      }

      fn part2(input: &Input) -> Output {
      todo!()
      }
      \n

      Both functions take reference to the input and return some output (in majority\nof puzzles, it is the same type). todo!() can be used as a nice placeholder,\nit also causes a panic when reached and we could also provide some string with\nan explanation, e.g. todo!(\"part 1\"). We have not given functions a specific\ntype and to avoid as much copy-paste as possible, we will introduce type aliases.

      \n
      type Input = String;
      type Output = i32;
      \n
      tip

      This allows us to quickly adjust the types only in one place without the need to\ndo regex-replace or replace them manually.

      \n

      For each day we get a personalized input that is provided as a text file. Almost\nall the time, we would like to get some structured type out of that input, and\ntherefore it makes sense to introduce a new function that will provide the parsing\nof the input.

      \n
      fn parse_input(path: &str) -> Input {
      todo!()
      }
      \n

      This “parser” will take a path to the file, just in case we would like to run the\nsample instead of input.

      \n

      OK, so now we can write a main function that will take all of the pieces and\nrun them.

      \n
      fn main() {
      let input = parse_input(\"inputs/dayXX.txt\");

      println!(\"Part 1: {}\", part_1(&input));
      println!(\"Part 2: {}\", part_2(&input));
      }
      \n

      This would definitely do :) But we have installed a few libraries and we want to\nuse them. In this part we are going to utilize tracing (for tracing, duh…)\nand color-eyre (for better error reporting, e.g. from parsing).

      \n
      fn main() -> Result<()> {
      tracing_subscriber::fmt()
      .with_env_filter(EnvFilter::from_default_env())
      .with_target(false)
      .with_file(true)
      .with_line_number(true)
      .without_time()
      .compact()
      .init();
      color_eyre::install()?;

      let input = parse_input(\"inputs/dayXX.txt\");

      info!(\"Part 1: {}\", part_1(&input));
      info!(\"Part 2: {}\", part_2(&input));

      Ok(())
      }
      \n

      The first statement will set up tracing and configure it to print out the logs to\nterminal, based on the environment variable. We also change the formatting a bit,\nsince we do not need all the fancy features of the logger. Pure initialization\nwould get us logs like this:

      \n
      2022-12-11T19:53:19.975343Z  INFO day01: Part 1: 0
      \n

      However after running that command, we will get the following:

      \n
       INFO src/bin/day01.rs:35: Part 1: 0
      \n

      And the color_eyre::install()? is quite straightforward. We just initialize the\nerror reporting by color eyre.

      \n
      caution

      Notice that we had to add Ok(()) to the end of the function and adjust the\nreturn type of the main to Result<()>. It is caused by the color eyre that\ncan be installed only once and therefore it can fail, that is how we got the ?\nat the end of the ::install which unwraps the »result« of the installation.

      \n

      Overall we will get to a template like this:

      \n
      use aoc_2022::*;

      use color_eyre::eyre::Result;
      use tracing::info;
      use tracing_subscriber::EnvFilter;

      type Input = String;
      type Output = i32;

      fn parse_input(path: &str) -> Input {
      todo!()
      }

      fn part1(input: &Input) -> Output {
      todo!()
      }

      fn part2(input: &Input) -> Output {
      todo!()
      }

      fn main() -> Result<()> {
      tracing_subscriber::fmt()
      .with_env_filter(EnvFilter::from_default_env())
      .with_target(false)
      .with_file(true)
      .with_line_number(true)
      .without_time()
      .compact()
      .init();
      color_eyre::install()?;

      let input = parse_input(\"inputs/dayXX.txt\");

      info!(\"Part 1: {}\", part_1(&input));
      info!(\"Part 2: {}\", part_2(&input));

      Ok(())
      }
      \n

      Footnotes

      \n
        \n
      1. \n

        Copy-pasting might be a relaxing thing to do, but you can also discover\nnasty stuff about your PC. See this Reddit post and the comment.

        \n
      2. \n
      3. \n

        GitHub profile

        \n
      4. \n
      5. \n

        Even though you can use it even for libraries, but handling errors from\nlibraries using anyhow is nasty… You will be the stinky one ;)

        \n
      6. \n
      \n
      ", "url": "https://blog.mfocko.xyz/blog/aoc-2022/intro", "title": "Advent of Code '22 in Rust", "summary": "Preparing for Advent of Code '22.", diff --git a/blog/index.html b/blog/index.html index 545d2a5..cbbc436 100644 --- a/blog/index.html +++ b/blog/index.html @@ -14,8 +14,8 @@ - - + +

      · 4 min read
      Matej Focko

      When you decide to run Fedora on your VPS, you might get screwed over by using diff --git a/blog/leetcode/sort-diagonally/index.html b/blog/leetcode/sort-diagonally/index.html index fcc3a79..e6d1546 100644 --- a/blog/leetcode/sort-diagonally/index.html +++ b/blog/leetcode/sort-diagonally/index.html @@ -14,8 +14,8 @@ - - + +

      Sort the matrix diagonally

      · 17 min read
      Matej Focko

      Let's try to solve one of the LeetCode challenges in easy and hard mode at the @@ -35,26 +35,26 @@ order and return the resulting matrix.

      Image describing the problem

      Skeleton and initial adjustments

      We are given the following skeleton for the C++ and the given challenge:

      +
      class Solution {
      public:
      vector<vector<int>> diagonalSort(vector<vector<int>>& mat) {

      }
      };

The task is to sort the passed matrix diagonally and then return it. First of all, I don't like to solve this in a web browser, so we'll need to adjust it accordingly for running it locally. We'll start by including the vector header, using fully-qualified namespaces1 and also adding a few tests:

      +
      #include <cassert>
      #include <vector>

      using matrix = std::vector<std::vector<int>>;

      class Solution {
      public:
      matrix diagonalSort(matrix& mat)
      {
      }
      };

      static void test_case_1()
      {
      // Input: mat = [[3,3,1,1],[2,2,1,2],[1,1,1,2]]
      // Output: [[1,1,1,1],[1,2,2,2],[1,2,3,3]]

      Solution s;
      assert((s.diagonalSort(std::vector { std::vector { 3, 3, 1, 1 },
      std::vector { 2, 2, 1, 2 },
      std::vector { 1, 1, 1, 2 } })
      == std::vector { std::vector { 1, 1, 1, 1 },
      std::vector { 1, 2, 2, 2 },
      std::vector { 1, 2, 3, 3 } }));
      }

      static void test_case_2()
      {
      // Input: mat =
      // [[11,25,66,1,69,7],[23,55,17,45,15,52],[75,31,36,44,58,8],[22,27,33,25,68,4],[84,28,14,11,5,50]]
      // Output:
      // [[5,17,4,1,52,7],[11,11,25,45,8,69],[14,23,25,44,58,15],[22,27,31,36,50,66],[84,28,75,33,55,68]]

      Solution s;
      assert((s.diagonalSort(std::vector { std::vector { 11, 25, 66, 1, 69, 7 },
      std::vector { 23, 55, 17, 45, 15, 52 },
      std::vector { 75, 31, 36, 44, 58, 8 },
      std::vector { 22, 27, 33, 25, 68, 4 },
      std::vector { 84, 28, 14, 11, 5, 50 } })
      == std::vector { std::vector { 5, 17, 4, 1, 52, 7 },
      std::vector { 11, 11, 25, 45, 8, 69 },
      std::vector { 14, 23, 25, 44, 58, 15 },
      std::vector { 22, 27, 31, 36, 50, 66 },
      std::vector { 84, 28, 75, 33, 55, 68 } }));
      }

      int main()
      {
      test_case_1();
      test_case_2();

      return 0;
      }

We need to return the matrix, but we're given a reference to the input matrix. We can easily abuse C++ here and just switch the reference to a value; this way the matrix will be copied when passed to the function, so we can sort the copy and just return it back. We also get yelled at by the compiler for the fact that the method doesn't return anything yet, so to make it “shut up” we will just return the input for now:

      +
      -    matrix diagonalSort(matrix& mat)
      + matrix diagonalSort(matrix mat)
      {
      + return mat;
      }

      Now, we get the copy and we're good to go.

      Naïve solution

      As you may know, C++ offers a plethora of functions that can be used to your advantage, given that you know how to “bend” the data structures accordingly.

      What does that mean for us? Well, we have an std::sort, we can use it, right? Let's have a look at it:

      +
      template< class RandomIt >
      void sort( RandomIt first, RandomIt last );

      This overload is more than we need. What does it do? It just sorts the elements in the range [first, last) using operator< on them. We can't sort the whole matrix using this, but… we can sort just »one« diagonal without doing much work @@ -72,10 +72,10 @@ up, i.e. “compiler-assisted development3. And that way we get

      +
      matrix diagonalSort(matrix mat)
      {
      // we iterate over the diagonals
      for (auto d : diagonals(mat)) {
      // and we sort each diagonal
      std::sort(d.begin(), d.end());
      }

      // we take the matrix by copy, so we can sort in-situ and return the copy
      // that we sorted
      return mat;
      }

      This solution looks very simple, doesn't it? Well, cause it is. Let's try compiling it:

      +
      matrix-sort.cpp:11:23: error: use of undeclared identifier 'diagonals' [clang-diagnostic-error]
      for (auto d : diagonals(mat)) {
      ^
      Found compiler error(s).
      make: *** [makefile:14: tidy] Error 1

      OK, seems about right. We haven't implemented the diagonals yet. And based on what we've written so far, we need a function or a class diagonals that will give us the diagonals we need.

      @@ -90,7 +90,7 @@ do such functionality for a matrix of any type, not just the int fr
    2. get the beginning
    3. get the end (the “sentinel”)
      +
      template <typename T>
      class diagonals {
      using matrix_t = std::vector<std::vector<T>>;

      matrix_t& _matrix;

      public:
      diagonals(matrix_t& m)
      : _matrix(m)
      {
      }
      diagonals_iter begin()
      {
      /* TODO */
      }
      diagonals_iter end()
      {
      /* TODO */
      }
      };

      Now we have a diagonals that we can use to go through the diagonals. We haven't implemented the core of it yet. Let's go through what we have for now.

      We have a templated class with templated T that is used as a placeholder for any @@ -109,7 +109,7 @@ in the first row, followed by the rest of the diagonals in the first column.

      need to know which diagonal is next. For that purpose we will pass the indices of the first cell on the diagonal. That way we can always tell how to move forward.

      We will start by updating the begin and end to reflect our choice accordingly.

      +
      diagonals_iter begin() { return diagonals_iter { _matrix, 0, 0 }; }
      diagonals_iter end() { return diagonals_iter { _matrix, 0, _matrix.size() }; }

      For the begin we return the first diagonal that starts at (0, 0). And because we have decided to do the diagonals in the first column at the end, the first diagonal that is not a valid one is the one at (0, height). Apart from the @@ -123,7 +123,7 @@ don't care about the fact they don't need to be sorted.

We can start with a simple skeleton based on the information that we pass from the diagonals. Also, to utilize the matrix_t and keep the implementation details hidden away, we will put this code into the diagonals class.

      +
      class diagonals_iter {
      matrix_t& m;
      std::size_t x;
      std::size_t y;

      public:
      diagonals_iter(matrix_t& matrix, std::size_t x, std::size_t y)
      : m(matrix)
      , x(x)
      , y(y)
      {
      }
      };

      In this case we will be implementing a “simple” forward iterator, so we don't need to implement a lot. Notably it will be:

        @@ -133,12 +133,12 @@ iterate over)
      • dereference operator (we need to be able to retrieve the objects we iterate over)
      +
      class diagonals_iter {
      matrix_t& m;
      std::size_t x;
      std::size_t y;

      public:
      diagonals_iter(matrix_t& matrix, std::size_t x, std::size_t y)
      : m(matrix)
      , x(x)
      , y(y)
      {
      }

      bool operator!=(const diagonals_iter& rhs) const
      {
      // iterators are not equal if they reference different matrices, or
      // their positions differ
      return m != rhs.m || x != rhs.x || y != rhs.y;
      }

      diagonals_iter& operator++()
      {
      if (y != 0) {
      // iterating through diagonals down the first column
      y++;
      return *this;
      }

      // iterating the diagonals along the first row
      x++;
      if (x == m.front().size()) {
      // switching to diagonals in the first column
      x = 0;
      y++;
      }

      return *this;
      }

      diagonal<T> operator*() const { return diagonal { m, x, y }; }
      };

Let's go one by one. The inequality operator is rather simple: just compare the iterator's attributes field by field. If you think about it, checking inequality of two 2D vectors may be a bit inefficient; therefore, we can swap the order around and check it as the last thing.

      +
      -        return m != rhs.m || x != rhs.x || y != rhs.y;
      + return x != rhs.x || y != rhs.y || m != rhs.m;

      Preincrementation is where the magic happens. If you have a better look, you can see two branches of this operation:

        @@ -158,7 +158,7 @@ something else. In our case it will be a class called diagonal.

        a diagonal is the matrix itself and the “start” of the diagonal (row and column). And we also know that the diagonal must provide some iterators for the std::sort function. We can start with the following skeleton:

        +
        template <typename T>
        class diagonal {
        using matrix_t = std::vector<std::vector<T>>;

        matrix_t& matrix;
        std::size_t x;
        std::size_t y;

        public:
        diagonal(matrix_t& matrix, std::size_t x, std::size_t y)
        : matrix(matrix)
        , x(x)
        , y(y)
        {
        }

        diagonal_iter begin() const { return diagonal_iter { matrix, x, y }; }

        diagonal_iter end() const
        {
        auto max_x = matrix[y].size();
        auto max_y = matrix.size();

        // we need to find the distance in which we get out of bounds (either in
        // column or row)
        auto steps = std::min(max_x - x, max_y - y);

        return diagonal_iter { matrix, x + steps, y + steps };
        }
        };

Initialization is rather simple, we just “keep” the stuff we get; begin is the simplest, we just delegate.

        In case of the end, it gets more complicated. We need to know where is the “end” @@ -185,7 +185,7 @@ be used in std::sort. We need the usual operations like:

        We will also add all the types that our iterator uses with the category of the iterator, i.e. what interface it supports:

        +
        class diagonal_iter {
        // we need to keep reference to the matrix itself
        matrix_t& m;

        // we need to be able to tell our current position
        std::size_t x;
        std::size_t y;

        public:
        using difference_type = std::ptrdiff_t;
        using value_type = T;
        using pointer = T*;
        using reference = T&;
        using iterator_category = std::random_access_iterator_tag;

        diagonal_iter(matrix_t& matrix,
        std::size_t x,
        std::size_t y)
        : m(matrix)
        , x(x)
        , y(y)
        {
        }

        bool operator==(const diagonal_iter& rhs) const
        {
        return x == rhs.x && y == rhs.y && m == rhs.m;
        }

        diagonal_iter& operator++()
        {
        // we are moving along the diagonal, so we increment both ‹x› and ‹y› at
        // the same time
        x++;
        y++;
        return *this;
        }

        reference operator*() const { return m[y][x]; }
        };

        This is pretty similar to the previous iterator, but now we need to implement the remaining requirements of the random access iterator. Let's see what those are:

          @@ -196,16 +196,16 @@ remaining requirements of the random access iterator. Let's see wh
        • define an ordering on the iterators

        Let's fill them in:

        +
        class diagonal_iter {
        // we need to keep reference to the matrix itself
        matrix_t& m;

        // we need to be able to tell our current position
        std::size_t x;
        std::size_t y;

        public:
        using difference_type = std::ptrdiff_t;
        using value_type = T;
        using pointer = T*;
        using reference = T&;
        using iterator_category = std::random_access_iterator_tag;

        diagonal_iter(matrix_t& matrix,
        std::size_t x,
        std::size_t y)
        : m(matrix)
        , x(x)
        , y(y)
        {
        }

        bool operator==(const diagonal_iter& rhs) const
        {
        return x == rhs.x && y == rhs.y && m == rhs.m;
        }

        diagonal_iter& operator++()
        {
        // we are moving along the diagonal, so we increment both ‹x› and ‹y› at
        // the same time
        x++;
        y++;
        return *this;
        }

        reference operator*() const { return m[y][x]; }

        // exactly opposite to the incrementation
        diagonal_iter operator--()
        {
        x--;
        y--;
        return *this;
        }

        // moving ‹n› steps back is same as calling decrementation ‹n›-times, so we
        // can just return a new iterator and subtract ‹n› from both coordinates in
        // the matrix
        diagonal_iter operator-(difference_type n) const
        {
        return diagonal_iter { m, x - n, y - n };
        }

        // here we assume that we are given two iterators on the same diagonal
        difference_type operator-(const diagonal_iter& rhs) const
        {
        assert(m == rhs.m);
        return x - rhs.x;
        }

        // counterpart of moving ‹n› steps backwards
        diagonal_iter operator+(difference_type n) const
        {
        return diagonal_iter { m, x + n, y + n };
        }

        // we compare the coordinates, and also assume that those 2 iterators are
        // lying on the same diagonal
        bool operator<(const diagonal_iter& rhs) const
        {
        assert(m == rhs.m);
        return x < rhs.x && y < rhs.y;
        }
        };

        At this point we could probably try and compile it, right? If we do so, we will get yelled at by a compiler for the following reasons:

        +
        /usr/bin/../lib/gcc/x86_64-redhat-linux/12/../../../../include/c++/12/bits/stl_algo.h:1792:11: error: object of type 'diagonal<int>::diagonal_iter' cannot be assigned because its copy assignment operator is implicitly deleted [clang-diagnostic-error]
        __last = __next;
        ^
        /usr/bin/../lib/gcc/x86_64-redhat-linux/12/../../../../include/c++/12/bits/stl_algo.h:1817:11: note: in instantiation of function template specialization 'std::__unguarded_linear_insert<diagonal<int>::diagonal_iter, __gnu_cxx::__ops::_Val_less_iter>' requested here
        std::__unguarded_linear_insert(__i,
        ^
        /usr/bin/../lib/gcc/x86_64-redhat-linux/12/../../../../include/c++/12/bits/stl_algo.h:1849:9: note: in instantiation of function template specialization 'std::__insertion_sort<diagonal<int>::diagonal_iter, __gnu_cxx::__ops::_Iter_less_iter>' requested here
        std::__insertion_sort(__first, __first + int(_S_threshold), __comp);
        ^
        /usr/bin/../lib/gcc/x86_64-redhat-linux/12/../../../../include/c++/12/bits/stl_algo.h:1940:9: note: in instantiation of function template specialization 'std::__final_insertion_sort<diagonal<int>::diagonal_iter, __gnu_cxx::__ops::_Iter_less_iter>' requested here
        std::__final_insertion_sort(__first, __last, __comp);
        ^
        /usr/bin/../lib/gcc/x86_64-redhat-linux/12/../../../../include/c++/12/bits/stl_algo.h:4820:12: note: in instantiation of function template specialization 'std::__sort<diagonal<int>::diagonal_iter, __gnu_cxx::__ops::_Iter_less_iter>' requested here
        std::__sort(__first, __last, __gnu_cxx::__ops::__iter_less_iter());
        ^
        matrix-sort.cpp:161:18: note: in instantiation of function template specialization 'std::sort<diagonal<int>::diagonal_iter>' requested here
        std::sort(d.begin(), d.end());
        ^
        matrix-sort.cpp:17:19: note: copy assignment operator of 'diagonal_iter' is implicitly deleted because field 'm' is of reference type 'diagonal<int>::matrix_t &' (aka 'vector<std::vector<int>> &')
        matrix_t& m;
        ^
        /usr/bin/../lib/gcc/x86_64-redhat-linux/12/../../../../include/c++/12/bits/stl_algo.h:1830:2: error: no matching function for call to '__unguarded_linear_insert' [clang-diagnostic-error]
        std::__unguarded_linear_insert(__i,
        ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
        /usr/bin/../lib/gcc/x86_64-redhat-linux/12/../../../../include/c++/12/bits/stl_algo.h:1850:9: note: in instantiation of function template specialization 'std::__unguarded_insertion_sort<diagonal<int>::diagonal_iter, __gnu_cxx::__ops::_Iter_less_iter>' requested here
        std::__unguarded_insertion_sort(__first + int(_S_threshold), __last,
        ^
        /usr/bin/../lib/gcc/x86_64-redhat-linux/12/../../../../include/c++/12/bits/stl_algo.h:1940:9: note: in instantiation of function template specialization 'std::__final_insertion_sort<diagonal<int>::diagonal_iter, __gnu_cxx::__ops::_Iter_less_iter>' requested here
        std::__final_insertion_sort(__first, __last, __comp);
        ^
        /usr/bin/../lib/gcc/x86_64-redhat-linux/12/../../../../include/c++/12/bits/stl_algo.h:4820:12: note: in instantiation of function template specialization 'std::__sort<diagonal<int>::diagonal_iter, __gnu_cxx::__ops::_Iter_less_iter>' requested here
        std::__sort(__first, __last, __gnu_cxx::__ops::__iter_less_iter());
        ^
        matrix-sort.cpp:161:18: note: in instantiation of function template specialization 'std::sort<diagonal<int>::diagonal_iter>' requested here
        std::sort(d.begin(), d.end());
        ^
        /usr/bin/../lib/gcc/x86_64-redhat-linux/12/../../../../include/c++/12/bits/stl_algo.h:1782:5: note: candidate template ignored: substitution failure [with _RandomAccessIterator = diagonal<int>::diagonal_iter, _Compare = __gnu_cxx::__ops::_Val_less_iter]
        __unguarded_linear_insert(_RandomAccessIterator __last,
        ^
        /usr/bin/../lib/gcc/x86_64-redhat-linux/12/../../../../include/c++/12/bits/stl_algo.h:1923:11: error: object of type 'diagonal<int>::diagonal_iter' cannot be assigned because its copy assignment operator is implicitly deleted [clang-diagnostic-error]
        __last = __cut;
        ^
        /usr/bin/../lib/gcc/x86_64-redhat-linux/12/../../../../include/c++/12/bits/stl_algo.h:1937:9: note: in instantiation of function template specialization 'std::__introsort_loop<diagonal<int>::diagonal_iter, long, __gnu_cxx::__ops::_Iter_less_iter>' requested here
        std::__introsort_loop(__first, __last,
        ^
        /usr/bin/../lib/gcc/x86_64-redhat-linux/12/../../../../include/c++/12/bits/stl_algo.h:4820:12: note: in instantiation of function template specialization 'std::__sort<diagonal<int>::diagonal_iter, __gnu_cxx::__ops::_Iter_less_iter>' requested here
        std::__sort(__first, __last, __gnu_cxx::__ops::__iter_less_iter());
        ^
        matrix-sort.cpp:161:18: note: in instantiation of function template specialization 'std::sort<diagonal<int>::diagonal_iter>' requested here
        std::sort(d.begin(), d.end());
        ^
        matrix-sort.cpp:17:19: note: copy assignment operator of 'diagonal_iter' is implicitly deleted because field 'm' is of reference type 'diagonal<int>::matrix_t &' (aka 'vector<std::vector<int>> &')
        matrix_t& m;
        ^

        That's a lot of noise, isn't it? Let's focus on the important parts:

        +
        /usr/bin/../lib/gcc/x86_64-redhat-linux/12/../../../../include/c++/12/bits/stl_algo.h:1792:11: error: object of type 'diagonal<int>::diagonal_iter' cannot be assigned because its copy assignment operator is implicitly deleted [clang-diagnostic-error]

        matrix-sort.cpp:17:19: note: copy assignment operator of 'diagonal_iter' is implicitly deleted because field 'm' is of reference type 'diagonal<int>::matrix_t &' (aka 'vector<std::vector<int>> &')
        matrix_t& m;
        ^

        Ah! We have a reference in our iterator, and this prevents us from having a copy assignment operator (that is used “somewhere” in the sorting algorithm). Well… Let's just wrap it!

        +
        # we need to keep a different type than reference
        - matrix_t& m;
        + std::reference_wrapper<matrix_t> m;

        # in comparison we need to get the reference out of the wrapper first
        - return x == rhs.x && y == rhs.y && m == rhs.m;
        + return x == rhs.x && y == rhs.y && m.get() == rhs.m.get();

        # same when we return a reference to the “cell” in the matrix
        - reference operator*() const { return m[y][x]; }
        + reference operator*() const { return m.get()[y][x]; }

        # and finally in the assertions that we set for the “distance” and “less than”
        - assert(m == rhs.m);
        + assert(m.get() == rhs.m.get());

        We're done now! We have written an iterator over diagonals for a 2D vector. You can have a look at the final result here.

        Footnotes

          diff --git a/blog/rss.xml b/blog/rss.xml index 8c4e84a..769d626 100644 --- a/blog/rss.xml +++ b/blog/rss.xml @@ -36,7 +36,7 @@ very close to installing the desired package.

        So in shell you would do

        +
        # dnf copr enable ‹copr-repository›
        # dnf install ‹package-from-the-repository›

And… that's it! Nothing else needed! Simple, right? And it is literally the same process as you would do for the PPA.

        AUR

        On the other hand, if you are familiar with the archLinux, you definitely know @@ -119,17 +119,17 @@ each row and column to determine the boundaries, it was very easy to do for the rows (cause each row is a Vec element), but not for the columns, since they span multiple rows.

        For this use case I have implemented my own column iterator:

        +
        pub struct ColumnIterator<'a, T> {
        map: &'a [Vec<T>],
        column: usize,

        i: usize,
        }

        impl<'a, T> ColumnIterator<'a, T> {
        pub fn new(map: &'a [Vec<T>], column: usize) -> ColumnIterator<'a, T> {
        Self { map, column, i: 0 }
        }
        }

        impl<'a, T> Iterator for ColumnIterator<'a, T> {
        type Item = &'a T;

        fn next(&mut self) -> Option<Self::Item> {
        if self.i >= self.map.len() {
        return None;
        }

        self.i += 1;
        Some(&self.map[self.i - 1][self.column])
        }
        }

        Given this piece of an iterator, it is very easy to factor out the common functionality between the rows and columns into:


        And then use it as such:

        // construct all horizontal boundaries
        (0..map.len()).for_each(|row| {
        find_boundaries(
        Orientation::horizontal,
        &mut map[row].iter(),
        map[row].len(),
        row,
        );
        });

        // construct all vertical boundaries
        (0..map[0].len()).for_each(|col| {
        find_boundaries(
        Orientation::vertical,
        &mut ColumnIterator::new(&map, col),
        map.len(),
        col,
        );
        });

        Walking around the map

        Once the 2nd part got introduced, you start to think about a way how not to copy-paste a lot of stuff (I haven't avoided it anyways…). In this problem, I've chosen to introduce a trait (i.e. interface) for 2D and 3D walker.

        trait Wrap: Clone {
        type State;

        // simulation
        fn is_blocked(&self) -> bool;
        fn step(&mut self, steps: isize);
        fn turn_left(&mut self);
        fn turn_right(&mut self);

        // movement
        fn next(&self) -> (Self::State, Direction);

        // final answer
        fn answer(&self) -> Output;
        }

        Each walker maintains its own state and also provides the functions that are used during the simulation. The “promised” methods are separated into:

• implementation-specific walker

        Both 2D and 3D versions borrow the original input and therefore you must annotate the lifetime of it:

        struct Wrap2D<'a> {
        input: &'a Input,
        position: Position,
        direction: Direction,
        }
        impl<'a> Wrap2D<'a> {
        fn new(input: &'a Input) -> Wrap2D<'a> {
        // …

        Problems

I have used a lot of closures for this problem and once I introduced a parameter that was of unknown type (apart from the fact it implements a specific trait), I

of rather smart suggestions.

        char was the .is_digit() function that takes a radix as a parameter. Clippy noticed that I use radix = 10 and suggested switching to .is_ascii_digit() that does exactly the same thing:

        -                .take_while(|c| c.is_digit(10))
        + .take_while(|c| c.is_ascii_digit())

        Another useful suggestion appeared when working with the iterators and I wanted to get the nn-th element from it. You know the .skip(), you know the .next(), just “slap” them together and we're done for 😁 Well, I got suggested to use .nth() that does exactly the combination of the two mentioned methods on iterators:

        -            match it.clone().skip(skip).next().unwrap() {
        + match it.clone().nth(skip).unwrap() {

        Day 23: Unstable Diffusion

        tl;dr

        Simulating movement of elves around with a set of specific rules.

        Solution

minimum that are, of course, exactly the same except for initial values and comparators, it looks like a rather simple fix, but typing in Rust is something else, right? In the end I settled for a function that computes both boundaries without any duplication while using a closure:

        fn get_bounds(positions: &Input) -> (Vector2D<isize>, Vector2D<isize>) {
        let f = |init, cmp: &dyn Fn(isize, isize) -> isize| {
        positions
        .iter()
        .fold(Vector2D::new(init, init), |acc, elf| {
        Vector2D::new(cmp(acc.x(), elf.x()), cmp(acc.y(), elf.y()))
        })
        };

        (f(isize::MAX, &min::<isize>), f(isize::MIN, &max::<isize>))
        }

        This function returns a pair of 2D vectors that represent opposite points of the bounding rectangle of all elves.

        You might ask why would we need a closure and the answer is that positions cannot be captured from within the nested function, only via closure. One more fun fact on top of that is the type of the comparator

        &dyn Fn(isize, isize) -> isize

Once we remove the dyn keyword, the compiler yells at us and also tells us how to get a more thorough explanation of the error by running

        $ rustc --explain E0782

        which shows us

        Trait objects must include the dyn keyword.

        Erroneous code example:

        trait Foo {}
        fn test(arg: Box<Foo>) {} // error!

        Trait objects are a way to call methods on types that are not known until runtime but conform to some trait.

Trait objects should be formed with Box<dyn Foo>, but in the code above

        This makes it harder to see that arg is a trait object and not a simply a heap allocated type called Foo.

        To fix this issue, add dyn before the trait name.

        trait Foo {}
        fn test(arg: Box<dyn Foo>) {} // ok!

        This used to be allowed before edition 2021, but is now an error.

        Rant

        Not all of the explanations are helpful though, in some cases they might be even more confusing than helpful, since they address very simple use cases.

        As you can see, even in this case there are two sides to the explanations:

cleaned it up a bit. The changed version is shown here and the original was just more verbose.

I'll skip the boring parts of checking bounds and entry/exit of the basin 😉 We can easily calculate positions of the blizzards using modular arithmetic:

        impl Index<Position> for Basin {
        type Output = char;

        fn index(&self, index: Position) -> &Self::Output {
        // ‹skipped boring parts›

        // We need to account for the loops of the blizzards
        let width = self.cols - 2;
        let height = self.rows - 2;

        let blizzard_origin = |size, d, t, i| ((i - 1 + size + d * (t % size)) % size + 1) as usize;
        [
        (
        index.y() as usize,
        blizzard_origin(width, -1, index.z(), index.x()),
        '>',
        ),
        (
        index.y() as usize,
        blizzard_origin(width, 1, index.z(), index.x()),
        '<',
        ),
        (
        blizzard_origin(height, -1, index.z(), index.y()),
        index.x() as usize,
        'v',
        ),
        (
        blizzard_origin(height, 1, index.z(), index.y()),
        index.x() as usize,
        '^',
        ),
        ]
        .iter()
        .find_map(|&(y, x, direction)| {
        if self.map[y][x] == direction {
        Some(&self.map[y][x])
        } else {
        None
        }
        })
        .unwrap_or(&'.')
        }
        }

        As you can see, there is an expression for calculating the original position and it's used multiple times, so why not take it out to a lambda, right? 😉

I couldn't get the rustfmt to format the for-loop nicely, so I've just

algorithm, since it better reflects the cost function.

        a priority for the queue.

        Whereas with the A*, I have chosen to use both time and Manhattan distance that promotes vertices closer to the exit and with a minimum time taken.

        Cost function is, of course, a closure 😉

        let cost = |p: Position| p.z() as usize + exit.y().abs_diff(p.y()) + exit.x().abs_diff(p.x());

        And also for checking the possible moves from the current vertex, I have implemented, yet another, closure that yields an iterator with the next moves:

        let next_positions = |p| {
        [(0, 0, 1), (0, -1, 1), (0, 1, 1), (-1, 0, 1), (1, 0, 1)]
        .iter()
        .filter_map(move |&(x, y, t)| {
        let next_p = p + Vector3D::new(x, y, t);

        if basin[next_p] == '.' {
        Some(next_p)
        } else {
        None
        }
        })
        };

        Min-heap

In this case I had a need to use the priority queue taking the elements with the lowest cost as the prioritized ones. Rust only offers you the BinaryHeap and

the BinaryHeap). However the wrapping affects the type of the heap: popping the most prioritized elements yields values wrapped in the Reverse.

        For this purpose I have just taken the max-heap and wrapped it as a whole in a separate structure providing just the desired methods:

        use std::cmp::{Ord, Reverse};
        use std::collections::BinaryHeap;

        pub struct MinHeap<T> {
        heap: BinaryHeap<Reverse<T>>,
        }

        impl<T: Ord> MinHeap<T> {
        pub fn new() -> MinHeap<T> {
        MinHeap {
        heap: BinaryHeap::new(),
        }
        }

        pub fn push(&mut self, item: T) {
        self.heap.push(Reverse(item))
        }

        pub fn pop(&mut self) -> Option<T> {
        self.heap.pop().map(|Reverse(x)| x)
        }
        }

        impl<T: Ord> Default for MinHeap<T> {
        fn default() -> Self {
        Self::new()
        }
        }

        Rest is just the algorithm implementation which is not that interesting.

        Day 25: Full of Hot Air

        tl;dr

Playing around with numbers in a special base.

with a rather easy solution, as the last day always seems to be.

        that sounds familiar, doesn't it? Let's introduce a structure for the SNAFU numbers and implement the traits that we need.

        Let's start with a structure:

        #[derive(Debug, PartialEq, Eq, PartialOrd, Ord)]
        struct SNAFU {
        value: i64,
        }

        Converting from &str

We will start by implementing the FromStr trait that will help us parse our input. This is rather simple, I can just take the from_snafu function, copy-paste it

trait for the SNAFU.
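The resulting implementation is not shown in this excerpt, so here is a minimal sketch of what the parsing could look like, assuming the SNAFU struct from above and the usual 2/1/0/-/= digit encoding from the puzzle (the struct is repeated only to keep the snippet self-contained):

use std::str::FromStr;

struct SNAFU {
    value: i64,
}

impl FromStr for SNAFU {
    type Err = String;

    fn from_str(s: &str) -> Result<Self, Self::Err> {
        let mut value = 0;

        // SNAFU is base 5, but with the digits 2, 1, 0, - (minus one) and = (minus two)
        for c in s.chars() {
            let digit = match c {
                '2' => 2,
                '1' => 1,
                '0' => 0,
                '-' => -1,
                '=' => -2,
                _ => return Err(format!("invalid SNAFU digit: {c}")),
            };
            value = value * 5 + digit;
        }

        Ok(SNAFU { value })
    }
}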

        After those changes we need to adjust the code and tests.

        Parsing of the input is very easy, before we have used the lines, now we parse everything:

             fn parse_input<P: AsRef<Path>>(pathname: P) -> Input {
        - file_to_lines(pathname)
        + file_to_structs(pathname)
        }

        Part 1 needs to be adjusted a bit too:

             fn part_1(input: &Input) -> Output {
        - to_snafu(input.iter().map(|s| from_snafu(s)).sum())
        + SNAFU::from(input.iter().map(|s| s.value).sum::<i64>()).to_string()
        }

        You can also see that it simplifies the meaning a bit and it is more explicit than the previous versions.

        And for the tests:

             #[test]
        fn test_from() {
        - for (n, s) in EXAMPLES.iter() {
        - assert_eq!(from_snafu(s), *n);
        + for (&n, s) in EXAMPLES.iter() {
        + assert_eq!(s.parse::<SNAFU>().unwrap().value, n);
        }
        }

        #[test]
        fn test_to() {
        - for (n, s) in EXAMPLES.iter() {
        - assert_eq!(to_snafu(*n), s.to_string());
        + for (&n, s) in EXAMPLES.iter() {
        + assert_eq!(SNAFU::from(n).to_string(), s.to_string());
        }

        Summary

        Let's wrap the whole thing up! Keeping in mind both AoC and the Rust…

        Finished advent calendar :smile:

to implement the indexing in a graph, rather than explicitly access the underlying data structure.

        Here you can see a rather short snippet from the solution that allows you to “index” the graph:

        impl Index<&str> for Graph {
        type Output = Vertex;

        fn index(&self, index: &str) -> &Self::Output {
        &self.g[index]
        }
        }

        Cartesian product

During the implementation I had to use the Floyd-Warshall algorithm for finding the shortest path between pairs of vertices and used the iproduct! macro

also makes it harder to evaluate algorithmically, since you need to check the different ways the work can be split.
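As an illustration (this is not the post's actual code), iproduct! from the itertools crate collapses the classic triple nested loop of Floyd-Warshall into a single iterator; the tiny distance matrix below is made up:

use itertools::iproduct;

fn main() {
    // made-up distance matrix for a 3-vertex graph; usize::MAX / 2 plays the role of "infinity"
    let inf = usize::MAX / 2;
    let mut dist = vec![vec![0, 1, inf], vec![1, 0, 2], vec![inf, 2, 0]];
    let n = dist.len();

    // iproduct! yields every (k, i, j) combination, i.e. the Cartesian product,
    // so the three nested for-loops become a single one
    for (k, i, j) in iproduct!(0..n, 0..n, 0..n) {
        let through_k = dist[i][k] + dist[k][j];
        if through_k < dist[i][j] {
            dist[i][j] = through_k;
        }
    }

    // the shortest path 0 -> 2 goes through vertex 1
    assert_eq!(dist[0][2], 3);
}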

Being affected by functional programming brain damage™️, I have chosen to do this part with a function that returns an iterator over the possible ways:

        fn pairings(
        valves: &BTreeSet<String>,
        ) -> impl Iterator<Item = (BTreeSet<String>, BTreeSet<String>)> + '_ {
        let mapping = valves.iter().collect_vec();

        let max_mask = 1 << (valves.len() - 1);

        (0..max_mask).map(move |mask| {
        let mut elephant = BTreeSet::new();
        let mut human = BTreeSet::new();

        for (i, &v) in mapping.iter().enumerate() {
        if (mask & (1 << i)) == 0 {
        human.insert(v.clone());
        } else {
        elephant.insert(v.clone());
        }
        }

        (human, elephant)
        })
        }

        Day 17: Pyroclastic Flow

        tl;dr

        Simulating an autonomous Tetris where pieces get affected by a series of jets of hot gas.


      iterate through the positions that can actually collide with the wall or other piece.

      To get the desired behaviour, you can just compose few smaller functions:

      fn occupied(shape: &[Vec<char>]) -> impl Iterator<Item = Position> + '_ {
      shape.iter().enumerate().flat_map(|(y, row)| {
      row.iter().enumerate().filter_map(move |(x, c)| {
      if c == &'#' {
      Some(Vector2D::new(x as isize, y as isize))
      } else {
      None
      }
      })
      })
      }

      In the end, we get relative positions which we can adjust later when given the specific positions from iterator. You can see some interesting parts in this:

and also unwraps the values from Some(…).

jets that move our pieces around. Initially I have implemented my own infinite iterator that just yields the indices. It is a very simple, yet powerful, piece of code:

        struct InfiniteIndex {
        size: usize,
        i: usize,
        }

        impl InfiniteIndex {
        fn new(size: usize) -> InfiniteIndex {
        InfiniteIndex { size, i: size - 1 }
        }
        }

        impl Iterator for InfiniteIndex {
        type Item = usize;

        fn next(&mut self) -> Option<Self::Item> {
        self.i = (self.i + 1) % self.size;
        Some(self.i)
        }
        }

        However when I'm looking at the code now, it doesn't really make much sense… Guess what, we can use a built-in function that is implemented on iterators for that! The function is called .cycle()
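A small sketch (not the original solution) of what that replacement boils down to:

fn main() {
    let jets = ['<', '>', '>', '<'];

    // .cycle() repeats the iterator forever, so the hand-written wrap-around
    // index bookkeeping is no longer needed
    let mut infinite_jets = jets.iter().cycle();

    let first_nine: Vec<char> = infinite_jets.by_ref().take(9).copied().collect();
    assert_eq!(first_nine, vec!['<', '>', '>', '<', '<', '>', '>', '<', '<']);
}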

the Rc<RefCell<T>>. In the end I failed on a rather interesting issue with the .borrow_mut() method being used on Rc<RefCell<T>>.

        .borrow_mut()

        Consider the following snippet of the code (taken from the documentation):

        use std::cell::{RefCell, RefMut};
        use std::collections::HashMap;
        use std::rc::Rc;
        // use std::borrow::BorrowMut;

        fn main() {
        let shared_map: Rc<RefCell<_>> = Rc::new(RefCell::new(HashMap::new()));
        // Create a new block to limit the scope of the dynamic borrow
        {
        let mut map: RefMut<_> = shared_map.borrow_mut();
        map.insert("africa", 92388);
        map.insert("kyoto", 11837);
        map.insert("piccadilly", 11826);
        map.insert("marbles", 38);
        }

        // Note that if we had not let the previous borrow of the cache fall out
        // of scope then the subsequent borrow would cause a dynamic thread panic.
        // This is the major hazard of using `RefCell`.
        let total: i32 = shared_map.borrow().values().sum();
        println!("{total}");
        }

        We allocate a hash map on the heap and then in the inner block, we borrow it as a mutable reference, so that we can use it.

        note

        It is a very primitive example for Rc<RefCell<T>> and mutable borrow.

        If you uncomment the 4th line with use std::borrow::BorrowMut;, you cannot compile the code anymore, because of

           Compiling playground v0.0.1 (/playground)
        error[E0308]: mismatched types
        --> src/main.rs:10:34
        |
        10 | let mut map: RefMut<_> = shared_map.borrow_mut();
        | --------- ^^^^^^^^^^^^^^^^^^^^^^^ expected struct `RefMut`, found mutable reference
        | |
        | expected due to this
        |
        = note: expected struct `RefMut<'_, _>`
        found mutable reference `&mut Rc<RefCell<HashMap<_, _>>>`

        error[E0599]: no method named `insert` found for struct `RefMut<'_, _>` in the current scope
        --> src/main.rs:11:13
        |
        11 | map.insert("africa", 92388);
        | ^^^^^^ method not found in `RefMut<'_, _>`

        error[E0599]: no method named `insert` found for struct `RefMut<'_, _>` in the current scope
        --> src/main.rs:12:13
        |
        12 | map.insert("kyoto", 11837);
        | ^^^^^^ method not found in `RefMut<'_, _>`

        error[E0599]: no method named `insert` found for struct `RefMut<'_, _>` in the current scope
        --> src/main.rs:13:13
        |
        13 | map.insert("piccadilly", 11826);
        | ^^^^^^ method not found in `RefMut<'_, _>`

        error[E0599]: no method named `insert` found for struct `RefMut<'_, _>` in the current scope
        --> src/main.rs:14:13
        |
        14 | map.insert("marbles", 38);
        | ^^^^^^ method not found in `RefMut<'_, _>`

        Some errors have detailed explanations: E0308, E0599.
        For more information about an error, try `rustc --explain E0308`.
        error: could not compile `playground` due to 5 previous errors

It might seem a bit ridiculous. However, I got to a point where the compiler suggested use std::borrow::BorrowMut; and it resulted in breaking parts of the code that worked previously. I think it may be a good idea to go over what is

method. OK, but how can we call it on the Rc<T>? Easily! I have not been able to find a lot on this trait. My guess is that it provides a method instead of a syntactic sugar (&mut x) for the mutable borrow. And also it provides default implementations for the types:

        impl BorrowMut<str> for String

        impl<T> BorrowMut<T> for &mut T
        where
        T: ?Sized,

        impl<T> BorrowMut<T> for T
        where
        T: ?Sized,

        impl<T, A> BorrowMut<[T]> for Vec<T, A>
        where
        A: Allocator,

        impl<T, A> BorrowMut<T> for Box<T, A>
        where
        A: Allocator,
        T: ?Sized,

        impl<T, const N: usize> BorrowMut<[T]> for [T; N]
        Conflict

        Now the question is why did it break the code… My first take was that the type Rc<RefCell<T>> has some specialized implementation of the .borrow_mut() and the use overrides it with the default, which is true in a sense. However there is no specialized implementation. Let's have a look at the trait and the type signature on the RefCell<T>:

        // trait
        pub trait BorrowMut<Borrowed>: Borrow<Borrowed>
        where
        Borrowed: ?Sized,
        {
        fn borrow_mut(&mut self) -> &mut Borrowed;
        }

        // ‹RefCell<T>.borrow_mut()› type signature
        pub fn borrow_mut(&self) -> RefMut<'_, T>

        I think that we can definitely agree on the fact that RefMut<'_, T> is not the RefCell<T>.
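To make the difference tangible, here is a small sketch of my own (it is not from the original post): the fully qualified form always resolves to RefCell's inherent method, no matter which traits are in scope:

use std::cell::RefCell;
use std::rc::Rc;

fn main() {
    let shared: Rc<RefCell<i32>> = Rc::new(RefCell::new(0));

    // Method-call syntax: with no `use std::borrow::BorrowMut;` in scope this
    // auto-derefs through the Rc and calls RefCell's inherent borrow_mut(),
    // handing us a RefMut<'_, i32>. With the trait imported, the same call
    // would pick the blanket `impl<T> BorrowMut<T> for T` on the Rc itself.
    *shared.borrow_mut() += 1;

    // Fully qualified call: always the inherent method, regardless of imports
    *RefCell::borrow_mut(&*shared) += 1;

    assert_eq!(*shared.borrow(), 2);
}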

In my opinion, RefCell<T> implements a separate .borrow_mut() rather

that you can use the macro machinery to save yourself some typing. If you have an enumeration whose default variant doesn't carry any data, you can just do2:

        #[derive(Default)]
        enum Color {
        #[default]
        White,
        Gray,
        Black,
        }

        Abusing negation

If you want to use a unary minus operator on your own type, you can implement a Neg trait3. I was dealing with a binary tree and needed a way how to look
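The tree type itself isn't shown in this excerpt, so here is a minimal, purely illustrative sketch of the mechanics on a toy type:

use std::ops::Neg;

// toy type, only to show how the trait is wired up
#[derive(Debug, Clone, Copy, PartialEq)]
struct Direction(i32);

impl Neg for Direction {
    type Output = Direction;

    // the unary minus now flips the direction
    fn neg(self) -> Self::Output {
        Direction(-self.0)
    }
}

fn main() {
    let left = Direction(-1);
    assert_eq!(-left, Direction(1));
}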

order and return the resulting matrix.

Image describing the problem

        Skeleton and initial adjustments

        We are given the following skeleton for the C++ and the given challenge:

        class Solution {
        public:
        vector<vector<int>> diagonalSort(vector<vector<int>>& mat) {

        }
        };

The task is to sort the passed matrix diagonally and then return it. First of all, I don't like to solve this in a web browser, so we'll need to adjust it accordingly for running it locally. We'll start by including the vector header, using fully-qualified namespaces1, and also adding a few tests:

        #include <cassert>
        #include <vector>

        using matrix = std::vector<std::vector<int>>;

        class Solution {
        public:
        matrix diagonalSort(matrix& mat)
        {
        }
        };

        static void test_case_1()
        {
        // Input: mat = [[3,3,1,1],[2,2,1,2],[1,1,1,2]]
        // Output: [[1,1,1,1],[1,2,2,2],[1,2,3,3]]

        Solution s;
        assert((s.diagonalSort(std::vector { std::vector { 3, 3, 1, 1 },
        std::vector { 2, 2, 1, 2 },
        std::vector { 1, 1, 1, 2 } })
        == std::vector { std::vector { 1, 1, 1, 1 },
        std::vector { 1, 2, 2, 2 },
        std::vector { 1, 2, 3, 3 } }));
        }

        static void test_case_2()
        {
        // Input: mat =
        // [[11,25,66,1,69,7],[23,55,17,45,15,52],[75,31,36,44,58,8],[22,27,33,25,68,4],[84,28,14,11,5,50]]
        // Output:
        // [[5,17,4,1,52,7],[11,11,25,45,8,69],[14,23,25,44,58,15],[22,27,31,36,50,66],[84,28,75,33,55,68]]

        Solution s;
        assert((s.diagonalSort(std::vector { std::vector { 11, 25, 66, 1, 69, 7 },
        std::vector { 23, 55, 17, 45, 15, 52 },
        std::vector { 75, 31, 36, 44, 58, 8 },
        std::vector { 22, 27, 33, 25, 68, 4 },
        std::vector { 84, 28, 14, 11, 5, 50 } })
        == std::vector { std::vector { 5, 17, 4, 1, 52, 7 },
        std::vector { 11, 11, 25, 45, 8, 69 },
        std::vector { 14, 23, 25, 44, 58, 15 },
        std::vector { 22, 27, 31, 36, 50, 66 },
        std::vector { 84, 28, 75, 33, 55, 68 } }));
        }

        int main()
        {
        test_case_1();
        test_case_2();

        return 0;
        }

We need to return the matrix, but we're given a reference to the input matrix. We can easily abuse C++ here and just switch the reference to a value: this way the matrix will be copied when passed to the function, so we can sort the copy and just return it back. We also get yelled at by the compiler for the fact that the method doesn't return anything yet, so to make it "shut up" we will just return the input for now:

        -    matrix diagonalSort(matrix& mat)
        + matrix diagonalSort(matrix mat)
        {
        + return mat;
        }

        Now, we get the copy and we're good to go.

        Naïve solution

        As you may know, C++ offers a plethora of functions that can be used to your advantage, given that you know how to “bend” the data structures accordingly.

        What does that mean for us? Well, we have an std::sort, we can use it, right? Let's have a look at it:

        template< class RandomIt >
        void sort( RandomIt first, RandomIt last );

This overload is more than we need. What does it do? It just sorts the elements in the range [first, last) using operator< on them. We can't sort the whole matrix using this, but… we can sort just »one« diagonal without doing much work

up, i.e. “compiler-assisted development”3. And that way we get

        matrix diagonalSort(matrix mat)
        {
        // we iterate over the diagonals
        for (auto d : diagonals(mat)) {
        // and we sort each diagonal
        std::sort(d.begin(), d.end());
        }

        // we take the matrix by copy, so we can sort in-situ and return the copy
        // that we sorted
        return mat;
        }

        This solution looks very simple, doesn't it? Well, cause it is. Let's try compiling it:

        matrix-sort.cpp:11:23: error: use of undeclared identifier 'diagonals' [clang-diagnostic-error]
        for (auto d : diagonals(mat)) {
        ^
        Found compiler error(s).
        make: *** [makefile:14: tidy] Error 1

        OK, seems about right. We haven't implemented the diagonals yet. And based on what we've written so far, we need a function or a class diagonals that will give us the diagonals we need.

do such functionality for a matrix of any type, not just the int
      • get the beginning
      • get the end (the “sentinel”)
      template <typename T>
      class diagonals {
      using matrix_t = std::vector<std::vector<T>>;

      matrix_t& _matrix;

      public:
      diagonals(matrix_t& m)
      : _matrix(m)
      {
      }
      diagonals_iter begin()
      {
      /* TODO */
      }
      diagonals_iter end()
      {
      /* TODO */
      }
      };

      Now we have a diagonals that we can use to go through the diagonals. We haven't implemented the core of it yet. Let's go through what we have for now.

We have a templated class with templated T that is used as a placeholder for any

in the first row, followed by the rest of the diagonals in the first column.

      need to know which diagonal is next. For that purpose we will pass the indices of the first cell on the diagonal. That way we can always tell how to move forward.

      We will start by updating the begin and end to reflect our choice accordingly.

      diagonals_iter begin() { return diagonals_iter { _matrix, 0, 0 }; }
      diagonals_iter end() { return diagonals_iter { _matrix, 0, _matrix.size() }; }

For the begin we return the first diagonal that starts at (0, 0). And because we have decided to do the diagonals in the first column at the end, the first diagonal that is not a valid one is the one at (0, height). Apart from the

don't care about the fact they don't need to be sorted.

      We can start with a simple skeleton based on the information that we pass from the diagonals. Also to utilize the matrix_t and also contain implementation details hidden away, we will put this code into the diagonals class.

      class diagonals_iter {
      matrix_t& m;
      std::size_t x;
      std::size_t y;

      public:
      diagonals_iter(matrix_t& matrix, std::size_t x, std::size_t y)
      : m(matrix)
      , x(x)
      , y(y)
      {
      }
      };

      In this case we will be implementing a “simple” forward iterator, so we don't need to implement a lot. Notably it will be:

      • dereference operator (we need to be able to retrieve the objects we iterate over)
      class diagonals_iter {
      matrix_t& m;
      std::size_t x;
      std::size_t y;

      public:
      diagonals_iter(matrix_t& matrix, std::size_t x, std::size_t y)
      : m(matrix)
      , x(x)
      , y(y)
      {
      }

      bool operator!=(const diagonals_iter& rhs) const
      {
      // iterators are not equal if they reference different matrices, or
      // their positions differ
      return m != rhs.m || x != rhs.x || y != rhs.y;
      }

      diagonals_iter& operator++()
      {
      if (y != 0) {
      // iterating through diagonals down the first column
      y++;
      return *this;
      }

      // iterating the diagonals along the first row
      x++;
      if (x == m.front().size()) {
      // switching to diagonals in the first column
      x = 0;
      y++;
      }

      return *this;
      }

      diagonal<T> operator*() const { return diagonal { m, x, y }; }
      };

Let's go one-by-one. The inequality operator is rather simple, just compare the iterator's attributes field-by-field. If you think about it, checking inequality of two 2D vectors may be a bit inefficient, therefore we can reorder the checks and compare the matrices as the last thing.

      -        return m != rhs.m || x != rhs.x || y != rhs.y;
      + return x != rhs.x || y != rhs.y || m != rhs.m;

      Preincrementation is where the magic happens. If you have a better look, you can see two branches of this operation:

something else. In our case it will be a class called diagonal.

        a diagonal is the matrix itself and the “start” of the diagonal (row and column). And we also know that the diagonal must provide some iterators for the std::sort function. We can start with the following skeleton:

        template <typename T>
        class diagonal {
        using matrix_t = std::vector<std::vector<T>>;

        matrix_t& matrix;
        std::size_t x;
        std::size_t y;

        public:
        diagonal(matrix_t& matrix, std::size_t x, std::size_t y)
        : matrix(matrix)
        , x(x)
        , y(y)
        {
        }

        diagonal_iter begin() const { return diagonal_iter { matrix, x, y }; }

        diagonal_iter end() const
        {
        auto max_x = matrix[y].size();
        auto max_y = matrix.size();

        // we need to find the distance in which we get out of bounds (either in
        // column or row)
        auto steps = std::min(max_x - x, max_y - y);

        return diagonal_iter { matrix, x + steps, y + steps };
        }
        };

Initialization is rather simple: we just “keep” the stuff we get. begin is the simplest, we just delegate.

In case of the end, it gets more complicated. We need to know where is the “end”

be used in std::sort. We need the usual operations like:

        We will also add all the types that our iterator uses with the category of the iterator, i.e. what interface it supports:

        class diagonal_iter {
        // we need to keep reference to the matrix itself
        matrix_t& m;

        // we need to be able to tell our current position
        std::size_t x;
        std::size_t y;

        public:
        using difference_type = std::ptrdiff_t;
        using value_type = T;
        using pointer = T*;
        using reference = T&;
        using iterator_category = std::random_access_iterator_tag;

        diagonal_iter(matrix_t& matrix,
        std::size_t x,
        std::size_t y)
        : m(matrix)
        , x(x)
        , y(y)
        {
        }

        bool operator==(const diagonal_iter& rhs) const
        {
        return x == rhs.x && y == rhs.y && m == rhs.m;
        }

        diagonal_iter& operator++()
        {
        // we are moving along the diagonal, so we increment both ‹x› and ‹y› at
        // the same time
        x++;
        y++;
        return *this;
        }

        reference operator*() const { return m[y][x]; }
        };

        This is pretty similar to the previous iterator, but now we need to implement the remaining requirements of the random access iterator. Let's see what those are:

        • define an ordering on the iterators

        Let's fill them in:

        class diagonal_iter {
        // we need to keep reference to the matrix itself
        matrix_t& m;

        // we need to be able to tell our current position
        std::size_t x;
        std::size_t y;

        public:
        using difference_type = std::ptrdiff_t;
        using value_type = T;
        using pointer = T*;
        using reference = T&;
        using iterator_category = std::random_access_iterator_tag;

        diagonal_iter(matrix_t& matrix,
        std::size_t x,
        std::size_t y)
        : m(matrix)
        , x(x)
        , y(y)
        {
        }

        bool operator==(const diagonal_iter& rhs) const
        {
        return x == rhs.x && y == rhs.y && m == rhs.m;
        }

        diagonal_iter& operator++()
        {
        // we are moving along the diagonal, so we increment both ‹x› and ‹y› at
        // the same time
        x++;
        y++;
        return *this;
        }

        reference operator*() const { return m[y][x]; }

        // exactly opposite to the incrementation
        diagonal_iter operator--()
        {
        x--;
        y--;
        return *this;
        }

        // moving ‹n› steps back is same as calling decrementation ‹n›-times, so we
        // can just return a new iterator and subtract ‹n› from both coordinates in
        // the matrix
        diagonal_iter operator-(difference_type n) const
        {
        return diagonal_iter { m, x - n, y - n };
        }

        // here we assume that we are given two iterators on the same diagonal
        difference_type operator-(const diagonal_iter& rhs) const
        {
        assert(m == rhs.m);
        return x - rhs.x;
        }

        // counterpart of moving ‹n› steps backwards
        diagonal_iter operator+(difference_type n) const
        {
        return diagonal_iter { m, x + n, y + n };
        }

        // we compare the coordinates, and also assume that those 2 iterators are
        // lying on the same diagonal
        bool operator<(const diagonal_iter& rhs) const
        {
        assert(m == rhs.m);
        return x < rhs.x && y < rhs.y;
        }
        };

        At this point we could probably try and compile it, right? If we do so, we will get yelled at by a compiler for the following reasons:

        /usr/bin/../lib/gcc/x86_64-redhat-linux/12/../../../../include/c++/12/bits/stl_algo.h:1792:11: error: object of type 'diagonal<int>::diagonal_iter' cannot be assigned because its copy assignment operator is implicitly deleted [clang-diagnostic-error]
        __last = __next;
        ^
        /usr/bin/../lib/gcc/x86_64-redhat-linux/12/../../../../include/c++/12/bits/stl_algo.h:1817:11: note: in instantiation of function template specialization 'std::__unguarded_linear_insert<diagonal<int>::diagonal_iter, __gnu_cxx::__ops::_Val_less_iter>' requested here
        std::__unguarded_linear_insert(__i,
        ^
        /usr/bin/../lib/gcc/x86_64-redhat-linux/12/../../../../include/c++/12/bits/stl_algo.h:1849:9: note: in instantiation of function template specialization 'std::__insertion_sort<diagonal<int>::diagonal_iter, __gnu_cxx::__ops::_Iter_less_iter>' requested here
        std::__insertion_sort(__first, __first + int(_S_threshold), __comp);
        ^
        /usr/bin/../lib/gcc/x86_64-redhat-linux/12/../../../../include/c++/12/bits/stl_algo.h:1940:9: note: in instantiation of function template specialization 'std::__final_insertion_sort<diagonal<int>::diagonal_iter, __gnu_cxx::__ops::_Iter_less_iter>' requested here
        std::__final_insertion_sort(__first, __last, __comp);
        ^
        /usr/bin/../lib/gcc/x86_64-redhat-linux/12/../../../../include/c++/12/bits/stl_algo.h:4820:12: note: in instantiation of function template specialization 'std::__sort<diagonal<int>::diagonal_iter, __gnu_cxx::__ops::_Iter_less_iter>' requested here
        std::__sort(__first, __last, __gnu_cxx::__ops::__iter_less_iter());
        ^
        matrix-sort.cpp:161:18: note: in instantiation of function template specialization 'std::sort<diagonal<int>::diagonal_iter>' requested here
        std::sort(d.begin(), d.end());
        ^
        matrix-sort.cpp:17:19: note: copy assignment operator of 'diagonal_iter' is implicitly deleted because field 'm' is of reference type 'diagonal<int>::matrix_t &' (aka 'vector<std::vector<int>> &')
        matrix_t& m;
        ^
        /usr/bin/../lib/gcc/x86_64-redhat-linux/12/../../../../include/c++/12/bits/stl_algo.h:1830:2: error: no matching function for call to '__unguarded_linear_insert' [clang-diagnostic-error]
        std::__unguarded_linear_insert(__i,
        ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
        /usr/bin/../lib/gcc/x86_64-redhat-linux/12/../../../../include/c++/12/bits/stl_algo.h:1850:9: note: in instantiation of function template specialization 'std::__unguarded_insertion_sort<diagonal<int>::diagonal_iter, __gnu_cxx::__ops::_Iter_less_iter>' requested here
        std::__unguarded_insertion_sort(__first + int(_S_threshold), __last,
        ^
        /usr/bin/../lib/gcc/x86_64-redhat-linux/12/../../../../include/c++/12/bits/stl_algo.h:1940:9: note: in instantiation of function template specialization 'std::__final_insertion_sort<diagonal<int>::diagonal_iter, __gnu_cxx::__ops::_Iter_less_iter>' requested here
        std::__final_insertion_sort(__first, __last, __comp);
        ^
        /usr/bin/../lib/gcc/x86_64-redhat-linux/12/../../../../include/c++/12/bits/stl_algo.h:4820:12: note: in instantiation of function template specialization 'std::__sort<diagonal<int>::diagonal_iter, __gnu_cxx::__ops::_Iter_less_iter>' requested here
        std::__sort(__first, __last, __gnu_cxx::__ops::__iter_less_iter());
        ^
        matrix-sort.cpp:161:18: note: in instantiation of function template specialization 'std::sort<diagonal<int>::diagonal_iter>' requested here
        std::sort(d.begin(), d.end());
        ^
        /usr/bin/../lib/gcc/x86_64-redhat-linux/12/../../../../include/c++/12/bits/stl_algo.h:1782:5: note: candidate template ignored: substitution failure [with _RandomAccessIterator = diagonal<int>::diagonal_iter, _Compare = __gnu_cxx::__ops::_Val_less_iter]
        __unguarded_linear_insert(_RandomAccessIterator __last,
        ^
        /usr/bin/../lib/gcc/x86_64-redhat-linux/12/../../../../include/c++/12/bits/stl_algo.h:1923:11: error: object of type 'diagonal<int>::diagonal_iter' cannot be assigned because its copy assignment operator is implicitly deleted [clang-diagnostic-error]
        __last = __cut;
        ^
        /usr/bin/../lib/gcc/x86_64-redhat-linux/12/../../../../include/c++/12/bits/stl_algo.h:1937:9: note: in instantiation of function template specialization 'std::__introsort_loop<diagonal<int>::diagonal_iter, long, __gnu_cxx::__ops::_Iter_less_iter>' requested here
        std::__introsort_loop(__first, __last,
        ^
        /usr/bin/../lib/gcc/x86_64-redhat-linux/12/../../../../include/c++/12/bits/stl_algo.h:4820:12: note: in instantiation of function template specialization 'std::__sort<diagonal<int>::diagonal_iter, __gnu_cxx::__ops::_Iter_less_iter>' requested here
        std::__sort(__first, __last, __gnu_cxx::__ops::__iter_less_iter());
        ^
        matrix-sort.cpp:161:18: note: in instantiation of function template specialization 'std::sort<diagonal<int>::diagonal_iter>' requested here
        std::sort(d.begin(), d.end());
        ^
        matrix-sort.cpp:17:19: note: copy assignment operator of 'diagonal_iter' is implicitly deleted because field 'm' is of reference type 'diagonal<int>::matrix_t &' (aka 'vector<std::vector<int>> &')
        matrix_t& m;
        ^

        That's a lot of noise, isn't it? Let's focus on the important parts:

        -
        /usr/bin/../lib/gcc/x86_64-redhat-linux/12/../../../../include/c++/12/bits/stl_algo.h:1792:11: error: object of type 'diagonal<int>::diagonal_iter' cannot be assigned because its copy assignment operator is implicitly deleted [clang-diagnostic-error]

        matrix-sort.cpp:17:19: note: copy assignment operator of 'diagonal_iter' is implicitly deleted because field 'm' is of reference type 'diagonal<int>::matrix_t &' (aka 'vector<std::vector<int>> &')
        matrix_t& m;
        ^

        Ah! We have a reference in our iterator, and this prevents us from having a copy assignment operator (that is used “somewhere” in the sorting algorithm). Well… Let's just wrap it!

        -
        # we need to keep a different type than reference
        - matrix_t& m;
        + std::reference_wrapper<matrix_t> m;

        # in comparison we need to get the reference out of the wrapper first
        - return x == rhs.x && y == rhs.y && m == rhs.m;
        + return x == rhs.x && y == rhs.y && m.get() == rhs.m.get();

        # same when we return a reference to the “cell” in the matrix
        - reference operator*() const { return m[y][x]; }
        + reference operator*() const { return m.get()[y][x]; }

        # and finally in the assertions that we set for the “distance” and “less than”
        - assert(m == rhs.m);
        + assert(m.get() == rhs.m.get());

        We're done now! We have written an iterator over diagonals for a 2D vector. You can have a look at the final result here.

        Footnotes


      Swapping indices

Relatively simple implementation: just take the values, swap them and return a new vector.

      -
      impl<T: Copy> Vector2D<T> {
      pub fn swap(&self) -> Self {
      Self {
      x: self.y,
      y: self.x,
      }
      }
      }

Pretty straight-forward implementation, but let's talk about the T: Copy. We need to use it, since we are returning a new vector with swapped values. If we had values that cannot be copied, the only thing we could do would be a …. This is pretty similar to the operations on sets from the first week.

      Indexing Vec

I will start with the indexing, cause bound-checking is a bit more… complicated than I would like it to be.

      -
      pub fn index<'a, T, U>(v: &'a [Vec<U>], idx: &Vector2D<T>) -> &'a U
      where
      usize: TryFrom<T>,
      <usize as TryFrom<T>>::Error: Debug,
      T: Copy,
      {
      let (x, y): (usize, usize) = (idx.x.try_into().unwrap(), idx.y.try_into().unwrap());
      &v[y][x]
      }

Let's talk about this mess… The body of the function is probably the easiest part and should not be hard to understand; we just take the x and y and convert them both to the usize type that can be used later on for indexing.
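To make that conversion concrete, here is a tiny standalone sketch (my own illustration, not code from the solution) of turning signed coordinates into usize with try_into before indexing a nested Vec:

use std::convert::TryInto;

fn main() {
    let v = vec![vec![10, 20], vec![30, 40]];

    // signed coordinates, e.g. results of arithmetic that may go negative
    let (x, y): (i64, i64) = (1, 0);

    // convert both to `usize`; `unwrap` panics if a value is negative
    let (x, y): (usize, usize) = (x.try_into().unwrap(), y.try_into().unwrap());
    assert_eq!(v[y][x], 20);
}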


      First issue that our implementation has is the fact that we cannot get a mutable reference out of that function. This could be easily resolved by introducing new function, e.g. index_mut. Which I have actually done while writing this part:

      -
      pub fn index_mut<'a, T, U>(v: &'a mut [Vec<U>], idx: &Vector2D<T>) -> &'a mut U
      where
      usize: TryFrom<T>,
      <usize as TryFrom<T>>::Error: Debug,
      T: Copy,
      {
      let (x, y): (usize, usize) = (idx.x.try_into().unwrap(), idx.y.try_into().unwrap());
      &mut v[y][x]
      }
      «↯» Why can't we use one function?

      When we consider a Vec<T>, we don't need to consider containers as T, Rust implements indexing as traits Index<T> and IndexMut<T> that do the dirty work behind syntactic sugar of container[idx].

      However, implementing of traits is not allowed for external types, i.e. types that you haven't defined yourself. This means that you can implement indexing over containers that you have implemented yourself, but you cannot use your own types for indexing “built-in” types.

Another part of this rabbit hole is the trait SliceIndex<T> that is of relevance because of

      impl<T, I> Index<I> for [T]
      where
      I: SliceIndex<[T]>

      impl<T, I, A> Index<I> for Vec<T, A>
      where
      I: SliceIndex<[T]>,
      A: Allocator

      impl<T, I, const N: usize> Index<I> for [T; N]
      where
      [T]: Index<I>

      In other words, if your type implements SliceIndex<T> trait, it can be used for indexing. As of now, this trait has all of its required methods experimental and is marked as unsafe.

      Another problem is a requirement for indexing either [Vec<T>] or Vec<Vec<T>>. This requirement could be countered by removing inner type Vec<T> and constraining it by a trait Index (or IndexMut respectively) in a following way

      -
      pub fn index<'a, C, T>(v: &'a [C], idx: &Vector2D<T>) -> &'a C::Output
      where
      usize: TryFrom<T>,
      <usize as TryFrom<T>>::Error: Debug,
      T: Copy,
      C: Index<usize>
      {
      let (x, y): (usize, usize) = (idx.x.try_into().unwrap(), idx.y.try_into().unwrap());
      &v[y][x]
      }

      Given this, we can also give a more meaningful typename for indexing type, such as I.

      Checking bounds

…up with negative values which, unlike in C++, causes an error (instead of underflow that you can use to your advantage; you can easily guess how).

      So how can we approach this then? Well… we will convert the bounds instead of the indices and that lead us to:

      -
      pub fn in_range<T, U>(v: &[Vec<U>], idx: &Vector2D<T>) -> bool
      where
      usize: TryInto<T>,
      <usize as TryInto<T>>::Error: Debug,
      T: PartialOrd + Copy,
      {
      idx.y >= 0.try_into().unwrap()
      && idx.y < v.len().try_into().unwrap()
      && idx.x >= 0.try_into().unwrap()
      && idx.x
      < v[TryInto::<usize>::try_into(idx.y).unwrap()]
      .len()
      .try_into()
      .unwrap()
      }

      You can tell that it's definitely a shitty code. Let's improve it now! We will get back to the original idea, but do it better. We know that we cannot convert negative values into usize, but we also know that conversion like that returns a Result<T, E> which we can use to our advantage.

      -
      pub fn in_range<T, U>(v: &[Vec<U>], idx: &Vector2D<T>) -> bool
      where
      T: Copy,
      usize: TryFrom<T>,
      {
      usize::try_from(idx.y)
      .and_then(|y| usize::try_from(idx.x).map(|x| y < v.len() && x < v[y].len()))
      .unwrap_or(false)
      }

      Result<T, E> is a type similar to Either in Haskell and it allows us to chain multiple operations on correct results or propagate the original error without doing anything. Let's dissect it one-by-one.

…types and either successfully convert them or fail (with a reasonable error). The method returns Result<T, E>.

      We call and_then on that result, let's have a look at the type signature of and_then, IMO it explains more than enough:

      -
      pub fn and_then<U, F>(self, op: F) -> Result<U, E>
      where
      F: FnOnce(T) -> Result<U, E>

OK… So it takes the result and a function and returns another result with different value and different error. However we can see that the function, which represents an operation on a result, takes just the value, i.e. it doesn't care about any previous error. To make it short:

      We parsed a y index and now we try to convert the x index with try_from again, but on that result we use map rather than and_then, why would that be?

      -
      pub fn map<U, F>(self, op: F) -> Result<U, E>
      where
      F: FnOnce(T) -> U

      Huh… map performs an operation that cannot fail. And finally we use unwrap_or which takes the value from result, or in case of an error returns the default that we define.
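Here is a small self-contained illustration (mine, not taken from the solution) of how the three pieces compose: and_then for a step that can fail again, map for a step that cannot, and unwrap_or for the fallback value.

fn main() {
    let ok: Result<i32, String> = Ok(2);
    let err: Result<i32, String> = Err("not a number".to_string());

    // and_then may fail again, map cannot, unwrap_or provides the default
    let from_ok = ok
        .and_then(|x| if x > 0 { Ok(x) } else { Err("negative".to_string()) })
        .map(|x| x * 2)
        .unwrap_or(0);

    // the original error is propagated through `map` and handled by `unwrap_or`
    let from_err = err.map(|x| x * 2).unwrap_or(0);

    assert_eq!(from_ok, 4);
    assert_eq!(from_err, 0);
}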

…preparations for the AoC. Let's sum up our requirements:

      cannot do anything about it. However running and testing can be simplified!

      Let's introduce and export a new module solution that will take care of all of this. We will start by introducing a trait for each day.

      -
      pub trait Solution<Input, Output: Display> {
      fn parse_input<P: AsRef<Path>>(pathname: P) -> Input;

      fn part_1(input: &Input) -> Output;
      fn part_2(input: &Input) -> Output;
      }

      This does a lot of work for us already, we have defined a trait and for each day we will create a structure representing a specific day. That structure will also implement the Solution trait.

      Now we need to get rid of the boilerplate, we can't get rid of the main function, but we can at least move out the functionality.

      -
      fn run(type_of_input: &str) -> Result<()>
      where
      Self: Sized,
      {
      tracing_subscriber::fmt()
      .with_env_filter(EnvFilter::from_default_env())
      .with_target(false)
      .with_file(true)
      .with_line_number(true)
      .without_time()
      .compact()
      .init();
      color_eyre::install()?;

      let input = Self::parse_input(format!("{}s/{}.txt", type_of_input, Self::day()));

      info!("Part 1: {}", Self::part_1(&input));
      info!("Part 2: {}", Self::part_2(&input));

      Ok(())
      }

      fn main() -> Result<()>
      where
      Self: Sized,
      {
      Self::run("input")
      }

This is all part of the Solution trait, which can implement methods while being dependent on what is provided by the implementing types. In this case, we just need to bound the Output type to implement Display, which is necessary for printing the results. … Notice the day() method that you can see being used when constructing the path to the input file. That method will generate a name of the file, e.g. day01, and we know that we can somehow deduce it from the structure name, given we name it reasonably.

      -
      fn day() -> String {
      let mut day = String::from(type_name::<Self>().split("::").next().unwrap());
      day.make_ascii_lowercase();

      day.to_string()
      }
      type_name

This feature is still experimental and considered to be internal, it is not advised to use it in any production code.

      And now we can get to the nastiest stuff 😩 We will generate the tests!

      We want to be able to generate tests for sample input in a following way:

      -
      test_sample!(day_01, Day01, 42, 69);

      There's not much we can do, so we will write a macro to generate the tests for us.

      -
      #[macro_export]
      macro_rules! test_sample {
      ($mod_name:ident, $day_struct:tt, $part_1:expr, $part_2:expr) => {
      #[cfg(test)]
      mod $mod_name {
      use super::*;

      #[test]
      fn test_part_1() {
      let sample =
      $day_struct::parse_input(&format!("samples/{}.txt", $day_struct::day()));
      assert_eq!($day_struct::part_1(&sample), $part_1);
      }

      #[test]
      fn test_part_2() {
      let sample =
      $day_struct::parse_input(&format!("samples/{}.txt", $day_struct::day()));
      assert_eq!($day_struct::part_2(&sample), $part_2);
      }
      }
      };
      }

We have used it in a similar way as macros in C/C++; one of the things that we can use to our advantage is defining the “type” of the parameters for the macro. All parameters have their names prefixed with the $ sign and you can define various “forms”; the one used for the expected results is expr, which literally means an expression.

    5. Apart from that we need to use #[macro_export] to mark the macro as exported for usage outside of the module. Now our skeleton looks like:

      -
      use aoc_2022::*;

      type Input = String;
      type Output = String;

      struct DayXX;
      impl Solution<Input, Output> for DayXX {
      fn parse_input<P: AsRef<Path>>(pathname: P) -> Input {
      file_to_string(pathname)
      }

      fn part_1(input: &Input) -> Output {
      todo!()
      }

      fn part_2(input: &Input) -> Output {
      todo!()
      }
      }

      fn main() -> Result<()> {
      // DayXX::run("sample")
      DayXX::main()
      }

      // test_sample!(day_XX, DayXX, , );

      Solution

      Not much to talk about, it is relatively easy to simulate.

      Day 10: Cathode-Ray Tube


    And the issue is caused by different types of Output for the part 1 and part 2.

    Problem is relatively simple and consists of simulating a CPU, I have approached it in a following way:

    -
    fn evaluate_instructions(instructions: &[Instruction], mut out: Output) -> Output {
    instructions
    .iter()
    .fold(State::new(), |state, instruction| {
    state.execute(instruction, &mut out)
    });

    out
    }

    We just take the instructions, we have some state of the CPU and we execute the instructions one-by-one. Perfect usage of the fold (or reduce as you may know it from other languages).
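If fold is new to you, the idea in isolation looks like this (a generic example, not the CPU code): start with an initial accumulator and combine it with every element in turn.

fn main() {
    // 0 is the initial accumulator, the closure folds each element into it
    let sum = [1, 2, 3, 4].iter().fold(0, |acc, x| acc + x);
    assert_eq!(sum, 10);
}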


    that problem. And the answer is very simple and functional. Rust allows you to have an enumeration that can bear some other values apart from the type itself.

    tip

We could've seen something like this with the Result<T, E> type that can be defined as

    enum Result<T, E> {
    Ok(T),
    Err(E)
    }
    What does that mean though?


When we have an Ok value, it has the result itself, and when we get an Err value, it has the error. This also allows us to handle results in a rather pretty way:

    match do_something(x) {
    Ok(y) => {
    println!("SUCCESS: {}", y);
    },
    Err(y) => {
    eprintln!("ERROR: {}", y);
    }
    }

    My solution has a following outline:

    -
    fn execute(&self, i: &Instruction, output: &mut Output) -> State {
    // execute the instruction

    // collect results if necessary
    match output {
    Output::Part1(x) => self.execute_part_1(y, x),
    Output::Part2(x) => self.execute_part_2(y, x),
    }

    // return the obtained state
    new_state
    }

    You might think that it's a perfectly reasonable thing to do. Yes, but notice that the match statement doesn't collect the changes in any way and also we pass output by &mut, so it is shared across each iteration of the fold.
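To make the shape of this clearer, here is a minimal sketch of such an output enum being updated through a &mut reference; the payload types and the record function are made up for the illustration, only the Part1/Part2 variant names mirror the outline above.

enum Output {
    Part1(i32),
    Part2(Vec<char>),
}

// the update writes into whichever variant was passed in
fn record(output: &mut Output, value: i32) {
    match output {
        Output::Part1(sum) => *sum += value,
        Output::Part2(screen) => screen.push(if value > 0 { '#' } else { '.' }),
    }
}

fn main() {
    let mut out = Output::Part1(0);
    record(&mut out, 5);
    record(&mut out, 7);

    if let Output::Part1(sum) = out {
        assert_eq!(sum, 12);
    }
}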


    As I have said in the tl;dr, we are looking for the shortest path, but the start and goal differ for the part 1 and 2. So I have decided to refactor my solution to a BFS algorithm that takes necessary parameters via functions:

    -
    fn bfs<F, G>(
    graph: &[Vec<char>], start: &Position, has_edge: F, is_target: G
    ) -> Option<usize>
    where
    F: Fn(&[Vec<char>], &Position, &Position) -> bool,
    G: Fn(&[Vec<char>], &Position) -> bool

    We pass the initial vertex from the caller and everything else is left to the BFS algorithm, based on the has_edge and is_target functions.
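For illustration, here is a compact, self-contained BFS in the same spirit, parametrized by closures; it is simplified compared to the real solution (vertices are plain usize and a neighbours closure stands in for the graph + has_edge pair):

use std::collections::{HashSet, VecDeque};

fn bfs<F, G>(start: usize, neighbours: F, is_target: G) -> Option<usize>
where
    F: Fn(usize) -> Vec<usize>,
    G: Fn(usize) -> bool,
{
    let mut visited = HashSet::from([start]);
    let mut queue = VecDeque::from([(start, 0)]);

    while let Some((vertex, distance)) = queue.pop_front() {
        if is_target(vertex) {
            return Some(distance);
        }

        for next in neighbours(vertex) {
            if visited.insert(next) {
                queue.push_back((next, distance + 1));
            }
        }
    }

    None
}

fn main() {
    // shortest path on a line graph 0 - 1 - 2 - 3
    let found = bfs(
        0,
        |v| vec![v.saturating_sub(1), (v + 1).min(3)],
        |v| v == 3,
    );
    assert_eq!(found, Some(3));
}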

This was easy! And that is not very usual in Rust once you want to pass around … time complexity, because of the priority heap instead of the queue.

    You can implement a lot of traits if you want to. It is imperative to implement ordering on the packets. I had a typo, so I also proceeded to implement a Display trait for debugging purposes:

    -
    impl Display for Packet {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
    match self {
    Packet::Integer(x) => write!(f, "{x}"),
    Packet::List(lst) => write!(f, "[{}]", lst.iter().map(|p| format!("{p}")).join(",")),
    }
    }
    }

    Solution

    A lot of technical details… Parsing is nasty too…

    Day 14: Regolith Reservoir

…leave it be, so I tried to implement the Index and IndexMut. The unsafe part is the 2 methods that are named *unchecked*. Anyways, I will be implementing the Index* traits for now, rather than the SliceIndex.

    It's relatively straightforward…

    -
    impl<I, C> Index<Vector2D<I>> for [C]
    where
    I: Copy + TryInto<usize>,
    <I as TryInto<usize>>::Error: Debug,
    C: Index<usize>,
    {
    type Output = C::Output;

    fn index(&self, index: Vector2D<I>) -> &Self::Output {
    let (x, y): (usize, usize) =
    (index.x.try_into().unwrap(), index.y.try_into().unwrap());
    &self[y][x]
    }
    }

    impl<I, C> IndexMut<Vector2D<I>> for [C]
    where
    I: Copy + TryInto<usize>,
    <I as TryInto<usize>>::Error: Debug,
    C: IndexMut<usize>,
    {
    fn index_mut(&mut self, index: Vector2D<I>) -> &mut Self::Output {
    let (x, y): (usize, usize) =
    (index.x.try_into().unwrap(), index.y.try_into().unwrap());
    &mut self[y][x]
    }
    }

    We can see a lot of similarities to the implementation of index and index_mut functions. In the end, they are 1:1, just wrapped in the trait that provides a syntax sugar for container[idx].

    note

    I have also switched from using the TryFrom to TryInto trait, since it better matches what we are using, the .try_into rather than usize::try_from.

Also implementing TryFrom automatically provides you with a TryInto trait, since it is relatively easy to implement. Just compare the following:

    pub trait TryFrom<T>: Sized {
    type Error;

    fn try_from(value: T) -> Result<Self, Self::Error>;
    }

    pub trait TryInto<T>: Sized {
    type Error;

    fn try_into(self) -> Result<T, Self::Error>;
    }
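As a quick demonstration (standard library only, nothing specific to the solution), having TryFrom is enough to get the .try_into() call for free through the blanket implementation:

use std::convert::{TryFrom, TryInto};

fn main() {
    // the explicit direction…
    let a = usize::try_from(42i64).unwrap();

    // …and the same conversion through the blanket `TryInto`
    let b: usize = 42i64.try_into().unwrap();
    assert_eq!(a, b);

    // negative values fail instead of wrapping around
    assert!(usize::try_from(-1i64).is_err());
}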

    OK, so we have our trait implemented, we should be able to use container[index], right? Yes… but actually no 😦

    -
    error[E0277]: the type `[std::vec::Vec<i8>]` cannot be indexed by `aoc_2022::Vector2D<usize>`
    --> src/bin/day08.rs:26:18
    |
    26 | if trees[pos] > tallest {
    | ^^^ slice indices are of type `usize` or ranges of `usize`
    |
    = help: the trait `std::slice::SliceIndex<[std::vec::Vec<i8>]>` is not implemented for `aoc_2022::Vector2D<usize>`
    = note: required for `std::vec::Vec<std::vec::Vec<i8>>` to implement `std::ops::Index<aoc_2022::Vector2D<usize>>`

    error[E0277]: the type `[std::vec::Vec<i8>]` cannot be indexed by `aoc_2022::Vector2D<usize>`
    --> src/bin/day08.rs:30:28
    |
    30 | max(tallest, trees[pos])
    | ^^^ slice indices are of type `usize` or ranges of `usize`
    |
    = help: the trait `std::slice::SliceIndex<[std::vec::Vec<i8>]>` is not implemented for `aoc_2022::Vector2D<usize>`
    = note: required for `std::vec::Vec<std::vec::Vec<i8>>` to implement `std::ops::Index<aoc_2022::Vector2D<usize>>`

    error[E0277]: the type `[std::vec::Vec<i8>]` cannot be indexed by `aoc_2022::Vector2D<isize>`
    --> src/bin/day08.rs:52:28
    |
    52 | let max_height = trees[position];
    | ^^^^^^^^ slice indices are of type `usize` or ranges of `usize`
    |
    = help: the trait `std::slice::SliceIndex<[std::vec::Vec<i8>]>` is not implemented for `aoc_2022::Vector2D<isize>`
    = note: required for `std::vec::Vec<std::vec::Vec<i8>>` to implement `std::ops::Index<aoc_2022::Vector2D<isize>>`

Why? We have it implemented for the slices ([C]), why doesn't it work? Well, the fun part consists of the fact that in the other place where we were using it, we were passing the &[Vec<T>], but this is coming from helper functions that … those. Just for the slices. 🤯 What are we going to do about it? … so let's implement a macro! The only difference across the implementations is the types of the outer containers. The implementation doesn't differ at all!

    Implementing the macro can be done in a following way:

    -
    macro_rules! generate_indices {
    ($container:ty) => {
    impl<I, C> Index<Vector2D<I>> for $container
    where
    I: Copy + TryInto<usize>,
    <I as TryInto<usize>>::Error: Debug,
    C: Index<usize>,
    {
    type Output = C::Output;

    fn index(&self, index: Vector2D<I>) -> &Self::Output {
    let (x, y): (usize, usize) =
    (index.x.try_into().unwrap(), index.y.try_into().unwrap());
    &self[y][x]
    }
    }

    impl<I, C> IndexMut<Vector2D<I>> for $container
    where
    I: Copy + TryInto<usize>,
    <I as TryInto<usize>>::Error: Debug,
    C: IndexMut<usize>,
    {
    fn index_mut(&mut self, index: Vector2D<I>) -> &mut Self::Output {
    let (x, y): (usize, usize) =
    (index.x.try_into().unwrap(), index.y.try_into().unwrap());
    &mut self[y][x]
    }
    }
    };
    }

    And now we can simply do

    -
    generate_indices!(VecDeque<C>);
    generate_indices!([C]);
    generate_indices!(Vec<C>);
    // generate_indices!([C; N], const N: usize);

The last type (I took the inspiration from the implementations of the Index and IndexMut traits) is a bit problematic, because of the const N: usize part, which I haven't managed to parse. And that's how I got rid of the error.


    This issue is relatively funny. If you don't use any type aliases, just the raw types, you'll get suggested certain changes by the clippy. For example if you consider the following piece of code

    -
    fn get_sum(nums: &Vec<i32>) -> i32 {
    nums.iter().sum()
    }

    fn main() {
    let nums = vec![1, 2, 3];
    println!("Sum: {}", get_sum(&nums));
    }

    and you run clippy on it, you will get

    -
    Checking playground v0.0.1 (/playground)
    warning: writing `&Vec` instead of `&[_]` involves a new object where a slice will do
    --> src/main.rs:1:18
    |
    1 | fn get_sum(nums: &Vec<i32>) -> i32 {
    | ^^^^^^^^^ help: change this to: `&[i32]`
    |
    = help: for further information visit https://rust-lang.github.io/rust-clippy/master/index.html#ptr_arg
    = note: `#[warn(clippy::ptr_arg)]` on by default

    warning: `playground` (bin "playground") generated 1 warning
    Finished dev [unoptimized + debuginfo] target(s) in 0.61s

    However, if you introduce a type alias, such as

    -
    type Numbers = Vec<i32>;

    Then clippy won't say anything, cause there is literally nothing to suggest. However the outcome is not the same…
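In other words, with the alias in place the very same function from above no longer triggers ptr_arg, even though it still takes a &Vec under the hood:

type Numbers = Vec<i32>;

// identical body to the example above, only the parameter type is the alias
fn get_sum(nums: &Numbers) -> i32 {
    nums.iter().sum()
}

fn main() {
    let nums: Numbers = vec![1, 2, 3];
    println!("Sum: {}", get_sum(&nums));
}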

…backpacks and we want to choose the elf that has the most food ;)

    At first I've decided to put asserts into my main, something like

    -
    assert_eq!(part_1(&sample), 24000);
    info!("Part 1: {}", part_1(&input));

    assert_eq!(part_2(&sample), 45000);
    info!("Part 2: {}", part_2(&input));

    However, once you get further, the sample input may take some time to run itself. So in the end, I have decided to turn them into unit tests:

    -
    #[cfg(test)]
    mod tests {
    use super::*;

    #[test]
    fn test_part_1() {
    let sample = parse_input("samples/day01.txt");
    assert_eq!(part_1(&sample), 24000);
    }

    #[test]
    fn test_part_2() {
    let sample = parse_input("samples/day01.txt");
    assert_eq!(part_2(&sample), 45000);
    }
    }

    And later on I have noticed, it's hard to tell the difference between the days, so I further renamed the mod from generic tests to reflect the days.

Also after finishing the first day puzzle, I have installed sccache to cache the builds. … to give up. Let's dive into it \o/

    Fun fact

    Fighting the compiler took me 30 minutes.

    We need to find a common item among 2 collections, that's an easy task, right? We can construct 2 sets and find an intersection:

    -
    let top: HashSet<i32> = [1, 2, 3].iter().collect();
    let bottom: HashSet<i32> = [3, 4, 5].iter().collect();

    Now, the first issue that we encounter is caused by the fact that we are using a slice (the […]), iterator of that returns references to the numbers. And we get immediately yelled at by the compiler, because the numbers are discarded after running the .collect. To fix this, we can use .into_iter:
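A tiny standalone illustration of that difference (my own example, assuming the 2021 edition): .iter() yields references into the array, while .into_iter() yields the values themselves, so the resulting sets have different element types.

use std::collections::HashSet;

fn main() {
    let arr = [1, 2, 3];

    // references into `arr`; the set borrows from it
    let refs: HashSet<&i32> = arr.iter().collect();

    // the values are copied into the set, nothing is borrowed
    let vals: HashSet<i32> = arr.into_iter().collect();

    assert!(refs.contains(&1));
    assert!(vals.contains(&1));
}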

    -
    let top: HashSet<i32> = [1, 2, 3].into_iter().collect();
    let bottom: HashSet<i32> = [3, 4, 5].into_iter().collect();

    This way the numbers will get copied instead of referenced. OK, let's find the intersection of those 2 collections:

    -
    println!("Common elements: {:?}", top.intersection(&bottom));
    -
    Common elements: [3]
    caution

    Notice that we need to do &bottom. It explicitly specifies that .intersection borrows the bottom, i.e. takes an immutable reference to it.

    That's what we want, right? Looks like it! \o/


    that should be fairly easy, we have an intersection and we want to find intersection over all of them.

    Let's have a look at the type of the .intersection

    -
    pub fn intersection<'a>(
        &'a self,
        other: &'a HashSet<T, S>
    ) -> Intersection<'a, T, S>

    OK… Huh… But we have an example there!

    -
    let intersection: HashSet<_> = a.intersection(&b).collect();

    Cool, that's all we need.

    -
    let top: HashSet<i32> = [1, 2, 3, 4].into_iter().collect();
    let bottom: HashSet<i32> = [3, 4, 5, 6].into_iter().collect();
    let top_2: HashSet<i32> = [2, 3, 4, 5, 6].into_iter().collect();
    let bottom_2: HashSet<i32> = [4, 5, 6].into_iter().collect();

    let intersection: HashSet<_> = top.intersection(&bottom).collect();
    println!("Intersection: {:?}", intersection);
    -
    Intersection: {3, 4}

    Cool, so let's do the intersection with the top_2:

    -
    let top: HashSet<i32> = [1, 2, 3, 4].into_iter().collect();
    let bottom: HashSet<i32> = [3, 4, 5, 6].into_iter().collect();
    let top_2: HashSet<i32> = [2, 3, 4, 5, 6].into_iter().collect();
    let bottom_2: HashSet<i32> = [4, 5, 6].into_iter().collect();

    let intersection: HashSet<_> = top.intersection(&bottom).collect();
    let intersection: HashSet<_> = intersection.intersection(&top_2).collect();
    println!("Intersection: {:?}", intersection);

    And we get yelled at by the compiler:

    -
    error[E0308]: mismatched types
    --> src/main.rs:10:58
    |
    10 | let intersection: HashSet<_> = intersection.intersection(&top_2).collect();
    | ------------ ^^^^^^ expected `&i32`, found `i32`
    | |
    | arguments to this function are incorrect
    |
    = note: expected reference `&HashSet<&i32>`
    found reference `&HashSet<i32>`

    /o\ What the hell is going on here? Well, the funny thing is, that this operation doesn't return the elements themselves, but the references to them and when we pass the third set, it has just the values themselves, without any references.

…a “tax” for having a borrow checker having your back and making sure you're not doing something naughty that may cause undefined behavior.

    To resolve this we need to get an iterator that clones the elements:

    -
    let top: HashSet<i32> = [1, 2, 3, 4].into_iter().collect();
    let bottom: HashSet<i32> = [3, 4, 5, 6].into_iter().collect();
    let top_2: HashSet<i32> = [2, 3, 4, 5, 6].into_iter().collect();
    let bottom_2: HashSet<i32> = [4, 5, 6].into_iter().collect();

    let intersection: HashSet<_> = top.intersection(&bottom).cloned().collect();
    let intersection: HashSet<_> = intersection.intersection(&top_2).cloned().collect();
    let intersection: HashSet<_> = intersection.intersection(&bottom_2).cloned().collect();
    println!("Intersection: {:?}", intersection);
    -
    Intersection: {4}

    Solution

    The approach is pretty simple, if you omit the 1on1 with the compiler. You just have some fun with the set operations :)

…Find how many overlap and can take the day off.

    Day 5: Supply Stacks

    tl;dr

    Let's play with stacks of crates.

    Very easy problem with very annoying input. You can judge yourself:

    -
        [D]    
    [N] [C]
    [Z] [M] [P]
    1 2 3

    move 1 from 2 to 1
    move 3 from 1 to 3
    move 2 from 2 to 1
    move 1 from 1 to 2

    Good luck transforming that into something reasonable :)

    Fun fact

    Took me 40 minutes to parse this reasonably, including fighting the compiler.

    Solution


    the work. Later on I have decided to explore the std and interface of the std::vec::Vec and found split_off which takes an index and splits (duh) the vector:

    -
    let mut vec = vec![1, 2, 3];
    let vec2 = vec.split_off(1);
    assert_eq!(vec, [1]);
    assert_eq!(vec2, [2, 3]);

    This helped me simplify my solution a lot and also get rid of some edge cases.
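As an illustration of why that helps, here is a sketch (not necessarily my exact code, the move_crates helper and its signature are just for the example) of moving count crates from one stack onto another with split_off:

fn move_crates(stacks: &mut [Vec<char>], count: usize, from: usize, to: usize) {
    // split the top `count` crates off the source stack…
    let split_at = stacks[from].len() - count;
    let mut moved = stacks[from].split_off(split_at);

    // …and move them onto the target stack in one go, keeping their order
    stacks[to].append(&mut moved);
}

fn main() {
    let mut stacks = vec![vec!['Z', 'N'], vec!['M', 'C', 'D'], vec!['P']];

    // "move 2 from 2 to 1" (0-indexed here)
    move_crates(&mut stacks, 2, 1, 0);

    assert_eq!(stacks[0], vec!['Z', 'N', 'C', 'D']);
    assert_eq!(stacks[1], vec!['M']);
}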

    Day 6: Tuning Trouble

    tl;dr

Finding start of the message in a very weird protocol. Start of the message is … directories that take a lot of space and should be deleted.

    Solution

    We need to “build” a file system from the input that is given in a following form:

    -
    $ cd /
    $ ls
    dir a
    14848514 b.txt
    8504156 c.dat
    dir d
    $ cd a
    $ ls
    dir e
    29116 f
    2557 g
    62596 h.lst
    $ cd e
    $ ls
    584 i
    $ cd ..
    $ cd ..
    $ cd d
    $ ls
    4060174 j
    8033020 d.log
    5626152 d.ext
    7214296 k

    There are few ways in which you can achieve this and also you can assume some preconditions, but why would we do that, right? :)

You can “slap” this in either HashMap or BTreeMap and call it a day. … references are present) are checked dynamically.

So in the end, if you want to have both shared ownership and interior mutability, you reach for Rc<RefCell<T>>.
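A minimal demonstration (my own, unrelated to the file-system code) of what that combination buys us: several owners of the same value, with the borrow rules enforced at runtime instead of compile time.

use std::cell::RefCell;
use std::rc::Rc;

fn main() {
    let shared = Rc::new(RefCell::new(vec![1, 2, 3]));
    let another_owner = Rc::clone(&shared);

    // both handles point to the same vector; the mutable borrow is checked at runtime
    another_owner.borrow_mut().push(4);

    assert_eq!(*shared.borrow(), vec![1, 2, 3, 4]);
}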

    So, how are we going to represent the file system then? We will use an enumeration, hehe, which is an algebraic data type that can store some stuff in itself 😩

    -
    type FileHandle = Rc<RefCell<AocFile>>;

    #[derive(Debug)]
    enum AocFile {
    File(usize),
    Directory(BTreeMap<String, FileHandle>),
    }

Let's go over it! FileHandle represents a dynamically allocated AocFile, not much to discuss. What does the #[derive(Debug)] do though? It lets us print out the value of that enumeration; it's derived, so it's not as good as if we had … problems in it. However the toolkit is questionable :/

    with rust-analyzer. Because of my choice of libraries, we will also introduce a .envrc file that can be used by direnv, which allows you to set specific environment variables when you enter a directory. In our case, we will use

    -
    # to show nice backtrace when using the color-eyre
    export RUST_BACKTRACE=1

    # to catch logs generated by tracing
    export RUST_LOG=trace

And for one of the most obnoxious things ever, we will use a script to download the inputs instead of “clicking, opening and copying to a file”1. There is no need to be fancy, so we will adjust the Python script by Martin2.

    -
    #!/usr/bin/env python3

    import datetime
    import yaml
    import requests
    import sys


    def load_config():
    with open("env.yaml", "r") as f:
    js = yaml.load(f, Loader=yaml.Loader)
    return js["session"], js["year"]


    def get_input(session, year, day):
    return requests.get(
    f"https://adventofcode.com/{year}/day/{day}/input",
    cookies={"session": session},
    headers={
    "User-Agent": "{repo} by {mail}".format(
    repo="gitlab.com/mfocko/advent-of-code-2022",
    mail="me@mfocko.xyz",
    )
    },
    ).content.decode("utf-8")


    def main():
    day = datetime.datetime.now().day
    if len(sys.argv) == 2:
    day = sys.argv[1]

    session, year = load_config()
    problem_input = get_input(session, year, day)

    with open(f"./inputs/day{day:>02}.txt", "w") as f:
    f.write(problem_input)


    if __name__ == "__main__":
    main()

    If the script is called without any arguments, it will deduce the day from the system, so we do not need to change the day every morning. It also requires a configuration file:

    -
    # env.yaml
    session: ‹your session cookie›
    year: 2022

    Libraries

    Looking at the list of the libraries, I have chosen “a lot” of them. Let's walk through each of them.

…also we can follow KISS. I have 2 modules that my “library” exports: one for parsing and one for a 2D vector (that gets used quite often during Advent of Code).

    Key part is, of course, processing the input and my library exports following functions that get used a lot:

    -
    /// Reads file to the string.
    pub fn file_to_string<P: AsRef<Path>>(pathname: P) -> String;

    /// Reads file and returns it as a vector of characters.
    pub fn file_to_chars<P: AsRef<Path>>(pathname: P) -> Vec<char>;

    /// Reads file and returns a vector of parsed structures. Expects each structure
    /// on its own line in the file. And `T` needs to implement `FromStr` trait.
    pub fn file_to_structs<P: AsRef<Path>, T: FromStr>(pathname: P) -> Vec<T>
    where
    <T as FromStr>::Err: Debug;

    /// Converts iterator over strings to a vector of parsed structures. `T` needs
    /// to implement `FromStr` trait and its error must derive `Debug` trait.
    pub fn strings_to_structs<T: FromStr, U>(
    iter: impl Iterator<Item = U>
    ) -> Vec<T>
    where
    <T as std::str::FromStr>::Err: std::fmt::Debug,
    U: Deref<Target = str>;

    /// Reads file and returns it as a vector of its lines.
    pub fn file_to_lines<P: AsRef<Path>>(pathname: P) -> Vec<String>;

As for the vector, I went with a rather simple implementation that allows only addition of the vectors for now and accessing the elements via functions x() and y(). Also the vector is generic, so we can use it with any numeric type we need.
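Such a vector can stay quite small; here is a rough sketch of what I mean (the exact field names, accessors and the Add implementation are illustrative, not a copy of my actual module):

use std::ops::Add;

#[derive(Clone, Copy, Debug, PartialEq, Eq)]
struct Vector2D<T> {
    x: T,
    y: T,
}

impl<T: Copy> Vector2D<T> {
    fn new(x: T, y: T) -> Self {
        Self { x, y }
    }

    // accessors instead of public fields
    fn x(&self) -> T {
        self.x
    }

    fn y(&self) -> T {
        self.y
    }
}

// only addition is supported for now, as described above
impl<T: Add<Output = T>> Add for Vector2D<T> {
    type Output = Self;

    fn add(self, rhs: Self) -> Self {
        Self {
            x: self.x + rhs.x,
            y: self.y + rhs.y,
        }
    }
}

fn main() {
    let a = Vector2D::new(1, 2);
    let b = Vector2D::new(3, 4);

    assert_eq!(a + b, Vector2D::new(4, 6));
    assert_eq!(a.x(), 1);
    assert_eq!(b.y(), 4);
}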

    We can also prepare a template to quickly bootstrap each of the days. We know that each puzzle has 2 parts, which means that we can start with 2 functions that will solve them.

    -
    fn part1(input: &Input) -> Output {
    todo!()
    }

    fn part2(input: &Input) -> Output {
    todo!()
    }
    +
    fn part1(input: &Input) -> Output {
    todo!()
    }

    fn part2(input: &Input) -> Output {
    todo!()
    }

Both functions take a reference to the input and return some output (in the majority of puzzles, it is the same type). todo!() can be used as a nice placeholder; it causes a panic when reached, and we can also provide a string with an explanation, e.g. todo!("part 1"). We have not given the functions specific types, and to avoid as much copy-paste as possible, we will introduce type aliases.

    -
    type Input = String;
    type Output = i32;
    +
    type Input = String;
    type Output = i32;
    tip

    This allows us to quickly adjust the types only in one place without the need to do regex-replace or replace them manually.
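
For example, if a day's input parses into a list of numbers and the answers do not fit into i32, a hypothetical adjustment touches only these two aliases:

type Input = Vec<u64>;
type Output = u64;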

    For each day we get a personalized input that is provided as a text file. Almost all the time, we would like to get some structured type out of that input, and therefore it makes sense to introduce a new function that will provide the parsing of the input.

    -
    fn parse_input(path: &str) -> Input {
    todo!()
    }
    +
    fn parse_input(path: &str) -> Input {
    todo!()
    }

This “parser” will take a path to the file, just in case we would like to run the sample instead of the input.
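
As a hedged example of what a concrete day could put here, assuming the input is just one number per line, the parser can simply delegate to one of the helpers shown above (adjusting the Input alias is part of this assumption):

use aoc_2022::*;

type Input = Vec<i64>;

fn parse_input(path: &str) -> Input {
    // `file_to_structs` parses every line via `FromStr`,
    // so plain numbers need no extra code here.
    file_to_structs(path)
}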

    OK, so now we can write a main function that will take all of the pieces and run them.

    -
    fn main() {
    let input = parse_input("inputs/dayXX.txt");

    println!("Part 1: {}", part_1(&input));
    println!("Part 2: {}", part_2(&input));
    }
    +
    fn main() {
    let input = parse_input("inputs/dayXX.txt");

    println!("Part 1: {}", part_1(&input));
    println!("Part 2: {}", part_2(&input));
    }

    This would definitely do :) But we have installed a few libraries and we want to use them. In this part we are going to utilize tracing (for tracing, duh…) and color-eyre (for better error reporting, e.g. from parsing).

    -
    fn main() -> Result<()> {
    tracing_subscriber::fmt()
    .with_env_filter(EnvFilter::from_default_env())
    .with_target(false)
    .with_file(true)
    .with_line_number(true)
    .without_time()
    .compact()
    .init();
    color_eyre::install()?;

    let input = parse_input("inputs/dayXX.txt");

    info!("Part 1: {}", part_1(&input));
    info!("Part 2: {}", part_2(&input));

    Ok(())
    }
    +
    fn main() -> Result<()> {
    tracing_subscriber::fmt()
    .with_env_filter(EnvFilter::from_default_env())
    .with_target(false)
    .with_file(true)
    .with_line_number(true)
    .without_time()
    .compact()
    .init();
    color_eyre::install()?;

    let input = parse_input("inputs/dayXX.txt");

    info!("Part 1: {}", part_1(&input));
    info!("Part 2: {}", part_2(&input));

    Ok(())
    }

The first statement will set up tracing and configure it to print out the logs to the terminal, based on the environment variable. We also change the formatting a bit, since we do not need all the fancy features of the logger. Pure initialization would get us logs like this:

    -
    2022-12-11T19:53:19.975343Z  INFO day01: Part 1: 0
    +
    2022-12-11T19:53:19.975343Z  INFO day01: Part 1: 0

However, after running that command, we will get the following:

    -
     INFO src/bin/day01.rs:35: Part 1: 0
    +
     INFO src/bin/day01.rs:35: Part 1: 0

And the color_eyre::install()? is quite straightforward. We just initialize the error reporting provided by color-eyre.

    caution

Notice that we had to add Ok(()) to the end of the function and adjust the return type of the main to Result<()>. It is caused by the fact that color_eyre can be installed only once and therefore it can fail; that is how we got the ? at the end of the ::install, which unwraps the »result« of the installation.

    Overall we will get to a template like this:

    -
    use aoc_2022::*;

    use color_eyre::eyre::Result;
    use tracing::info;
    use tracing_subscriber::EnvFilter;

    type Input = String;
    type Output = i32;

    fn parse_input(path: &str) -> Input {
    todo!()
    }

    fn part1(input: &Input) -> Output {
    todo!()
    }

    fn part2(input: &Input) -> Output {
    todo!()
    }

    fn main() -> Result<()> {
    tracing_subscriber::fmt()
    .with_env_filter(EnvFilter::from_default_env())
    .with_target(false)
    .with_file(true)
    .with_line_number(true)
    .without_time()
    .compact()
    .init();
    color_eyre::install()?;

    let input = parse_input("inputs/dayXX.txt");

    info!("Part 1: {}", part_1(&input));
    info!("Part 2: {}", part_2(&input));

    Ok(())
    }
    +
    use aoc_2022::*;

    use color_eyre::eyre::Result;
    use tracing::info;
    use tracing_subscriber::EnvFilter;

    type Input = String;
    type Output = i32;

    fn parse_input(path: &str) -> Input {
    todo!()
    }

    fn part1(input: &Input) -> Output {
    todo!()
    }

    fn part2(input: &Input) -> Output {
    todo!()
    }

    fn main() -> Result<()> {
    tracing_subscriber::fmt()
    .with_env_filter(EnvFilter::from_default_env())
    .with_target(false)
    .with_file(true)
    .with_line_number(true)
    .without_time()
    .compact()
    .init();
    color_eyre::install()?;

    let input = parse_input("inputs/dayXX.txt");

    info!("Part 1: {}", part_1(&input));
    info!("Part 2: {}", part_2(&input));

    Ok(())
    }

    Footnotes

    1. diff --git a/blog/tags/admin/index.html b/blog/tags/admin/index.html index 076ea8b..a42d099 100644 --- a/blog/tags/admin/index.html +++ b/blog/tags/admin/index.html @@ -14,8 +14,8 @@ - - + +

      One post tagged with "admin"

      View All Tags

      · 4 min read
      Matej Focko

      When you decide to run Fedora on your VPS, you might get screwed over by using diff --git a/blog/tags/advent-of-code-2022/index.html b/blog/tags/advent-of-code-2022/index.html index 878bb54..2a28b63 100644 --- a/blog/tags/advent-of-code-2022/index.html +++ b/blog/tags/advent-of-code-2022/index.html @@ -14,8 +14,8 @@ - - + +

      5 posts tagged with "advent-of-code-2022"

      View All Tags
      diff --git a/blog/tags/advent-of-code/index.html b/blog/tags/advent-of-code/index.html index 8b5d459..24a0334 100644 --- a/blog/tags/advent-of-code/index.html +++ b/blog/tags/advent-of-code/index.html @@ -14,8 +14,8 @@ - - + +

      5 posts tagged with "advent-of-code"

      View All Tags
      diff --git a/blog/tags/copr/index.html b/blog/tags/copr/index.html index 3c09d38..cd1951d 100644 --- a/blog/tags/copr/index.html +++ b/blog/tags/copr/index.html @@ -14,8 +14,8 @@ - - + +

      One post tagged with "copr"

      View All Tags

      · 4 min read
      Matej Focko

      When you decide to run Fedora on your VPS, you might get screwed over by using diff --git a/blog/tags/cpp/index.html b/blog/tags/cpp/index.html index 1c0123f..b2a4df7 100644 --- a/blog/tags/cpp/index.html +++ b/blog/tags/cpp/index.html @@ -14,8 +14,8 @@ - - + +

      One post tagged with "cpp"

      View All Tags

      · 17 min read
      Matej Focko

      Let's try to solve one of the LeetCode challenges in easy and hard mode at the diff --git a/blog/tags/index.html b/blog/tags/index.html index 3d43aa9..89cd59e 100644 --- a/blog/tags/index.html +++ b/blog/tags/index.html @@ -14,8 +14,8 @@ - - + +

      diff --git a/blog/tags/iterators/index.html b/blog/tags/iterators/index.html index c5e0ecc..6cd6d56 100644 --- a/blog/tags/iterators/index.html +++ b/blog/tags/iterators/index.html @@ -14,8 +14,8 @@ - - + +

      One post tagged with "iterators"

      View All Tags

      · 17 min read
      Matej Focko

      Let's try to solve one of the LeetCode challenges in easy and hard mode at the diff --git a/blog/tags/leetcode/index.html b/blog/tags/leetcode/index.html index 476363c..eed589e 100644 --- a/blog/tags/leetcode/index.html +++ b/blog/tags/leetcode/index.html @@ -14,8 +14,8 @@ - - + +

      One post tagged with "leetcode"

      View All Tags

      · 17 min read
      Matej Focko

      Let's try to solve one of the LeetCode challenges in easy and hard mode at the diff --git a/blog/tags/red-hat/index.html b/blog/tags/red-hat/index.html index 385cd1f..bed04fe 100644 --- a/blog/tags/red-hat/index.html +++ b/blog/tags/red-hat/index.html @@ -14,8 +14,8 @@ - - + +

      One post tagged with "red-hat"

      View All Tags

      · 4 min read
      Matej Focko

      When you decide to run Fedora on your VPS, you might get screwed over by using diff --git a/blog/tags/rust/index.html b/blog/tags/rust/index.html index b54b1d8..de1a5f1 100644 --- a/blog/tags/rust/index.html +++ b/blog/tags/rust/index.html @@ -14,8 +14,8 @@ - - + +

      5 posts tagged with "rust"

      View All Tags
      diff --git a/blog/tags/vps/index.html b/blog/tags/vps/index.html index fd97a73..d46f4e4 100644 --- a/blog/tags/vps/index.html +++ b/blog/tags/vps/index.html @@ -14,8 +14,8 @@ - - + +

      One post tagged with "vps"

      View All Tags

      · 4 min read
      Matej Focko

      When you decide to run Fedora on your VPS, you might get screwed over by using diff --git a/blog/tags/🏭/index.html b/blog/tags/🏭/index.html index 30b524f..99d22d4 100644 --- a/blog/tags/🏭/index.html +++ b/blog/tags/🏭/index.html @@ -14,8 +14,8 @@ - - + +

      One post tagged with "🏭"

      View All Tags

      · 4 min read
      Matej Focko

      When you decide to run Fedora on your VPS, you might get screwed over by using diff --git a/c/bonuses/seminar-03/index.html b/c/bonuses/seminar-03/index.html index 4236db2..34721f8 100644 --- a/c/bonuses/seminar-03/index.html +++ b/c/bonuses/seminar-03/index.html @@ -16,8 +16,8 @@ - - + +

      3rd seminar

      caution

      Deadline for the submission of the bonus is March 16th 24:00.

      @@ -51,7 +51,7 @@ it.

      Function pointers

In the skeleton of the “full fat” version you might have noticed a weird type signature of both the maximum and select_sort functions. Those functions get passed a function pointer to the comparator that you use for comparing the respective elements in the passed in array.

If we take the parameter from one of the functions from the skeleton:

int (*comp)(const void *, const void *)

comp is a function pointer to a function that takes two pointers of unspecified type, i.e. a pure address to the memory (you don't know what is stored in there), and returns an int.

You can pass the function by simply using its name. (There is no need to use & to get its address.) And you can also call the function by “calling” the function pointer.

@@ -66,6 +66,6 @@ submitting the homeworks, that is:

    2. Create a MR to the main branch with me (@xfocko) as the reviewer.
    Directory structure for bonuses

    Ideally create a directory seminar-bonuses in the root of your repository with -bonuses in their own subdirectories.

    Structure of your repository can look like this:

    .
    ├── bonuses
    │ └── seminar-03
    ├── hello
    ├── hw01
    ├── hw02
    ├── seminar-01
    ├── seminar-02
    └── seminar-03

    or

    .
    ├── bonus-seminar-03
    ├── hello
    ├── hw01
    ├── hw02
    ├── seminar-01
    ├── seminar-02
    └── seminar-03

    Structure of the bonuses is entirely up to you, just keep it consistent.

    +bonuses in their own subdirectories.

    Structure of your repository can look like this:

    .
    ├── bonuses
    │ └── seminar-03
    ├── hello
    ├── hw01
    ├── hw02
    ├── seminar-01
    ├── seminar-02
    └── seminar-03

    or

    .
    ├── bonus-seminar-03
    ├── hello
    ├── hw01
    ├── hw02
    ├── seminar-01
    ├── seminar-02
    └── seminar-03

    Structure of the bonuses is entirely up to you, just keep it consistent.

    \ No newline at end of file diff --git a/c/bonuses/seminar-04/index.html b/c/bonuses/seminar-04/index.html index f0ee830..94b7235 100644 --- a/c/bonuses/seminar-04/index.html +++ b/c/bonuses/seminar-04/index.html @@ -16,8 +16,8 @@ - - + +

    4th seminar

    caution

Deadline for the submission of the bonus is March 23rd 24:00.

    @@ -54,7 +54,7 @@ for the implementation of this task it is much easier to use just the pointers.<

    Example of run

    For a better understanding of your task, I will describe a simple walk with corresponding function call.

    -
    const char *map = (
    ">.v"
    ".K<"
    "..."
    );

    walk(map, &map[6], '^', 3, 3);
    +
    const char *map = (
    ">.v"
    ".K<"
    "..."
    );

    walk(map, &map[6], '^', 3, 3);

    For this call, you should return FOUND_KEY. Let us walk through the walk ;)

    1. @@ -62,31 +62,31 @@ function call.

      he follows the direction given by parameter (upwards, denoted as N(orth), so that we can differentiate markers on the map with the robot when using printing function).

      -
      >.v
      .K<
      N..
      +
      >.v
      .K<
      N..
    2. Moves up:

      -
      >.v
      NK<
      ...
      +
      >.v
      NK<
      ...
    3. Moves up (now covers >), changes direction to right:

      -
      E.v
      .K<
      ...
      +
      E.v
      .K<
      ...
    4. Moves to right:

      -
      >Ev
      .K<
      ...
      +
      >Ev
      .K<
      ...
    5. Moves to right, faces south:

      -
      >.S
      .K<
      ...
      +
      >.S
      .K<
      ...
    6. Moves down, faces west:

      -
      >.v
      .KW
      ...
      +
      >.v
      .KW
      ...
Moves left, finds the key, returns FOUND_KEY:

      -
      >.v
      .W<
      ...
      +
      >.v
      .W<
      ...

    Bonus part

    diff --git a/c/bonuses/seminar-05-06/index.html b/c/bonuses/seminar-05-06/index.html index ecf22e1..e3bdab4 100644 --- a/c/bonuses/seminar-05-06/index.html +++ b/c/bonuses/seminar-05-06/index.html @@ -16,8 +16,8 @@ - - + +

    5th and 6th seminar

    For this bonus you can get at maximum 2.5 K₡.

    @@ -30,7 +30,7 @@ implementing a very special cipher.

    string in reversed order (also uppercase).

    In case you are given NULL, return NULL.

    Example (more in tests):

    -
    char* reversed = reverse("Hello world!");

    printf("%s\n", reversed);
    // "!DLROW OLLEH"

    if (reversed != NULL) {
    free(reversed);
    }
    +
    char* reversed = reverse("Hello world!");

    printf("%s\n", reversed);
    // "!DLROW OLLEH"

    if (reversed != NULL) {
    free(reversed);
    }

    Task no. 2: Vigenère (0.5 K₡)

    Vigenère cipher is similar to the Caesar cipher, but you also have a key that is used for encrypting (or decrypting).

    @@ -50,7 +50,7 @@ are uppercase or lowercase.

    Function returns address of the encrypted (or decrypted) string. Or NULL in case error occurs.

    Example:

    -
    char *encrypted = vigenere_encrypt("CoMPuTeR", "Hello world!");

    printf("%s\n", encrypted);
    // "JSXAI PSINR!"

    if (encrypted != NULL) {
free(encrypted);
    }
    +
    char *encrypted = vigenere_encrypt("CoMPuTeR", "Hello world!");

    printf("%s\n", encrypted);
    // "JSXAI PSINR!"

    if (encrypted != NULL) {
free(encrypted);
    }

    Bonus part (0.5 K₡)

If you can utilize a helper function that would do both encrypting and decrypting, you can gain 0.5 K₡.

    @@ -82,7 +82,7 @@ so we will demonstrate on letter H:

  • That half is used for xor with the other 4 bits:

    -
        1000  // second half
    XOR 1000 // first half after 2nd step
    --------
    0000
    +
        1000  // second half
    XOR 1000 // first half after 2nd step
    --------
    0000
Now we combine both halves (the first one is 1000, which we got from the 2nd step). @@ -97,7 +97,7 @@ which is the encrypted character H using this method.

  • char* bit_decrypt(const unsigned char* text)
  • Example:

    -
    unsigned char* encrypted = bit_encrypt("Hello world!");

    for (int i = 0; i < 12;i++) {
    printf("%x ", encrypted[i]);
    //80 9c 95 95 96 11 bc 96 b9 95 9d 10
    }

    if (encrypted != NULL) {
    free(encrypted);
    }
    +
    unsigned char* encrypted = bit_encrypt("Hello world!");

    for (int i = 0; i < 12;i++) {
    printf("%x ", encrypted[i]);
    //80 9c 95 95 96 11 bc 96 b9 95 9d 10
    }

    if (encrypted != NULL) {
    free(encrypted);
    }

    Task no. 4: All combined to BMP (0.5 K₡)

    Authors of the BMP cipher are non-disclosed :)

    Create pair of functions:

    @@ -114,6 +114,6 @@ which is encrypted character H using this method.

    For decrypting, reverse the steps.

    Submitting

    In case you have any questions, feel free to reach out to me.

    -
    +
    \ No newline at end of file diff --git a/c/bonuses/seminar-08/index.html b/c/bonuses/seminar-08/index.html index 68f8679..20de2da 100644 --- a/c/bonuses/seminar-08/index.html +++ b/c/bonuses/seminar-08/index.html @@ -16,8 +16,8 @@ - - + +

    8th seminar bonus assignment

    @@ -46,7 +46,7 @@ take up the space.

Your first task is to make a smallish program that counts occurrences of a specific (or given) word from a file and writes the number to another file.

    Usage of the program is:

    -
    Usage: ./counting <input-file> <output-file> [string-to-be-counted]
    +
    Usage: ./counting <input-file> <output-file> [string-to-be-counted]

    Arguments that are passed to the program represent:

    • <input-file> - path to the file where we count the words
    • @@ -84,16 +84,16 @@ as an argument and pretty-prints it.

or nil. Why would we have nil in a file? The file represents a pre-order iteration through the tree. Leaves never have a rank different from 0, so you can safely assume 2 non-existing nils in the input after you read such a node ;)

      -
      Example input fileTree it represents
      8;4
      5;3
      3;2
      2;1
      1;0
      nil
      4;0
      7;1
      6;0
      nil
      11;2
      10;1
      9;0
      nil
      12;0

      tree

      +
      Example input fileTree it represents
      8;4
      5;3
      3;2
      2;1
      1;0
      nil
      4;0
      7;1
      6;0
      nil
      11;2
      10;1
      9;0
      nil
      12;0

      tree

      In this task you are only provided with different trees in the test-trees directory. Implementation and format of the pretty-print is totally up to you. :)

      Example of mine for the tree above:

      -
      8 (rank = 4)
      +-- 5 (rank = 3)
      | +-- 3 (rank = 2)
      | | +-- 2 (rank = 1)
      | | | +-- 1 (rank = 0)
      | | +-- 4 (rank = 0)
      | +-- 7 (rank = 1)
      | +-- 6 (rank = 0)
      +-- 11 (rank = 2)
      +-- 10 (rank = 1)
      | +-- 9 (rank = 0)
      +-- 12 (rank = 0)
      +
      8 (rank = 4)
      +-- 5 (rank = 3)
      | +-- 3 (rank = 2)
      | | +-- 2 (rank = 1)
      | | | +-- 1 (rank = 0)
      | | +-- 4 (rank = 0)
      | +-- 7 (rank = 1)
      | +-- 6 (rank = 0)
      +-- 11 (rank = 2)
      +-- 10 (rank = 1)
      | +-- 9 (rank = 0)
      +-- 12 (rank = 0)

      Can you find out what are those trees? :)

      Submitting

      In case you have any questions, feel free to reach out to me.

      -
    +
    \ No newline at end of file diff --git a/c/bonuses/seminar-10/index.html b/c/bonuses/seminar-10/index.html index 9da40f4..abd4d5f 100644 --- a/c/bonuses/seminar-10/index.html +++ b/c/bonuses/seminar-10/index.html @@ -16,8 +16,8 @@ - - + +

    10th seminar

    Source

    @@ -39,14 +39,14 @@ of that, there are 2 kinds of tests:

    • Unit tests - that are present in test_hangman.c and can be run via:

      -
      $ make check-unit
      +
      $ make check-unit

      They cover majorly functions that can be tested easily via testing framework.

• Functional tests - same as in seminar-08, focused on testing the program as a whole. A basic smoke test is already included in the usage test case.

      They can be run via:

      -
      $ make check-functional
      +
      $ make check-functional

      When testing hangman function (the game loop), it is suggested to create functional tests.

      When submitting the files for review, please leave out functional tests that @@ -56,7 +56,7 @@ will drag the common files myself. :)

    Whole test suite can be run via:

    -
    $ make check
    +
    $ make check

    Summary of the gameplay

      @@ -89,7 +89,7 @@ bugs. Therefore try to follow this succession of steps:

In case you are submitting the bonus via GitLab, it is helpful to commit tests before committing the fixes, so that it is apparent that the bug manifests. Example of git log (notice that the first line represents the latest commit):

      -
      feat: Implement fizz_buzzer
      test: Add tests for fizz_buzzer
      fix: Fix NULL-check in print_name
      test: Add test for NULL in print_name
      +
      feat: Implement fizz_buzzer
      test: Add tests for fizz_buzzer
      fix: Fix NULL-check in print_name
      test: Add test for NULL in print_name

      Tasks

      As to your tasks, there are multiple things wrong in this project.

@@ -136,6 +136,6 @@ it is not a requirement at all and you can still get all points for the bonus ;)

        Submitting

        In case you have any questions, feel free to reach out to me.

        -
    +
    \ No newline at end of file diff --git a/c/category/bonuses/index.html b/c/category/bonuses/index.html index 7a9ec3c..049d32f 100644 --- a/c/category/bonuses/index.html +++ b/c/category/bonuses/index.html @@ -16,8 +16,8 @@ - - + +

    Bonuses

    Bonus assignments for Kontr Coins. diff --git a/c/category/practice-exams/index.html b/c/category/practice-exams/index.html index 82a3cbd..d9e54ff 100644 --- a/c/category/practice-exams/index.html +++ b/c/category/practice-exams/index.html @@ -16,8 +16,8 @@ - - + +

    Practice Exams

    Practice exams for training for the final exam. diff --git a/c/index.html b/c/index.html index b497c31..c5517d6 100644 --- a/c/index.html +++ b/c/index.html @@ -14,10 +14,10 @@ - - + + -

    + \ No newline at end of file diff --git a/c/mr/index.html b/c/mr/index.html index 3393643..3e39906 100644 --- a/c/mr/index.html +++ b/c/mr/index.html @@ -14,8 +14,8 @@ - - + +

    Submitting merge requests for review

    @@ -36,7 +36,7 @@ repository is clean and you are present on the main branch (master, trunk). If you do not know what your default branch is, it is probably master and you should not be on any other branch.

    Output of the command should look like this:

    -
    aisa$ git status
    On branch master # Or main or trunk.
    Your branch is up to date with 'origin/master'.

    nothing to commit, working tree clean
    +
    aisa$ git status
    On branch master # Or main or trunk.
    Your branch is up to date with 'origin/master'.

    nothing to commit, working tree clean

In case you are on a different branch or there are uncommitted changes, do not continue!!! Clean your repository (commit the changes or discard them) before you continue.

    Step #2 - Create new branch

    In your repository write command:

    -
    aisa$ git checkout -b BRANCH
    Switched to a new branch 'BRANCH'
    +
    aisa$ git checkout -b BRANCH
    Switched to a new branch 'BRANCH'

    Instead of BRANCH use some reasonable name for the branch. For example if you are working on the seminar from 3rd week, name the branch seminar-03.

    Step #3 - Do the assignment

    Download the skeleton for the seminar assignment, extract and program. For example if you are working on 3rd seminar, you can do so by:

    -
    aisa$ wget https://www.fi.muni.cz/pb071/seminars/seminar-03/pb071-seminar-03.zip
    aisa$ unzip pb071-seminar-03.zip
    # Now you should have directory 'seminar-03'.
    aisa$ rm pb071-seminar-03.zip
    aisa$ cd seminar-03
    # You can work on the assignment.
    +
    aisa$ wget https://www.fi.muni.cz/pb071/seminars/seminar-03/pb071-seminar-03.zip
    aisa$ unzip pb071-seminar-03.zip
    # Now you should have directory 'seminar-03'.
    aisa$ rm pb071-seminar-03.zip
    aisa$ cd seminar-03
    # You can work on the assignment.

    Step #4 - Commit and upload the changes to GitLab

    The same way you add and commit files for the homework assignments, you do for the seminar.

Now you can upload the changes to GitLab. git push is not enough, since the repository on GitLab does not know about your new branch. You can solve this by adding arguments:

    -
    aisa$ git push origin BRANCH
    ...
    remote: To create a merge request for BRANCH, visit:
    remote: https://gitlab.fi.muni.cz/login/pb071/merge_requests/new?merge_request%5Bsource_branch%5D=BRANCH
    ...
    +
    aisa$ git push origin BRANCH
    ...
    remote: To create a merge request for BRANCH, visit:
    remote: https://gitlab.fi.muni.cz/login/pb071/merge_requests/new?merge_request%5Bsource_branch%5D=BRANCH
    ...

In the output you should have a link for creating a merge request. If you see this link, open it and skip the next step.

    Step #5 - Creating a merge request manually

@@ -85,8 +85,8 @@ For the sake of safety, do not continue without a clean repository. Then, with the command git checkout BRANCH, switch to your default branch BRANCH.

If you do not know which branch is your default, try git branch, which outputs all branches in your repository. The default branch is typically master, but it can be main or trunk.

    -
    aisa$ git status
    # Check if repository is clean

    # If you know, what is your default branch, you can skip next command.
    aisa$ git branch
    # Find the default branch in the list; should be one of the `master`, `main` or
    # `trunk` and you should not have more than one of those.
    # In case the list clears the terminal and you cannot see shell prompt, you can
    # press `q` to quit the pager.

    aisa$ git checkout master
    +
    aisa$ git status
    # Check if repository is clean

    # If you know, what is your default branch, you can skip next command.
    aisa$ git branch
    # Find the default branch in the list; should be one of the `master`, `main` or
    # `trunk` and you should not have more than one of those.
    # In case the list clears the terminal and you cannot see shell prompt, you can
    # press `q` to quit the pager.

    aisa$ git checkout master

    -

    Adapted from: https://www.fi.muni.cz/~xlacko1/pb071/mr.html

    +

    Adapted from: https://www.fi.muni.cz/~xlacko1/pb071/mr.html

    \ No newline at end of file diff --git a/c/pexam/cams/index.html b/c/pexam/cams/index.html index 109f79c..367bbec 100644 --- a/c/pexam/cams/index.html +++ b/c/pexam/cams/index.html @@ -16,8 +16,8 @@ - - + +

    Watching Cams

@@ -77,7 +77,7 @@ reading. Readings are separated by commas, which may, but don't have to, be accompanied by whitespace around them.

    Examples

    Few examples of the data from the cameras follow

    -
    10: ABC-12-34 1664608712, 289: XYZ-98-76         1665416417,
    25: ABC-12-34 1633078256 , 42: TryToCatchMe 1671602419,
    11: EL9-987 1679541338 ,2 : Foo-666 1683170082,42: YourMum 1683170082,
    42: TryToCatchMe 1671602419 , 1234: TryToCatchMe 1671602419,
    19: ABC-12-34 1664659649, 69:YouShould-not-pLaCe-4ny-expectations%on^the(input 1680307994,
    9 : 9B9-161 1665416417 , 10: 1a1-999 1671602419,1:lmao 1633078256,
    16: ABC-12-34 1664609012
    +
    10: ABC-12-34 1664608712, 289: XYZ-98-76         1665416417,
    25: ABC-12-34 1633078256 , 42: TryToCatchMe 1671602419,
    11: EL9-987 1679541338 ,2 : Foo-666 1683170082,42: YourMum 1683170082,
    42: TryToCatchMe 1671602419 , 1234: TryToCatchMe 1671602419,
    19: ABC-12-34 1664659649, 69:YouShould-not-pLaCe-4ny-expectations%on^the(input 1680307994,
    9 : 9B9-161 1665416417 , 10: 1a1-999 1671602419,1:lmao 1633078256,
    16: ABC-12-34 1664609012

    Format of the output

    info

    All the examples consider using data from the example of the input.

You are expected to print out the dates and cameras that have captured the license plate for each of them (in a sorted fashion).

If there are multiple scans present and the timespan (i.e. the time difference between the scans) is bigger than 60 minutes, you should separate them by a newline. For example:

    -
    *** ABC-12-34 ***
    25: Fri Oct 1 10:50:56 2021

    10: Sat Oct 1 09:18:32 2022
    16: Sat Oct 1 09:23:32 2022

    19: Sat Oct 1 23:27:29 2022
    +
    *** ABC-12-34 ***
    25: Fri Oct 1 10:50:56 2021

    10: Sat Oct 1 09:18:32 2022
    16: Sat Oct 1 09:23:32 2022

    19: Sat Oct 1 23:27:29 2022
    tip

    Since you are given the timestamp in a time_t compatible type on UN*X, you can safely use ctime(3) for printing the timestamp as a human readable time when outputting the date and time.

    diff --git a/c/pexam/garbage_collect/index.html b/c/pexam/garbage_collect/index.html index a8d6bb2..0f0ac15 100644 --- a/c/pexam/garbage_collect/index.html +++ b/c/pexam/garbage_collect/index.html @@ -16,8 +16,8 @@ - - + +

    Garbage Collection

@@ -65,9 +65,9 @@ respective sizes. If there is a directory in the current working directory, it has dir instead of the size.

    -
    $ ls
    dir a
    14848514 b.txt
    8504156 c.dat
    dir d
    $ cd a
    $ cd .
    $ cd .
    $ cd .
    $ ls
    dir e
    29116 f
    2557 g
    62596 h.lst
    $ cd e
    $ ls
    584 i
    $ cd ..
    $ cd ..
    $ cd d
    $ ls
    4060174 j
    8033020 d.log
    5626152 d.ext
    7214296 k
    +
    $ ls
    dir a
    14848514 b.txt
    8504156 c.dat
    dir d
    $ cd a
    $ cd .
    $ cd .
    $ cd .
    $ ls
    dir e
    29116 f
    2557 g
    62596 h.lst
    $ cd e
    $ ls
    584 i
    $ cd ..
    $ cd ..
    $ cd d
    $ ls
    4060174 j
    8033020 d.log
    5626152 d.ext
    7214296 k

For this input, you will get the following file system:

    -
    - / (dir, size=48381165)
    - a (dir, size=94853)
    - e (dir, size=584)
    - i (file, size=584)
    - f (file, size=29116)
    - g (file, size=2557)
    - h.lst (file, size=62596)
    - b.txt (file, size=14848514)
    - c.dat (file, size=8504156)
    - d (dir, size=24933642)
    - j (file, size=4060174)
    - d.log (file, size=8033020)
    - d.ext (file, size=5626152)
    - k (file, size=7214296)
    +
    - / (dir, size=48381165)
    - a (dir, size=94853)
    - e (dir, size=584)
    - i (file, size=584)
    - f (file, size=29116)
    - g (file, size=2557)
    - h.lst (file, size=62596)
    - b.txt (file, size=14848514)
    - c.dat (file, size=8504156)
    - d (dir, size=24933642)
    - j (file, size=4060174)
    - d.log (file, size=8033020)
    - d.ext (file, size=5626152)
    - k (file, size=7214296)

    Format of the output

    Your program should support 2 switches:

      diff --git a/contributions/index.html b/contributions/index.html index e9b92af..d880bad 100644 --- a/contributions/index.html +++ b/contributions/index.html @@ -14,8 +14,8 @@ - - + +

      Contributions

      Many of my contributions to open-source projects.

      tmt

      Description

      The `tmt` tool provides a user-friendly way to work with tests. You can comfortably create new tests, safely and easily run tests across different environments, review test results, debug test code and enable tests in the CI using a consistent and concise config.

      Contribution

      Just a smallish contribution to the docs related to the changes implemented on the Packit side.

      Fedora Infrastructure Ansible

      Description

      Collection of Ansible playbooks that powers the Fedora Infrastructure.

      Contribution

      I have adjusted the groups in the Bodhi playbooks after Packit has been granted the privileges to propose updates without restrictions.

      Bodhi

      Description

      Bodhi is a web-system that facilitates the process of publishing updates for a Fedora-based software distribution.

      Contribution

      I have adjusted the client, so that it doesn't show secrets in terminal when you log in to the Bodhi via browser.

      Gluetool Modules Collection

      Description

      Modules for gluetool — a command line centric framework usable for glueing modules into a pipeline.

      Contribution
      • I have proposed a possible implementation of git merging that was later on extended.
      • I have tried to help out with Copr module after they deprecated older version of their API.

      Pagure

      Description

      Pagure is a git-centered forge, python based using pygit2.

      Contribution

      I have added an API endpoint for reopening pull requests.

      Copr

      Description

      RPM build system - upstream for Copr.

      Contribution
      • Supporting external repositories for custom SRPM build method.
      • Allowing admins of Copr repositories to build without the need to ask for explicit builder permissions.

      python-gitlab

      Description

      A python wrapper for the GitLab API.

      Contribution

I have contributed support for the merge_ref on merge requests, which hadn't been supported yet, even though it was present in the GitLab API.

      PatternFly React

      Description

      A set of React components for the PatternFly project.

      Contribution

      When working on Packit Dashboard, I have spotted smaller bugs that were present in this project and fixed them upstream to provide better experience for our users.

      Fira Code

      Description

      Free monospaced font with programming ligatures.

      Contribution

      I have set up a GitHub Action for building the font on each push to the default branch allowing users to install bleeding edge version of the font.

      nixpkgs

      Description

      Nixpkgs is a collection of over 80,000 software packages that can be installed with the Nix package manager. It also implements NixOS, a purely-functional Linux distribution.

      Contribution

When I was trying out nixpkgs, I tried to bump .NET Core to the latest version. My changes haven't been accepted as they required bumping multiple other packages that depended upon .NET Core.

      Darcula

      Description

      A theme for Visual Studio Code based on Darcula theme from Jetbrains IDEs.

      Contribution

      I have contributed support for diff files, though the project doesn't seem to be live anymore, so it hasn't been accepted as of now.

      Packit

      Description

      An open source project aiming to ease the integration of your project with Fedora Linux, CentOS Stream and other distributions.

      Contribution

      Have a look at my pull requests.

      Snitch

      Description

      Language agnostic tool that collects TODOs in the source code and reports them as Issues.

      Contribution
      • Environment variable support for self-hosted GitLab instances
      • GitLab support

      Karel the Robot

      Description

      Karel the robot is in general an educational programming language for beginners, created by Richard E. Pattis. This is implementation of Karel the Robot for C programming language.

      This project is used for educational purposes at TUKE.

      Contribution

      I have contributed some refactoring tips to the author of the library.

      diff --git a/cpp/category/exceptions-and-raii/index.html b/cpp/category/exceptions-and-raii/index.html index 5d75155..7baae54 100644 --- a/cpp/category/exceptions-and-raii/index.html +++ b/cpp/category/exceptions-and-raii/index.html @@ -16,8 +16,8 @@ - - + +

      Exceptions and RAII

      Materials related to the exceptions or RAII in C++. diff --git a/cpp/environment/index.html b/cpp/environment/index.html index dcebfed..dfa4e1d 100644 --- a/cpp/environment/index.html +++ b/cpp/environment/index.html @@ -16,8 +16,8 @@ - - + +

      Environment

      Required tools per OS

      @@ -45,7 +45,7 @@ clang itself or in separate package, e.g. clang-tools-extra)
    • valgrind - in case you manage to create memory errors in your code

    In case of Fedora it is following set of packages:

    -
    sudo dnf install -y clang clang-tools-extra valgrind gcc make
    # If you decide to use google test: add `gtest` or `llvm-googletest` for clang
    +
    sudo dnf install -y clang clang-tools-extra valgrind gcc make
    # If you decide to use google test: add `gtest` or `llvm-googletest` for clang

    macOS

In case of macOS you should be able to find all of the packages in brew.sh, except valgrind; not sure if you can work around that with podman/docker.

    @@ -53,7 +53,7 @@ clang itself or in separate package, e.g. clang-tools-extra)

    nix(OS)

In case you run NixOS or a Linux distribution with nixpkgs, or you use nixpkgs as a replacement for homebrew on macOS, you should be fine with the following config:

    -
    with import <nixpkgs> {};
    stdenv.mkDerivation {
    name = "cppenv";
    buildInputs = [
    clang-tools

    gnumake

    gmock # used for google test
    valgrind # not sure about macOS though
    ];
    }
    +
    with import <nixpkgs> {};
    stdenv.mkDerivation {
    name = "cppenv";
    buildInputs = [
    clang-tools

    gnumake

    gmock # used for google test
    valgrind # not sure about macOS though
    ];
    }

    IDEs

Choice of the IDE is mostly up to you; you do not need to use an IDE at all ;)

    I would probably recommend VSCode + appropriate extension or CLion if you are used @@ -110,7 +110,7 @@ following.

It is quite popular, consists of only one header file, and also might be easier to set up.

It might feel slow to compile; this can be addressed by having one object file with a precompiled main for tests, e.g.

    -
    /* File: catch_main.cpp
    * Compile it with: g++ $(CXXFLAGS) -c catch_main.cpp
    *
    * Once you have source file with tests, e.g. test_something.cpp, compile it in
    * a similar fashion: g++ $(CXXFLAGS) -c test_something.cpp $(LDLIBS)
    *
    * And link them together:
    * g++ catch_main.o test_something.o -o test_something
    *
    * Now you can run ./test_something and if you change it, you do not need to compile
    * the main again.
    */
    #define CATCH_CONFIG_MAIN
    #include "catch.hpp"
    +
    /* File: catch_main.cpp
    * Compile it with: g++ $(CXXFLAGS) -c catch_main.cpp
    *
    * Once you have source file with tests, e.g. test_something.cpp, compile it in
    * a similar fashion: g++ $(CXXFLAGS) -c test_something.cpp $(LDLIBS)
    *
    * And link them together:
    * g++ catch_main.o test_something.o -o test_something
    *
    * Now you can run ./test_something and if you change it, you do not need to compile
    * the main again.
    */
    #define CATCH_CONFIG_MAIN
    #include "catch.hpp"

    Google Test

    It is faster compared to catch2, even if you do not precompile the main. Might be more complicated to set up, since there are multiple files (it is not one header diff --git a/cpp/exceptions-and-raii/placeholders/index.html b/cpp/exceptions-and-raii/placeholders/index.html index 7e1f1b5..f1652e9 100644 --- a/cpp/exceptions-and-raii/placeholders/index.html +++ b/cpp/exceptions-and-raii/placeholders/index.html @@ -16,8 +16,8 @@ - - + +

    Placeholders

    Here we will try to implement some placeholders that you can find in other @@ -42,15 +42,15 @@ ways how to implement them:

    I will choose raising an exception, since the closest equivalent of panic in C++ would be asserts that are (by default) disabled in the release builds.

    However I am too lazy to do:

    -
    throw todo();
    // or
    throw todo("optional note");
    +
    throw todo();
    // or
    throw todo("optional note");

    Therefore we will implement exceptions and also wrap them in functions, so that we can do:

    -
    todo();
    // or
    todo("optional note");
    +
    todo();
    // or
    todo("optional note");
    tip

    Wrapping them in a function (or macro) will allow us to do a little magic trick.

    Implementation

    We're going to utilize the exceptions, so we'll need to include the exception header and we will start with a simple _todo exception class.

    -
    #include <exception>
    #include <string>

    class _todo : public std::exception {
    std::string cause;

    public:
    _todo() : cause("not yet implemented") {}
    _todo(std::string&& excuse) : cause("not yet implemented: " + excuse) {}
    virtual const char* what() const throw() { return cause.c_str(); }
    };
    +
    #include <exception>
    #include <string>

    class _todo : public std::exception {
    std::string cause;

    public:
    _todo() : cause("not yet implemented") {}
    _todo(std::string&& excuse) : cause("not yet implemented: " + excuse) {}
    virtual const char* what() const throw() { return cause.c_str(); }
    };

    In this case we have 2 constructors:

    1. default constructor without any parameters that will return just @@ -59,13 +59,13 @@ header and we will start with a simple _todo exception class.

      not yet implemented: ‹excuse›

    If we were to use it now, we would need to do something like:

    -
    #include "placeholders.hpp"

    int main() {
    throw _todo();
    return 0;
    }
    +
    #include "placeholders.hpp"

    int main() {
    throw _todo();
    return 0;
    }

    Wrapping in a function

    I am a lazy person, so we will wrap the exception in a function that will throw it:

    -
    void todo() {
    throw _todo();
    }
    +
    void todo() {
    throw _todo();
    }

    This can be used like:

    -
    #include "placeholders.hpp"

    int main() {
    todo();
    return 0;
    }
    +
    #include "placeholders.hpp"

    int main() {
    todo();
    return 0;
    }

    Magic trick

At the beginning I've mentioned that by wrapping the exceptions in helper functions that will throw them, we can do a nice magic trick 😄 This trick will consist of a formatted string and for that we will use std::format that is available since C++20.

    We just need to add one more overload for our todo():

    -
    #include <format>

    template< class... Args >
    void todo(std::format_string<Args...> fmt, Args&&... args) {
    throw _todo(std::format(fmt, args...));
    }
    +
    #include <format>

    template< class... Args >
    void todo(std::format_string<Args...> fmt, Args&&... args) {
    throw _todo(std::format(fmt, args...));
    }

    Finishing off with 2 more exceptions

    Now we can repeat the same process for the other two exceptions I've mentioned

      @@ -81,7 +81,7 @@ available since C++20.

    • unreachable.

    In the end we should end up with something like this:

    -
    #include <exception>
    #include <format>
    #include <string>

    class _todo : public std::exception {
    std::string cause;

    public:
    _todo() : cause("not yet implemented") {}
    _todo(std::string&& excuse) : cause("not yet implemented: " + excuse) {}
    virtual const char* what() const throw() { return cause.c_str(); }
    };

    void todo() { throw _todo(); }

    template <class... Args>
    void todo(std::format_string<Args...> fmt, Args&&... args) {
    throw _todo(std::format(fmt, args...));
    }

    class _unimplemented : public std::exception {
    std::string cause;

    public:
    _unimplemented() : cause("not implemented") {}
    _unimplemented(std::string&& excuse)
    : cause("not implemented: " + excuse) {}
    virtual const char* what() const throw() { return cause.c_str(); }
    };

    void unimplemented() { throw _unimplemented(); }

    template <class... Args>
    void unimplemented(std::format_string<Args...> fmt, Args&&... args) {
    throw _unimplemented(std::format(fmt, args...));
    }

    class _unreachable : public std::exception {
    std::string cause;

    public:
    _unreachable() : cause("entered unreachable code") {}
    _unreachable(std::string&& excuse)
    : cause("entered unreachable code: " + excuse) {}
    virtual const char* what() const throw() { return cause.c_str(); }
    };

    void unreachable() { throw _unreachable(); }

    template <class... Args>
    void unreachable(std::format_string<Args...> fmt, Args&&... args) {
    throw _unreachable(std::format(fmt, args...));
    }
    +
    #include <exception>
    #include <format>
    #include <string>

    class _todo : public std::exception {
    std::string cause;

    public:
    _todo() : cause("not yet implemented") {}
    _todo(std::string&& excuse) : cause("not yet implemented: " + excuse) {}
    virtual const char* what() const throw() { return cause.c_str(); }
    };

    void todo() { throw _todo(); }

    template <class... Args>
    void todo(std::format_string<Args...> fmt, Args&&... args) {
    throw _todo(std::format(fmt, args...));
    }

    class _unimplemented : public std::exception {
    std::string cause;

    public:
    _unimplemented() : cause("not implemented") {}
    _unimplemented(std::string&& excuse)
    : cause("not implemented: " + excuse) {}
    virtual const char* what() const throw() { return cause.c_str(); }
    };

    void unimplemented() { throw _unimplemented(); }

    template <class... Args>
    void unimplemented(std::format_string<Args...> fmt, Args&&... args) {
    throw _unimplemented(std::format(fmt, args...));
    }

    class _unreachable : public std::exception {
    std::string cause;

    public:
    _unreachable() : cause("entered unreachable code") {}
    _unreachable(std::string&& excuse)
    : cause("entered unreachable code: " + excuse) {}
    virtual const char* what() const throw() { return cause.c_str(); }
    };

    void unreachable() { throw _unreachable(); }

    template <class... Args>
    void unreachable(std::format_string<Args...> fmt, Args&&... args) {
    throw _unreachable(std::format(fmt, args...));
    }
    info

    Final source code: placeholders.hpp

    \ No newline at end of file diff --git a/cpp/index.html b/cpp/index.html index 393b1bf..bb6e73a 100644 --- a/cpp/index.html +++ b/cpp/index.html @@ -14,10 +14,10 @@ - - + + - + \ No newline at end of file diff --git a/files/algorithms/graphs/iterative-and-iterators.tar.bz2 b/files/algorithms/graphs/iterative-and-iterators.tar.bz2 index 39b3c2f..66a29b3 100644 Binary files a/files/algorithms/graphs/iterative-and-iterators.tar.bz2 and b/files/algorithms/graphs/iterative-and-iterators.tar.bz2 differ diff --git a/files/algorithms/graphs/iterative-and-iterators.tar.gz b/files/algorithms/graphs/iterative-and-iterators.tar.gz index 3ac7728..7853914 100644 Binary files a/files/algorithms/graphs/iterative-and-iterators.tar.gz and b/files/algorithms/graphs/iterative-and-iterators.tar.gz differ diff --git a/files/algorithms/recursion/karel-1.tar.bz2 b/files/algorithms/recursion/karel-1.tar.bz2 index adbca31..a5a1f9e 100644 Binary files a/files/algorithms/recursion/karel-1.tar.bz2 and b/files/algorithms/recursion/karel-1.tar.bz2 differ diff --git a/files/algorithms/recursion/karel-1.tar.gz b/files/algorithms/recursion/karel-1.tar.gz index 46c940b..3167036 100644 Binary files a/files/algorithms/recursion/karel-1.tar.gz and b/files/algorithms/recursion/karel-1.tar.gz differ diff --git a/files/algorithms/recursion/pyramid-slide-down.tar.bz2 b/files/algorithms/recursion/pyramid-slide-down.tar.bz2 index 2694ffe..1bc7c01 100644 Binary files a/files/algorithms/recursion/pyramid-slide-down.tar.bz2 and b/files/algorithms/recursion/pyramid-slide-down.tar.bz2 differ diff --git a/files/algorithms/recursion/pyramid-slide-down.tar.gz b/files/algorithms/recursion/pyramid-slide-down.tar.gz index 9e4a485..76935ce 100644 Binary files a/files/algorithms/recursion/pyramid-slide-down.tar.gz and b/files/algorithms/recursion/pyramid-slide-down.tar.gz differ diff --git a/files/algorithms/time-complexity/extend.tar.bz2 b/files/algorithms/time-complexity/extend.tar.bz2 index 88f2ceb..17af79b 100644 Binary files a/files/algorithms/time-complexity/extend.tar.bz2 and b/files/algorithms/time-complexity/extend.tar.bz2 differ diff --git a/files/algorithms/time-complexity/extend.tar.gz b/files/algorithms/time-complexity/extend.tar.gz index 1c12588..196ff3e 100644 Binary files a/files/algorithms/time-complexity/extend.tar.gz and b/files/algorithms/time-complexity/extend.tar.gz differ diff --git a/files/c/bonuses/03.tar.bz2 b/files/c/bonuses/03.tar.bz2 index e7bbe81..75312e1 100644 Binary files a/files/c/bonuses/03.tar.bz2 and b/files/c/bonuses/03.tar.bz2 differ diff --git a/files/c/bonuses/03.tar.gz b/files/c/bonuses/03.tar.gz index 6c5693f..5d23df5 100644 Binary files a/files/c/bonuses/03.tar.gz and b/files/c/bonuses/03.tar.gz differ diff --git a/files/c/bonuses/04.tar.bz2 b/files/c/bonuses/04.tar.bz2 index 5a638b5..5d4d28d 100644 Binary files a/files/c/bonuses/04.tar.bz2 and b/files/c/bonuses/04.tar.bz2 differ diff --git a/files/c/bonuses/04.tar.gz b/files/c/bonuses/04.tar.gz index afad951..90bb7a6 100644 Binary files a/files/c/bonuses/04.tar.gz and b/files/c/bonuses/04.tar.gz differ diff --git a/files/c/bonuses/05-06.tar.bz2 b/files/c/bonuses/05-06.tar.bz2 index 416c5e8..391a79f 100644 Binary files a/files/c/bonuses/05-06.tar.bz2 and b/files/c/bonuses/05-06.tar.bz2 differ diff --git a/files/c/bonuses/05-06.tar.gz b/files/c/bonuses/05-06.tar.gz index f30a5c1..32bae0b 100644 Binary files a/files/c/bonuses/05-06.tar.gz and b/files/c/bonuses/05-06.tar.gz differ diff --git a/files/c/bonuses/08.tar.bz2 b/files/c/bonuses/08.tar.bz2 
index b8ceb52..6593081 100644 Binary files a/files/c/bonuses/08.tar.bz2 and b/files/c/bonuses/08.tar.bz2 differ diff --git a/files/c/bonuses/08.tar.gz b/files/c/bonuses/08.tar.gz index aa6e3aa..ec043f3 100644 Binary files a/files/c/bonuses/08.tar.gz and b/files/c/bonuses/08.tar.gz differ diff --git a/files/c/bonuses/10.tar.bz2 b/files/c/bonuses/10.tar.bz2 index 2d4f2a8..2fd1131 100644 Binary files a/files/c/bonuses/10.tar.bz2 and b/files/c/bonuses/10.tar.bz2 differ diff --git a/files/c/bonuses/10.tar.gz b/files/c/bonuses/10.tar.gz index b4ee1d2..c298367 100644 Binary files a/files/c/bonuses/10.tar.gz and b/files/c/bonuses/10.tar.gz differ diff --git a/index.html b/index.html index 88018ae..9247a21 100644 --- a/index.html +++ b/index.html @@ -14,8 +14,8 @@ - - + +

    mf

    blog and additional materials for courses at φ

    About Me

    I'm working in Red Hat in the Packit team and studying at FI MUNI while also tutoring some courses there.

    Content

    On this page you can find my blog or unofficial materials I have written over the course of teaching multiple courses at the FI.

    Mastodon

    Feel free to contact me on any of the following Mastodon accounts: Fosstodon or Hachyderm.io

    diff --git a/search/index.html b/search/index.html index f123ced..9cb5c15 100644 --- a/search/index.html +++ b/search/index.html @@ -14,8 +14,8 @@ - - + + diff --git a/talks/index.html b/talks/index.html index 0270ebc..9c95c06 100644 --- a/talks/index.html +++ b/talks/index.html @@ -14,8 +14,8 @@ - - + +

    Talks

    Featured talks I presented on various events.

    Packit: RPM integration, all in one

    Do you want to automate how you build and test your RPM packages? Do you maintain any package in Fedora and want to automate the releases? Or are you just interested in CI/CD on GitHub or GitLab, Fedora and integration of upstream projects with RPM-based Linux distributions? In this session, we are going to deep-dive into features of Packit that can help you do your day-to-day job.
    • DevConf.cz
    • Brno, Czechia
    • 6/2023

    Also presented on:

    • DevConf.cz Mini in Brno, Czechia (3/2023)

    Credits to Paweł Kosiec for implementing his own React components for talks.