// aoc/year2017/day14.rs

//! # Disk Defragmentation
//!
//! This problem is a blend of the hashing from [`Day 10`] and the connected clique finding
//! from [`Day 12`] and reuses the same flood fill approach to count groups.
//!
//! [`Day 10`]: crate::year2017::day10
//! [`Day 12`]: crate::year2017::day12
use crate::util::thread::*;
use std::sync::Mutex;
use std::sync::atomic::{AtomicUsize, Ordering};
11
/// State shared between worker threads. The atomic counter and the mutex can be
/// used through a shared reference without any extra locking.
pub struct Shared {
    prefix: String,
    counter: AtomicUsize,
    mutex: Mutex<Exclusive>,
}

/// Mutable state that must only be touched while holding the mutex.
struct Exclusive {
    grid: Vec<u8>,
}
23
24/// Parallelize the hashing as each row is independent.
25pub fn parse(input: &str) -> Vec<u8> {
26    let shared = Shared {
27        prefix: input.trim().to_owned(),
28        counter: AtomicUsize::new(0),
29        mutex: Mutex::new(Exclusive { grid: vec![0; 0x4000] }),
30    };
31
32    // Use as many cores as possible to parallelize the hashing.
33    spawn(|| worker(&shared));
34
35    shared.mutex.into_inner().unwrap().grid
36}
37
/// Each used square is stored as a one, so the answer is just the sum of the grid.
pub fn part1(input: &[u8]) -> u32 {
    input.iter().copied().map(u32::from).sum()
}
41
42pub fn part2(input: &[u8]) -> u32 {
43    let mut grid: Vec<_> = input.iter().map(|&n| n == 1).collect();
44    let mut groups = 0;
45
46    for start in 0..0x4000 {
47        // DFS from each new group.
48        if grid[start] {
49            groups += 1;
50            dfs(&mut grid, start);
51        }
52    }
53
54    groups
55}
56
57/// Each worker thread chooses the next available index then computes the hash and patches the
58/// final vec with the result.
59fn worker(shared: &Shared) {
60    loop {
61        let index = shared.counter.fetch_add(1, Ordering::Relaxed);
62        if index >= 128 {
63            break;
64        }
65
66        let row = fill_row(&shared.prefix, index);
67        let start = index * 128;
68        let end = start + 128;
69
70        let mut exclusive = shared.mutex.lock().unwrap();
71        exclusive.grid[start..end].copy_from_slice(&row);
72    }
73}
74
75/// Compute the knot hash for a row and expand into an array of bytes.
76fn fill_row(prefix: &str, index: usize) -> [u8; 128] {
77    let s = format!("{prefix}-{index}");
78    let mut lengths: Vec<_> = s.bytes().map(|b| b as usize).collect();
79    lengths.extend([17, 31, 73, 47, 23]);
80
81    let knot = knot_hash(&lengths);
82    let mut result = [0; 128];
83
84    for (i, chunk) in knot.chunks_exact(16).enumerate() {
85        let reduced = chunk.iter().fold(0, |acc, n| acc ^ n);
86        for j in 0..8 {
87            result[8 * i + j] = (reduced >> (7 - j)) & 1;
88        }
89    }
90
91    result
92}
93
/// Tweaked version of the Day 10 knot hash that always performs 64 rounds.
///
/// Rather than reversing an arbitrary circular window, the current position is
/// kept at the front by rotating the whole vec, so every reversal is `[..len]`.
fn knot_hash(lengths: &[usize]) -> Vec<u8> {
    let mut marks: Vec<u8> = (0..=255).collect();
    let mut offset = 0;
    let mut skip = 0;

    for _ in 0..64 {
        for &len in lengths {
            marks[..len].reverse();

            // Bring the next starting position to the front of the vec.
            let shift = len + skip;
            marks.rotate_left(shift % 256);

            // Track the total rotation (mod 256) so it can be undone later.
            offset = (offset + shift) % 256;
            skip += 1;
        }
    }

    // Rotate back so the original starting position is restored.
    marks.rotate_right(offset);
    marks
}
114
/// Flood fill that clears every square connected to the starting index.
///
/// Uses an explicit stack instead of recursion so the depth stays bounded even
/// when a single region covers most of the 128x128 grid.
fn dfs(grid: &mut [bool], index: usize) {
    grid[index] = false;
    let mut pending = vec![index];

    while let Some(current) = pending.pop() {
        let x = current % 128;
        let y = current / 128;

        // (neighbor exists?, neighbor index) for left, right, up and down.
        // The wrapped subtractions are only used when the guard is true.
        let neighbors = [
            (x > 0, current.wrapping_sub(1)),
            (x < 127, current + 1),
            (y > 0, current.wrapping_sub(128)),
            (y < 127, current + 128),
        ];

        for (exists, next) in neighbors {
            if exists && grid[next] {
                grid[next] = false;
                pending.push(next);
            }
        }
    }
}