aoc/year2017/day04.rs

//! # High-Entropy Passphrases
//!
//! ## Part One
//!
//! We use a [`FastSet`] to detect duplicates. Sorting the words in each line
//! then checking adjacent values for duplicates also works but is slower.
//!
//! ## Part Two
//!
//! To detect anagrams we first convert each word into a histogram of its letter frequencies.
//! As the cardinality is at most 26 we can use a fixed size array to represent each histogram.
//!
//! Then a [`FastSet`] is used to detect duplicates. Sorting the letters in each word so that
//! anagrams become identical also works but is slower.
use crate::util::hash::*;

pub fn parse(input: &str) -> Vec<&str> {
    input.lines().collect()
}
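/// A passphrase is valid when every word appears at most once.
/// The set's `insert` returns `false` for a value that is already present, so `all`
/// short-circuits on the first repeated word and the line is filtered out.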
pub fn part1(input: &[&str]) -> usize {
    let mut seen = FastSet::new();
    input
        .iter()
        .filter(|line| {
            seen.clear();
            line.split_ascii_whitespace().all(|token| seen.insert(token))
        })
        .count()
}
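
// A minimal sketch of the slower alternative mentioned in the module docs for part one:
// sort the words of each line, then look for duplicates in adjacent positions.
// `part1_sorting` is an illustrative name, not part of this module's API.
#[allow(dead_code)]
fn part1_sorting(input: &[&str]) -> usize {
    input
        .iter()
        .filter(|line| {
            let mut words: Vec<_> = line.split_ascii_whitespace().collect();
            words.sort_unstable();
            // After sorting, any duplicate words must be adjacent.
            words.windows(2).all(|pair| pair[0] != pair[1])
        })
        .count()
}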
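/// A passphrase is valid when no two words are anagrams of each other.
/// Anagrams share the same letter histogram, so inserting histograms instead of the
/// words themselves reuses the duplicate check from part one.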
pub fn part2(input: &[&str]) -> usize {
    let mut seen = FastSet::new();
    input
        .iter()
        .filter(|line| {
            seen.clear();
            line.split_ascii_whitespace().all(|token| seen.insert(letter_frequency(token)))
        })
        .count()
}
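
// A minimal sketch of the slower alternative mentioned in the module docs for part two:
// sort the letters of each word so that anagrams become identical, then reuse the same
// duplicate detection. `part2_sorting` is an illustrative name, not part of this module's API.
#[allow(dead_code)]
fn part2_sorting(input: &[&str]) -> usize {
    let mut seen = FastSet::new();
    input
        .iter()
        .filter(|line| {
            seen.clear();
            line.split_ascii_whitespace().all(|token| {
                // Sorting the bytes maps every anagram to the same key.
                let mut letters: Vec<_> = token.bytes().collect();
                letters.sort_unstable();
                seen.insert(letters)
            })
        })
        .count()
}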
/// Convert a token to its letter frequency histogram.
/// Only 26 elements are needed but 32 is faster to hash.
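/// Anagrams always produce identical histograms. Tokens are assumed to contain only
/// lowercase ASCII letters, matching the puzzle input.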
#[inline]
fn letter_frequency(token: &str) -> [u8; 32] {
    let mut freq = [0; 32];
    for b in token.bytes() {
        freq[(b - b'a') as usize] += 1;
    }
    freq
}