From: Plom Heller Date: Tue, 21 Apr 2026 22:43:19 +0000 (+0200) Subject: Initial commit. X-Git-Url: https://plomlompom.com/repos/%22https:/validator.w3.org/process?a=commitdiff_plain;h=c7c7f96e4f4cc7bcefc91132b509e50d432508e8;p=bricksplom Initial commit. --- c7c7f96e4f4cc7bcefc91132b509e50d432508e8 diff --git a/bricksplom.py b/bricksplom.py new file mode 100755 index 0000000..9d87abb --- /dev/null +++ b/bricksplom.py @@ -0,0 +1,284 @@ +#!/usr/bin/env python3 +'Data structures for managing/sorting bricks of a certain kind.' +from abc import ABC, abstractmethod +from pathlib import Path +from typing import Self + +PATH_BOXES = 'boxes.txt' +PATH_COLLECTIONS = 'collections.txt' +PATH_DESIGNS = 'designs.txt' +PATH_PIECES = 'pieces.txt' + +CHAR_NEWLINE = '\n' +CHAR_SPACE = ' ' +CHAR_COMMA = ',' +CHAR_EQ = '=' +CHAR_UNDER = '_' +CHAR_COMMENT = '#' +CHAR_SEPARATOR_COLUMN = '-' +CHAR_SEPARATOR_PAGE = CHAR_EQ + +PieceListing = tuple[int, str, str] +PageColumn = tuple[PieceListing, ...] +Page = tuple[PageColumn, ...] + + +class Textfiled(ABC): + 'Table to be read from textfile.' + + @staticmethod + def lines_of( + path: str + ) -> tuple[str, ...]: + 'Non-empty right-stripped lines of file at path.' + return tuple(line.rstrip() + for line in Path(path).read_text(encoding='utf8' + ).split(CHAR_NEWLINE) + if line.strip() and not line.startswith(CHAR_COMMENT)) + + @staticmethod + def tokify( + body: str, + len_expected: int + ) -> tuple[str, ...]: + 'Body parsd into left-stripped tokens of len_expected count.' + collected: list[str] = [] + while len(collected) < len_expected: + body = body.lstrip() + if len(collected) == len_expected - 1: + tok = body + else: + assert CHAR_SPACE in body + tok, body = body.split(CHAR_SPACE, maxsplit=1) + collected += [tok] + return tuple(collected) + + @classmethod + @abstractmethod + def from_textfile( + cls, + path: str + ) -> dict[str, Self]: + 'Build from file at path.' 
class Design(Textfiled):
    'Shape and texture configurations with descriptions and equalities.'

    def __init__(
            self,
            id_: str,
            description: str
    ) -> None:
        self.id_ = id_
        self.description = description
        # IDs of designs recorded (via '=' lines) as equal to this one.
        self.alternates: set[str] = set()

    @classmethod
    def from_textfile(
            cls,
            path: str
    ) -> dict[str, Self]:
        # Two line shapes: "ID _DESCRIPTION" declares a design,
        # "ALT_ID =TARGET_ID" declares ALT_ID an alternate of TARGET_ID.
        collected = {}
        alts: dict[str, set[str]] = {}
        for design_id, body in [cls.tokify(line, 2)
                                for line in cls.lines_of(path)]:
            assert len(body) > 1
            char_type, body = body[0], body[1:]
            assert char_type in {CHAR_EQ, CHAR_UNDER}
            if char_type == CHAR_EQ:
                alts.setdefault(body, set()).add(design_id)
            else:  # == CHAR_UNDER
                collected[design_id] = cls(design_id, body)
        # KeyError here means an '=' line targets an undeclared design.
        for id_, alternatives in alts.items():
            collected[id_].alternates = alternatives
        return collected

    def __str__(self) -> str:
        # sorted() keeps output deterministic despite set iteration order.
        return '\n'.join([f'{self.id_:>6} _{self.description}']
                         + [f'{a:>6} ={self.id_}'
                            for a in sorted(self.alternates)])


class Piece(Textfiled):
    'Individual configuration of design and color.'

    def __init__(
            self,
            id_: str,
            design_id: str,
            color_id: str,
            comment: str
    ) -> None:
        self.id_ = id_
        self.design_id = design_id
        self.color_id = color_id
        self.comment = comment

    @classmethod
    def from_textfile(
            cls,
            path: str
    ) -> dict[str, Self]:
        # Line shape: "PIECE_ID DESIGN_ID COLOR_ID [COMMENT …]";
        # the comment is optional, hence the + [''] padding below.
        collected = {}
        for toks in [cls.tokify(line, 3) for line in cls.lines_of(path)]:
            piece_id, design_id = toks[:2]
            color_id, comment = (toks[-1].split(CHAR_SPACE, maxsplit=1)
                                 + [''])[:2]
            collected[piece_id] = cls(piece_id, design_id, color_id, comment)
        return collected

    def __str__(self) -> str:
        return (f'{self.id_:>7} {self.design_id:>6} '
                f'{self.color_id:>3} {self.comment}').rstrip()


class Collection(Textfiled):
    'Named collection of pieces in order of pages of columns of counts.'

    def __init__(
            self,
            id_: str,
            description: str,
            piece_listings: tuple[Page, ...]
    ) -> None:
        self.id_ = id_
        self.description = description
        self.piece_listings = piece_listings

    @classmethod
    def from_textfile(
            cls,
            path: str
    ) -> dict[str, Self]:
        # Unindented lines start a new collection ("ID DESCRIPTION");
        # indented lines either separate columns ('-') / pages ('='), or
        # list a piece as "COUNT PIECE_ID [COMMENT …]".
        collected: dict[str, tuple[str, list[list[list[PieceListing]]]]] = {}
        i_listings: list[list[list[PieceListing]]] = [[[]]]
        for line in cls.lines_of(path):
            if not line.startswith(CHAR_SPACE):
                id_, description = cls.tokify(line, 2)
                i_listings = [[[]]]
                collected[id_] = description, i_listings
                continue
            if line[1:2] == CHAR_SEPARATOR_COLUMN:
                i_listings[-1] += [[]]
            elif line[1:2] == CHAR_SEPARATOR_PAGE:
                i_listings += [[[]]]
            else:
                count, remainder = cls.tokify(line, 2)
                assert count.isdigit()
                piece_id, comment = (remainder.split(CHAR_SPACE, maxsplit=1)
                                     + [''])[:2]
                assert len(piece_id) > 0
                i_listings[-1][-1] += [(int(count), piece_id, comment)]
        # Freeze the mutable build structure into nested tuples.
        return {
            k: cls(k, v[0], tuple(tuple(tuple(column) for column in page)
                                  for page in v[1]))
            for k, v in collected.items()}

    def piece_ids(self) -> tuple[str, ...]:
        'Flattened, alphabetically sorted list of recorded piece IDs.'
        collected: list[str] = []
        for page in self.piece_listings:
            for column in page:
                collected += [p_id for _, p_id, _ in column]
        return tuple(sorted(collected))

    def __str__(self) -> str:
        pages = []
        for page in self.piece_listings:
            columns = []
            for column in page:
                lines = []
                for count, piece_id, comment in column:
                    lines += [f' {count:2} {piece_id:>7} {comment}']
                columns += ['\n'.join(lines)]
            pages += ['\n -\n'.join(columns)]
        return f'\n{self.id_} {self.description}\n' + ' =\n'.join(pages)


class Box(Textfiled):
    'Order of designs.'

    def __init__(
            self,
            id_: str,
            designs: tuple[str, ...]
    ) -> None:
        self.id_ = id_
        self.designs = designs

    @classmethod
    def from_textfile(
            cls,
            path: str
    ) -> dict[str, Self]:
        # Line shape: "BOX_ID DESIGN_ID,DESIGN_ID,…".
        return {id_: cls(id_, tuple(order.split(CHAR_COMMA)))
                for id_, order in [cls.tokify(line, 2)
                                   for line in cls.lines_of(path)]}

    def __str__(self) -> str:
        return f'{self.id_:>2} {",".join(self.designs)}'


def check_consistencies_between_tables(
        pieces: dict[str, Piece],
        collections: dict[str, Collection],
        designs: dict[str, Design],
        boxes: dict[str, Box]
) -> None:
    'Ensure inter-table consistencies between inputs.'

    # check all items listed in collections recorded in pieces
    for coll in collections.values():
        for piece_id in coll.piece_ids():
            assert piece_id in pieces, piece_id

    # check boxes and designs record exactly the same design IDs
    boxed = {design_id
             for box in boxes.values() for design_id in box.designs}
    for design_id in boxed:
        assert design_id in designs, design_id
    for design_id in designs:
        assert design_id in boxed, design_id

    # built once: O(n) instead of rebuilding per design below
    piece_design_ids = {piece.design_id for piece in pieces.values()}

    # check all pieces' designs recorded in designs (at least via alts)
    for design_id in piece_design_ids:
        if design_id not in designs:
            # try to resolve through any design listing it as alternate;
            # on failure keep the original ID so the assert names it
            design_id = next((k for k, v in designs.items()
                              if design_id in v.alternates), design_id)
        assert design_id in designs, design_id

    # check all recorded designs have matching pieces (at least via alts)
    for design_id, design in designs.items():
        assert ({design_id} | design.alternates) & piece_design_ids, design_id


def main(
) -> None:
    'Load all four tables, cross-check them, print them table by table.'
    pieces = Piece.from_textfile(PATH_PIECES)
    collections = Collection.from_textfile(PATH_COLLECTIONS)
    designs = Design.from_textfile(PATH_DESIGNS)
    boxes = Box.from_textfile(PATH_BOXES)
    check_consistencies_between_tables(pieces, collections, designs, boxes)
    for title, items in (('PIECES', pieces),
                         ('DESIGNS', designs),
                         ('BOXES', boxes),
                         ('COLLECTIONS', collections)):
        print(title)
        for item in items.values():
            print(item)


if __name__ == '__main__':
    main()