--- /dev/null
+[mypy]
+mypy_path = $MYPY_CONFIG_FILE_DIR/src
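
Annotation (not part of the patch): mypy expands $MYPY_CONFIG_FILE_DIR to the
directory containing the config file, so src/ lands on the import path no
matter where mypy is invoked from. A minimal sketch of exercising this via
mypy's documented Python API; the file name mypy.ini and the target path are
assumptions, not taken from the patch:

    from mypy import api  # mypy's public programmatic entry point

    # Equivalent to: python3 -m mypy --config-file mypy.ini src/plomtask
    stdout, stderr, exit_status = api.run(
        ['--config-file', 'mypy.ini', 'src/plomtask'])
    print(stdout, end='')
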
--- /dev/null
+[BASIC]
+init-hook='import sys; sys.path[0:0] = ["src"]'
+good-names-rgxs=(.*_)?(GET|POST)(_.+)?,test_[A-Z]+
#!/bin/sh
-cd src
-for dir in $(echo '.' 'plomtask' 'tests'); do
- echo "Running mypy on ${dir}/ …."
- python3 -m mypy ${dir}/*.py
- echo "Running flake8 on ${dir}/ …"
- python3 -m flake8 ${dir}/*.py
- echo "Running pylint on ${dir}/ …"
- python3 -m pylint ${dir}/*.py
+DIRS='src src/plomtask tests'
+TOOLS='flake8 pylint mypy'
+for dir in $DIRS; do
+ for tool in $TOOLS; do
+ echo "Running ${tool} on ${dir}/ …."
+ python3 -m ${tool} ${dir}/*.py
+ done
done
echo "Running unittest-parallel on tests/."
unittest-parallel -t . -s tests/ -p '*.py'
-rm test_db:*
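
Annotation (not part of the patch): the rewrite above replaces three
hard-coded per-tool blocks with a tools-times-directories matrix. A rough
Python equivalent of the new loop, for illustration only (the directory and
tool lists are copied from the script; everything else is assumed):

    import pathlib
    import subprocess

    for dir_ in ('src', 'src/plomtask', 'tests'):
        files = [str(path) for path in pathlib.Path(dir_).glob('*.py')]
        for tool in ('flake8', 'pylint', 'mypy'):
            print(f'Running {tool} on {dir_}/ …')
            # mirrors the script's `python3 -m ${tool} ${dir}/*.py`
            subprocess.run(['python3', '-m', tool, *files], check=False)
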
+++ /dev/null
-[BASIC]
-init-hook='import sys; sys.path.append(".")'
-good-names-rgxs=(.*_)?(GET|POST)(_.+)?,,test_[A-Z]+
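
Annotation (not part of the patch): a pylint init-hook is a line of Python
executed at pylint startup. The removed hook above appended '.' (relying on
the old script's `cd src`), while the new file's hook prepends 'src' to the
module search path; spelled out, the two hooks run:

    import sys

    sys.path[0:0] = ["src"]  # new hook: prepend, independent of a `cd src`
    # sys.path.append(".")   # old hook: append, only correct from within src/
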
+++ /dev/null
-"""Test Conditions module."""
-from typing import Any
-from tests.utils import (TestCaseSansDB, TestCaseWithDB, TestCaseWithServer,
- Expected)
-from plomtask.conditions import Condition
-
-
-class TestsSansDB(TestCaseSansDB):
- """Tests requiring no DB setup."""
- checked_class = Condition
-
-
-class TestsWithDB(TestCaseWithDB):
- """Tests requiring DB, but not server setup."""
- checked_class = Condition
- default_init_kwargs = {'is_active': 0}
-
-
-class ExpectedGetConditions(Expected):
- """Builder of expectations for GET /conditions."""
- _default_dict = {'sort_by': 'title', 'pattern': ''}
-
- def recalc(self) -> None:
- """Update internal dictionary by subclass-specific rules."""
- super().recalc()
- self._fields['conditions'] = self.as_ids(self.lib_all('Condition'))
-
-
-class ExpectedGetCondition(Expected):
- """Builder of expectations for GET /condition."""
- _default_dict = {'is_new': False}
- _on_empty_make_temp = ('Condition', 'cond_as_dict')
-
- def __init__(self, id_: int | None, *args: Any, **kwargs: Any) -> None:
- self._fields = {'condition': id_}
- super().__init__(*args, **kwargs)
-
- def recalc(self) -> None:
- """Update internal dictionary by subclass-specific rules."""
- super().recalc()
- for p_field, c_field in [('conditions', 'enabled_processes'),
- ('disables', 'disabling_processes'),
- ('blockers', 'disabled_processes'),
- ('enables', 'enabling_processes')]:
- self._fields[c_field] = self.as_ids([
- p for p in self.lib_all('Process')
- if self._fields['condition'] in p[p_field]])
-
-
-class TestsWithServer(TestCaseWithServer):
- """Module tests against our HTTP server/handler (and database)."""
- checked_class = Condition
-
- def test_fail_POST_condition(self) -> None:
- """Test malformed/illegal POST /condition requests."""
- # check incomplete POST payloads
- valid_payload = {'title': '', 'description': ''}
- self.check_minimal_inputs('/condition', valid_payload)
- # check valid POST payload on bad paths
- self.check_post(valid_payload, '/condition?id=foo', 400)
- # check cannot delete depended-upon Condition
- self.post_exp_cond([], {})
- for key in ('conditions', 'blockers', 'enables', 'disables'):
- self.post_exp_process([], {key: [1]}, 1)
- self.check_post({'delete': ''}, '/condition?id=1', 500)
- self.post_exp_process([], {}, 1)
- self.post_exp_day([], {'new_todo': '1'})
- for key in ('conditions', 'blockers', 'enables', 'disables'):
- self.post_exp_todo([], {key: [1]}, 1)
- self.check_post({'delete': ''}, '/condition?id=1', 500)
-
- def test_POST_condition(self) -> None:
- """Test (valid) POST /condition and its effect on GET /condition[s]."""
- url_single, url_all = '/condition?id=1', '/conditions'
- exp_single, exp_all = ExpectedGetCondition(1), ExpectedGetConditions()
- all_exps = [exp_single, exp_all]
- # test valid POST's effect on single /condition and full /conditions
- self.post_exp_cond(all_exps, {}, post_to_id=False)
- self.check_json_get(url_single, exp_single)
- self.check_json_get(url_all, exp_all)
- # test (no) effect of invalid POST to existing Condition on /condition
- self.check_post({}, url_single, 400)
- self.check_json_get(url_single, exp_single)
- # test effect of POST changing title, description, and activeness
- self.post_exp_cond(all_exps, {'title': 'bar', 'description': 'oof',
- 'is_active': 1})
- self.check_json_get(url_single, exp_single)
-        # test POST sans 'is_active' re-setting it to False
- self.post_exp_cond(all_exps, {})
- self.check_json_get(url_single, exp_single)
-        # test deletion POST's effect: id=1 turning into an empty single,
-        # and the full /conditions listing into an empty list
- self.check_json_get(url_single, exp_single)
- self.post_exp_cond(all_exps, {'delete': ''}, redir_to_id=False)
- exp_single.set('is_new', True)
- self.check_json_get(url_single, exp_single)
- self.check_json_get(url_all, exp_all)
-
- def test_GET_condition(self) -> None:
- """More GET /condition testing, especially for Process relations."""
- # check expected default status codes
- self.check_get_defaults('/condition')
- # check 'is_new' set if id= absent or pointing to not-yet-existing ID
- exp = ExpectedGetCondition(None)
- exp.set('is_new', True)
- self.check_json_get('/condition', exp)
- exp = ExpectedGetCondition(1)
- exp.set('is_new', True)
- self.check_json_get('/condition?id=1', exp)
- # make Condition and two Processes that among them establish all
- # possible ConditionsRelations to it, check /condition displays all
- exp = ExpectedGetCondition(1)
- self.post_exp_cond([exp], {}, post_to_id=False)
- for i, p in enumerate([('conditions', 'disables'),
- ('enables', 'blockers')]):
- self.post_exp_process([exp], {k: [1] for k in p}, i+1)
- self.check_json_get('/condition?id=1', exp)
-
- def test_GET_conditions(self) -> None:
- """Test GET /conditions."""
- # test empty result on empty DB, default-settings on empty params
- exp = ExpectedGetConditions()
- self.check_json_get('/conditions', exp)
-        # test 'sort_by' defaults to 'title' (even if set to something else,
-        # as long as there is no handler for it) and 'pattern' gets preserved
- exp.set('pattern', 'bar')
- self.check_json_get('/conditions?sort_by=foo&pattern=bar&foo=x', exp)
- exp.set('pattern', '')
- # test non-empty result, automatic (positive) sorting by title
- post_cond1 = {'is_active': 0, 'title': 'foo', 'description': 'oof'}
- post_cond2 = {'is_active': 0, 'title': 'bar', 'description': 'rab'}
- post_cond3 = {'is_active': 1, 'title': 'baz', 'description': 'zab'}
- for i, post in enumerate([post_cond1, post_cond2, post_cond3]):
- self.post_exp_cond([exp], post, i+1, post_to_id=False)
- self.check_filter(exp, 'conditions', 'sort_by', 'title', [2, 3, 1])
- # test other sortings
- self.check_filter(exp, 'conditions', 'sort_by', '-title', [1, 3, 2])
- self.check_filter(exp, 'conditions', 'sort_by', 'is_active', [1, 2, 3])
- self.check_filter(exp, 'conditions', 'sort_by', '-is_active',
- [3, 2, 1])
- exp.set('sort_by', 'title')
- # test pattern matching on title
- exp.lib_del('Condition', 1)
- self.check_filter(exp, 'conditions', 'pattern', 'ba', [2, 3])
- # test pattern matching on description
- exp.lib_wipe('Condition')
- exp.set_cond_from_post(1, post_cond1)
- self.check_filter(exp, 'conditions', 'pattern', 'of', [1])
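
Annotation (not part of the patch): the Expected subclasses above lean on
helpers from tests/utils (lib_all, lib_set, as_ids, set, force, recalc) that
this diff does not show. A behavioral sketch inferred purely from their usage
here, not the actual implementation:

    from typing import Any

    class ExpectedSketch:
        """Rough shape of tests.utils.Expected, as inferred from usage."""

        def __init__(self) -> None:
            # per-type id->dict store fed by lib_set/set_*_from_post
            self._lib: dict[str, dict[int, dict[str, Any]]] = {}
            # what a JSON GET response is expected to contain
            self._fields: dict[str, Any] = {}

        def lib_all(self, type_name: str) -> list[dict[str, Any]]:
            """All stored items of one type, e.g. every known Condition."""
            return list(self._lib.get(type_name, {}).values())

        @staticmethod
        def as_ids(items: list[dict[str, Any]]) -> list[int]:
            """Reduce item dicts to their IDs, as recalc() does above."""
            return [item['id'] for item in items]
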
+++ /dev/null
-"""Test Days module."""
-from datetime import date as dt_date, datetime, timedelta
-from typing import Any
-from tests.utils import (TestCaseSansDB, TestCaseWithDB, TestCaseWithServer,
- Expected, date_and_day_id, dt_date_from_day_id)
-from plomtask.dating import date_in_n_days as tested_date_in_n_days
-from plomtask.days import Day
-
-# Simply the ISO format for dates as used in plomtask.dating, but for testing
-# purposes we state our expectations here independently and explicitly
-TESTING_DATE_FORMAT = '%Y-%m-%d'
-
-
-def _testing_date_in_n_days(n: int) -> str:
-    """Return in ISO format / TESTING_DATE_FORMAT date from today + n days.
-
-    As with TESTING_DATE_FORMAT, we assume this equals the original code
-    at plomtask.dating.date_in_n_days, but want to state our expectations
-    explicitly to rule out importing bugs from the original.
- """
- date = dt_date.today() + timedelta(days=n)
- return date.strftime(TESTING_DATE_FORMAT)
-
-
-def _days_n_for_date(date: str) -> int:
- return (dt_date.fromisoformat(date) - dt_date(2000, 1, 1)).days
-
-
-class TestsSansDB(TestCaseSansDB):
- """Days module tests not requiring DB setup."""
- checked_class = Day
-
- def test_date_in_n_days(self) -> None:
-        """Test dating.date_in_n_days."""
- for n in [-100, -2, -1, 0, 1, 2, 1000]:
- date = datetime.now() + timedelta(days=n)
- self.assertEqual(tested_date_in_n_days(n),
- date.strftime(TESTING_DATE_FORMAT))
-
- def test_Day_date_weekday_neighbor_dates(self) -> None:
- """Test Day's date parsing and neighbourhood resolution."""
- self.assertEqual(dt_date(2000, 1, 2).isoformat(), Day(1).date)
- self.assertEqual(dt_date(2001, 1, 2).isoformat(), Day(367).date)
- self.assertEqual('Sunday', Day(1).weekday)
- self.assertEqual('March', Day(75).month_name)
- self.assertEqual('2000-12-31', Day(366).prev_date)
- self.assertEqual('2001-03-01', Day(424).next_date)
-
-
-class TestsWithDB(TestCaseWithDB):
- """Tests requiring DB, but not server setup."""
- checked_class = Day
-
- def test_Day_with_filled_gaps(self) -> None:
- """Test .with_filled_gaps."""
- day_ids = [n + 1 for n in range(9)]
- dt_dates = [dt_date_from_day_id(id_) for id_ in day_ids]
-
- def expect_within_full_range_as_commented(
- range_indexes: tuple[int, int],
- indexes_to_provide: list[int]
- ) -> None:
- start_i, end_i = range_indexes
- days_expected = [Day(n) for n in day_ids]
- to_remove = []
- for idx in indexes_to_provide:
- days_expected[idx] = Day(day_ids[idx], '#')
- days_expected[idx].save(self.db_conn)
- to_remove += [days_expected[idx]]
- days_expected = days_expected[start_i:end_i+1]
- days_result = Day.with_filled_gaps(
- self.db_conn, dt_dates[start_i], dt_dates[end_i])
- self.assertEqual(days_result, days_expected)
- for day in to_remove:
- day.remove(self.db_conn)
-
- # check provided Days recognizable in (full-range) interval
- expect_within_full_range_as_commented((0, 8), [0, 4, 8])
- # check limited range, but limiting Days provided
- expect_within_full_range_as_commented((2, 6), [2, 5, 6])
- # check Days within range but beyond provided Days also filled in
- expect_within_full_range_as_commented((1, 7), [2, 5])
- # check provided Days beyond range ignored
- expect_within_full_range_as_commented((3, 5), [1, 2, 4, 6, 7])
- # check inversion of start_date and end_date returns empty list
- expect_within_full_range_as_commented((5, 3), [2, 4, 6])
- # check empty provision still creates filler elements in interval
- expect_within_full_range_as_commented((3, 5), [])
- # check single-element selection creating only filler beyond provided
- expect_within_full_range_as_commented((1, 1), [2, 4, 6])
- # check (un-saved) filler Days don't show up in cache or DB
- day = Day(day_ids[3])
- day.save(self.db_conn)
- Day.with_filled_gaps(self.db_conn, dt_dates[0], dt_dates[-1])
- self.check_identity_with_cache_and_db([day])
-
-
-class ExpectedGetCalendar(Expected):
- """Builder of expectations for GET /calendar."""
-
- def __init__(self, start: int, end: int, *args: Any, **kwargs: Any
- ) -> None:
- today_dt = dt_date.today()
- today_iso = today_dt.isoformat()
- self._fields = {
- 'start': (today_dt + timedelta(days=start)).isoformat(),
- 'end': (today_dt + timedelta(days=end)).isoformat(),
- 'today': today_iso}
- self._fields['days'] = [
- _days_n_for_date(today_iso) + i for i in range(start, end+1)]
- super().__init__(*args, **kwargs)
- for day_id in self._fields['days']:
- self.lib_set('Day', [self.day_as_dict(day_id)])
-
-
-class ExpectedGetDay(Expected):
- """Builder of expectations for GET /day."""
- _default_dict = {'make_type': 'full'}
- _on_empty_make_temp = ('Day', 'day_as_dict')
-
- def __init__(self, day_id: int, *args: Any, **kwargs: Any) -> None:
- self._fields = {'day': day_id}
- super().__init__(*args, **kwargs)
-
- def recalc(self) -> None:
- super().recalc()
- todos = [t for t in self.lib_all('Todo')
- if t['day_id'] == self._fields['day']]
- self.lib_get('Day', self._fields['day'])['todos'] = self.as_ids(todos)
- self._fields['top_nodes'] = [
- {'children': [], 'seen': 0, 'todo': todo['id']}
- for todo in todos]
- for todo in todos:
- proc = self.lib_get('Process', todo['process_id'])
- for title in ['conditions', 'enables', 'blockers', 'disables']:
- todo[title] = proc[title]
- conds_present = set()
- for todo in todos:
- for title in ['conditions', 'enables', 'blockers', 'disables']:
- for cond_id in todo[title]:
- conds_present.add(cond_id)
- self._fields['conditions_present'] = list(conds_present)
- for prefix in ['en', 'dis']:
- blers = {}
- for cond_id in conds_present:
- blers[cond_id] = self.as_ids(
- [t for t in todos if cond_id in t[f'{prefix}ables']])
- self._fields[f'{prefix}ablers_for'] = blers
- self._fields['processes'] = self.as_ids(self.lib_all('Process'))
-
-
-class TestsWithServer(TestCaseWithServer):
- """Tests against our HTTP server/handler (and database)."""
- checked_class = Day
-
- def test_basic_GET_day(self) -> None:
-        """Test basic (no Processes/Conditions/Todos) GET /day requests."""
- # check illegal date parameters
- self.check_get_defaults('/day', '2024-01-01', 'date')
- self.check_get('/day?date=2024-02-30', 400)
- # check undefined day
- today_iso = dt_date.today().isoformat()
- exp = ExpectedGetDay(_days_n_for_date(today_iso))
- self.check_json_get('/day', exp)
- # check defined day with make_type parameter
- date, day_id = date_and_day_id(1)
- exp = ExpectedGetDay(day_id)
- exp.set('make_type', 'bar')
- self.check_json_get(f'/day?date={date}&make_type=bar', exp)
- # check parsing of 'yesterday', 'today', 'tomorrow'
- for name, dist in [('yesterday', -1), ('today', 0), ('tomorrow', +1)]:
- exp = ExpectedGetDay(_days_n_for_date(today_iso) + dist)
- self.check_json_get(f'/day?date={name}', exp)
-
- def test_fail_POST_day(self) -> None:
- """Test malformed/illegal POST /day requests."""
-        # check payloads lacking minimum expected fields
- url = '/day?date=2024-01-01'
- minimal_post = {'make_type': '', 'day_comment': ''}
- self.check_minimal_inputs(url, minimal_post)
- # to next check illegal new_todo values, we need an actual Process
- self.post_exp_process([], {}, 1)
- # check illegal new_todo values
- self.check_post(minimal_post | {'new_todo': ['foo']}, url, 400)
- self.check_post(minimal_post | {'new_todo': [1, 2]}, url, 404)
- # to next check illegal old_todo inputs, we need to first post Todo
- self.check_post(minimal_post | {'new_todo': [1]}, url, 302,
- '/day?date=2024-01-01&make_type=')
- # check illegal old_todo inputs (equal list lengths though)
- post = minimal_post | {'comment': ['foo'], 'effort': [3.3],
- 'done': [], 'todo_id': [1]}
- self.check_post(post, url, 302, '/day?date=2024-01-01&make_type=')
-        post['todo_id'] = [2]  # reference to non-existent Todo
- self.check_post(post, url, 404)
- post['todo_id'] = ['a']
- self.check_post(post, url, 400)
- post['todo_id'] = [1]
- post['done'] = ['foo']
- self.check_post(post, url, 400)
- post['done'] = [2] # reference to non-posted todo_id
- self.check_post(post, url, 400)
- post['done'] = []
- post['effort'] = ['foo']
- self.check_post(post, url, 400)
- post['effort'] = [None]
- self.check_post(post, url, 400)
- post['effort'] = [3.3]
- # check illegal old_todo inputs: unequal list lengths
- post['comment'] = []
- self.check_post(post, url, 400)
- post['comment'] = ['foo', 'foo']
- self.check_post(post, url, 400)
- post['comment'] = ['foo']
- post['effort'] = []
- self.check_post(post, url, 400)
- post['effort'] = [3.3, 3.3]
- self.check_post(post, url, 400)
- post['effort'] = [3.3]
- post['todo_id'] = [1, 1]
- self.check_post(post, url, 400)
- post['todo_id'] = [1]
-        # check valid POST payload on bad paths
- self.check_post(post, '/day', 400)
- self.check_post(post, '/day?date=', 400)
- self.check_post(post, '/day?date=foo', 400)
-
- def test_basic_POST_day(self) -> None:
- """Test basic (no Processes/Conditions/Todos) POST /day.
-
- Check POST requests properly parse 'today', 'tomorrow', 'yesterday',
- and actual date strings; store 'day_comment'; preserve 'make_type'
- setting in redirect even if nonsensical; and allow '' as 'new_todo'.
- """
- for name, dist, test_str in [('2024-01-01', None, 'a'),
- ('today', 0, 'b'),
- ('yesterday', -1, 'c'),
- ('tomorrow', +1, 'd')]:
- date = name if dist is None else _testing_date_in_n_days(dist)
- post = {'day_comment': test_str, 'make_type': f'x:{test_str}',
- 'new_todo': ['', '']}
- post_url = f'/day?date={name}'
- redir_url = f'{post_url}&make_type={post["make_type"]}'
- self.check_post(post, post_url, 302, redir_url)
- day_id = _days_n_for_date(date)
- exp = ExpectedGetDay(day_id)
- exp.set_day_from_post(day_id, post)
- self.check_json_get(post_url, exp)
-
- def test_GET_day_with_processes_and_todos(self) -> None:
- """Test GET /day displaying Processes and Todos (no trees)."""
- date, day_id = date_and_day_id(1)
- exp = ExpectedGetDay(day_id)
- # check Processes get displayed in ['processes'] and ['_library'],
- # even without any Todos referencing them
- proc_posts = [{'title': 'foo', 'description': 'oof', 'effort': 1.1},
- {'title': 'bar', 'description': 'rab', 'effort': 0.9}]
- for i, proc_post in enumerate(proc_posts):
- self.post_exp_process([exp], proc_post, i+1)
- self.check_json_get(f'/day?date={date}', exp)
- # post Todos of either Process and check their display
- self.post_exp_day([exp], {'new_todo': [1, 2]})
- self.check_json_get(f'/day?date={date}', exp)
- # test malformed Todo manipulation posts
- post_day = {'day_comment': '', 'make_type': '', 'comment': [''],
- 'new_todo': [], 'done': [1], 'effort': [2.3]}
- self.check_post(post_day, f'/day?date={date}', 400) # no todo_id
-        post_day['todo_id'] = [2]  # not identifying Todo referred to by done
- self.check_post(post_day, f'/day?date={date}', 400)
- post_day['todo_id'] = [1, 2] # imply range beyond that of effort etc.
- self.check_post(post_day, f'/day?date={date}', 400)
- post_day['comment'] = ['FOO', '']
- self.check_post(post_day, f'/day?date={date}', 400)
- post_day['effort'] = [2.3, '']
- post_day['comment'] = ['']
- self.check_post(post_day, f'/day?date={date}', 400)
- # add a comment to one Todo and set the other's doneness and effort
- post_day['comment'] = ['FOO', '']
- self.post_exp_day([exp], post_day)
- self.check_json_get(f'/day?date={date}', exp)
- # invert effort and comment between both Todos
- # (cannot invert doneness, /day only collects positive setting)
- post_day['comment'] = ['', 'FOO']
- post_day['effort'] = ['', 2.3]
- self.post_exp_day([exp], post_day)
- self.check_json_get(f'/day?date={date}', exp)
-
- def test_POST_day_todo_make_types(self) -> None:
- """Test behavior of POST /todo on 'make_type'='full' and 'empty'."""
- date, day_id = date_and_day_id(1)
- exp = ExpectedGetDay(day_id)
- # create two Processes, with second one step of first one
- self.post_exp_process([exp], {}, 2)
- self.post_exp_process([exp], {'new_top_step': 2}, 1)
- exp.lib_set('ProcessStep', [
- exp.procstep_as_dict(1, owner_id=1, step_process_id=2)])
- self.check_json_get(f'/day?date={date}', exp)
- # post Todo of adopting Process, with make_type=full
- self.post_exp_day([exp], {'make_type': 'full', 'new_todo': [1]})
- exp.lib_get('Todo', 1)['children'] = [2]
- exp.lib_set('Todo', [exp.todo_as_dict(2, 2)])
- top_nodes = [{'todo': 1,
- 'seen': 0,
- 'children': [{'todo': 2,
- 'seen': 0,
- 'children': []}]}]
- exp.force('top_nodes', top_nodes)
- self.check_json_get(f'/day?date={date}', exp)
- # post another Todo of adopting Process, expect to adopt existing
- self.post_exp_day([exp], {'make_type': 'full', 'new_todo': [1]})
- exp.lib_set('Todo', [exp.todo_as_dict(3, 1, children=[2])])
- top_nodes += [{'todo': 3,
- 'seen': 0,
- 'children': [{'todo': 2,
- 'seen': 1,
- 'children': []}]}]
- exp.force('top_nodes', top_nodes)
- self.check_json_get(f'/day?date={date}', exp)
- # post another Todo of adopting Process, no adopt with make_type=empty
- self.post_exp_day([exp], {'make_type': 'empty', 'new_todo': [1]})
- exp.lib_set('Todo', [exp.todo_as_dict(4, 1)])
- top_nodes += [{'todo': 4,
- 'seen': 0,
- 'children': []}]
- exp.force('top_nodes', top_nodes)
- self.check_json_get(f'/day?date={date}', exp)
-
- def test_POST_day_new_todo_order_commutative(self) -> None:
-        """Check that 'new_todo' value order in POST /day doesn't matter."""
- date, day_id = date_and_day_id(1)
- exp = ExpectedGetDay(day_id)
- self.post_exp_process([exp], {}, 2)
- self.post_exp_process([exp], {'new_top_step': 2}, 1)
- exp.lib_set('ProcessStep', [
- exp.procstep_as_dict(1, owner_id=1, step_process_id=2)])
- # make-full-day-post batch of Todos of both Processes in one order …,
- self.post_exp_day([exp], {'make_type': 'full', 'new_todo': [1, 2]})
- top_nodes: list[dict[str, Any]] = [{'todo': 1,
- 'seen': 0,
- 'children': [{'todo': 2,
- 'seen': 0,
- 'children': []}]}]
- exp.force('top_nodes', top_nodes)
- exp.lib_get('Todo', 1)['children'] = [2]
- self.check_json_get(f'/day?date={date}', exp)
- # … and then in the other, expecting same node tree / relations
- exp.lib_del('Day', day_id)
- date, day_id = date_and_day_id(2)
- exp.set('day', day_id)
- day_post = {'make_type': 'full', 'new_todo': [2, 1]}
- self.post_exp_day([exp], day_post, day_id)
- exp.lib_del('Todo', 1)
- exp.lib_del('Todo', 2)
- top_nodes[0]['todo'] = 3 # was: 1
- top_nodes[0]['children'][0]['todo'] = 4 # was: 2
- exp.lib_get('Todo', 3)['children'] = [4]
- self.check_json_get(f'/day?date={date}', exp)
-
- def test_POST_day_todo_deletion_by_negative_effort(self) -> None:
- """Test POST /day removal of Todos by setting negative effort."""
- date, day_id = date_and_day_id(1)
- exp = ExpectedGetDay(day_id)
- self.post_exp_process([exp], {}, 1)
- self.post_exp_day([exp], {'new_todo': [1]})
- # check cannot remove Todo if commented
- self.post_exp_day([exp],
- {'todo_id': [1], 'comment': ['foo'], 'effort': [-1]})
- self.check_json_get(f'/day?date={date}', exp)
- # check *can* remove Todo while getting done
- self.post_exp_day([exp],
- {'todo_id': [1], 'comment': [''], 'effort': [-1],
- 'done': [1]})
- exp.lib_del('Todo', 1)
- self.check_json_get(f'/day?date={date}', exp)
-
- def test_GET_day_with_conditions(self) -> None:
- """Test GET /day displaying Conditions and their relations."""
- date, day_id = date_and_day_id(1)
- exp = ExpectedGetDay(day_id)
- # check non-referenced Conditions not shown
- cond_posts = [{'is_active': 0, 'title': 'A', 'description': 'a'},
- {'is_active': 1, 'title': 'B', 'description': 'b'}]
- for i, cond_post in enumerate(cond_posts):
- self.check_post(cond_post, f'/condition?id={i+1}')
- self.check_json_get(f'/day?date={date}', exp)
- # add Processes with Conditions, check Conditions now shown
- for i, (c1, c2) in enumerate([(1, 2), (2, 1)]):
- post = {'conditions': [c1], 'disables': [c1],
- 'blockers': [c2], 'enables': [c2]}
- self.post_exp_process([exp], post, i+1)
- for i, cond_post in enumerate(cond_posts):
- exp.set_cond_from_post(i+1, cond_post)
- self.check_json_get(f'/day?date={date}', exp)
- # add Todos in relation to Conditions, check consequence relations
- self.post_exp_day([exp], {'new_todo': [1, 2]})
- self.check_json_get(f'/day?date={date}', exp)
-
- def test_GET_calendar(self) -> None:
- """Test GET /calendar responses based on various inputs, DB states."""
- # check illegal date range delimiters
- self.check_get('/calendar?start=foo', 400)
- self.check_get('/calendar?end=foo', 400)
- # check default range for expected selection/order without saved days
- exp = ExpectedGetCalendar(-1, 366)
- self.check_json_get('/calendar', exp)
- self.check_json_get('/calendar?start=&end=', exp)
- # check with named days as delimiters
- exp = ExpectedGetCalendar(-1, +1)
- self.check_json_get('/calendar?start=yesterday&end=tomorrow', exp)
- # check zero-element range
- exp = ExpectedGetCalendar(+1, 0)
- self.check_json_get('/calendar?start=tomorrow&end=today', exp)
- # check saved day shows up in results, proven by its comment
- start_date = _testing_date_in_n_days(-5)
- date = _testing_date_in_n_days(-2)
- end_date = _testing_date_in_n_days(+5)
- exp = ExpectedGetCalendar(-5, +5)
- self.post_exp_day([exp],
- {'day_comment': 'foo'}, _days_n_for_date(date))
- url = f'/calendar?start={start_date}&end={end_date}'
- self.check_json_get(url, exp)
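
Annotation (not part of the patch): the day IDs used throughout count days
since 2000-01-01 (see _days_n_for_date above), so ID 1 is 2000-01-02, and,
2000 being a leap year, ID 367 is 2001-01-02. A self-contained check of the
arithmetic the assertions above rely on:

    from datetime import date, timedelta

    EPOCH = date(2000, 1, 1)  # day ID 0 under _days_n_for_date's convention
    assert (EPOCH + timedelta(days=1)).isoformat() == '2000-01-02'    # Day(1)
    assert (EPOCH + timedelta(days=367)).isoformat() == '2001-01-02'  # Day(367)
    assert (EPOCH + timedelta(days=425)).isoformat() == '2001-03-01'  # Day(424).next_date
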
+++ /dev/null
-"""Miscellaneous tests."""
-from typing import Callable
-from unittest import TestCase
-from tests.utils import TestCaseWithServer
-from plomtask.http import InputsParser
-from plomtask.exceptions import BadFormatException
-
-
-class TestsSansServer(TestCase):
- """Tests that do not require DB setup or a server."""
-
- def _test_parser(self,
- method: Callable,
- serialized: str,
- expected: object,
- method_args: list[object],
- fails: bool = False
- ) -> None:
- # pylint: disable=too-many-arguments
- parser = InputsParser(serialized)
- if fails:
- with self.assertRaises(BadFormatException):
- method(parser, *method_args)
- else:
- self.assertEqual(expected, method(parser, *method_args))
-
- def test_InputsParser_get_str_or_fail(self) -> None:
-        """Test InputsParser.get_str_or_fail."""
- m = InputsParser.get_str_or_fail
- self._test_parser(m, '', 0, ['foo'], fails=True)
- self._test_parser(m, '', 'bar', ['foo', 'bar'])
- self._test_parser(m, 'foo=', '', ['foo'])
- self._test_parser(m, 'foo=', '', ['foo', 'bar'])
- self._test_parser(m, 'foo=baz', 'baz', ['foo', 'bar'])
- self._test_parser(m, 'foo=baz&foo=quux', 'baz', ['foo', 'bar'])
- self._test_parser(m, 'foo=baz,quux', 'baz,quux', ['foo', 'bar'])
-
- def test_InputsParser_get_str(self) -> None:
- """Test InputsParser.get_str."""
- m = InputsParser.get_str
- self._test_parser(m, '', None, ['foo'])
- self._test_parser(m, '', 'bar', ['foo', 'bar'])
- self._test_parser(m, 'foo=', '', ['foo'])
- self._test_parser(m, 'foo=', '', ['foo', 'bar'])
- self._test_parser(m, 'foo=baz', 'baz', ['foo', 'bar'])
- self._test_parser(m, 'foo=baz&foo=quux', 'baz', ['foo', 'bar'])
- self._test_parser(m, 'foo=baz,quux', 'baz,quux', ['foo', 'bar'])
-
- def test_InputsParser_get_all_of_key_prefixed(self) -> None:
- """Test InputsParser.get_all_of_key_prefixed."""
- m = InputsParser.get_all_of_key_prefixed
- self._test_parser(m, '', {}, [''])
- self._test_parser(m, '', {}, ['foo'])
- self._test_parser(m, 'foo=bar', {'foo': ['bar']}, [''])
- self._test_parser(m, 'x=y&x=z', {'': ['y', 'z']}, ['x'])
- self._test_parser(m, 'xx=y&xx=Z', {'x': ['y', 'Z']}, ['x'])
- self._test_parser(m, 'xx=y', {}, ['xxx'])
- self._test_parser(m, 'xxx=x&xxy=y&xyy=z', {'x': ['x'], 'y': ['y']},
- ['xx'])
-
- def test_InputsParser_get_int_or_none(self) -> None:
- """Test InputsParser.get_int_or_none."""
- m = InputsParser.get_int_or_none
- self._test_parser(m, '', None, ['foo'])
- self._test_parser(m, 'foo=', None, ['foo'])
- self._test_parser(m, 'foo=0', 0, ['foo'])
- self._test_parser(m, 'foo=None', 0, ['foo'], fails=True)
- self._test_parser(m, 'foo=0.1', 0, ['foo'], fails=True)
- self._test_parser(m, 'foo=23', 23, ['foo'])
-
- def test_InputsParser_get_float_or_fail(self) -> None:
- """Test InputsParser.get_float_or_fail."""
- m = InputsParser.get_float_or_fail
- self._test_parser(m, '', 0, ['foo'], fails=True)
- self._test_parser(m, 'foo=', 0, ['foo'], fails=True)
- self._test_parser(m, 'foo=bar', 0, ['foo'], fails=True)
- self._test_parser(m, 'foo=0', 0, ['foo'])
- self._test_parser(m, 'foo=0.1', 0.1, ['foo'])
- self._test_parser(m, 'foo=1.23&foo=456', 1.23, ['foo'])
-
- def test_InputsParser_get_bool(self) -> None:
- """Test InputsParser.get_bool."""
- m = InputsParser.get_bool
- self._test_parser(m, '', 0, ['foo'])
- self._test_parser(m, 'val=foo', 0, ['foo'])
- self._test_parser(m, 'val=True', 0, ['foo'])
- self._test_parser(m, 'foo=', 0, ['foo'])
- self._test_parser(m, 'foo=None', 0, ['foo'])
- self._test_parser(m, 'foo=0', 0, ['foo'])
- self._test_parser(m, 'foo=bar', 0, ['foo'])
- self._test_parser(m, 'foo=bar&foo=baz', 0, ['foo'])
- self._test_parser(m, 'foo=False', 0, ['foo'])
- self._test_parser(m, 'foo=true', 1, ['foo'])
- self._test_parser(m, 'foo=True', 1, ['foo'])
- self._test_parser(m, 'foo=1', 1, ['foo'])
- self._test_parser(m, 'foo=on', 1, ['foo'])
-
- def test_InputsParser_get_all_str(self) -> None:
- """Test InputsParser.get_all_str."""
- m = InputsParser.get_all_str
- self._test_parser(m, '', [], ['foo'])
- self._test_parser(m, 'foo=', [''], ['foo'])
- self._test_parser(m, 'foo=bar', ['bar'], ['foo'])
- self._test_parser(m, 'foo=bar&foo=baz', ['bar', 'baz'], ['foo'])
-
- def test_InputsParser_get_all_int(self) -> None:
- """Test InputsParser.get_all_int."""
- m = InputsParser.get_all_int
- self._test_parser(m, '', [], ['foo'])
- self._test_parser(m, 'foo=', [], ['foo'])
- self._test_parser(m, 'foo=', 0, ['foo', True], fails=True)
- self._test_parser(m, 'foo=0', [0], ['foo'])
- self._test_parser(m, 'foo=0&foo=17', [0, 17], ['foo'])
- self._test_parser(m, 'foo=0.1&foo=17', 0, ['foo'], fails=True)
- self._test_parser(m, 'foo=None&foo=17', 0, ['foo'], fails=True)
-
-
-class TestsWithServer(TestCaseWithServer):
- """Tests against our HTTP server/handler (and database)."""
-
- def test_do_GET(self) -> None:
- """Test GET / redirect, and unknown targets failing."""
- self.conn.request('GET', '/')
- self.check_redirect('/day')
- self.check_get('/foo', 404)
-
- def test_do_POST(self) -> None:
- """Test POST to / and other unknown targets failing."""
- self.check_post({}, '/', 404)
- self.check_post({}, '/foo', 404)
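
Annotation (not part of the patch): the get_bool expectations above pin down
exactly which serialized values count as true: 'true', 'True', '1', and 'on'.
A behavioral sketch matching those expectations, an inference rather than
plomtask's actual code:

    from urllib.parse import parse_qs

    def get_bool_sketch(serialized: str, key: str) -> bool:
        """True iff any value of key is a truthy spelling per the tests."""
        values = parse_qs(serialized).get(key, [])
        return any(v in ('true', 'True', '1', 'on') for v in values)

    assert get_bool_sketch('foo=on', 'foo')
    assert not get_bool_sketch('foo=False', 'foo')
    assert not get_bool_sketch('', 'foo')
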
+++ /dev/null
-"""Test Processes module."""
-from typing import Any
-from tests.utils import (TestCaseSansDB, TestCaseWithDB, TestCaseWithServer,
- Expected)
-from plomtask.processes import Process, ProcessStep
-from plomtask.exceptions import NotFoundException
-
-
-class TestsSansDB(TestCaseSansDB):
- """Module tests not requiring DB setup."""
- checked_class = Process
-
-
-class TestsSansDBProcessStep(TestCaseSansDB):
- """Module tests not requiring DB setup."""
- checked_class = ProcessStep
- default_init_kwargs = {'owner_id': 2, 'step_process_id': 3,
- 'parent_step_id': 4}
-
-
-class TestsWithDB(TestCaseWithDB):
- """Module tests requiring DB setup."""
- checked_class = Process
-
- def test_remove(self) -> None:
- """Test removal of Processes and ProcessSteps."""
- super().test_remove()
- p1, p2, p3 = Process(None), Process(None), Process(None)
- for p in [p1, p2, p3]:
- p.save(self.db_conn)
- assert isinstance(p1.id_, int)
- assert isinstance(p2.id_, int)
- assert isinstance(p3.id_, int)
- step = ProcessStep(None, p2.id_, p1.id_, None)
- p2.set_steps(self.db_conn, [step])
- step_id = step.id_
- p2.set_steps(self.db_conn, [])
- with self.assertRaises(NotFoundException):
- # check unset ProcessSteps actually cannot be found anymore
- assert step_id is not None
- ProcessStep.by_id(self.db_conn, step_id)
- p1.remove(self.db_conn)
- step = ProcessStep(None, p2.id_, p3.id_, None)
- p2.set_steps(self.db_conn, [step])
- step_id = step.id_
- # check _can_ remove Process pointed to by ProcessStep.owner_id, and …
- p2.remove(self.db_conn)
- with self.assertRaises(NotFoundException):
- # … being dis-owned eliminates ProcessStep
- assert step_id is not None
- ProcessStep.by_id(self.db_conn, step_id)
-
-
-class TestsWithDBForProcessStep(TestCaseWithDB):
- """Module tests requiring DB setup."""
- checked_class = ProcessStep
- default_init_kwargs = {'owner_id': 1, 'step_process_id': 2,
- 'parent_step_id': 3}
-
- def setUp(self) -> None:
- super().setUp()
- self.p1 = Process(1)
- self.p1.save(self.db_conn)
-
- def test_remove(self) -> None:
- """Test .remove and unsetting of owner's .explicit_steps entry."""
- p2 = Process(2)
- p2.save(self.db_conn)
- assert isinstance(self.p1.id_, int)
- assert isinstance(p2.id_, int)
- step = ProcessStep(None, self.p1.id_, p2.id_, None)
- self.p1.set_steps(self.db_conn, [step])
- step.remove(self.db_conn)
- self.assertEqual(self.p1.explicit_steps, [])
- self.check_identity_with_cache_and_db([])
-
-
-class ExpectedGetProcess(Expected):
-    """Builder of expectations for GET /process."""
- _default_dict = {'is_new': False, 'preset_top_step': None, 'n_todos': 0}
- _on_empty_make_temp = ('Process', 'proc_as_dict')
-
- def __init__(self,
- proc_id: int,
- *args: Any, **kwargs: Any) -> None:
- self._fields = {'process': proc_id, 'steps': []}
- super().__init__(*args, **kwargs)
-
- @staticmethod
- def stepnode_as_dict(step_id: int,
- proc_id: int,
- seen: bool = False,
- steps: None | list[dict[str, object]] = None,
- is_explicit: bool = True,
- is_suppressed: bool = False) -> dict[str, object]:
- # pylint: disable=too-many-arguments
- """Return JSON of ProcessStepNode to expect."""
- return {'step': step_id,
- 'process': proc_id,
- 'seen': seen,
- 'steps': steps if steps else [],
- 'is_explicit': is_explicit,
- 'is_suppressed': is_suppressed}
-
- def recalc(self) -> None:
- """Update internal dictionary by subclass-specific rules."""
- super().recalc()
- self._fields['process_candidates'] = self.as_ids(
- self.lib_all('Process'))
- self._fields['condition_candidates'] = self.as_ids(
- self.lib_all('Condition'))
- self._fields['owners'] = [
- s['owner_id'] for s in self.lib_all('ProcessStep')
- if s['step_process_id'] == self._fields['process']]
-
-
-class ExpectedGetProcesses(Expected):
- """Builder of expectations for GET /processes."""
- _default_dict = {'sort_by': 'title', 'pattern': ''}
-
- def recalc(self) -> None:
- """Update internal dictionary by subclass-specific rules."""
- super().recalc()
- self._fields['processes'] = self.as_ids(self.lib_all('Process'))
-
-
-class TestsWithServer(TestCaseWithServer):
- """Module tests against our HTTP server/handler (and database)."""
- checked_class = Process
-
- def test_fail_POST_process(self) -> None:
-        """Test malformed/illegal POST /process requests."""
- valid_post = {'title': '', 'description': '', 'effort': 1.0}
-        # check payloads lacking minimum expected fields
- self.check_minimal_inputs('/process', valid_post)
- # check payloads of bad data types
- self.check_post(valid_post | {'effort': ''}, '/process', 400)
-        # check references to non-existent items
- self.check_post(valid_post | {'conditions': [1]}, '/process', 404)
- self.check_post(valid_post | {'disables': [1]}, '/process', 404)
- self.check_post(valid_post | {'blockers': [1]}, '/process', 404)
- self.check_post(valid_post | {'enables': [1]}, '/process', 404)
- self.check_post(valid_post | {'new_top_step': 2}, '/process', 404)
-        # check deletion of non-existent Process
- self.check_post({'delete': ''}, '/process?id=1', 404)
-
- def test_basic_POST_process(self) -> None:
- """Test basic GET/POST /process operations."""
- # check on un-saved
- exp = ExpectedGetProcess(1)
- exp.force('process_candidates', [])
- exp.set('is_new', True)
- self.check_json_get('/process?id=1', exp)
- # check on minimal payload post
- exp = ExpectedGetProcess(1)
- self.post_exp_process([exp], {}, 1)
- self.check_json_get('/process?id=1', exp)
- # check boolean 'calendarize'
- self.post_exp_process([exp], {'calendarize': True}, 1)
- self.check_json_get('/process?id=1', exp)
- self.post_exp_process([exp], {}, 1)
- self.check_json_get('/process?id=1', exp)
- # check conditions posting
- for i in range(3):
- self.post_exp_cond([exp], {}, i+1)
- p = {'conditions': [1, 2], 'disables': [1],
- 'blockers': [3], 'enables': [2, 3]}
- self.post_exp_process([exp], p, 1)
- self.check_json_get('/process?id=1', exp)
- # check n_todos field
- self.post_exp_day([], {'new_todo': ['1']}, 1)
- self.post_exp_day([], {'new_todo': ['1']}, 2)
- exp.set('n_todos', 2)
- self.check_json_get('/process?id=1', exp)
- # check cannot delete if Todos to Process
- self.check_post({'delete': ''}, '/process?id=1', 500)
- # check cannot delete if some ProcessStep's .step_process_id
- self.post_exp_process([exp], {}, 2)
- self.post_exp_process([exp], {'new_top_step': 2}, 3)
- self.check_post({'delete': ''}, '/process?id=2', 500)
- # check successful deletion
- self.post_exp_process([exp], {}, 4)
- self.check_post({'delete': ''}, '/process?id=4', 302, '/processes')
- exp = ExpectedGetProcess(4)
- exp.set('is_new', True)
- for i in range(3):
- self.post_exp_cond([exp], {}, i+1)
- self.post_exp_process([exp], {}, i+1)
- exp.force('process_candidates', [1, 2, 3])
- self.check_json_get('/process?id=4', exp)
-
- def test_POST_process_steps(self) -> None:
- """Test behavior of ProcessStep posting."""
- # pylint: disable=too-many-statements
- url = '/process?id=1'
- exp = ExpectedGetProcess(1)
- self.post_exp_process([exp], {}, 1)
- # post first (top-level) step of proc2 to proc1 by 'step_of' in 2
- self.post_exp_process([exp], {'step_of': 1}, 2)
- exp.lib_set('ProcessStep',
- [exp.procstep_as_dict(1, owner_id=1, step_process_id=2)])
- exp.set('steps', [
- exp.stepnode_as_dict(
- step_id=1,
- proc_id=2)])
- self.check_json_get(url, exp)
- # post empty/absent steps list to process, expect clean slate, and old
- # step to completely disappear
- self.post_exp_process([exp], {}, 1)
- exp.lib_wipe('ProcessStep')
- exp.set('steps', [])
- self.check_json_get(url, exp)
- # post anew (as only step yet) step of proc2 to proc1 by 'new_top_step'
- self.post_exp_process([exp], {'new_top_step': 2}, 1)
- exp.lib_set('ProcessStep',
- [exp.procstep_as_dict(1, owner_id=1, step_process_id=2)])
- self.post_exp_process([exp], {'kept_steps': [1]}, 1)
- step_nodes = [exp.stepnode_as_dict(step_id=1, proc_id=2)]
- exp.set('steps', step_nodes)
- self.check_json_get(url, exp)
- # fail on zero-step recursion
- p_min = {'title': '', 'description': '', 'effort': 0}
- self.check_post(p_min | {'new_top_step': 1}, url, 400)
- self.check_post(p_min | {'step_of': 1}, url, 400)
- # post sibling steps
- self.post_exp_process([exp], {}, 3)
- self.post_exp_process([exp], {'kept_steps': [1], 'new_top_step': 3}, 1)
- exp.lib_set('ProcessStep',
- [exp.procstep_as_dict(2, owner_id=1, step_process_id=3)])
- step_nodes += [exp.stepnode_as_dict(step_id=2, proc_id=3)]
- self.check_json_get(url, exp)
-        # post implicit sub-step via post to proc2
- self.post_exp_process([exp], {}, 4)
- self.post_exp_process([exp], {'step_of': [1], 'new_top_step': 4}, 2)
- exp.lib_set('ProcessStep',
- [exp.procstep_as_dict(3, owner_id=2, step_process_id=4)])
- step_nodes[0]['steps'] = [
- exp.stepnode_as_dict(step_id=3, proc_id=4, is_explicit=False)]
- self.check_json_get(url, exp)
- # post explicit sub-step via post to proc1
- p = {'kept_steps': [1, 2], 'new_step_to_2': 4}
- self.post_exp_process([exp], p, 1)
- exp.lib_set('ProcessStep', [exp.procstep_as_dict(
- 4, owner_id=1, step_process_id=4, parent_step_id=2)])
- step_nodes[1]['steps'] = [
- exp.stepnode_as_dict(step_id=4, proc_id=4)]
- self.check_json_get(url, exp)
- # to ensure suppressed step nodes are hidden, add new step to proc4,
- # implicitly adding it as sub-step to the proc4 steps in proc1, but
-        # suppress one of the proc4 occurrences there, marking its
- # .is_suppressed *and* hiding the new step below it
- p = {'kept_steps': [1, 2, 4], 'suppressed_steps': [3]}
- self.post_exp_process([exp], {'step_of': [4]}, 5)
- self.post_exp_process([exp], p, 1)
- exp.lib_set('ProcessStep',
- [exp.procstep_as_dict(5, owner_id=4, step_process_id=5)])
- assert isinstance(step_nodes[0]['steps'], list)
- assert isinstance(step_nodes[1]['steps'], list)
- step_nodes[0]['steps'][0]['is_suppressed'] = True
- step_nodes[1]['steps'][0]['steps'] = [
- exp.stepnode_as_dict(step_id=5, proc_id=5, is_explicit=False)]
- self.check_json_get(url, exp)
- # ensure implicit steps' non-top explicit sub-steps are shown
- self.post_exp_process([exp], {}, 6)
- self.post_exp_process([exp], {'kept_steps': [5], 'step_of': [1, 2],
- 'new_step_to_5': 6}, 4)
- exp.lib_set('ProcessStep', [exp.procstep_as_dict(
- 6, owner_id=4, parent_step_id=5, step_process_id=6)])
- step_nodes[1]['steps'][0]['steps'][0]['steps'] = [
- exp.stepnode_as_dict(step_id=6, proc_id=6, is_explicit=False)]
- self.check_json_get(url, exp)
- # try to post sub-step to non-existing sub-step, expect it to become
- # top-level step instead
- p['new_step_to_9'] = 5
- self.post_exp_process([exp], p, 1)
- exp.lib_set('ProcessStep',
- [exp.procstep_as_dict(7, owner_id=1, step_process_id=5)])
- step_nodes += [
- exp.stepnode_as_dict(step_id=7, proc_id=5)]
- self.check_json_get(url, exp)
- del p['new_step_to_9']
- assert isinstance(p['kept_steps'], list)
- p['kept_steps'] += [7]
- # try to post sub-step to implicit sub-step, expect same result
- p['new_step_to_5'] = 5
- self.post_exp_process([exp], p, 1)
- exp.lib_set('ProcessStep',
- [exp.procstep_as_dict(8, owner_id=1, step_process_id=5)])
- step_nodes += [
- exp.stepnode_as_dict(step_id=8, proc_id=5)]
- self.check_json_get(url, exp)
- del p['new_step_to_5']
- p['kept_steps'] += [8]
- # post sub-step to explicit sub-step with implicit sub-step of same
- # step process ID, expect it to eliminate/replace implicit sub-step
- p['new_step_to_4'] = 5
- self.post_exp_process([exp], p, 1)
- step_nodes[1]['steps'][0]['steps'][0] = exp.stepnode_as_dict(
- step_id=9, proc_id=5)
- exp.lib_set('ProcessStep', [exp.procstep_as_dict(
- 9, owner_id=1, parent_step_id=4, step_process_id=5)])
- self.check_json_get(url, exp)
- del p['new_step_to_4']
- p['kept_steps'] += [9]
- # fail on single-step recursion via top step
- self.post_exp_process([exp], {}, 7)
- self.post_exp_process([exp], {'new_top_step': 1}, 7)
- exp.lib_set('ProcessStep', [exp.procstep_as_dict(
- 10, owner_id=7, step_process_id=1)])
- p['step_of'] = [7]
- self.check_post(p_min | p | {'new_top_step': 7}, url, 400)
- # fail on double-step recursion via top step
- self.post_exp_process([exp], {}, 8)
- self.post_exp_process([exp], {'new_top_step': 7}, 8)
- exp.lib_set('ProcessStep', [exp.procstep_as_dict(
- 11, owner_id=8, step_process_id=7)])
- self.check_post(p_min | p | {'new_top_step': 8}, url, 400)
- # fail on single- and double-step recursion via explicit sub-step
- self.check_post(p_min | p | {'new_step_to_8': 7}, url, 400)
- self.check_post(p_min | p | {'new_step_to_8': 8}, url, 400)
-
- def test_fail_GET_process(self) -> None:
- """Test invalid GET /process params."""
- # check for invalid IDs
- self.check_get_defaults('/process')
- # check we catch invalid base64
- self.check_get('/process?title_b64=foo', 400)
- # check failure on references to unknown processes; we create Process
- # of ID=1 here so we know the 404 comes from step_to=2 etc. (that tie
- # the Process displayed by /process to others), not from not finding
- # the main Process itself
- self.post_exp_process([], {}, 1)
- self.check_get('/process?id=1&step_to=2', 404)
- self.check_get('/process?id=1&has_step=2', 404)
-
- def test_GET_processes(self) -> None:
- """Test GET /processes."""
- # pylint: disable=too-many-statements
- # test empty result on empty DB, default-settings on empty params
- exp = ExpectedGetProcesses()
- self.check_json_get('/processes', exp)
-        # test that on meaningless non-empty params (incl. entirely un-used
-        # key) 'sort_by' defaults to 'title' (even if set to something else,
-        # as long as there is no handler for it) and 'pattern' gets preserved
- exp.set('pattern', 'bar')
- url = '/processes?sort_by=foo&pattern=bar&foo=x'
- self.check_json_get(url, exp)
- # test non-empty result, automatic (positive) sorting by title
- for i, t in enumerate([('foo', 'oof', 1.0, []),
- ('bar', 'rab', 1.1, [1]),
- ('baz', 'zab', 0.9, [1, 2])]):
- payload = {'title': t[0], 'description': t[1], 'effort': t[2],
- 'new_top_step': t[3]}
- self.post_exp_process([exp], payload, i+1)
- exp.lib_set('ProcessStep',
- [exp.procstep_as_dict(1, owner_id=2, step_process_id=1),
- exp.procstep_as_dict(2, owner_id=3, step_process_id=1),
- exp.procstep_as_dict(3, owner_id=3, step_process_id=2)])
- exp.set('pattern', '')
- self.check_filter(exp, 'processes', 'sort_by', 'title', [2, 3, 1])
- # test other sortings
- self.check_filter(exp, 'processes', 'sort_by', '-title', [1, 3, 2])
- self.check_filter(exp, 'processes', 'sort_by', 'effort', [3, 1, 2])
- self.check_filter(exp, 'processes', 'sort_by', '-effort', [2, 1, 3])
- self.check_filter(exp, 'processes', 'sort_by', 'steps', [1, 2, 3])
- self.check_filter(exp, 'processes', 'sort_by', '-steps', [3, 2, 1])
- self.check_filter(exp, 'processes', 'sort_by', 'owners', [3, 2, 1])
- self.check_filter(exp, 'processes', 'sort_by', '-owners', [1, 2, 3])
- # test pattern matching on title
- exp.set('sort_by', 'title')
- exp.lib_del('Process', 1)
- self.check_filter(exp, 'processes', 'pattern', 'ba', [2, 3])
- # test pattern matching on description
- exp.lib_wipe('Process')
- exp.lib_wipe('ProcessStep')
- self.post_exp_process([exp], {'description': 'oof', 'effort': 1.0}, 1)
- self.check_filter(exp, 'processes', 'pattern', 'of', [1])
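
Annotation (not part of the patch): the recursion failures tested above
(zero-, single-, and double-step) imply that posting a step is rejected
whenever the step's process can already reach the owning process through
existing ProcessStep edges. A minimal reachability check of that kind,
assumed rather than taken from plomtask's source:

    def would_recurse(edges: set[tuple[int, int]],
                      owner: int, step_proc: int) -> bool:
        """True if making step_proc a step of owner would close a cycle.

        edges holds (owner_id, step_process_id) pairs of existing steps.
        """
        stack, seen = [step_proc], set()
        while stack:
            node = stack.pop()
            if node == owner:
                return True
            if node not in seen:
                seen.add(node)
                stack += [s for (o, s) in edges if o == node]
        return False

    assert would_recurse(set(), 1, 1)             # zero-step recursion
    assert would_recurse({(7, 1)}, 1, 7)          # single-step
    assert would_recurse({(8, 7), (7, 1)}, 1, 8)  # double-step
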
+++ /dev/null
-"""Test Todos module."""
-from typing import Any
-from datetime import date as dt_date, timedelta
-from tests.utils import (TestCaseSansDB, TestCaseWithDB, TestCaseWithServer,
- Expected, date_and_day_id)
-from plomtask.todos import Todo
-from plomtask.processes import Process
-from plomtask.exceptions import BadFormatException, HandledException
-
-
-class TestsWithDB(TestCaseWithDB, TestCaseSansDB):
- """Tests requiring DB, but not server setup.
-
-    NB: We subclass TestCaseSansDB too, to run its tests as well; since every
-    Todo requires a _saved_ Process, they wouldn't run without a DB.
- """
- checked_class = Todo
- default_init_kwargs = {'process': None, 'is_done': False, 'day_id': 1}
-
- def setUp(self) -> None:
- super().setUp()
- self.proc = Process(None)
- self.proc.save(self.db_conn)
- self.default_init_kwargs['process'] = self.proc
-
- def test_Todo_by_date(self) -> None:
- """Test findability of Todos by date."""
- date_1, day_id_1 = date_and_day_id(1)
- date_2, _ = date_and_day_id(2)
- t1 = Todo(None, self.proc, False, day_id_1)
- t1.save(self.db_conn)
- t2 = Todo(None, self.proc, False, day_id_1)
- t2.save(self.db_conn)
- self.assertEqual(Todo.by_date(self.db_conn, date_1), [t1, t2])
- self.assertEqual(Todo.by_date(self.db_conn, date_2), [])
- with self.assertRaises(BadFormatException):
- self.assertEqual(Todo.by_date(self.db_conn, 'foo'), [])
-
- def test_Todo_by_date_range_with_limits(self) -> None:
- """Test .by_date_range_with_limits."""
- # pylint: disable=too-many-locals
- f = Todo.by_date_range_with_limits
- # check illegal ranges
- legal_range = ('yesterday', 'tomorrow')
- for i in [0, 1]:
- for bad_date in ['foo', '2024-02-30', '2024-01-01 12:00:00']:
- date_range_l = list(legal_range[:])
- date_range_l[i] = bad_date
- with self.assertRaises(HandledException):
- f(self.db_conn, (date_range_l[0], date_range_l[1]))
- # check empty, translation of 'yesterday' and 'tomorrow'
- items, start, end = f(self.db_conn, legal_range)
- self.assertEqual(items, [])
- dt_today = dt_date.today()
- dt_yesterday = dt_today + timedelta(days=-1)
- dt_tomorrow = dt_today + timedelta(days=+1)
- self.assertEqual(start, dt_yesterday.isoformat())
- self.assertEqual(end, dt_tomorrow.isoformat())
- # prepare dated items for non-empty results
- kwargs = self.default_init_kwargs.copy()
- todos = []
- dates_and_day_ids = [date_and_day_id(i) for i in range(5)]
- for day_id in [t[1] for t in dates_and_day_ids[1:-1]]:
- kwargs['day_id'] = day_id
- todos += [Todo(None, **kwargs)]
- # check ranges still empty before saving
- date_range = (dates_and_day_ids[1][0], dates_and_day_ids[-2][0])
- self.assertEqual(f(self.db_conn, date_range)[0], [])
- # check all objs displayed within interval
- for todo in todos:
- todo.save(self.db_conn)
- self.assertEqual(f(self.db_conn, date_range)[0], todos)
-        # check that only what exists within the interval is displayed
- date_range = (dates_and_day_ids[1][0], dates_and_day_ids[-3][0])
- expected = [todos[0], todos[1]]
- self.assertEqual(f(self.db_conn, date_range)[0], expected)
- date_range = (dates_and_day_ids[-2][0], dates_and_day_ids[-1][0])
- expected = [todos[2]]
- self.assertEqual(f(self.db_conn, date_range)[0], expected)
- # check that inverted interval displays nothing
- date_range = (dates_and_day_ids[-1][0], dates_and_day_ids[0][0])
- self.assertEqual(f(self.db_conn, date_range)[0], [])
- # check that "today" is interpreted, and single-element interval
- kwargs['day_id'] = (dt_today - dt_date(2000, 1, 1)).days
- todo_today = Todo(None, **kwargs)
- todo_today.save(self.db_conn)
- date_range = ('today', 'today')
- items, start, end = f(self.db_conn, date_range)
- self.assertEqual(start, dt_today.isoformat())
- self.assertEqual(start, end)
- self.assertEqual(items, [todo_today])
-
- def test_Todo_children(self) -> None:
- """Test Todo.children relations."""
- todo_1 = Todo(None, self.proc, False, 1)
- todo_2 = Todo(None, self.proc, False, 1)
- todo_2.save(self.db_conn)
- # check un-saved Todo cannot parent
- with self.assertRaises(HandledException):
- todo_1.add_child(todo_2)
- todo_1.save(self.db_conn)
- todo_3 = Todo(None, self.proc, False, 1)
- # check un-saved Todo cannot be parented
- with self.assertRaises(HandledException):
- todo_1.add_child(todo_3)
-
-
-class ExpectedGetTodo(Expected):
- """Builder of expectations for GET /todo."""
-
- def __init__(self,
- todo_id: int,
- *args: Any, **kwargs: Any) -> None:
- self._fields = {'todo': todo_id,
- 'steps_todo_to_process': []}
- super().__init__(*args, **kwargs)
-
- def recalc(self) -> None:
- """Update internal dictionary by subclass-specific rules."""
-
- def walk_steps(step: dict[str, Any]) -> None:
- if not step['todo']:
- proc_id = step['process']
- cands = self.as_ids(
- [t for t in todos if proc_id == t['process_id']
- and t['id'] in self._fields['todo_candidates']])
- self._fields['adoption_candidates_for'][str(proc_id)] = cands
- for child in step['children']:
- walk_steps(child)
-
- super().recalc()
- self.lib_wipe('Day')
- todos = self.lib_all('Todo')
- procs = self.lib_all('Process')
- conds = self.lib_all('Condition')
- self._fields['todo_candidates'] = self.as_ids(
- [t for t in todos if t['id'] != self._fields['todo']])
- self._fields['process_candidates'] = self.as_ids(procs)
- self._fields['condition_candidates'] = self.as_ids(conds)
- self._fields['adoption_candidates_for'] = {}
- for step in self._fields['steps_todo_to_process']:
- walk_steps(step)
-
- @staticmethod
- def step_as_dict(node_id: int,
- process: int | None = None,
- todo: int | None = None,
- fillable: bool = False,
- children: None | list[dict[str, object]] = None
- ) -> dict[str, object]:
- """Return JSON of TodoOrProcStepsNode to expect."""
- return {'node_id': node_id,
- 'children': children if children is not None else [],
- 'process': process,
- 'fillable': fillable,
- 'todo': todo}
-
-
-class TestsWithServer(TestCaseWithServer):
- """Tests against our HTTP server/handler (and database)."""
- checked_class = Todo
-
- def test_basic_fail_POST_todo(self) -> None:
- """Test basic malformed/illegal POST /todo requests."""
- self.post_exp_process([], {}, 1)
- # test we cannot just POST into non-existing Todo
- self.check_post({}, '/todo', 404)
- self.check_post({}, '/todo?id=FOO', 400)
- self.check_post({}, '/todo?id=0', 400)
- self.check_post({}, '/todo?id=1', 404)
- # test malformed values on existing Todo
- self.post_exp_day([], {'new_todo': [1]})
- for name in ['adopt', 'effort', 'make_full', 'make_empty',
- 'conditions', 'disables', 'blockers', 'enables']:
- self.check_post({name: 'x'}, '/todo?id=1', 400, '/todo')
- for prefix in ['make_', '']:
- for suffix in ['', 'x', '1.1']:
- self.check_post({'step_filler_to_1': [f'{prefix}{suffix}']},
- '/todo?id=1', 400, '/todo')
- for suffix in ['', 'x', '1.1']:
-            self.check_post({f'step_filler_to_{suffix}': ['1']},
- '/todo?id=1', 400, '/todo')
-
- def test_basic_POST_todo(self) -> None:
- """Test basic POST /todo manipulations."""
- exp = ExpectedGetTodo(1)
- self.post_exp_process([exp], {'calendarize': 0}, 1)
- self.post_exp_day([exp], {'new_todo': [1]})
- # test posting naked entity at first changes nothing
- self.check_json_get('/todo?id=1', exp)
- self.check_post({}, '/todo?id=1')
- self.check_json_get('/todo?id=1', exp)
- # test posting doneness, comment, calendarization, effort
- todo_post = {'is_done': 1, 'calendarize': 1,
- 'comment': 'foo', 'effort': 2.3}
- self.post_exp_todo([exp], todo_post, 1)
- self.check_json_get('/todo?id=1', exp)
- # test implicitly un-setting comment/calendarize/is_done by empty post
- self.post_exp_todo([exp], {}, 1)
- self.check_json_get('/todo?id=1', exp)
- # test effort post can be explicitly unset by "effort":"" post
- self.check_post({'effort': ''}, '/todo?id=1')
- exp.lib_get('Todo', 1)['effort'] = None
- self.check_json_get('/todo?id=1', exp)
- # test Condition posts
- c1_post = {'title': 'foo', 'description': 'oof', 'is_active': 0}
- c2_post = {'title': 'bar', 'description': 'rab', 'is_active': 1}
- self.post_exp_cond([exp], c1_post, 1)
- self.post_exp_cond([exp], c2_post, 2)
- self.check_json_get('/todo?id=1', exp)
- todo_post = {'conditions': [1], 'disables': [1],
- 'blockers': [2], 'enables': [2]}
- self.post_exp_todo([exp], todo_post, 1)
- self.check_json_get('/todo?id=1', exp)
-
- def test_POST_todo_deletion(self) -> None:
- """Test deletions via POST /todo."""
- exp = ExpectedGetTodo(1)
- self.post_exp_process([exp], {}, 1)
- # test failure of deletion on non-existing Todo
- self.check_post({'delete': ''}, '/todo?id=2', 404, '/')
- # test deletion of existing Todo
- self.post_exp_day([exp], {'new_todo': [1]})
- self.check_post({'delete': ''}, '/todo?id=1', 302, '/')
- self.check_get('/todo?id=1', 404)
- exp.lib_del('Todo', 1)
- # test deletion of adopted Todo
- self.post_exp_day([exp], {'new_todo': [1]})
- self.post_exp_day([exp], {'new_todo': [1]})
- self.check_post({'adopt': 2}, '/todo?id=1')
- self.check_post({'delete': ''}, '/todo?id=2', 302, '/')
- exp.lib_del('Todo', 2)
- self.check_get('/todo?id=2', 404)
- self.check_json_get('/todo?id=1', exp)
- # test deletion of adopting Todo
- self.post_exp_day([exp], {'new_todo': [1]})
- self.check_post({'adopt': 2}, '/todo?id=1')
- self.check_post({'delete': ''}, '/todo?id=1', 302, '/')
- exp.set('todo', 2)
- exp.lib_del('Todo', 1)
- self.check_json_get('/todo?id=2', exp)
- # test cannot delete Todo with comment or effort
- self.check_post({'comment': 'foo'}, '/todo?id=2')
- self.check_post({'delete': ''}, '/todo?id=2', 500, '/')
- self.check_post({'effort': 5}, '/todo?id=2')
- self.check_post({'delete': ''}, '/todo?id=2', 500, '/')
- # test deletion via effort < 0, but only if deletable
- self.check_post({'effort': -1, 'comment': 'foo'}, '/todo?id=2')
- self.check_post({}, '/todo?id=2')
- self.check_get('/todo?id=2', 404)
-
- def test_POST_todo_adoption(self) -> None:
- """Test adoption via POST /todo with "adopt"."""
- # post two Todos to Day, have first adopt second
- exp = ExpectedGetTodo(1)
- self.post_exp_process([exp], {}, 1)
- self.post_exp_day([exp], {'new_todo': [1]})
- self.post_exp_day([exp], {'new_todo': [1]})
- self.post_exp_todo([exp], {'adopt': 2}, 1)
- exp.set('steps_todo_to_process', [
- exp.step_as_dict(node_id=1, process=None, todo=2)])
- self.check_json_get('/todo?id=1', exp)
- # test Todo un-adopting by just not sending an adopt
- self.post_exp_todo([exp], {}, 1)
- exp.set('steps_todo_to_process', [])
- self.check_json_get('/todo?id=1', exp)
- # test fail on trying to adopt non-existing Todo
- self.check_post({'adopt': 3}, '/todo?id=1', 404)
- # test cannot self-adopt
- self.check_post({'adopt': 1}, '/todo?id=1', 400)
- # test cannot do 1-step circular adoption
- self.post_exp_todo([exp], {'adopt': 1}, 2)
- self.check_post({'adopt': 2}, '/todo?id=1', 400)
- # test cannot do 2-step circular adoption
- self.post_exp_day([exp], {'new_todo': [1]})
- self.post_exp_todo([exp], {'adopt': 2}, 3)
- self.check_post({'adopt': 3}, '/todo?id=1', 400)
- # test can adopt Todo into ProcessStep chain via its Process (with key
- # 'step_filler' equivalent to single-element 'adopt' if intable)
- self.post_exp_process([exp], {}, 2)
- self.post_exp_process([exp], {}, 3)
- self.post_exp_process([exp], {'new_top_step': [2, 3]}, 1)
- exp.lib_set('ProcessStep',
- [exp.procstep_as_dict(1, owner_id=1, step_process_id=2),
- exp.procstep_as_dict(2, owner_id=1, step_process_id=3)])
- slots = [
- exp.step_as_dict(node_id=1, process=2, todo=None, fillable=True),
- exp.step_as_dict(node_id=2, process=3, todo=None, fillable=True)]
- exp.set('steps_todo_to_process', slots)
- self.post_exp_day([exp], {'new_todo': [2]})
- self.post_exp_day([exp], {'new_todo': [3]})
- self.check_json_get('/todo?id=1', exp)
- self.post_exp_todo([exp], {'step_filler_to_1': 5, 'adopt': [4]}, 1)
- exp.lib_get('Todo', 1)['children'] += [5]
- slots[0]['todo'] = 4
- slots[1]['todo'] = 5
- self.check_json_get('/todo?id=1', exp)
- # test 'ignore' values for 'step_filler' are ignored, and intable
- # 'step_filler' values are interchangeable with those of 'adopt'
- todo_post = {'adopt': 5, 'step_filler_to_1': ['ignore', 4]}
- self.check_post(todo_post, '/todo?id=1')
- self.check_json_get('/todo?id=1', exp)
-        # test cannot adopt into non-top-level elements of chain, instead
-        # creating new top-level steps when adopting the respective Process
- self.post_exp_process([exp], {}, 4)
- self.post_exp_process([exp], {'new_top_step': 4, 'step_of': [1]}, 3)
- exp.lib_set('ProcessStep',
- [exp.procstep_as_dict(3, owner_id=3, step_process_id=4)])
- slots[1]['children'] = [exp.step_as_dict(
- node_id=3, process=4, todo=None, fillable=True)]
- self.post_exp_day([exp], {'new_todo': [4]})
- self.post_exp_todo([exp], {'adopt': [4, 5, 6]}, 1)
- slots += [exp.step_as_dict(
- node_id=4, process=None, todo=6, fillable=False)]
- self.check_json_get('/todo?id=1', exp)
-
- def test_POST_todo_make_empty(self) -> None:
- """Test creation via POST /todo "step_filler_to"/"make"."""
- # create chain of Processes
- exp = ExpectedGetTodo(1)
- self.post_exp_process([exp], {}, 1)
- for i in range(1, 4):
- self.post_exp_process([exp], {'new_top_step': i}, i+1)
- exp.lib_set('ProcessStep',
- [exp.procstep_as_dict(1, owner_id=2, step_process_id=1),
- exp.procstep_as_dict(2, owner_id=3, step_process_id=2),
- exp.procstep_as_dict(3, owner_id=4, step_process_id=3)])
- # post (childless) Todo of chain end, then make empty on next in line
- self.post_exp_day([exp], {'new_todo': [4]})
- slots = [exp.step_as_dict(
- node_id=1, process=3, todo=None, fillable=True,
- children=[exp.step_as_dict(
- node_id=2, process=2, todo=None, fillable=False,
- children=[exp.step_as_dict(
- node_id=3, process=1, todo=None, fillable=False)])])]
- exp.set('steps_todo_to_process', slots)
- self.check_json_get('/todo?id=1', exp)
- self.check_post({'step_filler_to_1': 'make_3'}, '/todo?id=1')
- exp.set_todo_from_post(2, {'process_id': 3})
- exp.set_todo_from_post(1, {'process_id': 4, 'children': [2]})
- slots[0]['todo'] = 2
- assert isinstance(slots[0]['children'], list)
- slots[0]['children'][0]['fillable'] = True
- self.check_json_get('/todo?id=1', exp)
- # make new top-level Todo without chain implied by its Process
- self.check_post({'make_empty': 2, 'adopt': [2]}, '/todo?id=1')
- exp.set_todo_from_post(3, {'process_id': 2})
- exp.set_todo_from_post(1, {'process_id': 4, 'children': [2, 3]})
- slots += [exp.step_as_dict(
- node_id=4, process=None, todo=3, fillable=False)]
- self.check_json_get('/todo?id=1', exp)
- # fail on trying to call make_empty on non-existing Process
- self.check_post({'make_full': 5}, '/todo?id=1', 404)
-
- def test_GET_todo(self) -> None:
- """Test GET /todo response codes."""
- # test malformed or illegal parameter values
- self.check_get_defaults('/todo')
- # test all existing Processes are shown as available
- exp = ExpectedGetTodo(1)
- self.post_exp_process([exp], {}, 1)
- self.post_exp_day([exp], {'new_todo': [1]})
- self.post_exp_process([exp], {}, 2)
- self.check_json_get('/todo?id=1', exp)
- # test chain of Processes shown as potential step nodes
- self.post_exp_process([exp], {}, 3)
- self.post_exp_process([exp], {}, 4)
- self.post_exp_process([exp], {'new_top_step': 2}, 1)
- self.post_exp_process([exp], {'new_top_step': 3, 'step_of': [1]}, 2)
- self.post_exp_process([exp], {'new_top_step': 4, 'step_of': [2]}, 3)
- exp.lib_set('ProcessStep', [
- exp.procstep_as_dict(1, owner_id=1, step_process_id=2),
- exp.procstep_as_dict(2, owner_id=2, step_process_id=3),
- exp.procstep_as_dict(3, owner_id=3, step_process_id=4)])
- slots = [exp.step_as_dict(
- node_id=1, process=2, todo=None, fillable=True,
- children=[exp.step_as_dict(
- node_id=2, process=3, todo=None, fillable=False,
- children=[exp.step_as_dict(
- node_id=3, process=4, todo=None, fillable=False)])])]
- exp.set('steps_todo_to_process', slots)
- self.check_json_get('/todo?id=1', exp)
- # test display of parallel chains
- proc_steps_post = {'new_top_step': 4, 'kept_steps': [1, 3]}
- self.post_exp_process([], proc_steps_post, 1)
- exp.lib_set('ProcessStep', [
- exp.procstep_as_dict(4, owner_id=1, step_process_id=4)])
- slots += [exp.step_as_dict(
- node_id=4, process=4, todo=None, fillable=True)]
- self.check_json_get('/todo?id=1', exp)
-
- def test_POST_todo_doneness_relations(self) -> None:
- """Test Todo.is_done Condition, adoption relations for /todo POSTs."""
- self.post_exp_process([], {}, 1)
- # test Todo with adoptee can only be set done if adoptee is done too
- self.post_exp_day([], {'new_todo': [1]})
- self.post_exp_day([], {'new_todo': [1]})
- self.check_post({'adopt': 2, 'is_done': 1}, '/todo?id=1', 400)
- self.check_post({'is_done': 1}, '/todo?id=2')
- self.check_post({'adopt': 2, 'is_done': 1}, '/todo?id=1', 302)
- # test Todo cannot be set undone with adopted Todo not done yet
- self.check_post({'is_done': 0}, '/todo?id=2')
- self.check_post({'adopt': 2, 'is_done': 0}, '/todo?id=1', 400)
- # test unadoption relieves block
- self.check_post({'is_done': 0}, '/todo?id=1', 302)
- # test Condition being set or unset can block doneness setting
- c1_post = {'title': '', 'description': '', 'is_active': 0}
- c2_post = {'title': '', 'description': '', 'is_active': 1}
- self.check_post(c1_post, '/condition', redir='/condition?id=1')
- self.check_post(c2_post, '/condition', redir='/condition?id=2')
- self.check_post({'conditions': [1], 'is_done': 1}, '/todo?id=1', 400)
- self.check_post({'is_done': 1}, '/todo?id=1', 302)
- self.check_post({'is_done': 0}, '/todo?id=1', 302)
- self.check_post({'blockers': [2], 'is_done': 1}, '/todo?id=1', 400)
- self.check_post({'is_done': 1}, '/todo?id=1', 302)
- # test setting Todo doneness can set/un-set Conditions, but only on
- # doneness change, not by mere passive state
- self.check_post({'is_done': 0}, '/todo?id=2', 302)
- self.check_post({'enables': [1], 'is_done': 1}, '/todo?id=1')
- self.check_post({'conditions': [1], 'is_done': 1}, '/todo?id=2', 400)
- self.check_post({'enables': [1], 'is_done': 0}, '/todo?id=1')
- self.check_post({'enables': [1], 'is_done': 1}, '/todo?id=1')
- self.check_post({'conditions': [1], 'is_done': 1}, '/todo?id=2')
- self.check_post({'blockers': [1], 'is_done': 0}, '/todo?id=2', 400)
- self.check_post({'disables': [1], 'is_done': 1}, '/todo?id=1')
- self.check_post({'blockers': [1], 'is_done': 0}, '/todo?id=2', 400)
- self.check_post({'disables': [1]}, '/todo?id=1')
- self.check_post({'disables': [1], 'is_done': 1}, '/todo?id=1')
- self.check_post({'blockers': [1]}, '/todo?id=2')
+++ /dev/null
-"""Shared test utilities."""
-# pylint: disable=too-many-lines
-from __future__ import annotations
-from datetime import datetime, date as dt_date, timedelta
-from unittest import TestCase
-from typing import Mapping, Any, Callable
-from threading import Thread
-from pathlib import Path
-from http.client import HTTPConnection
-from time import sleep
-from json import loads as json_loads, dumps as json_dumps
-from urllib.parse import urlencode
-from uuid import uuid4
-from os import remove as remove_file
-from pprint import pprint
-from tempfile import gettempdir
-from plomtask.db import DatabaseFile, DatabaseConnection
-from plomtask.http import TaskHandler, TaskServer
-from plomtask.processes import Process, ProcessStep
-from plomtask.conditions import Condition
-from plomtask.days import Day
-from plomtask.todos import Todo
-from plomtask.versioned_attributes import VersionedAttribute, TIMESTAMP_FMT
-from plomtask.exceptions import NotFoundException, HandledException
-
-
-_VERSIONED_VALS: dict[str,
- list[str] | list[float]] = {'str': ['A', 'B'],
- 'float': [0.3, 1.1]}
-_VALID_TRUES = {True, 'True', 'true', '1', 'on'}
-
-
-def dt_date_from_day_id(day_id: int) -> dt_date:
- """Return datetime.date of adding day_id days to 2000-01-01."""
- return dt_date(2000, 1, 1) + timedelta(days=day_id)
-
-
-def date_and_day_id(day_id: int) -> tuple[str, int]:
- """Interpet day_id as n of days since millennium, return (date, day_id)."""
- return dt_date_from_day_id(day_id).isoformat(), day_id
-
-
-class TestCaseAugmented(TestCase):
- """Tester core providing helpful basic internal decorators and methods."""
- checked_class: Any
- default_init_kwargs: dict[str, Any] = {}
-
- @staticmethod
- def _run_on_versioned_attributes(f: Callable[..., None]
- ) -> Callable[..., None]:
- def wrapper(self: TestCase) -> None:
- assert isinstance(self, TestCaseAugmented)
- for attr_name in self.checked_class.to_save_versioned():
- default = self.checked_class.versioned_defaults[attr_name]
- owner = self.checked_class(None, **self.default_init_kwargs)
- attr = getattr(owner, attr_name)
- to_set = _VERSIONED_VALS[attr.value_type_name]
- f(self, owner, attr_name, attr, default, to_set)
- return wrapper
-
- @classmethod
- def _run_if_sans_db(cls, f: Callable[..., None]) -> Callable[..., None]:
- def wrapper(self: TestCaseSansDB) -> None:
- if issubclass(cls, TestCaseSansDB):
- f(self)
- return wrapper
-
- @classmethod
- def _run_if_with_db_but_not_server(cls,
- f: Callable[..., None]
- ) -> Callable[..., None]:
- def wrapper(self: TestCaseWithDB) -> None:
- if issubclass(cls, TestCaseWithDB) and\
- not issubclass(cls, TestCaseWithServer):
- f(self)
- return wrapper
-
- @classmethod
- def _make_from_defaults(cls, id_: int | None) -> Any:
- return cls.checked_class(id_, **cls.default_init_kwargs)
-
-
-class TestCaseSansDB(TestCaseAugmented):
- """Tests requiring no DB setup."""
- _legal_ids: list[int] = [1, 5]
- _illegal_ids: list[int] = [0]
-
- @TestCaseAugmented._run_if_sans_db
- def test_id_validation(self) -> None:
- """Test .id_ validation/setting."""
- for id_ in self._illegal_ids:
- with self.assertRaises(HandledException):
- self._make_from_defaults(id_)
- for id_ in self._legal_ids:
- obj = self._make_from_defaults(id_)
- self.assertEqual(obj.id_, id_)
-
- @TestCaseAugmented._run_if_sans_db
- @TestCaseAugmented._run_on_versioned_attributes
- def test_versioned_set(self,
- _: Any,
- __: str,
- attr: VersionedAttribute,
- default: str | float,
- to_set: list[str] | list[float]
- ) -> None:
- """Test VersionedAttribute.set() behaves as expected."""
- attr.set(default)
- self.assertEqual(list(attr.history.values()), [default])
- # check same value does not get set twice in a row,
- # and that not even its timestamp gets updated
- timestamp = list(attr.history.keys())[0]
- attr.set(default)
- self.assertEqual(list(attr.history.values()), [default])
- self.assertEqual(list(attr.history.keys())[0], timestamp)
- # check that different value _will_ be set/added
- attr.set(to_set[0])
- timesorted_vals = [attr.history[t] for
- t in sorted(attr.history.keys())]
- expected = [default, to_set[0]]
- self.assertEqual(timesorted_vals, expected)
- # check that a previously used value can be set if not most recent
- attr.set(default)
- timesorted_vals = [attr.history[t] for
- t in sorted(attr.history.keys())]
- expected = [default, to_set[0], default]
- self.assertEqual(timesorted_vals, expected)
- # again check for same value not being set twice in a row, even for
- # later items
- attr.set(to_set[1])
- timesorted_vals = [attr.history[t] for
- t in sorted(attr.history.keys())]
- expected = [default, to_set[0], default, to_set[1]]
- self.assertEqual(timesorted_vals, expected)
- attr.set(to_set[1])
- self.assertEqual(timesorted_vals, expected)
-
- @TestCaseAugmented._run_if_sans_db
- @TestCaseAugmented._run_on_versioned_attributes
- def test_versioned_newest(self,
- _: Any,
- __: str,
- attr: VersionedAttribute,
- default: str | float,
- to_set: list[str] | list[float]
- ) -> None:
- """Test VersionedAttribute.newest."""
- # check .newest on empty history returns .default
- self.assertEqual(attr.newest, default)
- # check newest element always returned
- for v in [to_set[0], to_set[1]]:
- attr.set(v)
- self.assertEqual(attr.newest, v)
- # check newest element returned even if also early value
- attr.set(default)
- self.assertEqual(attr.newest, default)
-
- @TestCaseAugmented._run_if_sans_db
- @TestCaseAugmented._run_on_versioned_attributes
- def test_versioned_at(self,
- _: Any,
- __: str,
- attr: VersionedAttribute,
- default: str | float,
- to_set: list[str] | list[float]
- ) -> None:
- """Test .at() returns values nearest to queried time, or default."""
- # check .at() returns default on empty history
- timestamp_a = datetime.now().strftime(TIMESTAMP_FMT)
- self.assertEqual(attr.at(timestamp_a), default)
- # check value exactly at timestamp returned
- attr.set(to_set[0])
- timestamp_b = list(attr.history.keys())[0]
- self.assertEqual(attr.at(timestamp_b), to_set[0])
- # check earliest value returned if exists, rather than default
- self.assertEqual(attr.at(timestamp_a), to_set[0])
- # check reverts to previous value for timestamps not indexed
- sleep(0.00001)
- timestamp_between = datetime.now().strftime(TIMESTAMP_FMT)
- sleep(0.00001)
- attr.set(to_set[1])
- timestamp_c = sorted(attr.history.keys())[-1]
- self.assertEqual(attr.at(timestamp_c), to_set[1])
- self.assertEqual(attr.at(timestamp_between), to_set[0])
- sleep(0.00001)
- timestamp_after_c = datetime.now().strftime(TIMESTAMP_FMT)
- self.assertEqual(attr.at(timestamp_after_c), to_set[1])
-
-
-class TestCaseWithDB(TestCaseAugmented):
- """Module tests not requiring DB setup."""
- _default_ids: tuple[int, int, int] = (1, 2, 3)
-
- def setUp(self) -> None:
- Condition.empty_cache()
- Day.empty_cache()
- Process.empty_cache()
- ProcessStep.empty_cache()
- Todo.empty_cache()
- db_path = Path(gettempdir()).joinpath(f'test_db:{uuid4()}')
- DatabaseFile.create(db_path)
- self.db_file = DatabaseFile(db_path)
- self.db_conn = DatabaseConnection(self.db_file)
-
- def tearDown(self) -> None:
- self.db_conn.close()
- remove_file(self.db_file.path)
-
- def _load_from_db(self, id_: int) -> list[object]:
- db_found: list[object] = []
- for row in self.db_conn.row_where(self.checked_class.table_name,
- 'id', id_):
- db_found += [self.checked_class.from_table_row(self.db_conn,
- row)]
- return db_found
-
- def _change_obj(self, obj: object) -> str:
- attr_name: str = self.checked_class.to_save_simples[-1]
- attr = getattr(obj, attr_name)
- new_attr: str | int | float | bool
- if isinstance(attr, (int, float)):
- new_attr = attr + 1
- elif isinstance(attr, str):
- new_attr = attr + '_'
- elif isinstance(attr, bool):
- new_attr = not attr
- setattr(obj, attr_name, new_attr)
- return attr_name
-
- def check_identity_with_cache_and_db(self, content: list[Any]) -> None:
- """Test both cache and DB equal content."""
- expected_cache = {}
- for item in content:
- expected_cache[item.id_] = item
- self.assertEqual(self.checked_class.get_cache(), expected_cache)
- hashes_content = [hash(x) for x in content]
- db_found: list[Any] = []
- for item in content:
- db_found += self._load_from_db(item.id_)
- hashes_db_found = [hash(x) for x in db_found]
- self.assertEqual(sorted(hashes_content), sorted(hashes_db_found))
-
- @TestCaseAugmented._run_if_with_db_but_not_server
- @TestCaseAugmented._run_on_versioned_attributes
- def test_saving_versioned_attributes(self,
- owner: Any,
- attr_name: str,
- attr: VersionedAttribute,
- _: str | float,
- to_set: list[str] | list[float]
- ) -> None:
- """Test storage and initialization of versioned attributes."""
-
- def retrieve_attr_vals(attr: VersionedAttribute) -> list[object]:
- attr_vals_saved: list[object] = []
- for row in self.db_conn.row_where(attr.table_name, 'parent',
- owner.id_):
- attr_vals_saved += [row[2]]
- return attr_vals_saved
-
- attr.set(to_set[0])
- # check that without attr.save() no rows in DB
- rows = self.db_conn.row_where(attr.table_name, 'parent', owner.id_)
- self.assertEqual([], rows)
- # fail saving attributes on non-saved owner
- with self.assertRaises(NotFoundException):
- attr.save(self.db_conn)
- # check owner.save() created entries as expected in attr table
- owner.save(self.db_conn)
- attr_vals_saved = retrieve_attr_vals(attr)
- self.assertEqual([to_set[0]], attr_vals_saved)
- # check changing attr val without save affects owner in memory …
- attr.set(to_set[1])
- cmp_attr = getattr(owner, attr_name)
- self.assertEqual(to_set, list(cmp_attr.history.values()))
- self.assertEqual(cmp_attr.history, attr.history)
- # … but does not yet affect DB
- attr_vals_saved = retrieve_attr_vals(attr)
- self.assertEqual([to_set[0]], attr_vals_saved)
- # check individual attr.save also stores new val to DB
- attr.save(self.db_conn)
- attr_vals_saved = retrieve_attr_vals(attr)
- self.assertEqual(to_set, attr_vals_saved)
-
- @TestCaseAugmented._run_if_with_db_but_not_server
- def test_saving_and_caching(self) -> None:
- """Test effects of .cache() and .save()."""
- id1 = self._default_ids[0]
- # check failure to cache without ID (if None-ID input possible)
- obj0 = self._make_from_defaults(None)
- with self.assertRaises(HandledException):
- obj0.cache()
- # check mere object init itself doesn't even store in cache
- obj1 = self._make_from_defaults(id1)
- self.assertEqual(self.checked_class.get_cache(), {})
- # check .cache() fills cache, but not DB
- obj1.cache()
- self.assertEqual(self.checked_class.get_cache(), {id1: obj1})
- found_in_db = self._load_from_db(id1)
- self.assertEqual(found_in_db, [])
- # check .save() sets ID, updates cache, and fills DB
- # (expect ID to be set to id1, despite obj1 already having that as ID:
- # it's generated by cursor.lastrowid on the DB table, and with obj1
- # not written there, obj2 should get it first!)
- obj2 = self._make_from_defaults(None)
- obj2.save(self.db_conn)
- self.assertEqual(self.checked_class.get_cache(), {id1: obj2})
- # NB: we'll only compare hashes because obj2 itself disappears on
- # .from_table_row-triggered database reload
- obj2_hash = hash(obj2)
- found_in_db += self._load_from_db(id1)
- self.assertEqual([hash(o) for o in found_in_db], [obj2_hash])
- # check we cannot overwrite obj2 with obj1 despite its same ID,
- # since it has disappeared now
- with self.assertRaises(HandledException):
- obj1.save(self.db_conn)
-
- @TestCaseAugmented._run_if_with_db_but_not_server
- def test_by_id(self) -> None:
- """Test .by_id()."""
- id1, id2, _ = self._default_ids
- # check failure if not yet saved
- obj1 = self._make_from_defaults(id1)
- with self.assertRaises(NotFoundException):
- self.checked_class.by_id(self.db_conn, id1)
- # check identity of cached and retrieved
- obj1.cache()
- self.assertEqual(obj1, self.checked_class.by_id(self.db_conn, id1))
- # check identity of saved and retrieved
- obj2 = self._make_from_defaults(id2)
- obj2.save(self.db_conn)
- self.assertEqual(obj2, self.checked_class.by_id(self.db_conn, id2))
-
- @TestCaseAugmented._run_if_with_db_but_not_server
- def test_by_id_or_create(self) -> None:
- """Test .by_id_or_create."""
- # check .by_id_or_create fails if wrong class
- if not self.checked_class.can_create_by_id:
- with self.assertRaises(HandledException):
- self.checked_class.by_id_or_create(self.db_conn, None)
- return
- # check ID input of None creates, on saving, ID=1,2,…
- for n in range(2):
- item = self.checked_class.by_id_or_create(self.db_conn, None)
- self.assertEqual(item.id_, None)
- item.save(self.db_conn)
- self.assertEqual(item.id_, n+1)
- # check .by_id_or_create acts like normal instantiation (sans saving)
- id_ = self._default_ids[2]
- item = self.checked_class.by_id_or_create(self.db_conn, id_)
- self.assertEqual(item.id_, id_)
- with self.assertRaises(NotFoundException):
- self.checked_class.by_id(self.db_conn, item.id_)
- self.assertEqual(self.checked_class(item.id_), item)
-
- @TestCaseAugmented._run_if_with_db_but_not_server
- def test_from_table_row(self) -> None:
- """Test .from_table_row() properly reads in class directly from DB."""
- obj = self._make_from_defaults(self._default_ids[0])
- obj.save(self.db_conn)
- for row in self.db_conn.row_where(self.checked_class.table_name,
- 'id', obj.id_):
- # check .from_table_row reproduces state saved, no matter if obj
- # later changed (with caching even)
- # NB: we'll only compare hashes because obj itself disappears on
- # .from_table_row-triggered database reload
- hash_original = hash(obj)
- attr_name = self._change_obj(obj)
- obj.cache()
- to_cmp = getattr(obj, attr_name)
- retrieved = self.checked_class.from_table_row(self.db_conn, row)
- self.assertNotEqual(to_cmp, getattr(retrieved, attr_name))
- self.assertEqual(hash_original, hash(retrieved))
- # check cache contains what .from_table_row just produced
- self.assertEqual({retrieved.id_: retrieved},
- self.checked_class.get_cache())
-
- @TestCaseAugmented._run_if_with_db_but_not_server
- @TestCaseAugmented._run_on_versioned_attributes
- def test_versioned_history_from_row(self,
- owner: Any,
- _: str,
- attr: VersionedAttribute,
- default: str | float,
- to_set: list[str] | list[float]
- ) -> None:
- """"Test VersionedAttribute.history_from_row() knows its DB rows."""
- attr.set(to_set[0])
- attr.set(to_set[1])
- owner.save(self.db_conn)
- # make empty VersionedAttribute, fill from rows, compare to owner's
- for row in self.db_conn.row_where(owner.table_name, 'id', owner.id_):
- loaded_attr = VersionedAttribute(owner, attr.table_name, default)
- for row in self.db_conn.row_where(attr.table_name, 'parent',
- owner.id_):
- loaded_attr.history_from_row(row)
- self.assertEqual(len(attr.history.keys()),
- len(loaded_attr.history.keys()))
- for timestamp, value in attr.history.items():
- self.assertEqual(value, loaded_attr.history[timestamp])
-
- @TestCaseAugmented._run_if_with_db_but_not_server
- def test_all(self) -> None:
- """Test .all() and its relation to cache and savings."""
- id1, id2, id3 = self._default_ids
- item1 = self._make_from_defaults(id1)
- item2 = self._make_from_defaults(id2)
- item3 = self._make_from_defaults(id3)
- # check .all() returns empty list on un-cached items
- self.assertEqual(self.checked_class.all(self.db_conn), [])
- # check that all() shows only cached/saved items
- item1.cache()
- item3.save(self.db_conn)
- self.assertEqual(sorted(self.checked_class.all(self.db_conn)),
- sorted([item1, item3]))
- item2.save(self.db_conn)
- self.assertEqual(sorted(self.checked_class.all(self.db_conn)),
- sorted([item1, item2, item3]))
-
- @TestCaseAugmented._run_if_with_db_but_not_server
- def test_singularity(self) -> None:
- """Test pointers made for single object keep pointing to it."""
- id1 = self._default_ids[0]
- obj = self._make_from_defaults(id1)
- obj.save(self.db_conn)
- # change object, expect retrieved through .by_id to carry change
- attr_name = self._change_obj(obj)
- new_attr = getattr(obj, attr_name)
- retrieved = self.checked_class.by_id(self.db_conn, id1)
- self.assertEqual(new_attr, getattr(retrieved, attr_name))
-
- @TestCaseAugmented._run_if_with_db_but_not_server
- @TestCaseAugmented._run_on_versioned_attributes
- def test_versioned_singularity(self,
- owner: Any,
- attr_name: str,
- attr: VersionedAttribute,
- _: str | float,
- to_set: list[str] | list[float]
- ) -> None:
- """Test singularity of VersionedAttributes on saving."""
- owner.save(self.db_conn)
- # change obj, expect retrieved through .by_id to carry change
- attr.set(to_set[0])
- retrieved = self.checked_class.by_id(self.db_conn, owner.id_)
- attr_retrieved = getattr(retrieved, attr_name)
- self.assertEqual(attr.history, attr_retrieved.history)
-
- @TestCaseAugmented._run_if_with_db_but_not_server
- def test_remove(self) -> None:
- """Test .remove() effects on DB and cache."""
- obj = self._make_from_defaults(self._default_ids[0])
- # check removal only works after saving
- with self.assertRaises(HandledException):
- obj.remove(self.db_conn)
- obj.save(self.db_conn)
- obj.remove(self.db_conn)
- # check access to obj fails after removal
- with self.assertRaises(HandledException):
- print(obj.id_)
- # check DB and cache now empty
- self.check_identity_with_cache_and_db([])
-
-
-class Expected:
- """Builder of (JSON-like) dict to compare against responses of test server.
-
- Collects all items and relations we expect expressed in the server's JSON
- responses and puts them into the proper json.dumps-friendly dict structure,
- accessible via .as_dict, to compare them in TestsWithServer.check_json_get.
-
- On its own provides for .as_dict output only {"_library": …}, initialized
- from .__init__ and to be directly manipulated via the .lib* methods.
- Further structures of the expected response may be added and kept
- up-to-date by subclassing .__init__, .recalc, and .d.
-
- NB: Lots of expectations towards server behavior will be made explicit here
- (or in the subclasses) rather than in the actual TestCase methods' code.
- """
- _default_dict: dict[str, Any]
- _forced: dict[str, Any]
- _fields: dict[str, Any]
- _on_empty_make_temp: tuple[str, str]
-
- def __init__(self) -> None:
- for name in ['_default_dict', '_fields', '_forced']:
- if not hasattr(self, name):
- setattr(self, name, {})
- self._lib: dict[str, dict[int, dict[str, Any]]] = {}
- for k, v in self._default_dict.items():
- if k not in self._fields:
- self._fields[k] = v
-
- def recalc(self) -> None:
- """Update internal dictionary by subclass-specific rules."""
- todos = self.lib_all('Todo')
- for todo in todos:
- todo['parents'] = []
- for todo in todos:
- for child_id in todo['children']:
- self.lib_get('Todo', child_id)['parents'] += [todo['id']]
- todo['children'].sort()
- procsteps = self.lib_all('ProcessStep')
- procs = self.lib_all('Process')
- for proc in procs:
- proc['explicit_steps'] = [s['id'] for s in procsteps
- if s['owner_id'] == proc['id']]
-
- @property
- def as_dict(self) -> dict[str, Any]:
- """Return dict to compare against test server JSON responses."""
- make_temp = False
- if hasattr(self, '_on_empty_make_temp'):
- category, dicter = getattr(self, '_on_empty_make_temp')
- id_ = self._fields[category.lower()]
- make_temp = not bool(self.lib_get(category, id_))
- if make_temp:
- self.lib_set(category, [getattr(self, dicter)(id_)])
- self.recalc()
- d = {'_library': self._lib}
- for k, v in self._fields.items():
- # we expect everything sortable to be sorted
- if isinstance(v, list) and k not in self._forced:
- # NB: if we don't test for v being list, sorted() on an empty
- # dict may return an empty list
- try:
- v = sorted(v)
- except TypeError:
- pass
- d[k] = v
- for k, v in self._forced.items():
- d[k] = v
- if make_temp:
- json = json_dumps(d)
- id_ = id_ if id_ is not None else -1
- self.lib_del(category, id_)
- d = json_loads(json)
- return d
-
- def lib_get(self, category: str, id_: int) -> dict[str, Any]:
- """From library, return item of category and id_, or empty dict."""
- if category in self._lib and id_ in self._lib[category]:
- return self._lib[category][id_]
- return {}
-
- def lib_all(self, category: str) -> list[dict[str, Any]]:
- """From library, return items of category, or [] if none."""
- if category in self._lib:
- return list(self._lib[category].values())
- return []
-
- def lib_set(self, category: str, items: list[dict[str, object]]) -> None:
- """Update library for category with items."""
- if category not in self._lib:
- self._lib[category] = {}
- for item in items:
- id_ = item['id'] if item['id'] is not None else -1
- assert isinstance(id_, int)
- self._lib[category][id_] = item
-
- def lib_del(self, category: str, id_: int) -> None:
- """Remove category element of id_ from library."""
- del self._lib[category][id_]
- if 0 == len(self._lib[category]):
- del self._lib[category]
-
- def lib_wipe(self, category: str) -> None:
- """Remove category from library."""
- if category in self._lib:
- del self._lib[category]
-
- def set(self, field_name: str, value: object) -> None:
- """Set top-level .as_dict field."""
- self._fields[field_name] = value
-
- def force(self, field_name: str, value: object) -> None:
- """Set ._forced field to ensure value in .as_dict."""
- self._forced[field_name] = value
-
- @staticmethod
- def as_ids(items: list[dict[str, Any]]) -> list[int]:
- """Return list of only 'id' fields of items."""
- return [item['id'] for item in items]
-
- @staticmethod
- def day_as_dict(id_: int, comment: str = '') -> dict[str, object]:
- """Return JSON of Day to expect."""
- return {'id': id_, 'comment': comment, 'todos': []}
-
- def set_day_from_post(self, id_: int, d: dict[str, Any]) -> None:
- """Set Day of id_ in library based on POST dict d."""
- day = self.day_as_dict(id_)
- for k, v in d.items():
- if 'day_comment' == k:
- day['comment'] = v
- elif 'new_todo' == k:
- next_id = 1
- for todo in self.lib_all('Todo'):
- if next_id <= todo['id']:
- next_id = todo['id'] + 1
- for proc_id in sorted([id_ for id_ in v if id_]):
- todo = self.todo_as_dict(next_id, proc_id, id_)
- self.lib_set('Todo', [todo])
- next_id += 1
- elif 'done' == k:
- for todo_id in v:
- self.lib_get('Todo', todo_id)['is_done'] = True
- elif 'todo_id' == k:
- for i, todo_id in enumerate(v):
- t = self.lib_get('Todo', todo_id)
- if 'comment' in d:
- t['comment'] = d['comment'][i]
- if 'effort' in d:
- effort = d['effort'][i] if d['effort'][i] else None
- t['effort'] = effort
- self.lib_set('Day', [day])
-
- @staticmethod
- def cond_as_dict(id_: int = 1,
- is_active: bool = False,
- title: None | str = None,
- description: None | str = None,
- ) -> dict[str, object]:
- """Return JSON of Condition to expect."""
- versioned: dict[str, dict[str, object]]
- versioned = {'title': {}, 'description': {}}
- if title is not None:
- versioned['title']['0'] = title
- if description is not None:
- versioned['description']['0'] = description
- return {'id': id_, 'is_active': is_active, '_versioned': versioned}
-
- def set_cond_from_post(self, id_: int, d: dict[str, Any]) -> None:
- """Set Condition of id_ in library based on POST dict d."""
- if 'delete' in d:
- self.lib_del('Condition', id_)
- return
- cond = self.lib_get('Condition', id_)
- if cond:
- cond['is_active'] = 'is_active' in d and\
- d['is_active'] in _VALID_TRUES
- for category in ['title', 'description']:
- history = cond['_versioned'][category]
- if len(history) > 0:
- last_i = sorted([int(k) for k in history.keys()])[-1]
- if d[category] != history[str(last_i)]:
- history[str(last_i + 1)] = d[category]
- else:
- history['0'] = d[category]
- else:
- cond = self.cond_as_dict(id_, **d)
- self.lib_set('Condition', [cond])
-
- @staticmethod
- def todo_as_dict(id_: int = 1,
- process_id: int = 1,
- day_id: int = 1,
- conditions: None | list[int] = None,
- disables: None | list[int] = None,
- blockers: None | list[int] = None,
- enables: None | list[int] = None,
- calendarize: bool = False,
- comment: str = '',
- is_done: bool = False,
- effort: float | None = None,
- children: list[int] | None = None,
- parents: list[int] | None = None,
- ) -> dict[str, object]:
- """Return JSON of Todo to expect."""
- # pylint: disable=too-many-arguments
- d = {'id': id_,
- 'day_id': day_id,
- 'process_id': process_id,
- 'is_done': is_done,
- 'calendarize': calendarize,
- 'comment': comment,
- 'children': children if children else [],
- 'parents': parents if parents else [],
- 'effort': effort,
- 'conditions': conditions if conditions else [],
- 'disables': disables if disables else [],
- 'blockers': blockers if blockers else [],
- 'enables': enables if enables else []}
- return d
-
- def set_todo_from_post(self, id_: int, d: dict[str, Any]) -> None:
- """Set Todo of id_ in library based on POST dict d."""
- corrected_kwargs: dict[str, Any] = {
- 'children': [], 'is_done': 0, 'calendarize': 0, 'comment': ''}
- for k, v in d.items():
- if k.startswith('step_filler_to_'):
- continue
- if 'adopt' == k:
- new_children = v if isinstance(v, list) else [v]
- corrected_kwargs['children'] += new_children
- continue
- if k in {'is_done', 'calendarize'} and v in _VALID_TRUES:
- v = True
- corrected_kwargs[k] = v
- todo = self.lib_get('Todo', id_)
- if todo:
- for k, v in corrected_kwargs.items():
- todo[k] = v
- else:
- todo = self.todo_as_dict(id_, **corrected_kwargs)
- self.lib_set('Todo', [todo])
-
- @staticmethod
- def procstep_as_dict(id_: int,
- owner_id: int,
- step_process_id: int,
- parent_step_id: int | None = None
- ) -> dict[str, object]:
- """Return JSON of ProcessStep to expect."""
- return {'id': id_,
- 'owner_id': owner_id,
- 'step_process_id': step_process_id,
- 'parent_step_id': parent_step_id}
-
- @staticmethod
- def proc_as_dict(id_: int = 1,
- title: None | str = None,
- description: None | str = None,
- effort: None | float = None,
- conditions: None | list[int] = None,
- disables: None | list[int] = None,
- blockers: None | list[int] = None,
- enables: None | list[int] = None,
- explicit_steps: None | list[int] = None,
- suppressed_steps: None | list[int] = None
- ) -> dict[str, object]:
- """Return JSON of Process to expect."""
- # pylint: disable=too-many-arguments
- versioned: dict[str, dict[str, object]]
- versioned = {'title': {}, 'description': {}, 'effort': {}}
- if title is not None:
- versioned['title']['0'] = title
- if description is not None:
- versioned['description']['0'] = description
- if effort is not None:
- versioned['effort']['0'] = effort
- d = {'id': id_,
- 'calendarize': False,
- 'suppressed_steps': suppressed_steps if suppressed_steps else [],
- 'explicit_steps': explicit_steps if explicit_steps else [],
- '_versioned': versioned,
- 'conditions': conditions if conditions else [],
- 'disables': disables if disables else [],
- 'enables': enables if enables else [],
- 'blockers': blockers if blockers else []}
- return d
-
- def set_proc_from_post(self, id_: int, d: dict[str, Any]) -> None:
- """Set Process of id_ in library based on POST dict d."""
- proc = self.lib_get('Process', id_)
- if proc:
- for category in ['title', 'description', 'effort']:
- history = proc['_versioned'][category]
- if len(history) > 0:
- last_i = sorted([int(k) for k in history.keys()])[-1]
- if d[category] != history[str(last_i)]:
- history[str(last_i + 1)] = d[category]
- else:
- history['0'] = d[category]
- else:
- proc = self.proc_as_dict(id_,
- d['title'], d['description'], d['effort'])
- ignore = {'title', 'description', 'effort', 'new_top_step', 'step_of',
- 'kept_steps'}
- proc['calendarize'] = False
- for k, v in d.items():
- if k in ignore\
- or k.startswith('step_') or k.startswith('new_step_to'):
- continue
- if k in {'calendarize'} and v in _VALID_TRUES:
- v = True
- elif k in {'suppressed_steps', 'explicit_steps', 'conditions',
- 'disables', 'enables', 'blockers'}:
- if not isinstance(v, list):
- v = [v]
- proc[k] = v
- self.lib_set('Process', [proc])
-
-
-class TestCaseWithServer(TestCaseWithDB):
- """Module tests against our HTTP server/handler (and database)."""
-
- def setUp(self) -> None:
- super().setUp()
- self.httpd = TaskServer(self.db_file, ('localhost', 0), TaskHandler)
- self.server_thread = Thread(target=self.httpd.serve_forever)
- self.server_thread.daemon = True
- self.server_thread.start()
- self.conn = HTTPConnection(str(self.httpd.server_address[0]),
- self.httpd.server_address[1])
- self.httpd.render_mode = 'json'
-
- def tearDown(self) -> None:
- self.httpd.shutdown()
- self.httpd.server_close()
- self.server_thread.join()
- super().tearDown()
-
- def post_exp_cond(self,
- exps: list[Expected],
- payload: dict[str, object],
- id_: int = 1,
- post_to_id: bool = True,
- redir_to_id: bool = True
- ) -> None:
- """POST /condition(s), appropriately update Expecteds."""
- # pylint: disable=too-many-arguments
- target = f'/condition?id={id_}' if post_to_id else '/condition'
- redir = f'/condition?id={id_}' if redir_to_id else '/conditions'
- if 'title' not in payload:
- payload['title'] = 'foo'
- if 'description' not in payload:
- payload['description'] = 'foo'
- self.check_post(payload, target, redir=redir)
- for exp in exps:
- exp.set_cond_from_post(id_, payload)
-
- def post_exp_day(self,
- exps: list[Expected],
- payload: dict[str, Any],
- day_id: int = 1
- ) -> None:
- """POST /day, appropriately update Expecteds."""
- if 'make_type' not in payload:
- payload['make_type'] = 'empty'
- if 'day_comment' not in payload:
- payload['day_comment'] = ''
- date = dt_date_from_day_id(day_id).isoformat()
- target = f'/day?date={date}'
- redir_to = f'{target}&make_type={payload["make_type"]}'
- self.check_post(payload, target, 302, redir_to)
- for exp in exps:
- exp.set_day_from_post(day_id, payload)
-
- def post_exp_process(self,
- exps: list[Expected],
- payload: dict[str, Any],
- id_: int,
- ) -> dict[str, object]:
- """POST /process, appropriately update Expecteds."""
- if 'title' not in payload:
- payload['title'] = 'foo'
- if 'description' not in payload:
- payload['description'] = 'foo'
- if 'effort' not in payload:
- payload['effort'] = 1.1
- self.check_post(payload, f'/process?id={id_}',
- redir=f'/process?id={id_}')
- for exp in exps:
- exp.set_proc_from_post(id_, payload)
- return payload
-
- def post_exp_todo(self,
- exps: list[Expected],
- payload: dict[str, Any],
- id_: int,
- ) -> None:
- """POST /todo, appropriately updated Expecteds."""
- self.check_post(payload, f'/todo?id={id_}')
- for exp in exps:
- exp.set_todo_from_post(id_, payload)
-
- def check_filter(self, exp: Expected, category: str, key: str,
- val: str, list_ids: list[int]) -> None:
- """Check GET /{category}?{key}={val} sorts to list_ids."""
- # pylint: disable=too-many-arguments
- exp.set(key, val)
- exp.force(category, list_ids)
- self.check_json_get(f'/{category}?{key}={val}', exp)
-
- def check_redirect(self, target: str) -> None:
- """Check that self.conn answers with a 302 redirect to target."""
- response = self.conn.getresponse()
- self.assertEqual(response.status, 302)
- self.assertEqual(response.getheader('Location'), target)
-
- def check_get(self, target: str, expected_code: int) -> None:
- """Check that a GET to target yields expected_code."""
- self.conn.request('GET', target)
- self.assertEqual(self.conn.getresponse().status, expected_code)
-
- def check_minimal_inputs(self,
- url: str,
- minimal_inputs: dict[str, Any]
- ) -> None:
- """Check that url 400's unless all of minimal_inputs provided."""
- for to_hide in minimal_inputs.keys():
- to_post = {k: v for k, v in minimal_inputs.items() if k != to_hide}
- self.check_post(to_post, url, 400)
-
- def check_post(self, data: Mapping[str, object], target: str,
- expected_code: int = 302, redir: str = '') -> None:
- """Check that POST of data to target yields expected_code."""
- encoded_form_data = urlencode(data, doseq=True).encode('utf-8')
- headers = {'Content-Type': 'application/x-www-form-urlencoded',
- 'Content-Length': str(len(encoded_form_data))}
- self.conn.request('POST', target,
- body=encoded_form_data, headers=headers)
- if 302 == expected_code:
- redir = target if redir == '' else redir
- self.check_redirect(redir)
- else:
- self.assertEqual(self.conn.getresponse().status, expected_code)
-
- def check_get_defaults(self,
- path: str,
- default_id: str = '1',
- id_name: str = 'id'
- ) -> None:
- """Some standard model paths to test."""
- nonexist_status = 200 if self.checked_class.can_create_by_id else 404
- self.check_get(path, nonexist_status)
- self.check_get(f'{path}?{id_name}=', 400)
- self.check_get(f'{path}?{id_name}=foo', 400)
- self.check_get(f'{path}?{id_name}=0', 400)
- self.check_get(f'{path}?{id_name}={default_id}', nonexist_status)
-
- def check_json_get(self, path: str, expected: Expected) -> None:
- """Compare JSON on GET path with expected.
-
- To simplify comparison of VersionedAttribute histories, transforms
- timestamp keys of VersionedAttribute histories into (strings of)
- integers counting chronologically forward from 0.
- """
-
- def rewrite_history_keys_in(item: Any) -> Any:
- if isinstance(item, dict):
- if '_versioned' in item.keys():
- for category in item['_versioned']:
- vals = item['_versioned'][category].values()
- history = {}
- for i, val in enumerate(vals):
- history[str(i)] = val
- item['_versioned'][category] = history
- for category in list(item.keys()):
- rewrite_history_keys_in(item[category])
- elif isinstance(item, list):
- item[:] = [rewrite_history_keys_in(i) for i in item]
- return item
-
- def walk_diffs(path: str, cmp1: object, cmp2: object) -> None:
- # pylint: disable=too-many-branches
- def warn(intro: str, val: object) -> None:
- if isinstance(val, (str, int, float)):
- print(intro, val)
- else:
- print(intro)
- pprint(val)
- if cmp1 != cmp2:
- if isinstance(cmp1, dict) and isinstance(cmp2, dict):
- for k, v in cmp1.items():
- if k not in cmp2:
- warn(f'DIFF {path}: retrieved lacks {k}', v)
- elif v != cmp2[k]:
- walk_diffs(f'{path}:{k}', v, cmp2[k])
- for k in [k for k in cmp2.keys() if k not in cmp1]:
- warn(f'DIFF {path}: expected lacks retrieved\'s {k}',
- cmp2[k])
- elif isinstance(cmp1, list) and isinstance(cmp2, list):
- for i, v1 in enumerate(cmp1):
- if i >= len(cmp2):
- warn(f'DIFF {path}[{i}] retrieved misses:', v1)
- elif v1 != cmp2[i]:
- walk_diffs(f'{path}[{i}]', v1, cmp2[i])
- if len(cmp2) > len(cmp1):
- for i, v2 in enumerate(cmp2[len(cmp1):]):
- warn(f'DIFF {path}[{len(cmp1)+i}] misses:', v2)
- else:
- warn(f'DIFF {path} – for expected:', cmp1)
- warn('… and for retrieved:', cmp2)
-
- self.conn.request('GET', path)
- response = self.conn.getresponse()
- self.assertEqual(response.status, 200)
- retrieved = json_loads(response.read().decode())
- rewrite_history_keys_in(retrieved)
- # to convert ._lib int keys to str
- cmp = json_loads(json_dumps(expected.as_dict))
- try:
- self.assertEqual(cmp, retrieved)
- except AssertionError as e:
- print('EXPECTED:')
- pprint(cmp)
- print('RETRIEVED:')
- pprint(retrieved)
- walk_diffs('', cmp, retrieved)
- raise e
--- /dev/null
+"""Test Conditions module."""
+from typing import Any
+from tests.utils import (TestCaseSansDB, TestCaseWithDB, TestCaseWithServer,
+ Expected)
+from plomtask.conditions import Condition
+
+
+class TestsSansDB(TestCaseSansDB):
+ """Tests requiring no DB setup."""
+ checked_class = Condition
+
+
+class TestsWithDB(TestCaseWithDB):
+ """Tests requiring DB, but not server setup."""
+ checked_class = Condition
+ default_init_kwargs = {'is_active': 0}
+
+
+class ExpectedGetConditions(Expected):
+ """Builder of expectations for GET /conditions."""
+ _default_dict = {'sort_by': 'title', 'pattern': ''}
+
+ def recalc(self) -> None:
+ """Update internal dictionary by subclass-specific rules."""
+ super().recalc()
+ self._fields['conditions'] = self.as_ids(self.lib_all('Condition'))
+
+
+class ExpectedGetCondition(Expected):
+ """Builder of expectations for GET /condition."""
+ _default_dict = {'is_new': False}
+ _on_empty_make_temp = ('Condition', 'cond_as_dict')
+
+ def __init__(self, id_: int | None, *args: Any, **kwargs: Any) -> None:
+ self._fields = {'condition': id_}
+ super().__init__(*args, **kwargs)
+
+ def recalc(self) -> None:
+ """Update internal dictionary by subclass-specific rules."""
+ super().recalc()
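+ # invert each Process's ConditionsRelations fields into the listings
+ # we expect GET /condition to show for this Condition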
+ for p_field, c_field in [('conditions', 'enabled_processes'),
+ ('disables', 'disabling_processes'),
+ ('blockers', 'disabled_processes'),
+ ('enables', 'enabling_processes')]:
+ self._fields[c_field] = self.as_ids([
+ p for p in self.lib_all('Process')
+ if self._fields['condition'] in p[p_field]])
+
+
+class TestsWithServer(TestCaseWithServer):
+ """Module tests against our HTTP server/handler (and database)."""
+ checked_class = Condition
+
+ def test_fail_POST_condition(self) -> None:
+ """Test malformed/illegal POST /condition requests."""
+ # check incomplete POST payloads
+ valid_payload = {'title': '', 'description': ''}
+ self.check_minimal_inputs('/condition', valid_payload)
+ # check valid POST payload on bad paths
+ self.check_post(valid_payload, '/condition?id=foo', 400)
+ # check cannot delete depended-upon Condition
+ self.post_exp_cond([], {})
+ for key in ('conditions', 'blockers', 'enables', 'disables'):
+ self.post_exp_process([], {key: [1]}, 1)
+ self.check_post({'delete': ''}, '/condition?id=1', 500)
+ self.post_exp_process([], {}, 1)
+ self.post_exp_day([], {'new_todo': '1'})
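+ # with Todo 1 created, repeat the deletion-blocking checks for
+ # relations hanging off a Todo rather than a Process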
+ for key in ('conditions', 'blockers', 'enables', 'disables'):
+ self.post_exp_todo([], {key: [1]}, 1)
+ self.check_post({'delete': ''}, '/condition?id=1', 500)
+
+ def test_POST_condition(self) -> None:
+ """Test (valid) POST /condition and its effect on GET /condition[s]."""
+ url_single, url_all = '/condition?id=1', '/conditions'
+ exp_single, exp_all = ExpectedGetCondition(1), ExpectedGetConditions()
+ all_exps = [exp_single, exp_all]
+ # test valid POST's effect on single /condition and full /conditions
+ self.post_exp_cond(all_exps, {}, post_to_id=False)
+ self.check_json_get(url_single, exp_single)
+ self.check_json_get(url_all, exp_all)
+ # test (no) effect of invalid POST to existing Condition on /condition
+ self.check_post({}, url_single, 400)
+ self.check_json_get(url_single, exp_single)
+ # test effect of POST changing title, description, and activeness
+ self.post_exp_cond(all_exps, {'title': 'bar', 'description': 'oof',
+ 'is_active': 1})
+ self.check_json_get(url_single, exp_single)
+ # test POST sans 'is_active' resets it to False
+ self.post_exp_cond(all_exps, {})
+ self.check_json_get(url_single, exp_single)
+ # test deletion POST's effect: id=1 turns into an empty single view,
+ # and the full /conditions listing into an empty list
+ self.check_json_get(url_single, exp_single)
+ self.post_exp_cond(all_exps, {'delete': ''}, redir_to_id=False)
+ exp_single.set('is_new', True)
+ self.check_json_get(url_single, exp_single)
+ self.check_json_get(url_all, exp_all)
+
+ def test_GET_condition(self) -> None:
+ """More GET /condition testing, especially for Process relations."""
+ # check expected default status codes
+ self.check_get_defaults('/condition')
+ # check 'is_new' set if id= absent or pointing to not-yet-existing ID
+ exp = ExpectedGetCondition(None)
+ exp.set('is_new', True)
+ self.check_json_get('/condition', exp)
+ exp = ExpectedGetCondition(1)
+ exp.set('is_new', True)
+ self.check_json_get('/condition?id=1', exp)
+ # make Condition and two Processes that among them establish all
+ # possible ConditionsRelations to it, check /condition displays all
+ exp = ExpectedGetCondition(1)
+ self.post_exp_cond([exp], {}, post_to_id=False)
+ for i, p in enumerate([('conditions', 'disables'),
+ ('enables', 'blockers')]):
+ self.post_exp_process([exp], {k: [1] for k in p}, i+1)
+ self.check_json_get('/condition?id=1', exp)
+
+ def test_GET_conditions(self) -> None:
+ """Test GET /conditions."""
+ # test empty result on empty DB, default-settings on empty params
+ exp = ExpectedGetConditions()
+ self.check_json_get('/conditions', exp)
+ # test 'sort_by' defaults to 'title' (even if set to something else,
+ # as long as no handler exists for it) and 'pattern' gets preserved
+ exp.set('pattern', 'bar')
+ self.check_json_get('/conditions?sort_by=foo&pattern=bar&foo=x', exp)
+ exp.set('pattern', '')
+ # test non-empty result, automatic (positive) sorting by title
+ post_cond1 = {'is_active': 0, 'title': 'foo', 'description': 'oof'}
+ post_cond2 = {'is_active': 0, 'title': 'bar', 'description': 'rab'}
+ post_cond3 = {'is_active': 1, 'title': 'baz', 'description': 'zab'}
+ for i, post in enumerate([post_cond1, post_cond2, post_cond3]):
+ self.post_exp_cond([exp], post, i+1, post_to_id=False)
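+ # titles are 'foo' (id 1), 'bar' (id 2), 'baz' (id 3), so ascending
+ # title order is [2, 3, 1]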
+ self.check_filter(exp, 'conditions', 'sort_by', 'title', [2, 3, 1])
+ # test other sortings
+ self.check_filter(exp, 'conditions', 'sort_by', '-title', [1, 3, 2])
+ self.check_filter(exp, 'conditions', 'sort_by', 'is_active', [1, 2, 3])
+ self.check_filter(exp, 'conditions', 'sort_by', '-is_active',
+ [3, 2, 1])
+ exp.set('sort_by', 'title')
+ # test pattern matching on title
+ exp.lib_del('Condition', 1)
+ self.check_filter(exp, 'conditions', 'pattern', 'ba', [2, 3])
+ # test pattern matching on description
+ exp.lib_wipe('Condition')
+ exp.set_cond_from_post(1, post_cond1)
+ self.check_filter(exp, 'conditions', 'pattern', 'of', [1])
--- /dev/null
+"""Test Days module."""
+from datetime import date as dt_date, datetime, timedelta
+from typing import Any
+from tests.utils import (TestCaseSansDB, TestCaseWithDB, TestCaseWithServer,
+ Expected, date_and_day_id, dt_date_from_day_id)
+from plomtask.dating import date_in_n_days as tested_date_in_n_days
+from plomtask.days import Day
+
+# Simply the ISO format for dates as used in plomtask.dating, but for testing
+# purposes we state our expectations here independently and explicitly
+TESTING_DATE_FORMAT = '%Y-%m-%d'
+
+
+def _testing_date_in_n_days(n: int) -> str:
+ """Return in ISO format / TEST_DATE_FORMAT date from today + n days.
+
+ As with TESTING_DATE_FORMAT, we assume this equals the original code
+ at plomtask.dating.date_in_n_days, but want to state our expectations
+ explicitly to rule out importing issues from the original.
+ """
+ date = dt_date.today() + timedelta(days=n)
+ return date.strftime(TESTING_DATE_FORMAT)
+
+
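+# NB: Day IDs count days since 2000-01-01, cf. dt_date_from_day_id in
+# tests.utils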
+def _days_n_for_date(date: str) -> int:
+ return (dt_date.fromisoformat(date) - dt_date(2000, 1, 1)).days
+
+
+class TestsSansDB(TestCaseSansDB):
+ """Days module tests not requiring DB setup."""
+ checked_class = Day
+
+ def test_date_in_n_days(self) -> None:
+ """Test dating.date_in_n_days"""
+ for n in [-100, -2, -1, 0, 1, 2, 1000]:
+ date = datetime.now() + timedelta(days=n)
+ self.assertEqual(tested_date_in_n_days(n),
+ date.strftime(TESTING_DATE_FORMAT))
+
+ def test_Day_date_weekday_neighbor_dates(self) -> None:
+ """Test Day's date parsing and neighbourhood resolution."""
+ self.assertEqual(dt_date(2000, 1, 2).isoformat(), Day(1).date)
+ self.assertEqual(dt_date(2001, 1, 2).isoformat(), Day(367).date)
+ self.assertEqual('Sunday', Day(1).weekday)
+ self.assertEqual('March', Day(75).month_name)
+ self.assertEqual('2000-12-31', Day(366).prev_date)
+ self.assertEqual('2001-03-01', Day(424).next_date)
+
+
+class TestsWithDB(TestCaseWithDB):
+ """Tests requiring DB, but not server setup."""
+ checked_class = Day
+
+ def test_Day_with_filled_gaps(self) -> None:
+ """Test .with_filled_gaps."""
+ day_ids = [n + 1 for n in range(9)]
+ dt_dates = [dt_date_from_day_id(id_) for id_ in day_ids]
+
+ def expect_within_full_range_as_commented(
+ range_indexes: tuple[int, int],
+ indexes_to_provide: list[int]
+ ) -> None:
+ start_i, end_i = range_indexes
+ days_expected = [Day(n) for n in day_ids]
+ to_remove = []
+ for idx in indexes_to_provide:
+ days_expected[idx] = Day(day_ids[idx], '#')
+ days_expected[idx].save(self.db_conn)
+ to_remove += [days_expected[idx]]
+ days_expected = days_expected[start_i:end_i+1]
+ days_result = Day.with_filled_gaps(
+ self.db_conn, dt_dates[start_i], dt_dates[end_i])
+ self.assertEqual(days_result, days_expected)
+ for day in to_remove:
+ day.remove(self.db_conn)
+
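+ # in each call below: range_indexes bounds the queried date interval;
+ # indexes_to_provide marks the Days actually saved (with comment '#');
+ # all other Days in range must appear as unsaved fillers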
+ # check provided Days recognizable in (full-range) interval
+ expect_within_full_range_as_commented((0, 8), [0, 4, 8])
+ # check limited range, but limiting Days provided
+ expect_within_full_range_as_commented((2, 6), [2, 5, 6])
+ # check Days within range but beyond provided Days also filled in
+ expect_within_full_range_as_commented((1, 7), [2, 5])
+ # check provided Days beyond range ignored
+ expect_within_full_range_as_commented((3, 5), [1, 2, 4, 6, 7])
+ # check inversion of start_date and end_date returns empty list
+ expect_within_full_range_as_commented((5, 3), [2, 4, 6])
+ # check empty provision still creates filler elements in interval
+ expect_within_full_range_as_commented((3, 5), [])
+ # check single-element range yields just a filler if no provided Day falls in it
+ expect_within_full_range_as_commented((1, 1), [2, 4, 6])
+ # check (un-saved) filler Days don't show up in cache or DB
+ day = Day(day_ids[3])
+ day.save(self.db_conn)
+ Day.with_filled_gaps(self.db_conn, dt_dates[0], dt_dates[-1])
+ self.check_identity_with_cache_and_db([day])
+
+
+class ExpectedGetCalendar(Expected):
+ """Builder of expectations for GET /calendar."""
+
+ def __init__(self, start: int, end: int, *args: Any, **kwargs: Any
+ ) -> None:
+ today_dt = dt_date.today()
+ today_iso = today_dt.isoformat()
+ self._fields = {
+ 'start': (today_dt + timedelta(days=start)).isoformat(),
+ 'end': (today_dt + timedelta(days=end)).isoformat(),
+ 'today': today_iso}
+ self._fields['days'] = [
+ _days_n_for_date(today_iso) + i for i in range(start, end+1)]
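+ # 'days' holds the Day IDs (days since 2000-01-01) of each date in range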
+ super().__init__(*args, **kwargs)
+ for day_id in self._fields['days']:
+ self.lib_set('Day', [self.day_as_dict(day_id)])
+
+
+class ExpectedGetDay(Expected):
+ """Builder of expectations for GET /day."""
+ _default_dict = {'make_type': 'full'}
+ _on_empty_make_temp = ('Day', 'day_as_dict')
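+ # if the queried Day was never posted, expect the response to carry a
+ # temporary day_as_dict stand-in for it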
+
+ def __init__(self, day_id: int, *args: Any, **kwargs: Any) -> None:
+ self._fields = {'day': day_id}
+ super().__init__(*args, **kwargs)
+
+ def recalc(self) -> None:
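+ """Update internal dictionary by subclass-specific rules."""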
+ super().recalc()
+ todos = [t for t in self.lib_all('Todo')
+ if t['day_id'] == self._fields['day']]
+ self.lib_get('Day', self._fields['day'])['todos'] = self.as_ids(todos)
+ self._fields['top_nodes'] = [
+ {'children': [], 'seen': 0, 'todo': todo['id']}
+ for todo in todos]
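+ # Todos are expected to mirror the ConditionsRelations of their Process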
+ for todo in todos:
+ proc = self.lib_get('Process', todo['process_id'])
+ for title in ['conditions', 'enables', 'blockers', 'disables']:
+ todo[title] = proc[title]
+ conds_present = set()
+ for todo in todos:
+ for title in ['conditions', 'enables', 'blockers', 'disables']:
+ for cond_id in todo[title]:
+ conds_present.add(cond_id)
+ self._fields['conditions_present'] = list(conds_present)
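+ # for each Condition present, collect which Todos en-/disable it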
+ for prefix in ['en', 'dis']:
+ blers = {}
+ for cond_id in conds_present:
+ blers[cond_id] = self.as_ids(
+ [t for t in todos if cond_id in t[f'{prefix}ables']])
+ self._fields[f'{prefix}ablers_for'] = blers
+ self._fields['processes'] = self.as_ids(self.lib_all('Process'))
+
+
+class TestsWithServer(TestCaseWithServer):
+ """Tests against our HTTP server/handler (and database)."""
+ checked_class = Day
+
+ def test_basic_GET_day(self) -> None:
+ """Test basic (no Processes/Conditions/Todos) GET /day basics."""
+ # check illegal date parameters
+ self.check_get_defaults('/day', '2024-01-01', 'date')
+ self.check_get('/day?date=2024-02-30', 400)
+ # check undefined day
+ today_iso = dt_date.today().isoformat()
+ exp = ExpectedGetDay(_days_n_for_date(today_iso))
+ self.check_json_get('/day', exp)
+ # check defined day with make_type parameter
+ date, day_id = date_and_day_id(1)
+ exp = ExpectedGetDay(day_id)
+ exp.set('make_type', 'bar')
+ self.check_json_get(f'/day?date={date}&make_type=bar', exp)
+ # check parsing of 'yesterday', 'today', 'tomorrow'
+ for name, dist in [('yesterday', -1), ('today', 0), ('tomorrow', +1)]:
+ exp = ExpectedGetDay(_days_n_for_date(today_iso) + dist)
+ self.check_json_get(f'/day?date={name}', exp)
+
+ def test_fail_POST_day(self) -> None:
+ """Test malformed/illegal POST /day requests."""
+ # check payloads lacking the minimal expected inputs
+ url = '/day?date=2024-01-01'
+ minimal_post = {'make_type': '', 'day_comment': ''}
+ self.check_minimal_inputs(url, minimal_post)
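+ # (later payloads extend minimal_post, so those two keys stay present)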
+ # to next check illegal new_todo values, we need an actual Process
+ self.post_exp_process([], {}, 1)
+ # check illegal new_todo values
+ self.check_post(minimal_post | {'new_todo': ['foo']}, url, 400)
+ self.check_post(minimal_post | {'new_todo': [1, 2]}, url, 404)
+ # to next check illegal old_todo inputs, we need to first post Todo
+ self.check_post(minimal_post | {'new_todo': [1]}, url, 302,
+ '/day?date=2024-01-01&make_type=')
+ # check illegal old_todo inputs (equal list lengths though)
+ post = minimal_post | {'comment': ['foo'], 'effort': [3.3],
+ 'done': [], 'todo_id': [1]}
+ self.check_post(post, url, 302, '/day?date=2024-01-01&make_type=')
+ post['todo_id'] = [2] # reference to non-existent Todo
+ self.check_post(post, url, 404)
+ post['todo_id'] = ['a']
+ self.check_post(post, url, 400)
+ post['todo_id'] = [1]
+ post['done'] = ['foo']
+ self.check_post(post, url, 400)
+ post['done'] = [2] # reference to non-posted todo_id
+ self.check_post(post, url, 400)
+ post['done'] = []
+ post['effort'] = ['foo']
+ self.check_post(post, url, 400)
+ post['effort'] = [None]
+ self.check_post(post, url, 400)
+ post['effort'] = [3.3]
+ # check illegal old_todo inputs: unequal list lengths
+ post['comment'] = []
+ self.check_post(post, url, 400)
+ post['comment'] = ['foo', 'foo']
+ self.check_post(post, url, 400)
+ post['comment'] = ['foo']
+ post['effort'] = []
+ self.check_post(post, url, 400)
+ post['effort'] = [3.3, 3.3]
+ self.check_post(post, url, 400)
+ post['effort'] = [3.3]
+ post['todo_id'] = [1, 1]
+ self.check_post(post, url, 400)
+ post['todo_id'] = [1]
+ # check valid POST payload on bad paths
+ self.check_post(post, '/day', 400)
+ self.check_post(post, '/day?date=', 400)
+ self.check_post(post, '/day?date=foo', 400)
+
+ def test_basic_POST_day(self) -> None:
+ """Test basic (no Processes/Conditions/Todos) POST /day.
+
+ Check POST requests properly parse 'today', 'tomorrow', 'yesterday',
+ and actual date strings; store 'day_comment'; preserve 'make_type'
+ setting in redirect even if nonsensical; and allow '' as 'new_todo'.
+ """
+ for name, dist, test_str in [('2024-01-01', None, 'a'),
+ ('today', 0, 'b'),
+ ('yesterday', -1, 'c'),
+ ('tomorrow', +1, 'd')]:
+ date = name if dist is None else _testing_date_in_n_days(dist)
+ post = {'day_comment': test_str, 'make_type': f'x:{test_str}',
+ 'new_todo': ['', '']}
+ post_url = f'/day?date={name}'
+ redir_url = f'{post_url}&make_type={post["make_type"]}'
+ self.check_post(post, post_url, 302, redir_url)
+ day_id = _days_n_for_date(date)
+ exp = ExpectedGetDay(day_id)
+ exp.set_day_from_post(day_id, post)
+ self.check_json_get(post_url, exp)
+
+ def test_GET_day_with_processes_and_todos(self) -> None:
+ """Test GET /day displaying Processes and Todos (no trees)."""
+ date, day_id = date_and_day_id(1)
+ exp = ExpectedGetDay(day_id)
+ # check Processes get displayed in ['processes'] and ['_library'],
+ # even without any Todos referencing them
+ proc_posts = [{'title': 'foo', 'description': 'oof', 'effort': 1.1},
+ {'title': 'bar', 'description': 'rab', 'effort': 0.9}]
+ for i, proc_post in enumerate(proc_posts):
+ self.post_exp_process([exp], proc_post, i+1)
+ self.check_json_get(f'/day?date={date}', exp)
+ # post Todos of either Process and check their display
+ self.post_exp_day([exp], {'new_todo': [1, 2]})
+ self.check_json_get(f'/day?date={date}', exp)
+ # test malformed Todo manipulation posts
+ post_day = {'day_comment': '', 'make_type': '', 'comment': [''],
+ 'new_todo': [], 'done': [1], 'effort': [2.3]}
+ self.check_post(post_day, f'/day?date={date}', 400) # no todo_id
+ post_day['todo_id'] = [2] # not identifying Todo referred to by done
+ self.check_post(post_day, f'/day?date={date}', 400)
+ post_day['todo_id'] = [1, 2] # imply range beyond that of effort etc.
+ self.check_post(post_day, f'/day?date={date}', 400)
+ post_day['comment'] = ['FOO', '']
+ self.check_post(post_day, f'/day?date={date}', 400)
+ post_day['effort'] = [2.3, '']
+ post_day['comment'] = ['']
+ self.check_post(post_day, f'/day?date={date}', 400)
+ # add a comment to one Todo and set the other's doneness and effort
+ post_day['comment'] = ['FOO', '']
+ self.post_exp_day([exp], post_day)
+ self.check_json_get(f'/day?date={date}', exp)
+ # invert effort and comment between both Todos
+ # (cannot invert doneness, /day only collects positive setting)
+ post_day['comment'] = ['', 'FOO']
+ post_day['effort'] = ['', 2.3]
+ self.post_exp_day([exp], post_day)
+ self.check_json_get(f'/day?date={date}', exp)
+
+ def test_POST_day_todo_make_types(self) -> None:
+ """Test behavior of POST /todo on 'make_type'='full' and 'empty'."""
+ date, day_id = date_and_day_id(1)
+ exp = ExpectedGetDay(day_id)
+ # create two Processes, with second one step of first one
+ self.post_exp_process([exp], {}, 2)
+ self.post_exp_process([exp], {'new_top_step': 2}, 1)
+ exp.lib_set('ProcessStep', [
+ exp.procstep_as_dict(1, owner_id=1, step_process_id=2)])
+ self.check_json_get(f'/day?date={date}', exp)
+ # post Todo of adopting Process, with make_type=full
+ self.post_exp_day([exp], {'make_type': 'full', 'new_todo': [1]})
+ exp.lib_get('Todo', 1)['children'] = [2]
+ exp.lib_set('Todo', [exp.todo_as_dict(2, 2)])
+ top_nodes = [{'todo': 1,
+ 'seen': 0,
+ 'children': [{'todo': 2,
+ 'seen': 0,
+ 'children': []}]}]
+ exp.force('top_nodes', top_nodes)
+ self.check_json_get(f'/day?date={date}', exp)
+ # post another Todo of adopting Process, expect to adopt existing
+ self.post_exp_day([exp], {'make_type': 'full', 'new_todo': [1]})
+ exp.lib_set('Todo', [exp.todo_as_dict(3, 1, children=[2])])
+ top_nodes += [{'todo': 3,
+ 'seen': 0,
+ 'children': [{'todo': 2,
+ 'seen': 1,
+ 'children': []}]}]
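+ # (NB: 'seen': 1 flags a Todo node already displayed earlier in the tree)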
+ exp.force('top_nodes', top_nodes)
+ self.check_json_get(f'/day?date={date}', exp)
+ # post another Todo of adopting Process, no adopt with make_type=empty
+ self.post_exp_day([exp], {'make_type': 'empty', 'new_todo': [1]})
+ exp.lib_set('Todo', [exp.todo_as_dict(4, 1)])
+ top_nodes += [{'todo': 4,
+ 'seen': 0,
+ 'children': []}]
+ exp.force('top_nodes', top_nodes)
+ self.check_json_get(f'/day?date={date}', exp)
+
+ def test_POST_day_new_todo_order_commutative(self) -> None:
+ """Check that order of 'new_todo' values in POST /day don't matter."""
+ date, day_id = date_and_day_id(1)
+ exp = ExpectedGetDay(day_id)
+ self.post_exp_process([exp], {}, 2)
+ self.post_exp_process([exp], {'new_top_step': 2}, 1)
+ exp.lib_set('ProcessStep', [
+ exp.procstep_as_dict(1, owner_id=1, step_process_id=2)])
+ # post make_type=full batch of Todos of both Processes in one order …,
+ self.post_exp_day([exp], {'make_type': 'full', 'new_todo': [1, 2]})
+ top_nodes: list[dict[str, Any]] = [{'todo': 1,
+ 'seen': 0,
+ 'children': [{'todo': 2,
+ 'seen': 0,
+ 'children': []}]}]
+ exp.force('top_nodes', top_nodes)
+ exp.lib_get('Todo', 1)['children'] = [2]
+ self.check_json_get(f'/day?date={date}', exp)
+ # … and then in the other, expecting same node tree / relations
+ exp.lib_del('Day', day_id)
+ date, day_id = date_and_day_id(2)
+ exp.set('day', day_id)
+ day_post = {'make_type': 'full', 'new_todo': [2, 1]}
+ self.post_exp_day([exp], day_post, day_id)
+ exp.lib_del('Todo', 1)
+ exp.lib_del('Todo', 2)
+ top_nodes[0]['todo'] = 3 # was: 1
+ top_nodes[0]['children'][0]['todo'] = 4 # was: 2
+ exp.lib_get('Todo', 3)['children'] = [4]
+ self.check_json_get(f'/day?date={date}', exp)
+
+ def test_POST_day_todo_deletion_by_negative_effort(self) -> None:
+ """Test POST /day removal of Todos by setting negative effort."""
+ date, day_id = date_and_day_id(1)
+ exp = ExpectedGetDay(day_id)
+ self.post_exp_process([exp], {}, 1)
+ self.post_exp_day([exp], {'new_todo': [1]})
+ # check cannot remove Todo if commented
+ self.post_exp_day([exp],
+ {'todo_id': [1], 'comment': ['foo'], 'effort': [-1]})
+ self.check_json_get(f'/day?date={date}', exp)
+ # check *can* remove Todo while getting done
+ self.post_exp_day([exp],
+ {'todo_id': [1], 'comment': [''], 'effort': [-1],
+ 'done': [1]})
+ exp.lib_del('Todo', 1)
+ self.check_json_get(f'/day?date={date}', exp)
+
+ def test_GET_day_with_conditions(self) -> None:
+ """Test GET /day displaying Conditions and their relations."""
+ date, day_id = date_and_day_id(1)
+ exp = ExpectedGetDay(day_id)
+ # check non-referenced Conditions not shown
+ cond_posts = [{'is_active': 0, 'title': 'A', 'description': 'a'},
+ {'is_active': 1, 'title': 'B', 'description': 'b'}]
+ for i, cond_post in enumerate(cond_posts):
+ self.check_post(cond_post, f'/condition?id={i+1}')
+ self.check_json_get(f'/day?date={date}', exp)
+ # add Processes with Conditions, check Conditions now shown
+ for i, (c1, c2) in enumerate([(1, 2), (2, 1)]):
+ post = {'conditions': [c1], 'disables': [c1],
+ 'blockers': [c2], 'enables': [c2]}
+ self.post_exp_process([exp], post, i+1)
+ for i, cond_post in enumerate(cond_posts):
+ exp.set_cond_from_post(i+1, cond_post)
+ self.check_json_get(f'/day?date={date}', exp)
+ # add Todos in relation to Conditions, check consequence relations
+ self.post_exp_day([exp], {'new_todo': [1, 2]})
+ self.check_json_get(f'/day?date={date}', exp)
+
+ def test_GET_calendar(self) -> None:
+ """Test GET /calendar responses based on various inputs, DB states."""
+ # check illegal date range delimiters
+ self.check_get('/calendar?start=foo', 400)
+ self.check_get('/calendar?end=foo', 400)
+ # check default range for expected selection/order without saved days
+ exp = ExpectedGetCalendar(-1, 366)
+ self.check_json_get('/calendar', exp)
+ self.check_json_get('/calendar?start=&end=', exp)
+ # check with named days as delimiters
+ exp = ExpectedGetCalendar(-1, +1)
+ self.check_json_get('/calendar?start=yesterday&end=tomorrow', exp)
+ # check zero-element range
+ exp = ExpectedGetCalendar(+1, 0)
+ self.check_json_get('/calendar?start=tomorrow&end=today', exp)
+ # check saved day shows up in results, proven by its comment
+ start_date = _testing_date_in_n_days(-5)
+ date = _testing_date_in_n_days(-2)
+ end_date = _testing_date_in_n_days(+5)
+ exp = ExpectedGetCalendar(-5, +5)
+ self.post_exp_day([exp],
+ {'day_comment': 'foo'}, _days_n_for_date(date))
+ url = f'/calendar?start={start_date}&end={end_date}'
+ self.check_json_get(url, exp)
--- /dev/null
+"""Miscellaneous tests."""
+from typing import Callable
+from unittest import TestCase
+from tests.utils import TestCaseWithServer
+from plomtask.http import InputsParser
+from plomtask.exceptions import BadFormatException
+
+
+class TestsSansServer(TestCase):
+ """Tests that do not require DB setup or a server."""
+
+ def _test_parser(self,
+ method: Callable,
+ serialized: str,
+ expected: object,
+ method_args: list[object],
+ fails: bool = False
+ ) -> None:
+ # pylint: disable=too-many-arguments
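+ """Run method on parser of serialized; expect result, or failure."""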
+ parser = InputsParser(serialized)
+ if fails:
+ with self.assertRaises(BadFormatException):
+ method(parser, *method_args)
+ else:
+ self.assertEqual(expected, method(parser, *method_args))
+
+ def test_InputsParser_get_str_or_fail(self) -> None:
+ """Test InputsParser.get_str."""
+ m = InputsParser.get_str_or_fail
+ self._test_parser(m, '', 0, ['foo'], fails=True)
+ self._test_parser(m, '', 'bar', ['foo', 'bar'])
+ self._test_parser(m, 'foo=', '', ['foo'])
+ self._test_parser(m, 'foo=', '', ['foo', 'bar'])
+ self._test_parser(m, 'foo=baz', 'baz', ['foo', 'bar'])
+ self._test_parser(m, 'foo=baz&foo=quux', 'baz', ['foo', 'bar'])
+ self._test_parser(m, 'foo=baz,quux', 'baz,quux', ['foo', 'bar'])
+
+ def test_InputsParser_get_str(self) -> None:
+ """Test InputsParser.get_str."""
+ m = InputsParser.get_str
+ self._test_parser(m, '', None, ['foo'])
+ self._test_parser(m, '', 'bar', ['foo', 'bar'])
+ self._test_parser(m, 'foo=', '', ['foo'])
+ self._test_parser(m, 'foo=', '', ['foo', 'bar'])
+ self._test_parser(m, 'foo=baz', 'baz', ['foo', 'bar'])
+ self._test_parser(m, 'foo=baz&foo=quux', 'baz', ['foo', 'bar'])
+ self._test_parser(m, 'foo=baz,quux', 'baz,quux', ['foo', 'bar'])
+
+ def test_InputsParser_get_all_of_key_prefixed(self) -> None:
+ """Test InputsParser.get_all_of_key_prefixed."""
+ m = InputsParser.get_all_of_key_prefixed
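+ # (note: result keys are the matching input keys with the prefix stripped)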
+ self._test_parser(m, '', {}, [''])
+ self._test_parser(m, '', {}, ['foo'])
+ self._test_parser(m, 'foo=bar', {'foo': ['bar']}, [''])
+ self._test_parser(m, 'x=y&x=z', {'': ['y', 'z']}, ['x'])
+ self._test_parser(m, 'xx=y&xx=Z', {'x': ['y', 'Z']}, ['x'])
+ self._test_parser(m, 'xx=y', {}, ['xxx'])
+ self._test_parser(m, 'xxx=x&xxy=y&xyy=z', {'x': ['x'], 'y': ['y']},
+ ['xx'])
+
+ def test_InputsParser_get_int_or_none(self) -> None:
+ """Test InputsParser.get_int_or_none."""
+ m = InputsParser.get_int_or_none
+ self._test_parser(m, '', None, ['foo'])
+ self._test_parser(m, 'foo=', None, ['foo'])
+ self._test_parser(m, 'foo=0', 0, ['foo'])
+ self._test_parser(m, 'foo=None', 0, ['foo'], fails=True)
+ self._test_parser(m, 'foo=0.1', 0, ['foo'], fails=True)
+ self._test_parser(m, 'foo=23', 23, ['foo'])
+
+ def test_InputsParser_get_float_or_fail(self) -> None:
+ """Test InputsParser.get_float_or_fail."""
+ m = InputsParser.get_float_or_fail
+ self._test_parser(m, '', 0, ['foo'], fails=True)
+ self._test_parser(m, 'foo=', 0, ['foo'], fails=True)
+ self._test_parser(m, 'foo=bar', 0, ['foo'], fails=True)
+ self._test_parser(m, 'foo=0', 0, ['foo'])
+ self._test_parser(m, 'foo=0.1', 0.1, ['foo'])
+ self._test_parser(m, 'foo=1.23&foo=456', 1.23, ['foo'])
+
+ def test_InputsParser_get_bool(self) -> None:
+ """Test InputsParser.get_bool."""
+ m = InputsParser.get_bool
+ self._test_parser(m, '', 0, ['foo'])
+ self._test_parser(m, 'val=foo', 0, ['foo'])
+ self._test_parser(m, 'val=True', 0, ['foo'])
+ self._test_parser(m, 'foo=', 0, ['foo'])
+ self._test_parser(m, 'foo=None', 0, ['foo'])
+ self._test_parser(m, 'foo=0', 0, ['foo'])
+ self._test_parser(m, 'foo=bar', 0, ['foo'])
+ self._test_parser(m, 'foo=bar&foo=baz', 0, ['foo'])
+ self._test_parser(m, 'foo=False', 0, ['foo'])
+ self._test_parser(m, 'foo=true', 1, ['foo'])
+ self._test_parser(m, 'foo=True', 1, ['foo'])
+ self._test_parser(m, 'foo=1', 1, ['foo'])
+ self._test_parser(m, 'foo=on', 1, ['foo'])
+
+ def test_InputsParser_get_all_str(self) -> None:
+ """Test InputsParser.get_all_str."""
+ m = InputsParser.get_all_str
+ self._test_parser(m, '', [], ['foo'])
+ self._test_parser(m, 'foo=', [''], ['foo'])
+ self._test_parser(m, 'foo=bar', ['bar'], ['foo'])
+ self._test_parser(m, 'foo=bar&foo=baz', ['bar', 'baz'], ['foo'])
+
+ def test_InputsParser_get_all_int(self) -> None:
+ """Test InputsParser.get_all_int."""
+ m = InputsParser.get_all_int
+ self._test_parser(m, '', [], ['foo'])
+ self._test_parser(m, 'foo=', [], ['foo'])
+ self._test_parser(m, 'foo=', 0, ['foo', True], fails=True)
+ self._test_parser(m, 'foo=0', [0], ['foo'])
+ self._test_parser(m, 'foo=0&foo=17', [0, 17], ['foo'])
+ self._test_parser(m, 'foo=0.1&foo=17', 0, ['foo'], fails=True)
+ self._test_parser(m, 'foo=None&foo=17', 0, ['foo'], fails=True)
+
+
+class TestsWithServer(TestCaseWithServer):
+ """Tests against our HTTP server/handler (and database)."""
+
+ def test_do_GET(self) -> None:
+ """Test GET / redirect, and unknown targets failing."""
+ self.conn.request('GET', '/')
+ self.check_redirect('/day')
+ self.check_get('/foo', 404)
+
+ def test_do_POST(self) -> None:
+ """Test POST to / and other unknown targets failing."""
+ self.check_post({}, '/', 404)
+ self.check_post({}, '/foo', 404)
--- /dev/null
+"""Test Processes module."""
+from typing import Any
+from tests.utils import (TestCaseSansDB, TestCaseWithDB, TestCaseWithServer,
+ Expected)
+from plomtask.processes import Process, ProcessStep
+from plomtask.exceptions import NotFoundException
+
+
+class TestsSansDB(TestCaseSansDB):
+ """Module tests not requiring DB setup."""
+ checked_class = Process
+
+
+class TestsSansDBProcessStep(TestCaseSansDB):
+ """Module tests not requiring DB setup."""
+ checked_class = ProcessStep
+ default_init_kwargs = {'owner_id': 2, 'step_process_id': 3,
+ 'parent_step_id': 4}
+
+
+class TestsWithDB(TestCaseWithDB):
+ """Module tests requiring DB setup."""
+ checked_class = Process
+
+ def test_remove(self) -> None:
+ """Test removal of Processes and ProcessSteps."""
+ super().test_remove()
+ p1, p2, p3 = Process(None), Process(None), Process(None)
+ for p in [p1, p2, p3]:
+ p.save(self.db_conn)
+ assert isinstance(p1.id_, int)
+ assert isinstance(p2.id_, int)
+ assert isinstance(p3.id_, int)
+ step = ProcessStep(None, p2.id_, p1.id_, None)
+ p2.set_steps(self.db_conn, [step])
+ step_id = step.id_
+ p2.set_steps(self.db_conn, [])
+ with self.assertRaises(NotFoundException):
+ # check unset ProcessSteps actually cannot be found anymore
+ assert step_id is not None
+ ProcessStep.by_id(self.db_conn, step_id)
+ p1.remove(self.db_conn)
+ step = ProcessStep(None, p2.id_, p3.id_, None)
+ p2.set_steps(self.db_conn, [step])
+ step_id = step.id_
+ # check _can_ remove Process pointed to by ProcessStep.owner_id, and …
+ p2.remove(self.db_conn)
+ with self.assertRaises(NotFoundException):
+ # … being dis-owned eliminates ProcessStep
+ assert step_id is not None
+ ProcessStep.by_id(self.db_conn, step_id)
+
+
+class TestsWithDBForProcessStep(TestCaseWithDB):
+ """Module tests requiring DB setup."""
+ checked_class = ProcessStep
+ default_init_kwargs = {'owner_id': 1, 'step_process_id': 2,
+ 'parent_step_id': 3}
+
+ def setUp(self) -> None:
+ super().setUp()
+ self.p1 = Process(1)
+ self.p1.save(self.db_conn)
+
+ def test_remove(self) -> None:
+ """Test .remove and unsetting of owner's .explicit_steps entry."""
+ p2 = Process(2)
+ p2.save(self.db_conn)
+ assert isinstance(self.p1.id_, int)
+ assert isinstance(p2.id_, int)
+ step = ProcessStep(None, self.p1.id_, p2.id_, None)
+ self.p1.set_steps(self.db_conn, [step])
+ step.remove(self.db_conn)
+ self.assertEqual(self.p1.explicit_steps, [])
+ self.check_identity_with_cache_and_db([])
+
+
+class ExpectedGetProcess(Expected):
+ """Builder of expectations for GET /processes."""
+ _default_dict = {'is_new': False, 'preset_top_step': None, 'n_todos': 0}
+ _on_empty_make_temp = ('Process', 'proc_as_dict')
+
+ def __init__(self,
+ proc_id: int,
+ *args: Any, **kwargs: Any) -> None:
+ self._fields = {'process': proc_id, 'steps': []}
+ super().__init__(*args, **kwargs)
+
+ @staticmethod
+ def stepnode_as_dict(step_id: int,
+ proc_id: int,
+ seen: bool = False,
+ steps: None | list[dict[str, object]] = None,
+ is_explicit: bool = True,
+ is_suppressed: bool = False) -> dict[str, object]:
+ # pylint: disable=too-many-arguments
+ """Return JSON of ProcessStepNode to expect."""
+ return {'step': step_id,
+ 'process': proc_id,
+ 'seen': seen,
+ 'steps': steps if steps else [],
+ 'is_explicit': is_explicit,
+ 'is_suppressed': is_suppressed}
+
+ def recalc(self) -> None:
+ """Update internal dictionary by subclass-specific rules."""
+ super().recalc()
+ self._fields['process_candidates'] = self.as_ids(
+ self.lib_all('Process'))
+ self._fields['condition_candidates'] = self.as_ids(
+ self.lib_all('Condition'))
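+ # 'owners': all Processes having this Process as one of their steps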
+ self._fields['owners'] = [
+ s['owner_id'] for s in self.lib_all('ProcessStep')
+ if s['step_process_id'] == self._fields['process']]
+
+
+class ExpectedGetProcesses(Expected):
+ """Builder of expectations for GET /processes."""
+ _default_dict = {'sort_by': 'title', 'pattern': ''}
+
+ def recalc(self) -> None:
+ """Update internal dictionary by subclass-specific rules."""
+ super().recalc()
+ self._fields['processes'] = self.as_ids(self.lib_all('Process'))
+
+
+class TestsWithServer(TestCaseWithServer):
+ """Module tests against our HTTP server/handler (and database)."""
+ checked_class = Process
+
+ def test_fail_POST_process(self) -> None:
+ """Test POST /process and its effect on the database."""
+ valid_post = {'title': '', 'description': '', 'effort': 1.0}
+ # check payloads lacking minimum expected fields
+ self.check_minimal_inputs('/process', valid_post)
+ # check payloads of bad data types
+ self.check_post(valid_post | {'effort': ''}, '/process', 400)
+ # check references to non-existent items
+ self.check_post(valid_post | {'conditions': [1]}, '/process', 404)
+ self.check_post(valid_post | {'disables': [1]}, '/process', 404)
+ self.check_post(valid_post | {'blockers': [1]}, '/process', 404)
+ self.check_post(valid_post | {'enables': [1]}, '/process', 404)
+ self.check_post(valid_post | {'new_top_step': 2}, '/process', 404)
+ # check deletion of non-existent Process
+ self.check_post({'delete': ''}, '/process?id=1', 404)
+
+ def test_basic_POST_process(self) -> None:
+ """Test basic GET/POST /process operations."""
+ # check on un-saved
+ exp = ExpectedGetProcess(1)
+ exp.force('process_candidates', [])
+ exp.set('is_new', True)
+ self.check_json_get('/process?id=1', exp)
+ # check on minimal payload post
+ exp = ExpectedGetProcess(1)
+ self.post_exp_process([exp], {}, 1)
+ self.check_json_get('/process?id=1', exp)
+ # check boolean 'calendarize'
+ self.post_exp_process([exp], {'calendarize': True}, 1)
+ self.check_json_get('/process?id=1', exp)
+ self.post_exp_process([exp], {}, 1)
+ self.check_json_get('/process?id=1', exp)
+ # check conditions posting
+ for i in range(3):
+ self.post_exp_cond([exp], {}, i+1)
+ p = {'conditions': [1, 2], 'disables': [1],
+ 'blockers': [3], 'enables': [2, 3]}
+ self.post_exp_process([exp], p, 1)
+ self.check_json_get('/process?id=1', exp)
+ # check n_todos field
+ self.post_exp_day([], {'new_todo': ['1']}, 1)
+ self.post_exp_day([], {'new_todo': ['1']}, 2)
+ exp.set('n_todos', 2)
+ self.check_json_get('/process?id=1', exp)
+ # check cannot delete Process while Todos reference it
+ self.check_post({'delete': ''}, '/process?id=1', 500)
+ # check cannot delete Process that is some ProcessStep's .step_process_id
+ self.post_exp_process([exp], {}, 2)
+ self.post_exp_process([exp], {'new_top_step': 2}, 3)
+ self.check_post({'delete': ''}, '/process?id=2', 500)
+ # check successful deletion
+ self.post_exp_process([exp], {}, 4)
+ self.check_post({'delete': ''}, '/process?id=4', 302, '/processes')
+ exp = ExpectedGetProcess(4)
+ exp.set('is_new', True)
+ for i in range(3):
+ self.post_exp_cond([exp], {}, i+1)
+ self.post_exp_process([exp], {}, i+1)
+ exp.force('process_candidates', [1, 2, 3])
+ self.check_json_get('/process?id=4', exp)
+
+ def test_POST_process_steps(self) -> None:
+ """Test behavior of ProcessStep posting."""
+ # pylint: disable=too-many-statements
+ url = '/process?id=1'
+ exp = ExpectedGetProcess(1)
+ self.post_exp_process([exp], {}, 1)
+ # post first (top-level) step of proc2 to proc1 by 'step_of' in 2
+ self.post_exp_process([exp], {'step_of': 1}, 2)
+ exp.lib_set('ProcessStep',
+ [exp.procstep_as_dict(1, owner_id=1, step_process_id=2)])
+ exp.set('steps', [
+ exp.stepnode_as_dict(
+ step_id=1,
+ proc_id=2)])
+ self.check_json_get(url, exp)
+ # post empty/absent steps list to process, expect clean slate, and old
+ # step to completely disappear
+ self.post_exp_process([exp], {}, 1)
+ exp.lib_wipe('ProcessStep')
+ exp.set('steps', [])
+ self.check_json_get(url, exp)
+ # post anew (as only step yet) step of proc2 to proc1 by 'new_top_step'
+ self.post_exp_process([exp], {'new_top_step': 2}, 1)
+ exp.lib_set('ProcessStep',
+ [exp.procstep_as_dict(1, owner_id=1, step_process_id=2)])
+ self.post_exp_process([exp], {'kept_steps': [1]}, 1)
+ step_nodes = [exp.stepnode_as_dict(step_id=1, proc_id=2)]
+ exp.set('steps', step_nodes)
+ self.check_json_get(url, exp)
+ # fail on zero-step recursion
+ p_min = {'title': '', 'description': '', 'effort': 0}
+ self.check_post(p_min | {'new_top_step': 1}, url, 400)
+ self.check_post(p_min | {'step_of': 1}, url, 400)
+ # post sibling steps
+ self.post_exp_process([exp], {}, 3)
+ self.post_exp_process([exp], {'kept_steps': [1], 'new_top_step': 3}, 1)
+ exp.lib_set('ProcessStep',
+ [exp.procstep_as_dict(2, owner_id=1, step_process_id=3)])
+ step_nodes += [exp.stepnode_as_dict(step_id=2, proc_id=3)]
+ self.check_json_get(url, exp)
+ # post implicit sub-step via post to proc2
+ self.post_exp_process([exp], {}, 4)
+ self.post_exp_process([exp], {'step_of': [1], 'new_top_step': 4}, 2)
+ exp.lib_set('ProcessStep',
+ [exp.procstep_as_dict(3, owner_id=2, step_process_id=4)])
+ step_nodes[0]['steps'] = [
+ exp.stepnode_as_dict(step_id=3, proc_id=4, is_explicit=False)]
+ self.check_json_get(url, exp)
+ # post explicit sub-step via post to proc1
+ p = {'kept_steps': [1, 2], 'new_step_to_2': 4}
+ self.post_exp_process([exp], p, 1)
+ exp.lib_set('ProcessStep', [exp.procstep_as_dict(
+ 4, owner_id=1, step_process_id=4, parent_step_id=2)])
+ step_nodes[1]['steps'] = [
+ exp.stepnode_as_dict(step_id=4, proc_id=4)]
+ self.check_json_get(url, exp)
+ # to ensure suppressed step nodes are hidden, add new step to proc4,
+ # implicitly adding it as sub-step to the proc4 steps in proc1, but
+ # suppress one of the proc4 occurrences there, marking its
+ # .is_suppressed *and* hiding the new step below it
+ p = {'kept_steps': [1, 2, 4], 'suppressed_steps': [3]}
+ self.post_exp_process([exp], {'step_of': [4]}, 5)
+ self.post_exp_process([exp], p, 1)
+ exp.lib_set('ProcessStep',
+ [exp.procstep_as_dict(5, owner_id=4, step_process_id=5)])
+ assert isinstance(step_nodes[0]['steps'], list)
+ assert isinstance(step_nodes[1]['steps'], list)
+ step_nodes[0]['steps'][0]['is_suppressed'] = True
+ step_nodes[1]['steps'][0]['steps'] = [
+ exp.stepnode_as_dict(step_id=5, proc_id=5, is_explicit=False)]
+ self.check_json_get(url, exp)
+ # ensure implicit steps' non-top explicit sub-steps are shown
+ self.post_exp_process([exp], {}, 6)
+ self.post_exp_process([exp], {'kept_steps': [5], 'step_of': [1, 2],
+ 'new_step_to_5': 6}, 4)
+ exp.lib_set('ProcessStep', [exp.procstep_as_dict(
+ 6, owner_id=4, parent_step_id=5, step_process_id=6)])
+ step_nodes[1]['steps'][0]['steps'][0]['steps'] = [
+ exp.stepnode_as_dict(step_id=6, proc_id=6, is_explicit=False)]
+ self.check_json_get(url, exp)
+ # try to post sub-step to non-existing sub-step, expect it to become
+ # top-level step instead
+ p['new_step_to_9'] = 5
+ self.post_exp_process([exp], p, 1)
+ exp.lib_set('ProcessStep',
+ [exp.procstep_as_dict(7, owner_id=1, step_process_id=5)])
+ step_nodes += [
+ exp.stepnode_as_dict(step_id=7, proc_id=5)]
+ self.check_json_get(url, exp)
+ del p['new_step_to_9']
+ assert isinstance(p['kept_steps'], list)
+ p['kept_steps'] += [7]
+ # try to post sub-step to implicit sub-step, expect same result
+ p['new_step_to_5'] = 5
+ self.post_exp_process([exp], p, 1)
+ exp.lib_set('ProcessStep',
+ [exp.procstep_as_dict(8, owner_id=1, step_process_id=5)])
+ step_nodes += [
+ exp.stepnode_as_dict(step_id=8, proc_id=5)]
+ self.check_json_get(url, exp)
+ del p['new_step_to_5']
+ p['kept_steps'] += [8]
+ # post sub-step to explicit sub-step with implicit sub-step of same
+ # step process ID, expect it to eliminate/replace implicit sub-step
+ p['new_step_to_4'] = 5
+ self.post_exp_process([exp], p, 1)
+ step_nodes[1]['steps'][0]['steps'][0] = exp.stepnode_as_dict(
+ step_id=9, proc_id=5)
+ exp.lib_set('ProcessStep', [exp.procstep_as_dict(
+ 9, owner_id=1, parent_step_id=4, step_process_id=5)])
+ self.check_json_get(url, exp)
+ del p['new_step_to_4']
+ p['kept_steps'] += [9]
+ # fail on single-step recursion via top step
+ self.post_exp_process([exp], {}, 7)
+ self.post_exp_process([exp], {'new_top_step': 1}, 7)
+ exp.lib_set('ProcessStep', [exp.procstep_as_dict(
+ 10, owner_id=7, step_process_id=1)])
+ p['step_of'] = [7]
+ self.check_post(p_min | p | {'new_top_step': 7}, url, 400)
+ # fail on double-step recursion via top step
+ self.post_exp_process([exp], {}, 8)
+ self.post_exp_process([exp], {'new_top_step': 7}, 8)
+ exp.lib_set('ProcessStep', [exp.procstep_as_dict(
+ 11, owner_id=8, step_process_id=7)])
+ self.check_post(p_min | p | {'new_top_step': 8}, url, 400)
+ # fail on single- and double-step recursion via explicit sub-step
+ self.check_post(p_min | p | {'new_step_to_8': 7}, url, 400)
+ self.check_post(p_min | p | {'new_step_to_8': 8}, url, 400)
+
+ def test_fail_GET_process(self) -> None:
+ """Test invalid GET /process params."""
+ # check for invalid IDs
+ self.check_get_defaults('/process')
+ # check we catch invalid base64
+ self.check_get('/process?title_b64=foo', 400)
+ # check failure on references to unknown processes; we create Process
+ # of ID=1 here so we know the 404 comes from step_to=2 etc. (params
+ # that tie the Process displayed by /process to others), not from
+ # failing to find the main Process itself
+ self.post_exp_process([], {}, 1)
+ self.check_get('/process?id=1&step_to=2', 404)
+ self.check_get('/process?id=1&has_step=2', 404)
+
+ def test_GET_processes(self) -> None:
+ """Test GET /processes."""
+ # pylint: disable=too-many-statements
+ # test empty result on empty DB, default-settings on empty params
+ exp = ExpectedGetProcesses()
+ self.check_json_get('/processes', exp)
+ # test meaningless non-empty params (incl. an entirely unused key):
+ # 'sort_by' defaults to 'title' (even if set to something else, as
+ # long as there is no handler for it), and 'pattern' gets preserved
+ exp.set('pattern', 'bar')
+ url = '/processes?sort_by=foo&pattern=bar&foo=x'
+ self.check_json_get(url, exp)
+ # test non-empty result, automatic (positive) sorting by title
+ for i, t in enumerate([('foo', 'oof', 1.0, []),
+ ('bar', 'rab', 1.1, [1]),
+ ('baz', 'zab', 0.9, [1, 2])]):
+ payload = {'title': t[0], 'description': t[1], 'effort': t[2],
+ 'new_top_step': t[3]}
+ self.post_exp_process([exp], payload, i+1)
+ exp.lib_set('ProcessStep',
+ [exp.procstep_as_dict(1, owner_id=2, step_process_id=1),
+ exp.procstep_as_dict(2, owner_id=3, step_process_id=1),
+ exp.procstep_as_dict(3, owner_id=3, step_process_id=2)])
+ exp.set('pattern', '')
+ self.check_filter(exp, 'processes', 'sort_by', 'title', [2, 3, 1])
+ # test other sortings
+ self.check_filter(exp, 'processes', 'sort_by', '-title', [1, 3, 2])
+ self.check_filter(exp, 'processes', 'sort_by', 'effort', [3, 1, 2])
+ self.check_filter(exp, 'processes', 'sort_by', '-effort', [2, 1, 3])
+ self.check_filter(exp, 'processes', 'sort_by', 'steps', [1, 2, 3])
+ self.check_filter(exp, 'processes', 'sort_by', '-steps', [3, 2, 1])
+ self.check_filter(exp, 'processes', 'sort_by', 'owners', [3, 2, 1])
+ self.check_filter(exp, 'processes', 'sort_by', '-owners', [1, 2, 3])
+ # test pattern matching on title
+ exp.set('sort_by', 'title')
+ exp.lib_del('Process', 1)
+ self.check_filter(exp, 'processes', 'pattern', 'ba', [2, 3])
+ # test pattern matching on description
+ exp.lib_wipe('Process')
+ exp.lib_wipe('ProcessStep')
+ self.post_exp_process([exp], {'description': 'oof', 'effort': 1.0}, 1)
+ self.check_filter(exp, 'processes', 'pattern', 'of', [1])
--- /dev/null
+"""Test Todos module."""
+from typing import Any
+from datetime import date as dt_date, timedelta
+from tests.utils import (TestCaseSansDB, TestCaseWithDB, TestCaseWithServer,
+ Expected, date_and_day_id)
+from plomtask.todos import Todo
+from plomtask.processes import Process
+from plomtask.exceptions import BadFormatException, HandledException
+
+
+class TestsWithDB(TestCaseWithDB, TestCaseSansDB):
+ """Tests requiring DB, but not server setup.
+
+ NB: We subclass TestCaseSansDB too, to also run its tests here; they
+ would not run without a DB, since any Todo requires a _saved_ Process.
+ """
+ checked_class = Todo
+ default_init_kwargs = {'process': None, 'is_done': False, 'day_id': 1}
+
+ def setUp(self) -> None:
+ super().setUp()
+ self.proc = Process(None)
+ self.proc.save(self.db_conn)
+ self.default_init_kwargs['process'] = self.proc
+
+ def test_Todo_by_date(self) -> None:
+ """Test findability of Todos by date."""
+ date_1, day_id_1 = date_and_day_id(1)
+ date_2, _ = date_and_day_id(2)
+ t1 = Todo(None, self.proc, False, day_id_1)
+ t1.save(self.db_conn)
+ t2 = Todo(None, self.proc, False, day_id_1)
+ t2.save(self.db_conn)
+ self.assertEqual(Todo.by_date(self.db_conn, date_1), [t1, t2])
+ self.assertEqual(Todo.by_date(self.db_conn, date_2), [])
+ with self.assertRaises(BadFormatException):
+ self.assertEqual(Todo.by_date(self.db_conn, 'foo'), [])
+
+ def test_Todo_by_date_range_with_limits(self) -> None:
+ """Test .by_date_range_with_limits."""
+ # pylint: disable=too-many-locals
+ f = Todo.by_date_range_with_limits
+ # check illegal ranges
+ legal_range = ('yesterday', 'tomorrow')
+ for i in [0, 1]:
+ for bad_date in ['foo', '2024-02-30', '2024-01-01 12:00:00']:
+ date_range_l = list(legal_range[:])
+ date_range_l[i] = bad_date
+ with self.assertRaises(HandledException):
+ f(self.db_conn, (date_range_l[0], date_range_l[1]))
+ # check empty result, and translation of 'yesterday' and 'tomorrow'
+ items, start, end = f(self.db_conn, legal_range)
+ self.assertEqual(items, [])
+ dt_today = dt_date.today()
+ dt_yesterday = dt_today + timedelta(days=-1)
+ dt_tomorrow = dt_today + timedelta(days=+1)
+ self.assertEqual(start, dt_yesterday.isoformat())
+ self.assertEqual(end, dt_tomorrow.isoformat())
+ # prepare dated items for non-empty results
+ kwargs = self.default_init_kwargs.copy()
+ todos = []
+ dates_and_day_ids = [date_and_day_id(i) for i in range(5)]
+ for day_id in [t[1] for t in dates_and_day_ids[1:-1]]:
+ kwargs['day_id'] = day_id
+ todos += [Todo(None, **kwargs)]
+ # check ranges still empty before saving
+ date_range = (dates_and_day_ids[1][0], dates_and_day_ids[-2][0])
+ self.assertEqual(f(self.db_conn, date_range)[0], [])
+ # check all objs displayed within interval
+ for todo in todos:
+ todo.save(self.db_conn)
+ self.assertEqual(f(self.db_conn, date_range)[0], todos)
+ # check that only what exists within the interval is displayed
+ date_range = (dates_and_day_ids[1][0], dates_and_day_ids[-3][0])
+ expected = [todos[0], todos[1]]
+ self.assertEqual(f(self.db_conn, date_range)[0], expected)
+ date_range = (dates_and_day_ids[-2][0], dates_and_day_ids[-1][0])
+ expected = [todos[2]]
+ self.assertEqual(f(self.db_conn, date_range)[0], expected)
+ # check that inverted interval displays nothing
+ date_range = (dates_and_day_ids[-1][0], dates_and_day_ids[0][0])
+ self.assertEqual(f(self.db_conn, date_range)[0], [])
+ # check that "today" is interpreted, and single-element interval
+ kwargs['day_id'] = (dt_today - dt_date(2000, 1, 1)).days
+ todo_today = Todo(None, **kwargs)
+ todo_today.save(self.db_conn)
+ date_range = ('today', 'today')
+ items, start, end = f(self.db_conn, date_range)
+ self.assertEqual(start, dt_today.isoformat())
+ self.assertEqual(start, end)
+ self.assertEqual(items, [todo_today])
+
+ def test_Todo_children(self) -> None:
+ """Test Todo.children relations."""
+ todo_1 = Todo(None, self.proc, False, 1)
+ todo_2 = Todo(None, self.proc, False, 1)
+ todo_2.save(self.db_conn)
+ # check un-saved Todo cannot parent
+ with self.assertRaises(HandledException):
+ todo_1.add_child(todo_2)
+ todo_1.save(self.db_conn)
+ todo_3 = Todo(None, self.proc, False, 1)
+ # check un-saved Todo cannot be parented
+ with self.assertRaises(HandledException):
+ todo_1.add_child(todo_3)
+
+
+class ExpectedGetTodo(Expected):
+ """Builder of expectations for GET /todo."""
+
+ def __init__(self,
+ todo_id: int,
+ *args: Any, **kwargs: Any) -> None:
+ self._fields = {'todo': todo_id,
+ 'steps_todo_to_process': []}
+ super().__init__(*args, **kwargs)
+
+ def recalc(self) -> None:
+ """Update internal dictionary by subclass-specific rules."""
+
+ def walk_steps(step: dict[str, Any]) -> None:
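+ # for step nodes not filled by a Todo, collect other Todos of
+ # the node's Process as its adoption candidates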
+ if not step['todo']:
+ proc_id = step['process']
+ cands = self.as_ids(
+ [t for t in todos if proc_id == t['process_id']
+ and t['id'] in self._fields['todo_candidates']])
+ self._fields['adoption_candidates_for'][str(proc_id)] = cands
+ for child in step['children']:
+ walk_steps(child)
+
+ super().recalc()
+ self.lib_wipe('Day')
+ todos = self.lib_all('Todo')
+ procs = self.lib_all('Process')
+ conds = self.lib_all('Condition')
+ self._fields['todo_candidates'] = self.as_ids(
+ [t for t in todos if t['id'] != self._fields['todo']])
+ self._fields['process_candidates'] = self.as_ids(procs)
+ self._fields['condition_candidates'] = self.as_ids(conds)
+ self._fields['adoption_candidates_for'] = {}
+ for step in self._fields['steps_todo_to_process']:
+ walk_steps(step)
+
+ @staticmethod
+ def step_as_dict(node_id: int,
+ process: int | None = None,
+ todo: int | None = None,
+ fillable: bool = False,
+ children: None | list[dict[str, object]] = None
+ ) -> dict[str, object]:
+ """Return JSON of TodoOrProcStepsNode to expect."""
+ return {'node_id': node_id,
+ 'children': children if children is not None else [],
+ 'process': process,
+ 'fillable': fillable,
+ 'todo': todo}
+
+
+class TestsWithServer(TestCaseWithServer):
+ """Tests against our HTTP server/handler (and database)."""
+ checked_class = Todo
+
+ def test_basic_fail_POST_todo(self) -> None:
+ """Test basic malformed/illegal POST /todo requests."""
+ self.post_exp_process([], {}, 1)
+ # test we cannot just POST into non-existing Todo
+ self.check_post({}, '/todo', 404)
+ self.check_post({}, '/todo?id=FOO', 400)
+ self.check_post({}, '/todo?id=0', 400)
+ self.check_post({}, '/todo?id=1', 404)
+ # test malformed values on existing Todo
+ self.post_exp_day([], {'new_todo': [1]})
+ for name in ['adopt', 'effort', 'make_full', 'make_empty',
+ 'conditions', 'disables', 'blockers', 'enables']:
+ self.check_post({name: 'x'}, '/todo?id=1', 400, '/todo')
+ for prefix in ['make_', '']:
+ for suffix in ['', 'x', '1.1']:
+ self.check_post({'step_filler_to_1': [f'{prefix}{suffix}']},
+ '/todo?id=1', 400, '/todo')
+ for suffix in ['', 'x', '1.1']:
+ self.check_post({f'step_filler_to_{suffix}': ['1']},
+ '/todo?id=1', 400, '/todo')
+
+ def test_basic_POST_todo(self) -> None:
+ """Test basic POST /todo manipulations."""
+ exp = ExpectedGetTodo(1)
+ self.post_exp_process([exp], {'calendarize': 0}, 1)
+ self.post_exp_day([exp], {'new_todo': [1]})
+ # test posting empty payload at first changes nothing
+ self.check_json_get('/todo?id=1', exp)
+ self.check_post({}, '/todo?id=1')
+ self.check_json_get('/todo?id=1', exp)
+ # test posting doneness, comment, calendarization, effort
+ todo_post = {'is_done': 1, 'calendarize': 1,
+ 'comment': 'foo', 'effort': 2.3}
+ self.post_exp_todo([exp], todo_post, 1)
+ self.check_json_get('/todo?id=1', exp)
+ # test implicitly un-setting comment/calendarize/is_done by empty post
+ self.post_exp_todo([exp], {}, 1)
+ self.check_json_get('/todo?id=1', exp)
+ # test effort post can be explicitly unset by "effort":"" post
+ self.check_post({'effort': ''}, '/todo?id=1')
+ exp.lib_get('Todo', 1)['effort'] = None
+ self.check_json_get('/todo?id=1', exp)
+ # test Condition posts
+ c1_post = {'title': 'foo', 'description': 'oof', 'is_active': 0}
+ c2_post = {'title': 'bar', 'description': 'rab', 'is_active': 1}
+ self.post_exp_cond([exp], c1_post, 1)
+ self.post_exp_cond([exp], c2_post, 2)
+ self.check_json_get('/todo?id=1', exp)
+ todo_post = {'conditions': [1], 'disables': [1],
+ 'blockers': [2], 'enables': [2]}
+ self.post_exp_todo([exp], todo_post, 1)
+ self.check_json_get('/todo?id=1', exp)
+
+ def test_POST_todo_deletion(self) -> None:
+ """Test deletions via POST /todo."""
+ exp = ExpectedGetTodo(1)
+ self.post_exp_process([exp], {}, 1)
+ # test failure of deletion on non-existing Todo
+ self.check_post({'delete': ''}, '/todo?id=2', 404, '/')
+ # test deletion of existing Todo
+ self.post_exp_day([exp], {'new_todo': [1]})
+ self.check_post({'delete': ''}, '/todo?id=1', 302, '/')
+ self.check_get('/todo?id=1', 404)
+ exp.lib_del('Todo', 1)
+ # test deletion of adopted Todo
+ self.post_exp_day([exp], {'new_todo': [1]})
+ self.post_exp_day([exp], {'new_todo': [1]})
+ self.check_post({'adopt': 2}, '/todo?id=1')
+ self.check_post({'delete': ''}, '/todo?id=2', 302, '/')
+ exp.lib_del('Todo', 2)
+ self.check_get('/todo?id=2', 404)
+ self.check_json_get('/todo?id=1', exp)
+ # test deletion of adopting Todo
+ self.post_exp_day([exp], {'new_todo': [1]})
+ self.check_post({'adopt': 2}, '/todo?id=1')
+ self.check_post({'delete': ''}, '/todo?id=1', 302, '/')
+ exp.set('todo', 2)
+ exp.lib_del('Todo', 1)
+ self.check_json_get('/todo?id=2', exp)
+ # test cannot delete Todo with comment or effort
+ self.check_post({'comment': 'foo'}, '/todo?id=2')
+ self.check_post({'delete': ''}, '/todo?id=2', 500, '/')
+ self.check_post({'effort': 5}, '/todo?id=2')
+ self.check_post({'delete': ''}, '/todo?id=2', 500, '/')
+ # test deletion via effort < 0, but only if deletable
+ self.check_post({'effort': -1, 'comment': 'foo'}, '/todo?id=2')
+ self.check_post({}, '/todo?id=2')
+ self.check_get('/todo?id=2', 404)
+
+ def test_POST_todo_adoption(self) -> None:
+ """Test adoption via POST /todo with "adopt"."""
+ # post two Todos to Day, have first adopt second
+ exp = ExpectedGetTodo(1)
+ self.post_exp_process([exp], {}, 1)
+ self.post_exp_day([exp], {'new_todo': [1]})
+ self.post_exp_day([exp], {'new_todo': [1]})
+ self.post_exp_todo([exp], {'adopt': 2}, 1)
+ exp.set('steps_todo_to_process', [
+ exp.step_as_dict(node_id=1, process=None, todo=2)])
+ self.check_json_get('/todo?id=1', exp)
+ # test Todo un-adopting by just not sending an adopt
+ self.post_exp_todo([exp], {}, 1)
+ exp.set('steps_todo_to_process', [])
+ self.check_json_get('/todo?id=1', exp)
+ # test fail on trying to adopt non-existing Todo
+ self.check_post({'adopt': 3}, '/todo?id=1', 404)
+ # test cannot self-adopt
+ self.check_post({'adopt': 1}, '/todo?id=1', 400)
+ # test cannot do 1-step circular adoption
+ self.post_exp_todo([exp], {'adopt': 1}, 2)
+ self.check_post({'adopt': 2}, '/todo?id=1', 400)
+ # test cannot do 2-step circular adoption
+ self.post_exp_day([exp], {'new_todo': [1]})
+ self.post_exp_todo([exp], {'adopt': 2}, 3)
+ self.check_post({'adopt': 3}, '/todo?id=1', 400)
+ # test can adopt Todo into ProcessStep chain via its Process (with key
+ # 'step_filler' equivalent to single-element 'adopt' if intable)
+ self.post_exp_process([exp], {}, 2)
+ self.post_exp_process([exp], {}, 3)
+ self.post_exp_process([exp], {'new_top_step': [2, 3]}, 1)
+ exp.lib_set('ProcessStep',
+ [exp.procstep_as_dict(1, owner_id=1, step_process_id=2),
+ exp.procstep_as_dict(2, owner_id=1, step_process_id=3)])
+ slots = [
+ exp.step_as_dict(node_id=1, process=2, todo=None, fillable=True),
+ exp.step_as_dict(node_id=2, process=3, todo=None, fillable=True)]
+ exp.set('steps_todo_to_process', slots)
+ self.post_exp_day([exp], {'new_todo': [2]})
+ self.post_exp_day([exp], {'new_todo': [3]})
+ self.check_json_get('/todo?id=1', exp)
+ self.post_exp_todo([exp], {'step_filler_to_1': 5, 'adopt': [4]}, 1)
+ exp.lib_get('Todo', 1)['children'] += [5]
+ slots[0]['todo'] = 4
+ slots[1]['todo'] = 5
+ self.check_json_get('/todo?id=1', exp)
+ # test 'ignore' values for 'step_filler' are ignored, and intable
+ # 'step_filler' values are interchangeable with those of 'adopt'
+ todo_post = {'adopt': 5, 'step_filler_to_1': ['ignore', 4]}
+ self.check_post(todo_post, '/todo?id=1')
+ self.check_json_get('/todo?id=1', exp)
+ # test cannot adopt into non-top-level elements of chain; instead,
+ # new top-level steps are created when adopting Todos of the
+ # respective Process
+ self.post_exp_process([exp], {}, 4)
+ self.post_exp_process([exp], {'new_top_step': 4, 'step_of': [1]}, 3)
+ exp.lib_set('ProcessStep',
+ [exp.procstep_as_dict(3, owner_id=3, step_process_id=4)])
+ slots[1]['children'] = [exp.step_as_dict(
+ node_id=3, process=4, todo=None, fillable=True)]
+ self.post_exp_day([exp], {'new_todo': [4]})
+ self.post_exp_todo([exp], {'adopt': [4, 5, 6]}, 1)
+ slots += [exp.step_as_dict(
+ node_id=4, process=None, todo=6, fillable=False)]
+ self.check_json_get('/todo?id=1', exp)
+
+ def test_POST_todo_make_empty(self) -> None:
+ """Test creation via POST /todo "step_filler_to"/"make"."""
+ # create chain of Processes
+ exp = ExpectedGetTodo(1)
+ self.post_exp_process([exp], {}, 1)
+ for i in range(1, 4):
+ self.post_exp_process([exp], {'new_top_step': i}, i+1)
+ exp.lib_set('ProcessStep',
+ [exp.procstep_as_dict(1, owner_id=2, step_process_id=1),
+ exp.procstep_as_dict(2, owner_id=3, step_process_id=2),
+ exp.procstep_as_dict(3, owner_id=4, step_process_id=3)])
+ # post (childless) Todo of chain end, then make empty on next in line
+ self.post_exp_day([exp], {'new_todo': [4]})
+ slots = [exp.step_as_dict(
+ node_id=1, process=3, todo=None, fillable=True,
+ children=[exp.step_as_dict(
+ node_id=2, process=2, todo=None, fillable=False,
+ children=[exp.step_as_dict(
+ node_id=3, process=1, todo=None, fillable=False)])])]
+ exp.set('steps_todo_to_process', slots)
+ self.check_json_get('/todo?id=1', exp)
+ self.check_post({'step_filler_to_1': 'make_3'}, '/todo?id=1')
+ exp.set_todo_from_post(2, {'process_id': 3})
+ exp.set_todo_from_post(1, {'process_id': 4, 'children': [2]})
+ slots[0]['todo'] = 2
+ assert isinstance(slots[0]['children'], list)
+ slots[0]['children'][0]['fillable'] = True
+ self.check_json_get('/todo?id=1', exp)
+ # make new top-level Todo without chain implied by its Process
+ self.check_post({'make_empty': 2, 'adopt': [2]}, '/todo?id=1')
+ exp.set_todo_from_post(3, {'process_id': 2})
+ exp.set_todo_from_post(1, {'process_id': 4, 'children': [2, 3]})
+ slots += [exp.step_as_dict(
+ node_id=4, process=None, todo=3, fillable=False)]
+ self.check_json_get('/todo?id=1', exp)
+ # fail on trying to call make_full on non-existing Process
+ self.check_post({'make_full': 5}, '/todo?id=1', 404)
+
+ def test_GET_todo(self) -> None:
+ """Test GET /todo response codes."""
+ # test malformed or illegal parameter values
+ self.check_get_defaults('/todo')
+ # test all existing Processes are shown as available
+ exp = ExpectedGetTodo(1)
+ self.post_exp_process([exp], {}, 1)
+ self.post_exp_day([exp], {'new_todo': [1]})
+ self.post_exp_process([exp], {}, 2)
+ self.check_json_get('/todo?id=1', exp)
+ # test chain of Processes shown as potential step nodes
+ self.post_exp_process([exp], {}, 3)
+ self.post_exp_process([exp], {}, 4)
+ self.post_exp_process([exp], {'new_top_step': 2}, 1)
+ self.post_exp_process([exp], {'new_top_step': 3, 'step_of': [1]}, 2)
+ self.post_exp_process([exp], {'new_top_step': 4, 'step_of': [2]}, 3)
+ exp.lib_set('ProcessStep', [
+ exp.procstep_as_dict(1, owner_id=1, step_process_id=2),
+ exp.procstep_as_dict(2, owner_id=2, step_process_id=3),
+ exp.procstep_as_dict(3, owner_id=3, step_process_id=4)])
+ slots = [exp.step_as_dict(
+ node_id=1, process=2, todo=None, fillable=True,
+ children=[exp.step_as_dict(
+ node_id=2, process=3, todo=None, fillable=False,
+ children=[exp.step_as_dict(
+ node_id=3, process=4, todo=None, fillable=False)])])]
+ exp.set('steps_todo_to_process', slots)
+ self.check_json_get('/todo?id=1', exp)
+ # test display of parallel chains
+ proc_steps_post = {'new_top_step': 4, 'kept_steps': [1, 3]}
+ self.post_exp_process([], proc_steps_post, 1)
+ exp.lib_set('ProcessStep', [
+ exp.procstep_as_dict(4, owner_id=1, step_process_id=4)])
+ slots += [exp.step_as_dict(
+ node_id=4, process=4, todo=None, fillable=True)]
+ self.check_json_get('/todo?id=1', exp)
+
+ def test_POST_todo_doneness_relations(self) -> None:
+ """Test Todo.is_done Condition, adoption relations for /todo POSTs."""
+ self.post_exp_process([], {}, 1)
+ # test Todo with adoptee can only be set done if adoptee is done too
+ self.post_exp_day([], {'new_todo': [1]})
+ self.post_exp_day([], {'new_todo': [1]})
+ self.check_post({'adopt': 2, 'is_done': 1}, '/todo?id=1', 400)
+ self.check_post({'is_done': 1}, '/todo?id=2')
+ self.check_post({'adopt': 2, 'is_done': 1}, '/todo?id=1', 302)
+ # test Todo cannot be set undone with adopted Todo not done yet
+ self.check_post({'is_done': 0}, '/todo?id=2')
+ self.check_post({'adopt': 2, 'is_done': 0}, '/todo?id=1', 400)
+ # test unadoption relieves block
+ self.check_post({'is_done': 0}, '/todo?id=1', 302)
+ # test Condition being set or unset can block doneness setting
+ c1_post = {'title': '', 'description': '', 'is_active': 0}
+ c2_post = {'title': '', 'description': '', 'is_active': 1}
+ self.check_post(c1_post, '/condition', redir='/condition?id=1')
+ self.check_post(c2_post, '/condition', redir='/condition?id=2')
+ self.check_post({'conditions': [1], 'is_done': 1}, '/todo?id=1', 400)
+ self.check_post({'is_done': 1}, '/todo?id=1', 302)
+ self.check_post({'is_done': 0}, '/todo?id=1', 302)
+ self.check_post({'blockers': [2], 'is_done': 1}, '/todo?id=1', 400)
+ self.check_post({'is_done': 1}, '/todo?id=1', 302)
+ # test setting Todo doneness can set/un-set Conditions, but only on
+ # doneness change, not by mere passive state
+ self.check_post({'is_done': 0}, '/todo?id=2', 302)
+ self.check_post({'enables': [1], 'is_done': 1}, '/todo?id=1')
+ self.check_post({'conditions': [1], 'is_done': 1}, '/todo?id=2', 400)
+ self.check_post({'enables': [1], 'is_done': 0}, '/todo?id=1')
+ self.check_post({'enables': [1], 'is_done': 1}, '/todo?id=1')
+ self.check_post({'conditions': [1], 'is_done': 1}, '/todo?id=2')
+ self.check_post({'blockers': [1], 'is_done': 0}, '/todo?id=2', 400)
+ self.check_post({'disables': [1], 'is_done': 1}, '/todo?id=1')
+ self.check_post({'blockers': [1], 'is_done': 0}, '/todo?id=2', 400)
+ self.check_post({'disables': [1]}, '/todo?id=1')
+ self.check_post({'disables': [1], 'is_done': 1}, '/todo?id=1')
+ self.check_post({'blockers': [1]}, '/todo?id=2')
--- /dev/null
+"""Shared test utilities."""
+# pylint: disable=too-many-lines
+
+# included with Python
+from __future__ import annotations
+from datetime import datetime, date as dt_date, timedelta
+from http.client import HTTPConnection
+from json import loads as json_loads, dumps as json_dumps
+from os import remove as remove_file
+from pathlib import Path
+from pprint import pprint
+from sys import path as sys_path
+from tempfile import gettempdir
+from threading import Thread
+from time import sleep
+from typing import Mapping, Any, Callable
+from unittest import TestCase
+from urllib.parse import urlencode
+from uuid import uuid4
+
+# ourselves; since we're outside repo's ./src, tell Python to look in there
+NAME_SRC_DIR = 'src'
+sys_path[0:0] = [NAME_SRC_DIR]
+# pylint: disable=wrong-import-position
+from plomtask.db import DatabaseFile, DatabaseConnection # noqa: E402
+from plomtask.http import TaskHandler, TaskServer # noqa: E402
+from plomtask.processes import Process, ProcessStep # noqa: E402
+from plomtask.conditions import Condition # noqa: E402
+from plomtask.days import Day # noqa: E402
+from plomtask.todos import Todo # noqa: E402
+from plomtask.versioned_attributes import ( # noqa: E402
+ VersionedAttribute, TIMESTAMP_FMT)
+from plomtask.exceptions import ( # noqa: E402
+ NotFoundException, HandledException)
+
+
+# to look for schema file in ./src/migrations rather than ./migrations
+DatabaseFile.path_schema = Path(NAME_SRC_DIR
+ ).joinpath(DatabaseFile.path_schema)
+
+
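+# sample values per value type name, for exercising VersionedAttributes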
+_VERSIONED_VALS: dict[str,
+ list[str] | list[float]] = {'str': ['A', 'B'],
+ 'float': [0.3, 1.1]}
+_VALID_TRUES = {True, 'True', 'true', '1', 'on'}
+
+
+def dt_date_from_day_id(day_id: int) -> dt_date:
+ """Return datetime.date of adding day_id days to 2000-01-01."""
+ return dt_date(2000, 1, 1) + timedelta(days=day_id)
+
+
+def date_and_day_id(day_id: int) -> tuple[str, int]:
+ """Interpet day_id as n of days since millennium, return (date, day_id)."""
+ return dt_date_from_day_id(day_id).isoformat(), day_id
+
+
+class TestCaseAugmented(TestCase):
+ """Tester core providing helpful basic internal decorators and methods."""
+ checked_class: Any
+ default_init_kwargs: dict[str, Any] = {}
+
+ @staticmethod
+ def _run_on_versioned_attributes(f: Callable[..., None]
+ ) -> Callable[..., None]:
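+ """Wrap f to run over each of checked_class's versioned attributes."""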
+ def wrapper(self: TestCase) -> None:
+ assert isinstance(self, TestCaseAugmented)
+ for attr_name in self.checked_class.to_save_versioned():
+ default = self.checked_class.versioned_defaults[attr_name]
+ owner = self.checked_class(None, **self.default_init_kwargs)
+ attr = getattr(owner, attr_name)
+ to_set = _VERSIONED_VALS[attr.value_type_name]
+ f(self, owner, attr_name, attr, default, to_set)
+ return wrapper
+
+ @classmethod
+ def _run_if_sans_db(cls, f: Callable[..., None]) -> Callable[..., None]:
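+ """Wrap f to only run on TestCaseSansDB subclasses."""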
+ def wrapper(self: TestCaseSansDB) -> None:
+ if issubclass(cls, TestCaseSansDB):
+ f(self)
+ return wrapper
+
+ @classmethod
+ def _run_if_with_db_but_not_server(cls,
+ f: Callable[..., None]
+ ) -> Callable[..., None]:
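+ """Wrap f to only run on TestCaseWithDB (but not -WithServer) classes."""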
+ def wrapper(self: TestCaseWithDB) -> None:
+ if issubclass(cls, TestCaseWithDB) and\
+ not issubclass(cls, TestCaseWithServer):
+ f(self)
+ return wrapper
+
+ @classmethod
+ def _make_from_defaults(cls, id_: int | None) -> Any:
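+ """Return checked_class instance made of id_ and default init kwargs."""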
+ return cls.checked_class(id_, **cls.default_init_kwargs)
+
+
+class TestCaseSansDB(TestCaseAugmented):
+ """Tests requiring no DB setup."""
+ _legal_ids: list[int] = [1, 5]
+ _illegal_ids: list[int] = [0]
+
+ @TestCaseAugmented._run_if_sans_db
+ def test_id_validation(self) -> None:
+ """Test .id_ validation/setting."""
+ for id_ in self._illegal_ids:
+ with self.assertRaises(HandledException):
+ self._make_from_defaults(id_)
+ for id_ in self._legal_ids:
+ obj = self._make_from_defaults(id_)
+ self.assertEqual(obj.id_, id_)
+
+ @TestCaseAugmented._run_if_sans_db
+ @TestCaseAugmented._run_on_versioned_attributes
+ def test_versioned_set(self,
+ _: Any,
+ __: str,
+ attr: VersionedAttribute,
+ default: str | float,
+ to_set: list[str] | list[float]
+ ) -> None:
+ """Test VersionedAttribute.set() behaves as expected."""
+ attr.set(default)
+ self.assertEqual(list(attr.history.values()), [default])
+ # check same value does not get set twice in a row,
+ # and that not even its timestamp gets updated
+ timestamp = list(attr.history.keys())[0]
+ attr.set(default)
+ self.assertEqual(list(attr.history.values()), [default])
+ self.assertEqual(list(attr.history.keys())[0], timestamp)
+ # check that different value _will_ be set/added
+ attr.set(to_set[0])
+ timesorted_vals = [attr.history[t] for
+ t in sorted(attr.history.keys())]
+ expected = [default, to_set[0]]
+ self.assertEqual(timesorted_vals, expected)
+ # check that a previously used value can be set if not most recent
+ attr.set(default)
+ timesorted_vals = [attr.history[t] for
+ t in sorted(attr.history.keys())]
+ expected = [default, to_set[0], default]
+ self.assertEqual(timesorted_vals, expected)
+ # again check for same value not being set twice in a row, even for
+ # later items
+ attr.set(to_set[1])
+ timesorted_vals = [attr.history[t] for
+ t in sorted(attr.history.keys())]
+ expected = [default, to_set[0], default, to_set[1]]
+ self.assertEqual(timesorted_vals, expected)
+ attr.set(to_set[1])
+ self.assertEqual(timesorted_vals, expected)
+
+ @TestCaseAugmented._run_if_sans_db
+ @TestCaseAugmented._run_on_versioned_attributes
+ def test_versioned_newest(self,
+ _: Any,
+ __: str,
+ attr: VersionedAttribute,
+ default: str | float,
+ to_set: list[str] | list[float]
+ ) -> None:
+ """Test VersionedAttribute.newest."""
+ # check .newest on empty history returns .default
+ self.assertEqual(attr.newest, default)
+ # check newest element always returned
+ for v in [to_set[0], to_set[1]]:
+ attr.set(v)
+ self.assertEqual(attr.newest, v)
+ # check newest element returned even if it also occurs earlier
+ attr.set(default)
+ self.assertEqual(attr.newest, default)
+
+ @TestCaseAugmented._run_if_sans_db
+ @TestCaseAugmented._run_on_versioned_attributes
+ def test_versioned_at(self,
+ _: Any,
+ __: str,
+ attr: VersionedAttribute,
+ default: str | float,
+ to_set: list[str] | list[float]
+ ) -> None:
+ """Test .at() returns values nearest to queried time, or default."""
+ # check .at() returns default on empty history
+ timestamp_a = datetime.now().strftime(TIMESTAMP_FMT)
+ self.assertEqual(attr.at(timestamp_a), default)
+ # check value exactly at timestamp returned
+ attr.set(to_set[0])
+ timestamp_b = list(attr.history.keys())[0]
+ self.assertEqual(attr.at(timestamp_b), to_set[0])
+ # check earliest value returned if one exists, rather than default
+ self.assertEqual(attr.at(timestamp_a), to_set[0])
+ # check reverts to previous value for timestamps not indexed
+ sleep(0.00001)
+ timestamp_between = datetime.now().strftime(TIMESTAMP_FMT)
+ sleep(0.00001)
+ attr.set(to_set[1])
+ timestamp_c = sorted(attr.history.keys())[-1]
+ self.assertEqual(attr.at(timestamp_c), to_set[1])
+ self.assertEqual(attr.at(timestamp_between), to_set[0])
+ sleep(0.00001)
+ timestamp_after_c = datetime.now().strftime(TIMESTAMP_FMT)
+ self.assertEqual(attr.at(timestamp_after_c), to_set[1])
+
+
+class TestCaseWithDB(TestCaseAugmented):
+ """Module tests not requiring DB setup."""
+ _default_ids: tuple[int, int, int] = (1, 2, 3)
+
+ def setUp(self) -> None:
+ Condition.empty_cache()
+ Day.empty_cache()
+ Process.empty_cache()
+ ProcessStep.empty_cache()
+ Todo.empty_cache()
+ db_path = Path(gettempdir()).joinpath(f'test_db:{uuid4()}')
+ DatabaseFile.create(db_path)
+ self.db_file = DatabaseFile(db_path)
+ self.db_conn = DatabaseConnection(self.db_file)
+
+ def tearDown(self) -> None:
+ self.db_conn.close()
+ remove_file(self.db_file.path)
+
+ def _load_from_db(self, id_: int) -> list[object]:
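+ """Load all DB rows of checked_class matching id_ into objects."""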
+ db_found: list[object] = []
+ for row in self.db_conn.row_where(self.checked_class.table_name,
+ 'id', id_):
+ db_found += [self.checked_class.from_table_row(self.db_conn,
+ row)]
+ return db_found
+
+ def _change_obj(self, obj: object) -> str:
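+ """Change obj's last simple attribute, return that attribute's name."""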
+ attr_name: str = self.checked_class.to_save_simples[-1]
+ attr = getattr(obj, attr_name)
+ new_attr: str | int | float | bool
+ # NB: test bool before int, since bool subclasses int
+ if isinstance(attr, bool):
+ new_attr = not attr
+ elif isinstance(attr, (int, float)):
+ new_attr = attr + 1
+ elif isinstance(attr, str):
+ new_attr = attr + '_'
+ setattr(obj, attr_name, new_attr)
+ return attr_name
+
+ def check_identity_with_cache_and_db(self, content: list[Any]) -> None:
+ """Test both cache and DB equal content."""
+ expected_cache = {}
+ for item in content:
+ expected_cache[item.id_] = item
+ self.assertEqual(self.checked_class.get_cache(), expected_cache)
+ hashes_content = [hash(x) for x in content]
+ db_found: list[Any] = []
+ for item in content:
+ db_found += self._load_from_db(item.id_)
+ hashes_db_found = [hash(x) for x in db_found]
+ self.assertEqual(sorted(hashes_content), sorted(hashes_db_found))
+
+ @TestCaseAugmented._run_if_with_db_but_not_server
+ @TestCaseAugmented._run_on_versioned_attributes
+ def test_saving_versioned_attributes(self,
+ owner: Any,
+ attr_name: str,
+ attr: VersionedAttribute,
+ _: str | float,
+ to_set: list[str] | list[float]
+ ) -> None:
+ """Test storage and initialization of versioned attributes."""
+
+ def retrieve_attr_vals(attr: VersionedAttribute) -> list[object]:
+ attr_vals_saved: list[object] = []
+ for row in self.db_conn.row_where(attr.table_name, 'parent',
+ owner.id_):
+ attr_vals_saved += [row[2]]
+ return attr_vals_saved
+
+ attr.set(to_set[0])
+ # check that without attr.save() no rows in DB
+ rows = self.db_conn.row_where(attr.table_name, 'parent', owner.id_)
+ self.assertEqual([], rows)
+ # fail saving attributes on non-saved owner
+ with self.assertRaises(NotFoundException):
+ attr.save(self.db_conn)
+ # check owner.save() created entries as expected in attr table
+ owner.save(self.db_conn)
+ attr_vals_saved = retrieve_attr_vals(attr)
+ self.assertEqual([to_set[0]], attr_vals_saved)
+ # check changing attr val without save affects owner in memory …
+ attr.set(to_set[1])
+ cmp_attr = getattr(owner, attr_name)
+ self.assertEqual(to_set, list(cmp_attr.history.values()))
+ self.assertEqual(cmp_attr.history, attr.history)
+ # … but does not yet affect DB
+ attr_vals_saved = retrieve_attr_vals(attr)
+ self.assertEqual([to_set[0]], attr_vals_saved)
+ # check individual attr.save also stores new val to DB
+ attr.save(self.db_conn)
+ attr_vals_saved = retrieve_attr_vals(attr)
+ self.assertEqual(to_set, attr_vals_saved)
+
+ @TestCaseAugmented._run_if_with_db_but_not_server
+ def test_saving_and_caching(self) -> None:
+ """Test effects of .cache() and .save()."""
+ id1 = self._default_ids[0]
+ # check failure to cache without ID (if None-ID input possible)
+ obj0 = self._make_from_defaults(None)
+ with self.assertRaises(HandledException):
+ obj0.cache()
+ # check mere object init itself doesn't even store in cache
+ obj1 = self._make_from_defaults(id1)
+ self.assertEqual(self.checked_class.get_cache(), {})
+ # check .cache() fills cache, but not DB
+ obj1.cache()
+ self.assertEqual(self.checked_class.get_cache(), {id1: obj1})
+ found_in_db = self._load_from_db(id1)
+ self.assertEqual(found_in_db, [])
+ # check .save() sets ID, updates cache, and fills DB
+ # (expect obj2's ID to become id1 even though obj1 already carries it:
+ # IDs are generated by cursor.lastrowid on the DB table, and since obj1
+ # was never written there, obj2 claims id1 first!)
+ obj2 = self._make_from_defaults(None)
+ obj2.save(self.db_conn)
+ self.assertEqual(self.checked_class.get_cache(), {id1: obj2})
+ # NB: we'll only compare hashes because obj2 itself disappears on
+ # .from_table_row-triggered database reload
+ obj2_hash = hash(obj2)
+ found_in_db += self._load_from_db(id1)
+ self.assertEqual([hash(o) for o in found_in_db], [obj2_hash])
+ # check obj1 cannot overwrite obj2 despite sharing its ID, since that
+ # ID is now claimed by obj2 in cache and DB
+ with self.assertRaises(HandledException):
+ obj1.save(self.db_conn)
+
+ @TestCaseAugmented._run_if_with_db_but_not_server
+ def test_by_id(self) -> None:
+ """Test .by_id()."""
+ id1, id2, _ = self._default_ids
+ # check failure if not yet saved
+ obj1 = self._make_from_defaults(id1)
+ with self.assertRaises(NotFoundException):
+ self.checked_class.by_id(self.db_conn, id1)
+ # check identity of cached and retrieved
+ obj1.cache()
+ self.assertEqual(obj1, self.checked_class.by_id(self.db_conn, id1))
+ # check identity of saved and retrieved
+ obj2 = self._make_from_defaults(id2)
+ obj2.save(self.db_conn)
+ self.assertEqual(obj2, self.checked_class.by_id(self.db_conn, id2))
+
+ @TestCaseAugmented._run_if_with_db_but_not_server
+ def test_by_id_or_create(self) -> None:
+ """Test .by_id_or_create."""
+ # check .by_id_or_create fails if wrong class
+ if not self.checked_class.can_create_by_id:
+ with self.assertRaises(HandledException):
+ self.checked_class.by_id_or_create(self.db_conn, None)
+ return
+ # check ID input of None gets, on saving, IDs 1, 2, … assigned
+ for n in range(2):
+ item = self.checked_class.by_id_or_create(self.db_conn, None)
+ self.assertEqual(item.id_, None)
+ item.save(self.db_conn)
+ self.assertEqual(item.id_, n+1)
+ # check .by_id_or_create acts like normal instantiation (sans saving)
+ id_ = self._default_ids[2]
+ item = self.checked_class.by_id_or_create(self.db_conn, id_)
+ self.assertEqual(item.id_, id_)
+ with self.assertRaises(NotFoundException):
+ self.checked_class.by_id(self.db_conn, item.id_)
+ self.assertEqual(self.checked_class(item.id_), item)
+
+ @TestCaseAugmented._run_if_with_db_but_not_server
+ def test_from_table_row(self) -> None:
+ """Test .from_table_row() properly reads in class directly from DB."""
+ obj = self._make_from_defaults(self._default_ids[0])
+ obj.save(self.db_conn)
+ for row in self.db_conn.row_where(self.checked_class.table_name,
+ 'id', obj.id_):
+ # check .from_table_row reproduces state saved, no matter if obj
+ # later changed (with caching even)
+ # NB: we'll only compare hashes because obj itself disappears on
+ # .from_table_row-triggered database reload
+ hash_original = hash(obj)
+ attr_name = self._change_obj(obj)
+ obj.cache()
+ to_cmp = getattr(obj, attr_name)
+ retrieved = self.checked_class.from_table_row(self.db_conn, row)
+ self.assertNotEqual(to_cmp, getattr(retrieved, attr_name))
+ self.assertEqual(hash_original, hash(retrieved))
+ # check cache contains what .from_table_row just produced
+ self.assertEqual({retrieved.id_: retrieved},
+ self.checked_class.get_cache())
+
+ @TestCaseAugmented._run_if_with_db_but_not_server
+ @TestCaseAugmented._run_on_versioned_attributes
+ def test_versioned_history_from_row(self,
+ owner: Any,
+ _: str,
+ attr: VersionedAttribute,
+ default: str | float,
+ to_set: list[str] | list[float]
+ ) -> None:
+ """"Test VersionedAttribute.history_from_row() knows its DB rows."""
+ attr.set(to_set[0])
+ attr.set(to_set[1])
+ owner.save(self.db_conn)
+ # make empty VersionedAttribute, fill from rows, compare to owner's
+ for _ in self.db_conn.row_where(owner.table_name, 'id', owner.id_):
+ loaded_attr = VersionedAttribute(owner, attr.table_name, default)
+ for row in self.db_conn.row_where(attr.table_name, 'parent',
+ owner.id_):
+ loaded_attr.history_from_row(row)
+ self.assertEqual(len(attr.history.keys()),
+ len(loaded_attr.history.keys()))
+ for timestamp, value in attr.history.items():
+ self.assertEqual(value, loaded_attr.history[timestamp])
+
+ @TestCaseAugmented._run_if_with_db_but_not_server
+ def test_all(self) -> None:
+ """Test .all() and its relation to cache and savings."""
+ id1, id2, id3 = self._default_ids
+ item1 = self._make_from_defaults(id1)
+ item2 = self._make_from_defaults(id2)
+ item3 = self._make_from_defaults(id3)
+ # check .all() returns empty list on un-cached items
+ self.assertEqual(self.checked_class.all(self.db_conn), [])
+ # check that all() shows only cached/saved items
+ item1.cache()
+ item3.save(self.db_conn)
+ self.assertEqual(sorted(self.checked_class.all(self.db_conn)),
+ sorted([item1, item3]))
+ item2.save(self.db_conn)
+ self.assertEqual(sorted(self.checked_class.all(self.db_conn)),
+ sorted([item1, item2, item3]))
+
+ @TestCaseAugmented._run_if_with_db_but_not_server
+ def test_singularity(self) -> None:
+ """Test pointers made for single object keep pointing to it."""
+ id1 = self._default_ids[0]
+ obj = self._make_from_defaults(id1)
+ obj.save(self.db_conn)
+ # change object, expect retrieved through .by_id to carry change
+ attr_name = self._change_obj(obj)
+ new_attr = getattr(obj, attr_name)
+ retrieved = self.checked_class.by_id(self.db_conn, id1)
+ self.assertEqual(new_attr, getattr(retrieved, attr_name))
+
+ @TestCaseAugmented._run_if_with_db_but_not_server
+ @TestCaseAugmented._run_on_versioned_attributes
+ def test_versioned_singularity(self,
+ owner: Any,
+ attr_name: str,
+ attr: VersionedAttribute,
+ _: str | float,
+ to_set: list[str] | list[float]
+ ) -> None:
+ """Test singularity of VersionedAttributes on saving."""
+ owner.save(self.db_conn)
+ # change obj, expect retrieved through .by_id to carry change
+ attr.set(to_set[0])
+ retrieved = self.checked_class.by_id(self.db_conn, owner.id_)
+ attr_retrieved = getattr(retrieved, attr_name)
+ self.assertEqual(attr.history, attr_retrieved.history)
+
+ @TestCaseAugmented._run_if_with_db_but_not_server
+ def test_remove(self) -> None:
+ """Test .remove() effects on DB and cache."""
+ obj = self._make_from_defaults(self._default_ids[0])
+ # check removal only works after saving
+ with self.assertRaises(HandledException):
+ obj.remove(self.db_conn)
+ obj.save(self.db_conn)
+ obj.remove(self.db_conn)
+ # check access to obj fails after removal
+ with self.assertRaises(HandledException):
+ print(obj.id_)
+ # check DB and cache now empty
+ self.check_identity_with_cache_and_db([])
+
+
+class Expected:
+ """Builder of (JSON-like) dict to compare against responses of test server.
+
+ Collects all items and relations we expect expressed in the server's JSON
+ responses and puts them into the proper json.dumps-friendly dict structure,
+ accessibla via .as_dict, to compare them in TestsWithServer.check_json_get.
+
+ On its own provides for .as_dict output only {"_library": …}, initialized
+ from .__init__ and to be directly manipulated via the .lib* methods.
+ Further structures of the expected response may be added and kept
+ up-to-date by subclassing .__init__, .recalc, and .d.
+
+ NB: Lots of expectations towards server behavior will be made explicit here
+ (or in the subclasses) rather than in the actual TestCase methods' code.
+ """
+ _default_dict: dict[str, Any]
+ _forced: dict[str, Any]
+ _fields: dict[str, Any]
+ _on_empty_make_temp: tuple[str, str]
+
+ def __init__(self) -> None:
+ for name in ['_default_dict', '_fields', '_forced']:
+ if not hasattr(self, name):
+ setattr(self, name, {})
+ self._lib: dict[str, dict[int, dict[str, Any]]] = {}
+ for k, v in self._default_dict.items():
+ if k not in self._fields:
+ self._fields[k] = v
+
+ def recalc(self) -> None:
+ """Update internal dictionary by subclass-specific rules."""
+ todos = self.lib_all('Todo')
+ for todo in todos:
+ todo['parents'] = []
+ for todo in todos:
+ for child_id in todo['children']:
+ self.lib_get('Todo', child_id)['parents'] += [todo['id']]
+ todo['children'].sort()
+ procsteps = self.lib_all('ProcessStep')
+ procs = self.lib_all('Process')
+ for proc in procs:
+ proc['explicit_steps'] = [s['id'] for s in procsteps
+ if s['owner_id'] == proc['id']]
+
+ @property
+ def as_dict(self) -> dict[str, Any]:
+ """Return dict to compare against test server JSON responses."""
+ make_temp = False
+ if hasattr(self, '_on_empty_make_temp'):
+ category, dicter = getattr(self, '_on_empty_make_temp')
+ id_ = self._fields[category.lower()]
+ make_temp = not bool(self.lib_get(category, id_))
+ if make_temp:
+ self.lib_set(category, [getattr(self, dicter)(id_)])
+ self.recalc()
+ d = {'_library': self._lib}
+ for k, v in self._fields.items():
+ # we expect everything sortable to be sorted
+ if isinstance(v, list) and k not in self._forced:
+ # NB: without the test for v being a list, sorted() on an empty
+ # dict would return an empty list, hiding the type difference
+ try:
+ v = sorted(v)
+ except TypeError:
+ pass
+ d[k] = v
+ for k, v in self._forced.items():
+ d[k] = v
+ if make_temp:
+ json = json_dumps(d)
+ id_ = id_ if id_ is not None else -1
+ self.lib_del(category, id_)
+ d = json_loads(json)
+ return d
+
+ def lib_get(self, category: str, id_: int) -> dict[str, Any]:
+ """From library, return item of category and id_, or empty dict."""
+ if category in self._lib and id_ in self._lib[category]:
+ return self._lib[category][id_]
+ return {}
+
+ def lib_all(self, category: str) -> list[dict[str, Any]]:
+ """From library, return items of category, or [] if none."""
+ if category in self._lib:
+ return list(self._lib[category].values())
+ return []
+
+ def lib_set(self, category: str, items: list[dict[str, object]]) -> None:
+ """Update library for category with items."""
+ if category not in self._lib:
+ self._lib[category] = {}
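+ # items still lacking an ID (None) are filed under placeholder key -1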
+ for item in items:
+ id_ = item['id'] if item['id'] is not None else -1
+ assert isinstance(id_, int)
+ self._lib[category][id_] = item
+
+ def lib_del(self, category: str, id_: int) -> None:
+ """Remove category element of id_ from library."""
+ del self._lib[category][id_]
+ if 0 == len(self._lib[category]):
+ del self._lib[category]
+
+ def lib_wipe(self, category: str) -> None:
+ """Remove category from library."""
+ if category in self._lib:
+ del self._lib[category]
+
+ def set(self, field_name: str, value: object) -> None:
+ """Set top-level .as_dict field."""
+ self._fields[field_name] = value
+
+ def force(self, field_name: str, value: object) -> None:
+ """Set ._forced field to ensure value in .as_dict."""
+ self._forced[field_name] = value
+
+ @staticmethod
+ def as_ids(items: list[dict[str, Any]]) -> list[int]:
+ """Return list of only 'id' fields of items."""
+ return [item['id'] for item in items]
+
+ @staticmethod
+ def day_as_dict(id_: int, comment: str = '') -> dict[str, object]:
+ """Return JSON of Day to expect."""
+ return {'id': id_, 'comment': comment, 'todos': []}
+
+ def set_day_from_post(self, id_: int, d: dict[str, Any]) -> None:
+ """Set Day of id_ in library based on POST dict d."""
+ day = self.day_as_dict(id_)
+ for k, v in d.items():
+ if 'day_comment' == k:
+ day['comment'] = v
+ elif 'new_todo' == k:
+ next_id = 1
+ for todo in self.lib_all('Todo'):
+ if next_id <= todo['id']:
+ next_id = todo['id'] + 1
+ for proc_id in sorted([pid for pid in v if pid]):
+ todo = self.todo_as_dict(next_id, proc_id, id_)
+ self.lib_set('Todo', [todo])
+ next_id += 1
+ elif 'done' == k:
+ for todo_id in v:
+ self.lib_get('Todo', todo_id)['is_done'] = True
+ elif 'todo_id' == k:
+ for i, todo_id in enumerate(v):
+ t = self.lib_get('Todo', todo_id)
+ if 'comment' in d:
+ t['comment'] = d['comment'][i]
+ if 'effort' in d:
+ effort = d['effort'][i] if d['effort'][i] else None
+ t['effort'] = effort
+ self.lib_set('Day', [day])
+
+ @staticmethod
+ def cond_as_dict(id_: int = 1,
+ is_active: bool = False,
+ title: None | str = None,
+ description: None | str = None,
+ ) -> dict[str, object]:
+ """Return JSON of Condition to expect."""
+ versioned: dict[str, dict[str, object]]
+ versioned = {'title': {}, 'description': {}}
+ if title is not None:
+ versioned['title']['0'] = title
+ if description is not None:
+ versioned['description']['0'] = description
+ return {'id': id_, 'is_active': is_active, '_versioned': versioned}
+
+ def set_cond_from_post(self, id_: int, d: dict[str, Any]) -> None:
+ """Set Condition of id_ in library based on POST dict d."""
+ if 'delete' in d:
+ self.lib_del('Condition', id_)
+ return
+ cond = self.lib_get('Condition', id_)
+ if cond:
+ cond['is_active'] = 'is_active' in d and\
+ d['is_active'] in _VALID_TRUES
+ for category in ['title', 'description']:
+ history = cond['_versioned'][category]
+ if len(history) > 0:
+ last_i = sorted([int(k) for k in history.keys()])[-1]
+ if d[category] != history[str(last_i)]:
+ history[str(last_i + 1)] = d[category]
+ else:
+ history['0'] = d[category]
+ else:
+ cond = self.cond_as_dict(id_, **d)
+ self.lib_set('Condition', [cond])
+
+ @staticmethod
+ def todo_as_dict(id_: int = 1,
+ process_id: int = 1,
+ day_id: int = 1,
+ conditions: None | list[int] = None,
+ disables: None | list[int] = None,
+ blockers: None | list[int] = None,
+ enables: None | list[int] = None,
+ calendarize: bool = False,
+ comment: str = '',
+ is_done: bool = False,
+ effort: float | None = None,
+ children: list[int] | None = None,
+ parents: list[int] | None = None,
+ ) -> dict[str, object]:
+ """Return JSON of Todo to expect."""
+ # pylint: disable=too-many-arguments
+ d = {'id': id_,
+ 'day_id': day_id,
+ 'process_id': process_id,
+ 'is_done': is_done,
+ 'calendarize': calendarize,
+ 'comment': comment,
+ 'children': children if children else [],
+ 'parents': parents if parents else [],
+ 'effort': effort,
+ 'conditions': conditions if conditions else [],
+ 'disables': disables if disables else [],
+ 'blockers': blockers if blockers else [],
+ 'enables': enables if enables else []}
+ return d
+
+ def set_todo_from_post(self, id_: int, d: dict[str, Any]) -> None:
+ """Set Todo of id_ in library based on POST dict d."""
+ corrected_kwargs: dict[str, Any] = {
+ 'children': [], 'is_done': 0, 'calendarize': 0, 'comment': ''}
+ for k, v in d.items():
+ if k.startswith('step_filler_to_'):
+ continue
+ if 'adopt' == k:
+ new_children = v if isinstance(v, list) else [v]
+ corrected_kwargs['children'] += new_children
+ continue
+ if k in {'is_done', 'calendarize'} and v in _VALID_TRUES:
+ v = True
+ corrected_kwargs[k] = v
+ todo = self.lib_get('Todo', id_)
+ if todo:
+ for k, v in corrected_kwargs.items():
+ todo[k] = v
+ else:
+ todo = self.todo_as_dict(id_, **corrected_kwargs)
+ self.lib_set('Todo', [todo])
+
+ @staticmethod
+ def procstep_as_dict(id_: int,
+ owner_id: int,
+ step_process_id: int,
+ parent_step_id: int | None = None
+ ) -> dict[str, object]:
+ """Return JSON of ProcessStep to expect."""
+ return {'id': id_,
+ 'owner_id': owner_id,
+ 'step_process_id': step_process_id,
+ 'parent_step_id': parent_step_id}
+
+ @staticmethod
+ def proc_as_dict(id_: int = 1,
+ title: None | str = None,
+ description: None | str = None,
+ effort: None | float = None,
+ conditions: None | list[int] = None,
+ disables: None | list[int] = None,
+ blockers: None | list[int] = None,
+ enables: None | list[int] = None,
+ explicit_steps: None | list[int] = None,
+ suppressed_steps: None | list[int] = None
+ ) -> dict[str, object]:
+ """Return JSON of Process to expect."""
+ # pylint: disable=too-many-arguments
+ versioned: dict[str, dict[str, object]]
+ versioned = {'title': {}, 'description': {}, 'effort': {}}
+ if title is not None:
+ versioned['title']['0'] = title
+ if description is not None:
+ versioned['description']['0'] = description
+ if effort is not None:
+ versioned['effort']['0'] = effort
+ d = {'id': id_,
+ 'calendarize': False,
+ 'suppressed_steps': suppressed_steps if suppressed_steps else [],
+ 'explicit_steps': explicit_steps if explicit_steps else [],
+ '_versioned': versioned,
+ 'conditions': conditions if conditions else [],
+ 'disables': disables if disables else [],
+ 'enables': enables if enables else [],
+ 'blockers': blockers if blockers else []}
+ return d
+
+ def set_proc_from_post(self, id_: int, d: dict[str, Any]) -> None:
+ """Set Process of id_ in library based on POST dict d."""
+ proc = self.lib_get('Process', id_)
+ if proc:
+ for category in ['title', 'description', 'effort']:
+ history = proc['_versioned'][category]
+ if len(history) > 0:
+ last_i = sorted([int(k) for k in history.keys()])[-1]
+ if d[category] != history[str(last_i)]:
+ history[str(last_i + 1)] = d[category]
+ else:
+ history['0'] = d[category]
+ else:
+ proc = self.proc_as_dict(id_,
+ d['title'], d['description'], d['effort'])
+ ignore = {'title', 'description', 'effort', 'new_top_step', 'step_of',
+ 'kept_steps'}
+ proc['calendarize'] = False
+ for k, v in d.items():
+ if k in ignore\
+ or k.startswith('step_') or k.startswith('new_step_to'):
+ continue
+ if k in {'calendarize'} and v in _VALID_TRUES:
+ v = True
+ elif k in {'suppressed_steps', 'explicit_steps', 'conditions',
+ 'disables', 'enables', 'blockers'}:
+ if not isinstance(v, list):
+ v = [v]
+ proc[k] = v
+ self.lib_set('Process', [proc])
+
+
+class TestCaseWithServer(TestCaseWithDB):
+ """Module tests against our HTTP server/handler (and database)."""
+
+ def setUp(self) -> None:
+ super().setUp()
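+ # port 0 makes the OS pick a free port; the actual address is read
+ # back from self.httpd.server_address for the client connection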
+ self.httpd = TaskServer(self.db_file, ('localhost', 0), TaskHandler)
+ self.server_thread = Thread(target=self.httpd.serve_forever)
+ self.server_thread.daemon = True
+ self.server_thread.start()
+ self.conn = HTTPConnection(str(self.httpd.server_address[0]),
+ self.httpd.server_address[1])
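+ # have the server answer in JSON (rather than its default rendering),
+ # so responses can be compared as dicts in check_json_get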
+ self.httpd.render_mode = 'json'
+
+ def tearDown(self) -> None:
+ self.httpd.shutdown()
+ self.httpd.server_close()
+ self.server_thread.join()
+ super().tearDown()
+
+ def post_exp_cond(self,
+ exps: list[Expected],
+ payload: dict[str, object],
+ id_: int = 1,
+ post_to_id: bool = True,
+ redir_to_id: bool = True
+ ) -> None:
+ """POST /condition(s), appropriately update Expecteds."""
+ # pylint: disable=too-many-arguments
+ target = f'/condition?id={id_}' if post_to_id else '/condition'
+ redir = f'/condition?id={id_}' if redir_to_id else '/conditions'
+ if 'title' not in payload:
+ payload['title'] = 'foo'
+ if 'description' not in payload:
+ payload['description'] = 'foo'
+ self.check_post(payload, target, redir=redir)
+ for exp in exps:
+ exp.set_cond_from_post(id_, payload)
+
+ def post_exp_day(self,
+ exps: list[Expected],
+ payload: dict[str, Any],
+ day_id: int = 1
+ ) -> None:
+ """POST /day, appropriately update Expecteds."""
+ if 'make_type' not in payload:
+ payload['make_type'] = 'empty'
+ if 'day_comment' not in payload:
+ payload['day_comment'] = ''
+ date = dt_date_from_day_id(day_id).isoformat()
+ target = f'/day?date={date}'
+ redir_to = f'{target}&make_type={payload["make_type"]}'
+ self.check_post(payload, target, 302, redir_to)
+ for exp in exps:
+ exp.set_day_from_post(day_id, payload)
+
+ def post_exp_process(self,
+ exps: list[Expected],
+ payload: dict[str, Any],
+ id_: int,
+ ) -> dict[str, object]:
+ """POST /process, appropriately update Expecteds."""
+ if 'title' not in payload:
+ payload['title'] = 'foo'
+ if 'description' not in payload:
+ payload['description'] = 'foo'
+ if 'effort' not in payload:
+ payload['effort'] = 1.1
+ self.check_post(payload, f'/process?id={id_}',
+ redir=f'/process?id={id_}')
+ for exp in exps:
+ exp.set_proc_from_post(id_, payload)
+ return payload
+
+ def post_exp_todo(self,
+ exps: list[Expected],
+ payload: dict[str, Any],
+ id_: int,
+ ) -> None:
+ """POST /todo, appropriately updated Expecteds."""
+ self.check_post(payload, f'/todo?id={id_}')
+ for exp in exps:
+ exp.set_todo_from_post(id_, payload)
+
+ def check_filter(self, exp: Expected, category: str, key: str,
+ val: str, list_ids: list[int]) -> None:
+ """Check GET /{category}?{key}={val} sorts to list_ids."""
+ # pylint: disable=too-many-arguments
+ exp.set(key, val)
+ exp.force(category, list_ids)
+ self.check_json_get(f'/{category}?{key}={val}', exp)
+
+ def check_redirect(self, target: str) -> None:
+ """Check that self.conn answers with a 302 redirect to target."""
+ response = self.conn.getresponse()
+ self.assertEqual(response.status, 302)
+ self.assertEqual(response.getheader('Location'), target)
+
+ def check_get(self, target: str, expected_code: int) -> None:
+ """Check that a GET to target yields expected_code."""
+ self.conn.request('GET', target)
+ self.assertEqual(self.conn.getresponse().status, expected_code)
+
+ def check_minimal_inputs(self,
+ url: str,
+ minimal_inputs: dict[str, Any]
+ ) -> None:
+ """Check that url 400's unless all of minimal_inputs provided."""
+ for to_hide in minimal_inputs.keys():
+ to_post = {k: v for k, v in minimal_inputs.items() if k != to_hide}
+ self.check_post(to_post, url, 400)
+
+ def check_post(self, data: Mapping[str, object], target: str,
+ expected_code: int = 302, redir: str = '') -> None:
+ """Check that POST of data to target yields expected_code."""
+ encoded_form_data = urlencode(data, doseq=True).encode('utf-8')
+ headers = {'Content-Type': 'application/x-www-form-urlencoded',
+ 'Content-Length': str(len(encoded_form_data))}
+ self.conn.request('POST', target,
+ body=encoded_form_data, headers=headers)
+ if 302 == expected_code:
+ redir = target if redir == '' else redir
+ self.check_redirect(redir)
+ else:
+ self.assertEqual(self.conn.getresponse().status, expected_code)
+
+ def check_get_defaults(self,
+ path: str,
+ default_id: str = '1',
+ id_name: str = 'id'
+ ) -> None:
+ """Some standard model paths to test."""
+ nonexist_status = 200 if self.checked_class.can_create_by_id else 404
+ self.check_get(path, nonexist_status)
+ self.check_get(f'{path}?{id_name}=', 400)
+ self.check_get(f'{path}?{id_name}=foo', 400)
+ self.check_get(f'{path}?{id_name}=0', 400)
+ self.check_get(f'{path}?{id_name}={default_id}', nonexist_status)
+
+ def check_json_get(self, path: str, expected: Expected) -> None:
+ """Compare JSON on GET path with expected.
+
+ To simplify comparison of VersionedAttribute histories, transforms
+ the timestamp keys of those histories into (strings of) integers
+ counting chronologically up from 0.
+ """
+
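+ # e.g. (hypothetical timestamps) {'2024-01-01 00:00:00.000000': 'a',
+ # '2024-01-02 00:00:00.000000': 'b'} becomes {'0': 'a', '1': 'b'}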
+ def rewrite_history_keys_in(item: Any) -> Any:
+ if isinstance(item, dict):
+ if '_versioned' in item.keys():
+ for category in item['_versioned']:
+ vals = item['_versioned'][category].values()
+ history = {}
+ for i, val in enumerate(vals):
+ history[str(i)] = val
+ item['_versioned'][category] = history
+ for category in list(item.keys()):
+ rewrite_history_keys_in(item[category])
+ elif isinstance(item, list):
+ item[:] = [rewrite_history_keys_in(i) for i in item]
+ return item
+
+ def walk_diffs(path: str, cmp1: object, cmp2: object) -> None:
+ # pylint: disable=too-many-branches
+ def warn(intro: str, val: object) -> None:
+ if isinstance(val, (str, int, float)):
+ print(intro, val)
+ else:
+ print(intro)
+ pprint(val)
+ if cmp1 != cmp2:
+ if isinstance(cmp1, dict) and isinstance(cmp2, dict):
+ for k, v in cmp1.items():
+ if k not in cmp2:
+ warn(f'DIFF {path}: retrieved lacks {k}', v)
+ elif v != cmp2[k]:
+ walk_diffs(f'{path}:{k}', v, cmp2[k])
+ for k in [k for k in cmp2.keys() if k not in cmp1]:
+ warn(f'DIFF {path}: expected lacks retrieved\'s {k}',
+ cmp2[k])
+ elif isinstance(cmp1, list) and isinstance(cmp2, list):
+ for i, v1 in enumerate(cmp1):
+ if i >= len(cmp2):
+ warn(f'DIFF {path}[{i}] retrieved misses:', v1)
+ elif v1 != cmp2[i]:
+ walk_diffs(f'{path}[{i}]', v1, cmp2[i])
+ if len(cmp2) > len(cmp1):
+ for i, v2 in enumerate(cmp2[len(cmp1):]):
+ warn(f'DIFF {path}[{len(cmp1)+i}] misses:', v2)
+ else:
+ warn(f'DIFF {path} – for expected:', cmp1)
+ warn('… and for retrieved:', cmp2)
+
+ self.conn.request('GET', path)
+ response = self.conn.getresponse()
+ self.assertEqual(response.status, 200)
+ retrieved = json_loads(response.read().decode())
+ rewrite_history_keys_in(retrieved)
+ # round-trip through JSON to convert ._lib's int keys to str
+ cmp = json_loads(json_dumps(expected.as_dict))
+ try:
+ self.assertEqual(cmp, retrieved)
+ except AssertionError as e:
+ print('EXPECTED:')
+ pprint(cmp)
+ print('RETRIEVED:')
+ pprint(retrieved)
+ walk_diffs('', cmp, retrieved)
+ raise e