-rw-r--r-- | dot_local/bin/executable_todojson.py | 253 |
1 file changed, 253 insertions, 0 deletions
diff --git a/dot_local/bin/executable_todojson.py b/dot_local/bin/executable_todojson.py
new file mode 100644
index 0000000..40db8f2
--- /dev/null
+++ b/dot_local/bin/executable_todojson.py
@@ -0,0 +1,253 @@
+#!/usr/bin/env python3
+
+"""Convert some funky reminders JSON data to todo.txt."""
+
+from __future__ import annotations
+
+import datetime
+import json
+import sys
+from dataclasses import dataclass
+
+
+@dataclass(frozen=True)
+class TodoEntry:
+    index: int
+    title: str | None
+    notes: str | None
+    completed: bool
+    priority: int
+    completion_date: datetime.datetime | None
+    creation_date: datetime.datetime | None
+    due_date: datetime.datetime | None
+    url: str | None
+
+    @staticmethod
+    def from_str(string: str) -> TodoEntry:
+        words = string.split()
+
+        # Strip from left to find completion
+        if words[0] == "x":
+            completed = True
+            del words[0]
+        else:
+            completed = False
+
+        # Strip from left to find priority
+        if len(words[0]) == 3 and words[0].startswith("(") and words[0].endswith(")"):
+            if words[0] == "(A)":
+                priority = 9
+            elif words[0] == "(B)":
+                priority = 5
+            else:
+                priority = 1
+        else:
+            priority = 0
+
+        # Strip from left to find dates
+        try:
+            creation_date = datetime.date.fromisoformat(words[0])
+        except ValueError:
+            creation_date = None
+            completion_date = None
+        else:
+            creation_date = datetime.datetime.combine(
+                creation_date,
+                datetime.time.min,
+                datetime.timezone.utc,
+            )
+            del words[0]
+            try:
+                completion_date = datetime.date.fromisoformat(words[0])
+            except ValueError:
+                completion_date = None
+            else:
+                completion_date = datetime.datetime.combine(
+                    completion_date,
+                    datetime.time.min,
+                    datetime.timezone.utc,
+                )
+                del words[0]
+
+        # Strip from right to find meta
+        meta = {}
+        while words and ":" in words[-1][1:-1]:
+            k, v = words.pop(-1).split(":", maxsplit=1)
+            if k == "due":
+                v = datetime.date.fromisoformat(v)
+                v = datetime.datetime.combine(
+                    v,
+                    datetime.time.min,
+                    datetime.timezone.utc,
+                )
+            if k == "index":
+                v = int(v)
+            if k in meta:
+                raise ValueError(f"duplicate meta tag {k}")
+            meta[k] = v
+
+        # Strip from right to find context and project
+        tags = []
+        while words and (words[-1].startswith("+") or words[-1].startswith("@")):
+            tags.append("#" + words.pop(-1)[1:])
+
+        # Split the rest into title :: notes
+        rest = " ".join(words)
+        title, *rest = rest.split(" :: ", maxsplit=1)
+        if rest:
+            notes = rest[0]
+        else:
+            notes = None
+
+        if "https" in meta:
+            url = "https:" + meta.pop("https")
+        elif "http" in meta:
+            url = "http:" + meta.pop("http")
+        else:
+            url = None
+
+        entry = TodoEntry(
+            index=meta.pop("index", -1),
+            title=title,
+            notes=notes,
+            completed=completed,
+            priority=priority,
+            creation_date=creation_date,
+            completion_date=completion_date,
+            due_date=meta.pop("due", None),
+            url=url,
+        )
+        if meta:
+            raise ValueError(f"Unconsumed meta: {meta}")
+        return entry
+
+    def __str__(self) -> str:
+        bits = []
+
+        # Completion
+        if self.completed:
+            bits.append("x")
+
+        # Reduce priority to 9->high->A, 5->medium->B, 1->low->C
+        if self.priority > 6:
+            bits.append("(A)")
+        elif self.priority > 3:
+            bits.append("(B)")
+        elif self.priority > 0:
+            bits.append("(C)")
+
+        # Creation date required so default if absent
+        if self.creation_date is None:
+            bits.append("1970-01-01")
+        else:
+            bits.append(roundd(self.creation_date).isoformat())
+
+        if self.completion_date is not None:
+            bits.append(roundd(self.completion_date).isoformat())
+
+        # Extract tags from title or notes
+        tags = ""
+        if self.notes:
+            notes, tags = extract_tags(self.notes)
+            notes = notes.strip()
+        else:
+            notes = self.notes
+
+        if self.title and not tags:
+            title, tags = extract_tags(self.title)
+            title = title.strip()
+        elif self.title:
+            title = self.title.strip()
+        else:
+            title = self.title
+
+        if title and notes:
+            bits.append(f"{title} :: {notes}")
+        elif title:
+            bits.append(title)
+        elif notes:
+            bits.append(notes)
+
+        bits.append(tags)
+
+        if self.due_date is not None:
+            bits.append("due:" + roundd((self.due_date)).isoformat())
+
+        bits.append("index:" + str(self.index))
+
+        return " ".join(bits).replace("\n", " ")
+
+
+def extract_tags(string: str) -> tuple[str, str]:
+    words = string.split()
+    tags = []
+    while words and words[-1].startswith("#"):
+        tags.append("@" + words.pop(-1)[1:])
+    return " ".join(words), " ".join(sorted(tags, key=str.lower))
+
+
+def roundd(dt: datetime.datetime) -> datetime.date:
+    # Exist in more than one TZ, round to closest date
+    dates = [
+        datetime.datetime.combine(
+            dt.date() + delta,
+            datetime.time.min,
+            datetime.timezone.utc,
+        )
+        for delta in [
+            datetime.timedelta(days=-1),
+            datetime.timedelta(days=0),
+            datetime.timedelta(days=+1),
+        ]
+    ]
+    min_date = datetime.date.min
+    min_delta = datetime.timedelta(days=100)
+
+    for date in dates:
+        delta = abs(date - dt)
+        if delta < min_delta:
+            min_date = date.date()
+            min_delta = delta
+
+    return min_date
+
+
+def _maybe_date(value: str | None) -> datetime.datetime | None:
+    if value is not None:
+        if value.endswith("Z"):
+            value = value[:-1]
+        dt = datetime.datetime.fromisoformat(value)
+        if dt.tzinfo is None:
+            dt = dt.replace(tzinfo=datetime.timezone.utc)
+        return dt
+    return None
+
+
+def main():
+    data = sys.stdin.read()
+    try:
+        json_data = json.loads(data)
+    except ValueError:
+        entries = [TodoEntry.from_str(line.strip()) for line in data.splitlines()]
+    else:
+        entries = [
+            TodoEntry(
+                index=n,
+                title=row.pop("title", None),
+                notes=row.pop("notes", None),
+                completed=row.pop("completed"),
+                priority=row.pop("priority"),
+                completion_date=_maybe_date(row.pop("completionDate", None)),
+                creation_date=_maybe_date(row.pop("creationDate", None)),
+                due_date=_maybe_date(row.pop("dueDate", None)),
+                url=row.pop("url", None),
+            )
+            for n, row in enumerate(json_data["reminders"])
+        ]
+        if any(x.keys() - {'uid'} for x in json_data["reminders"]):
+            print(f"Left over data:\n{json_data['reminders']}")
+    print("\n".join(map(str, entries)))
+
+
+if __name__ == "__main__":
+    main()
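For reference, a minimal sketch of the JSON-to-todo.txt direction the script implements. The reminder field names (title, completed, priority, creationDate, dueDate and so on) are read off main() above rather than a documented schema, the "todojson" import name is an assumption about how the file ends up on the path (chezmoi installs it as ~/.local/bin/todojson.py), and the values are purely illustrative.

import datetime

from todojson import TodoEntry  # assumed module name once installed

# Build an entry the way main() does for one JSON reminder row,
# then render it as a todo.txt line via __str__.
entry = TodoEntry(
    index=0,
    title="Write report #work",
    notes=None,
    completed=False,
    priority=9,  # anything above 6 renders as priority (A)
    completion_date=None,
    creation_date=datetime.datetime(2024, 1, 15, 9, 30, tzinfo=datetime.timezone.utc),
    due_date=datetime.datetime(2024, 1, 20, tzinfo=datetime.timezone.utc),
    url=None,
)
print(entry)
# (A) 2024-01-15 Write report @work due:2024-01-20 index:0

In normal use the script simply reads the Reminders export on stdin (todojson.py < reminders.json) and prints one such line per reminder; a non-JSON stdin is treated as existing todo.txt lines and parsed back through TodoEntry.from_str instead.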