diff --git a/.github/dependabot.yml b/.github/dependabot.yml new file mode 100644 index 0000000..91abb11 --- /dev/null +++ b/.github/dependabot.yml @@ -0,0 +1,11 @@ +# To get started with Dependabot version updates, you'll need to specify which +# package ecosystems to update and where the package manifests are located. +# Please see the documentation for all configuration options: +# https://docs.github.com/github/administering-a-repository/configuration-options-for-dependency-updates + +version: 2 +updates: + - package-ecosystem: "pip" # See documentation for possible values + directory: "/" # Location of package manifests + schedule: + interval: "weekly" diff --git a/LICENSE b/LICENSE index d7757a6..9f4f1ea 100644 --- a/LICENSE +++ b/LICENSE @@ -1,6 +1,6 @@ MIT License -Copyright (c) 2020--2021 Daniel Bosk +Copyright (c) 2020--2024 Daniel Bosk Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/Makefile b/Makefile index a4f3bcd..3b0c17f 100644 --- a/Makefile +++ b/Makefile @@ -1,10 +1,10 @@ SUBDIR_GOALS= all clean distclean -SUBDIR+= doc SUBDIR+= src/canvaslms +SUBDIR+= doc +SUBDIR+= docker -version=$(shell sed -n 's/^ *version *= *\"\([^\"]\+\)\",/\1/p' setup.py) -dist=$(addprefix dist/canvaslms-${version}, -py3-none-any.whl .tar.gz) +version=$(shell sed -n 's/^ *version *= *\"\([^\"]\+\)\"/\1/p' pyproject.toml) .PHONY: all @@ -20,32 +20,28 @@ install: compile .PHONY: compile compile: ${MAKE} -C src/canvaslms all + poetry build + +.PHONY: publish publish-github publish-canvaslms publish-docker +publish: publish-canvaslms doc/canvaslms.pdf publish-docker publish-github -.PHONY: publish publish-canvaslms publish-docker -publish: publish-canvaslms doc/canvaslms.pdf +publish-github: doc/canvaslms.pdf git push gh release create -t v${version} v${version} doc/canvaslms.pdf doc/canvaslms.pdf: $(wildcard src/canvaslms/cli/*.tex) ${MAKE} -C $(dir $@) $(notdir $@) -publish-canvaslms: ${dist} - python3 -m twine upload -r pypi ${dist} - -${dist}: compile canvaslms.bash - python3 -m build - -canvaslms.bash: - register-python-argcomplete canvaslms > $@ +publish-canvaslms: compile + poetry publish -#publish-docker: -# sleep 60 -# ${MAKE} -C docker publish +publish-docker: + sleep 60 + ${MAKE} -C docker publish .PHONY: clean clean: - ${RM} canvaslms.bash .PHONY: distclean distclean: diff --git a/README.md b/README.md index 5fa215d..fc3f71f 100644 --- a/README.md +++ b/README.md @@ -5,6 +5,14 @@ is `canvaslms` and it has several subcommands in the same style as Git. `canvaslms` provides output in a format useful for POSIX tools, this makes automating tasks much easier. +## Getting started + +Start by logging in to your Canvas server: + +``` {.text} +canvaslms login +``` + Let's consider how to grade students logging into the student-shell SSH server. We store the list of students' Canvas and KTH IDs in a file. 
diff --git a/doc/Makefile b/doc/Makefile index b3b802a..ed38f57 100644 --- a/doc/Makefile +++ b/doc/Makefile @@ -1,4 +1,3 @@ -NOWEAVEFLAGS.tex= -n -delay -t2 NOTANGLEFLAGS.py= LATEXFLAGS+= -shell-escape @@ -21,8 +20,10 @@ canvaslms.pdf: ${SRC_DIR}/cli/users.tex canvaslms.pdf: ${SRC_DIR}/cli/assignments.tex canvaslms.pdf: ${SRC_DIR}/cli/submissions.tex canvaslms.pdf: ${SRC_DIR}/cli/grade.tex +canvaslms.pdf: ${SRC_DIR}/cli/results.tex +canvaslms.pdf: ${SRC_DIR}/grades/grades.tex -${SRC_DIR}/%.tex: +${SRC_DIR}/%.tex: ${SRC_DIR}/%.nw ${MAKE} -C $(dir $@) $(notdir $@) diff --git a/doc/canvaslms.tex b/doc/canvaslms.tex index dedd723..36574b6 100644 --- a/doc/canvaslms.tex +++ b/doc/canvaslms.tex @@ -104,6 +104,8 @@ \part{The command-line interface} \input{../src/canvaslms/cli/assignments.tex} \input{../src/canvaslms/cli/submissions.tex} \input{../src/canvaslms/cli/grade.tex} +\input{../src/canvaslms/cli/results.tex} +\input{../src/canvaslms/grades/grades.tex} \printbibliography diff --git a/doc/preamble.tex b/doc/preamble.tex index c08980c..aa98bff 100644 --- a/doc/preamble.tex +++ b/doc/preamble.tex @@ -3,19 +3,19 @@ \usepackage[british]{babel} \usepackage{booktabs} +\usepackage[natbib,style=alphabetic,maxbibnames=99]{biblatex} +\addbibresource{canvas.bib} + \usepackage[all]{foreign} \renewcommand{\foreignfullfont}{} \renewcommand{\foreignabbrfont}{} -\usepackage{newclude} +%\usepackage{newclude} \usepackage{import} \usepackage[strict]{csquotes} \usepackage[single]{acro} -\usepackage[natbib,style=alphabetic,maxbibnames=99]{biblatex} -\addbibresource{canvas.bib} - \usepackage{subcaption} \usepackage[noend]{algpseudocode} @@ -35,11 +35,15 @@ \usepackage{amsmath} \usepackage{amssymb} \usepackage{mathtools} +\DeclarePairedDelimiter{\ceil}{\lceil}{\rceil} + \usepackage{amsthm} \usepackage{thmtools} \usepackage[unq]{unique} \DeclareMathOperator{\powerset}{\mathcal{P}} +\usepackage{longtable} + \usepackage[binary-units]{siunitx} \usepackage[capitalize]{cleveref} diff --git a/docker/Dockerfile b/docker/Dockerfile new file mode 100644 index 0000000..d6ff3a5 --- /dev/null +++ b/docker/Dockerfile @@ -0,0 +1,5 @@ +FROM python:3 + +RUN pip3 install --no-cache-dir --upgrade canvaslms && \ + activate-global-python-argcomplete && \ + register-python-argcomplete canvaslms > /etc/bash_completion.d/canvaslms.bash diff --git a/docker/Makefile b/docker/Makefile new file mode 100644 index 0000000..12bc046 --- /dev/null +++ b/docker/Makefile @@ -0,0 +1,22 @@ +VERSION+= latest + +.PHONY: all +all: docker-image + +.PHONY: publish +publish: docker-image + for v in ${VERSION}; do docker push dbosk/canvaslms:$$v; done + +.PHONY: docker-image +docker-image: + docker pull python:3 + docker build --no-cache -t canvaslms . 
+ for v in ${VERSION}; do docker tag canvaslms dbosk/canvaslms:$$v; done + +.PHONY: clean +clean: + true + +.PHONY: distclean +distclean: + -docker image rm -f canvaslms dbosk/canvaslms diff --git a/experiments/quiz-new.py b/experiments/quiz-new.py new file mode 100644 index 0000000..2e7e910 --- /dev/null +++ b/experiments/quiz-new.py @@ -0,0 +1,65 @@ +import canvasapi +import json +import os + +canvas = canvasapi.Canvas(os.environ["CANVAS_SERVER"], + os.environ["CANVAS_TOKEN"]) + +test_course = None + +for course in canvas.get_courses(): + if course.name == "prgm23": + test_course = course + break + +test_quiz = None + +for quiz in course.get_assignments(): + if quiz.name == "Exempelprov": + test_quiz = quiz + break +else: + raise ValueError("No quiz found") + +test_submission = None + +# There is only one in my setup +for quiz_submission in test_quiz.get_submissions(): + if quiz_submission.submitted_at is not None: + test_submission = quiz_submission + break + +print("\n# Quiz submission questions\n") + +for subm_question in test_submission.get_submission_questions(): + #print(subm_question.__dict__.keys()) + #print(subm_question) + print(f"{subm_question.id} " + f"{subm_question.question_name}:\n" + f"{subm_question.question_text}") + try: + print(f"Alternatives: {subm_question.answers}") + print(f"Correct: {subm_question.correct}") + except AttributeError: + pass + + print() + +print("\n# Quiz submission answers\n") + +quiz = None + +for assignment in test_course.get_assignments(): + if assignment.name == "Exempelprov": + quiz = assignment + break + +for submission in quiz.get_submissions(include=["submission_history"]): + for subm in submission.submission_history: + #print(subm) + try: + for data in subm["submission_data"]: + print(json.dumps(data, indent=2)) + except KeyError: + pass + diff --git a/experiments/quiz.py b/experiments/quiz.py new file mode 100644 index 0000000..12645da --- /dev/null +++ b/experiments/quiz.py @@ -0,0 +1,61 @@ +import canvasapi +import json +import os + +canvas = canvasapi.Canvas(os.environ["CANVAS_SERVER"], + os.environ["CANVAS_TOKEN"]) + +test_course = None + +for course in canvas.get_courses(): + if course.name == "Sandbox dbosk": + test_course = course + break + +test_quiz = None + +for quiz in course.get_quizzes(): + if quiz.title == "Classic datorprov": + test_quiz = quiz + break + +test_submission = None + +# There is only one in my setup +for quiz_submission in test_quiz.get_submissions(): + test_submission = quiz_submission + +print("\n# Quiz submission questions\n") + +for subm_question in test_submission.get_submission_questions(): + #print(subm_question.__dict__.keys()) + #print(subm_question) + print(f"{subm_question.id} " + f"{subm_question.question_name}:\n" + f"{subm_question.question_text}") + try: + print(f"Alternatives: {subm_question.answers}") + print(f"Correct: {subm_question.correct}") + except AttributeError: + pass + + print() + +print("\n# Quiz submission answers\n") + +quiz = None + +for assignment in test_course.get_assignments(): + if assignment.name == "Classic datorprov": + quiz = assignment + break + +for submission in quiz.get_submissions(include=["submission_history"]): + for subm in submission.submission_history: + #print(subm) + try: + for data in subm["submission_data"]: + print(json.dumps(data, indent=2)) + except KeyError: + pass + diff --git a/makefiles b/makefiles index a65948d..67d3dff 160000 --- a/makefiles +++ b/makefiles @@ -1 +1 @@ -Subproject commit a65948d6e32472ed4512f0baa8f445a91a7bb35f +Subproject commit 
67d3dffd90f19941ad8d95058a8c24450b456757 diff --git a/poetry.lock b/poetry.lock new file mode 100644 index 0000000..b5a3ef7 --- /dev/null +++ b/poetry.lock @@ -0,0 +1,792 @@ +# This file is automatically @generated by Poetry 1.8.3 and should not be changed by hand. + +[[package]] +name = "appdirs" +version = "1.4.4" +description = "A small Python module for determining appropriate platform-specific dirs, e.g. a \"user data dir\"." +optional = false +python-versions = "*" +files = [ + {file = "appdirs-1.4.4-py2.py3-none-any.whl", hash = "sha256:a841dacd6b99318a741b166adb07e19ee71a274450e68237b4650ca1055ab128"}, + {file = "appdirs-1.4.4.tar.gz", hash = "sha256:7d5d0167b2b1ba821647616af46a749d1c653740dd0d2415100fe26e27afdf41"}, +] + +[[package]] +name = "argcomplete" +version = "3.5.2" +description = "Bash tab completion for argparse" +optional = false +python-versions = ">=3.8" +files = [ + {file = "argcomplete-3.5.2-py3-none-any.whl", hash = "sha256:036d020d79048a5d525bc63880d7a4b8d1668566b8a76daf1144c0bbe0f63472"}, + {file = "argcomplete-3.5.2.tar.gz", hash = "sha256:23146ed7ac4403b70bd6026402468942ceba34a6732255b9edf5b7354f68a6bb"}, +] + +[package.extras] +test = ["coverage", "mypy", "pexpect", "ruff", "wheel"] + +[[package]] +name = "arrow" +version = "1.3.0" +description = "Better dates & times for Python" +optional = false +python-versions = ">=3.8" +files = [ + {file = "arrow-1.3.0-py3-none-any.whl", hash = "sha256:c728b120ebc00eb84e01882a6f5e7927a53960aa990ce7dd2b10f39005a67f80"}, + {file = "arrow-1.3.0.tar.gz", hash = "sha256:d4540617648cb5f895730f1ad8c82a65f2dad0166f57b75f3ca54759c4d67a85"}, +] + +[package.dependencies] +python-dateutil = ">=2.7.0" +types-python-dateutil = ">=2.8.10" + +[package.extras] +doc = ["doc8", "sphinx (>=7.0.0)", "sphinx-autobuild", "sphinx-autodoc-typehints", "sphinx_rtd_theme (>=1.3.0)"] +test = ["dateparser (==1.*)", "pre-commit", "pytest", "pytest-cov", "pytest-mock", "pytz (==2021.1)", "simplejson (==3.*)"] + +[[package]] +name = "attrs" +version = "23.2.0" +description = "Classes Without Boilerplate" +optional = false +python-versions = ">=3.7" +files = [ + {file = "attrs-23.2.0-py3-none-any.whl", hash = "sha256:99b87a485a5820b23b879f04c2305b44b951b502fd64be915879d77a7e8fc6f1"}, + {file = "attrs-23.2.0.tar.gz", hash = "sha256:935dc3b529c262f6cf76e50877d35a4bd3c1de194fd41f47a2b7ae8f19971f30"}, +] + +[package.extras] +cov = ["attrs[tests]", "coverage[toml] (>=5.3)"] +dev = ["attrs[tests]", "pre-commit"] +docs = ["furo", "myst-parser", "sphinx", "sphinx-notfound-page", "sphinxcontrib-towncrier", "towncrier", "zope-interface"] +tests = ["attrs[tests-no-zope]", "zope-interface"] +tests-mypy = ["mypy (>=1.6)", "pytest-mypy-plugins"] +tests-no-zope = ["attrs[tests-mypy]", "cloudpickle", "hypothesis", "pympler", "pytest (>=4.3.0)", "pytest-xdist[psutil]"] + +[[package]] +name = "backports-tarfile" +version = "1.1.1" +description = "Backport of CPython tarfile module" +optional = false +python-versions = ">=3.8" +files = [ + {file = "backports.tarfile-1.1.1-py3-none-any.whl", hash = "sha256:73e0179647803d3726d82e76089d01d8549ceca9bace469953fcb4d97cf2d417"}, + {file = "backports_tarfile-1.1.1.tar.gz", hash = "sha256:9c2ef9696cb73374f7164e17fc761389393ca76777036f5aad42e8b93fcd8009"}, +] + +[package.extras] +docs = ["furo", "jaraco.packaging (>=9.3)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] +testing = ["jaraco.test", "pytest (>=6,!=8.1.*)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)"] + +[[package]] +name = "cachetools" 
+version = "5.5.0" +description = "Extensible memoizing collections and decorators" +optional = false +python-versions = ">=3.7" +files = [ + {file = "cachetools-5.5.0-py3-none-any.whl", hash = "sha256:02134e8439cdc2ffb62023ce1debca2944c3f289d66bb17ead3ab3dede74b292"}, + {file = "cachetools-5.5.0.tar.gz", hash = "sha256:2cc24fb4cbe39633fb7badd9db9ca6295d766d9c2995f245725a46715d050f2a"}, +] + +[[package]] +name = "canvasapi" +version = "3.3.0" +description = "API wrapper for the Canvas LMS" +optional = false +python-versions = "*" +files = [ + {file = "canvasapi-3.3.0-py3-none-any.whl", hash = "sha256:d7b85a9abf149ed7729002d9bfe2c30bbfcdcf14517f10234b7bdd630a1a8217"}, + {file = "canvasapi-3.3.0.tar.gz", hash = "sha256:86f2e930acc87c9a360575b969687f107ab4e1f3c0ee4556df30d1757eaf5ef0"}, +] + +[package.dependencies] +arrow = "*" +pytz = "*" +requests = "*" + +[[package]] +name = "certifi" +version = "2024.7.4" +description = "Python package for providing Mozilla's CA Bundle." +optional = false +python-versions = ">=3.6" +files = [ + {file = "certifi-2024.7.4-py3-none-any.whl", hash = "sha256:c198e21b1289c2ab85ee4e67bb4b4ef3ead0892059901a8d5b622f24a1101e90"}, + {file = "certifi-2024.7.4.tar.gz", hash = "sha256:5a1e7645bc0ec61a09e26c36f6106dd4cf40c6db3a1fb6352b0244e7fb057c7b"}, +] + +[[package]] +name = "cffi" +version = "1.16.0" +description = "Foreign Function Interface for Python calling C code." +optional = false +python-versions = ">=3.8" +files = [ + {file = "cffi-1.16.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:6b3d6606d369fc1da4fd8c357d026317fbb9c9b75d36dc16e90e84c26854b088"}, + {file = "cffi-1.16.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:ac0f5edd2360eea2f1daa9e26a41db02dd4b0451b48f7c318e217ee092a213e9"}, + {file = "cffi-1.16.0-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7e61e3e4fa664a8588aa25c883eab612a188c725755afff6289454d6362b9673"}, + {file = "cffi-1.16.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a72e8961a86d19bdb45851d8f1f08b041ea37d2bd8d4fd19903bc3083d80c896"}, + {file = "cffi-1.16.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5b50bf3f55561dac5438f8e70bfcdfd74543fd60df5fa5f62d94e5867deca684"}, + {file = "cffi-1.16.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7651c50c8c5ef7bdb41108b7b8c5a83013bfaa8a935590c5d74627c047a583c7"}, + {file = "cffi-1.16.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e4108df7fe9b707191e55f33efbcb2d81928e10cea45527879a4749cbe472614"}, + {file = "cffi-1.16.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:32c68ef735dbe5857c810328cb2481e24722a59a2003018885514d4c09af9743"}, + {file = "cffi-1.16.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:673739cb539f8cdaa07d92d02efa93c9ccf87e345b9a0b556e3ecc666718468d"}, + {file = "cffi-1.16.0-cp310-cp310-win32.whl", hash = "sha256:9f90389693731ff1f659e55c7d1640e2ec43ff725cc61b04b2f9c6d8d017df6a"}, + {file = "cffi-1.16.0-cp310-cp310-win_amd64.whl", hash = "sha256:e6024675e67af929088fda399b2094574609396b1decb609c55fa58b028a32a1"}, + {file = "cffi-1.16.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:b84834d0cf97e7d27dd5b7f3aca7b6e9263c56308ab9dc8aae9784abb774d404"}, + {file = "cffi-1.16.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:1b8ebc27c014c59692bb2664c7d13ce7a6e9a629be20e54e7271fa696ff2b417"}, + {file = 
"cffi-1.16.0-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ee07e47c12890ef248766a6e55bd38ebfb2bb8edd4142d56db91b21ea68b7627"}, + {file = "cffi-1.16.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d8a9d3ebe49f084ad71f9269834ceccbf398253c9fac910c4fd7053ff1386936"}, + {file = "cffi-1.16.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e70f54f1796669ef691ca07d046cd81a29cb4deb1e5f942003f401c0c4a2695d"}, + {file = "cffi-1.16.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5bf44d66cdf9e893637896c7faa22298baebcd18d1ddb6d2626a6e39793a1d56"}, + {file = "cffi-1.16.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7b78010e7b97fef4bee1e896df8a4bbb6712b7f05b7ef630f9d1da00f6444d2e"}, + {file = "cffi-1.16.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:c6a164aa47843fb1b01e941d385aab7215563bb8816d80ff3a363a9f8448a8dc"}, + {file = "cffi-1.16.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:e09f3ff613345df5e8c3667da1d918f9149bd623cd9070c983c013792a9a62eb"}, + {file = "cffi-1.16.0-cp311-cp311-win32.whl", hash = "sha256:2c56b361916f390cd758a57f2e16233eb4f64bcbeee88a4881ea90fca14dc6ab"}, + {file = "cffi-1.16.0-cp311-cp311-win_amd64.whl", hash = "sha256:db8e577c19c0fda0beb7e0d4e09e0ba74b1e4c092e0e40bfa12fe05b6f6d75ba"}, + {file = "cffi-1.16.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:fa3a0128b152627161ce47201262d3140edb5a5c3da88d73a1b790a959126956"}, + {file = "cffi-1.16.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:68e7c44931cc171c54ccb702482e9fc723192e88d25a0e133edd7aff8fcd1f6e"}, + {file = "cffi-1.16.0-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:abd808f9c129ba2beda4cfc53bde801e5bcf9d6e0f22f095e45327c038bfe68e"}, + {file = "cffi-1.16.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:88e2b3c14bdb32e440be531ade29d3c50a1a59cd4e51b1dd8b0865c54ea5d2e2"}, + {file = "cffi-1.16.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fcc8eb6d5902bb1cf6dc4f187ee3ea80a1eba0a89aba40a5cb20a5087d961357"}, + {file = "cffi-1.16.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b7be2d771cdba2942e13215c4e340bfd76398e9227ad10402a8767ab1865d2e6"}, + {file = "cffi-1.16.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e715596e683d2ce000574bae5d07bd522c781a822866c20495e52520564f0969"}, + {file = "cffi-1.16.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:2d92b25dbf6cae33f65005baf472d2c245c050b1ce709cc4588cdcdd5495b520"}, + {file = "cffi-1.16.0-cp312-cp312-win32.whl", hash = "sha256:b2ca4e77f9f47c55c194982e10f058db063937845bb2b7a86c84a6cfe0aefa8b"}, + {file = "cffi-1.16.0-cp312-cp312-win_amd64.whl", hash = "sha256:68678abf380b42ce21a5f2abde8efee05c114c2fdb2e9eef2efdb0257fba1235"}, + {file = "cffi-1.16.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:0c9ef6ff37e974b73c25eecc13952c55bceed9112be2d9d938ded8e856138bcc"}, + {file = "cffi-1.16.0-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a09582f178759ee8128d9270cd1344154fd473bb77d94ce0aeb2a93ebf0feaf0"}, + {file = "cffi-1.16.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e760191dd42581e023a68b758769e2da259b5d52e3103c6060ddc02c9edb8d7b"}, + {file = 
"cffi-1.16.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:80876338e19c951fdfed6198e70bc88f1c9758b94578d5a7c4c91a87af3cf31c"}, + {file = "cffi-1.16.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a6a14b17d7e17fa0d207ac08642c8820f84f25ce17a442fd15e27ea18d67c59b"}, + {file = "cffi-1.16.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6602bc8dc6f3a9e02b6c22c4fc1e47aa50f8f8e6d3f78a5e16ac33ef5fefa324"}, + {file = "cffi-1.16.0-cp38-cp38-win32.whl", hash = "sha256:131fd094d1065b19540c3d72594260f118b231090295d8c34e19a7bbcf2e860a"}, + {file = "cffi-1.16.0-cp38-cp38-win_amd64.whl", hash = "sha256:31d13b0f99e0836b7ff893d37af07366ebc90b678b6664c955b54561fc36ef36"}, + {file = "cffi-1.16.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:582215a0e9adbe0e379761260553ba11c58943e4bbe9c36430c4ca6ac74b15ed"}, + {file = "cffi-1.16.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:b29ebffcf550f9da55bec9e02ad430c992a87e5f512cd63388abb76f1036d8d2"}, + {file = "cffi-1.16.0-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:dc9b18bf40cc75f66f40a7379f6a9513244fe33c0e8aa72e2d56b0196a7ef872"}, + {file = "cffi-1.16.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9cb4a35b3642fc5c005a6755a5d17c6c8b6bcb6981baf81cea8bfbc8903e8ba8"}, + {file = "cffi-1.16.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b86851a328eedc692acf81fb05444bdf1891747c25af7529e39ddafaf68a4f3f"}, + {file = "cffi-1.16.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c0f31130ebc2d37cdd8e44605fb5fa7ad59049298b3f745c74fa74c62fbfcfc4"}, + {file = "cffi-1.16.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8f8e709127c6c77446a8c0a8c8bf3c8ee706a06cd44b1e827c3e6a2ee6b8c098"}, + {file = "cffi-1.16.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:748dcd1e3d3d7cd5443ef03ce8685043294ad6bd7c02a38d1bd367cfd968e000"}, + {file = "cffi-1.16.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:8895613bcc094d4a1b2dbe179d88d7fb4a15cee43c052e8885783fac397d91fe"}, + {file = "cffi-1.16.0-cp39-cp39-win32.whl", hash = "sha256:ed86a35631f7bfbb28e108dd96773b9d5a6ce4811cf6ea468bb6a359b256b1e4"}, + {file = "cffi-1.16.0-cp39-cp39-win_amd64.whl", hash = "sha256:3686dffb02459559c74dd3d81748269ffb0eb027c39a6fc99502de37d501faa8"}, + {file = "cffi-1.16.0.tar.gz", hash = "sha256:bcb3ef43e58665bbda2fb198698fcae6776483e0c4a631aa5647806c25e02cc0"}, +] + +[package.dependencies] +pycparser = "*" + +[[package]] +name = "charset-normalizer" +version = "3.3.2" +description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet." 
+optional = false +python-versions = ">=3.7.0" +files = [ + {file = "charset-normalizer-3.3.2.tar.gz", hash = "sha256:f30c3cb33b24454a82faecaf01b19c18562b1e89558fb6c56de4d9118a032fd5"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:25baf083bf6f6b341f4121c2f3c548875ee6f5339300e08be3f2b2ba1721cdd3"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:06435b539f889b1f6f4ac1758871aae42dc3a8c0e24ac9e60c2384973ad73027"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9063e24fdb1e498ab71cb7419e24622516c4a04476b17a2dab57e8baa30d6e03"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6897af51655e3691ff853668779c7bad41579facacf5fd7253b0133308cf000d"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1d3193f4a680c64b4b6a9115943538edb896edc190f0b222e73761716519268e"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cd70574b12bb8a4d2aaa0094515df2463cb429d8536cfb6c7ce983246983e5a6"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8465322196c8b4d7ab6d1e049e4c5cb460d0394da4a27d23cc242fbf0034b6b5"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a9a8e9031d613fd2009c182b69c7b2c1ef8239a0efb1df3f7c8da66d5dd3d537"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:beb58fe5cdb101e3a055192ac291b7a21e3b7ef4f67fa1d74e331a7f2124341c"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:e06ed3eb3218bc64786f7db41917d4e686cc4856944f53d5bdf83a6884432e12"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:2e81c7b9c8979ce92ed306c249d46894776a909505d8f5a4ba55b14206e3222f"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:572c3763a264ba47b3cf708a44ce965d98555f618ca42c926a9c1616d8f34269"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:fd1abc0d89e30cc4e02e4064dc67fcc51bd941eb395c502aac3ec19fab46b519"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-win32.whl", hash = "sha256:3d47fa203a7bd9c5b6cee4736ee84ca03b8ef23193c0d1ca99b5089f72645c73"}, + {file = "charset_normalizer-3.3.2-cp310-cp310-win_amd64.whl", hash = "sha256:10955842570876604d404661fbccbc9c7e684caf432c09c715ec38fbae45ae09"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:802fe99cca7457642125a8a88a084cef28ff0cf9407060f7b93dca5aa25480db"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:573f6eac48f4769d667c4442081b1794f52919e7edada77495aaed9236d13a96"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:549a3a73da901d5bc3ce8d24e0600d1fa85524c10287f6004fbab87672bf3e1e"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f27273b60488abe721a075bcca6d7f3964f9f6f067c8c4c605743023d7d3944f"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1ceae2f17a9c33cb48e3263960dc5fc8005351ee19db217e9b1bb15d28c02574"}, + {file = 
"charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:65f6f63034100ead094b8744b3b97965785388f308a64cf8d7c34f2f2e5be0c4"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:753f10e867343b4511128c6ed8c82f7bec3bd026875576dfd88483c5c73b2fd8"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4a78b2b446bd7c934f5dcedc588903fb2f5eec172f3d29e52a9096a43722adfc"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:e537484df0d8f426ce2afb2d0f8e1c3d0b114b83f8850e5f2fbea0e797bd82ae"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:eb6904c354526e758fda7167b33005998fb68c46fbc10e013ca97f21ca5c8887"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:deb6be0ac38ece9ba87dea880e438f25ca3eddfac8b002a2ec3d9183a454e8ae"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:4ab2fe47fae9e0f9dee8c04187ce5d09f48eabe611be8259444906793ab7cbce"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:80402cd6ee291dcb72644d6eac93785fe2c8b9cb30893c1af5b8fdd753b9d40f"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-win32.whl", hash = "sha256:7cd13a2e3ddeed6913a65e66e94b51d80a041145a026c27e6bb76c31a853c6ab"}, + {file = "charset_normalizer-3.3.2-cp311-cp311-win_amd64.whl", hash = "sha256:663946639d296df6a2bb2aa51b60a2454ca1cb29835324c640dafb5ff2131a77"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:0b2b64d2bb6d3fb9112bafa732def486049e63de9618b5843bcdd081d8144cd8"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:ddbb2551d7e0102e7252db79ba445cdab71b26640817ab1e3e3648dad515003b"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:55086ee1064215781fff39a1af09518bc9255b50d6333f2e4c74ca09fac6a8f6"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8f4a014bc36d3c57402e2977dada34f9c12300af536839dc38c0beab8878f38a"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a10af20b82360ab00827f916a6058451b723b4e65030c5a18577c8b2de5b3389"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8d756e44e94489e49571086ef83b2bb8ce311e730092d2c34ca8f7d925cb20aa"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:90d558489962fd4918143277a773316e56c72da56ec7aa3dc3dbbe20fdfed15b"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6ac7ffc7ad6d040517be39eb591cac5ff87416c2537df6ba3cba3bae290c0fed"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:7ed9e526742851e8d5cc9e6cf41427dfc6068d4f5a3bb03659444b4cabf6bc26"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:8bdb58ff7ba23002a4c5808d608e4e6c687175724f54a5dade5fa8c67b604e4d"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:6b3251890fff30ee142c44144871185dbe13b11bab478a88887a639655be1068"}, + {file = 
"charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:b4a23f61ce87adf89be746c8a8974fe1c823c891d8f86eb218bb957c924bb143"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:efcb3f6676480691518c177e3b465bcddf57cea040302f9f4e6e191af91174d4"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-win32.whl", hash = "sha256:d965bba47ddeec8cd560687584e88cf699fd28f192ceb452d1d7ee807c5597b7"}, + {file = "charset_normalizer-3.3.2-cp312-cp312-win_amd64.whl", hash = "sha256:96b02a3dc4381e5494fad39be677abcb5e6634bf7b4fa83a6dd3112607547001"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:95f2a5796329323b8f0512e09dbb7a1860c46a39da62ecb2324f116fa8fdc85c"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c002b4ffc0be611f0d9da932eb0f704fe2602a9a949d1f738e4c34c75b0863d5"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a981a536974bbc7a512cf44ed14938cf01030a99e9b3a06dd59578882f06f985"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3287761bc4ee9e33561a7e058c72ac0938c4f57fe49a09eae428fd88aafe7bb6"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:42cb296636fcc8b0644486d15c12376cb9fa75443e00fb25de0b8602e64c1714"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0a55554a2fa0d408816b3b5cedf0045f4b8e1a6065aec45849de2d6f3f8e9786"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:c083af607d2515612056a31f0a8d9e0fcb5876b7bfc0abad3ecd275bc4ebc2d5"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:87d1351268731db79e0f8e745d92493ee2841c974128ef629dc518b937d9194c"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:bd8f7df7d12c2db9fab40bdd87a7c09b1530128315d047a086fa3ae3435cb3a8"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:c180f51afb394e165eafe4ac2936a14bee3eb10debc9d9e4db8958fe36afe711"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:8c622a5fe39a48f78944a87d4fb8a53ee07344641b0562c540d840748571b811"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-win32.whl", hash = "sha256:db364eca23f876da6f9e16c9da0df51aa4f104a972735574842618b8c6d999d4"}, + {file = "charset_normalizer-3.3.2-cp37-cp37m-win_amd64.whl", hash = "sha256:86216b5cee4b06df986d214f664305142d9c76df9b6512be2738aa72a2048f99"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:6463effa3186ea09411d50efc7d85360b38d5f09b870c48e4600f63af490e56a"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:6c4caeef8fa63d06bd437cd4bdcf3ffefe6738fb1b25951440d80dc7df8c03ac"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:37e55c8e51c236f95b033f6fb391d7d7970ba5fe7ff453dad675e88cf303377a"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fb69256e180cb6c8a894fee62b3afebae785babc1ee98b81cdf68bbca1987f33"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:ae5f4161f18c61806f411a13b0310bea87f987c7d2ecdbdaad0e94eb2e404238"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b2b0a0c0517616b6869869f8c581d4eb2dd83a4d79e0ebcb7d373ef9956aeb0a"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:45485e01ff4d3630ec0d9617310448a8702f70e9c01906b0d0118bdf9d124cf2"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:eb00ed941194665c332bf8e078baf037d6c35d7c4f3102ea2d4f16ca94a26dc8"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:2127566c664442652f024c837091890cb1942c30937add288223dc895793f898"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:a50aebfa173e157099939b17f18600f72f84eed3049e743b68ad15bd69b6bf99"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:4d0d1650369165a14e14e1e47b372cfcb31d6ab44e6e33cb2d4e57265290044d"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:923c0c831b7cfcb071580d3f46c4baf50f174be571576556269530f4bbd79d04"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:06a81e93cd441c56a9b65d8e1d043daeb97a3d0856d177d5c90ba85acb3db087"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-win32.whl", hash = "sha256:6ef1d82a3af9d3eecdba2321dc1b3c238245d890843e040e41e470ffa64c3e25"}, + {file = "charset_normalizer-3.3.2-cp38-cp38-win_amd64.whl", hash = "sha256:eb8821e09e916165e160797a6c17edda0679379a4be5c716c260e836e122f54b"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:c235ebd9baae02f1b77bcea61bce332cb4331dc3617d254df3323aa01ab47bd4"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:5b4c145409bef602a690e7cfad0a15a55c13320ff7a3ad7ca59c13bb8ba4d45d"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:68d1f8a9e9e37c1223b656399be5d6b448dea850bed7d0f87a8311f1ff3dabb0"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:22afcb9f253dac0696b5a4be4a1c0f8762f8239e21b99680099abd9b2b1b2269"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e27ad930a842b4c5eb8ac0016b0a54f5aebbe679340c26101df33424142c143c"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1f79682fbe303db92bc2b1136016a38a42e835d932bab5b3b1bfcfbf0640e519"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b261ccdec7821281dade748d088bb6e9b69e6d15b30652b74cbbac25e280b796"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:122c7fa62b130ed55f8f285bfd56d5f4b4a5b503609d181f9ad85e55c89f4185"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:d0eccceffcb53201b5bfebb52600a5fb483a20b61da9dbc885f8b103cbe7598c"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:9f96df6923e21816da7e0ad3fd47dd8f94b2a5ce594e00677c0013018b813458"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = 
"sha256:7f04c839ed0b6b98b1a7501a002144b76c18fb1c1850c8b98d458ac269e26ed2"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:34d1c8da1e78d2e001f363791c98a272bb734000fcef47a491c1e3b0505657a8"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:ff8fa367d09b717b2a17a052544193ad76cd49979c805768879cb63d9ca50561"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-win32.whl", hash = "sha256:aed38f6e4fb3f5d6bf81bfa990a07806be9d83cf7bacef998ab1a9bd660a581f"}, + {file = "charset_normalizer-3.3.2-cp39-cp39-win_amd64.whl", hash = "sha256:b01b88d45a6fcb69667cd6d2f7a9aeb4bf53760d7fc536bf679ec94fe9f3ff3d"}, + {file = "charset_normalizer-3.3.2-py3-none-any.whl", hash = "sha256:3e4d1f6587322d2788836a99c69062fbb091331ec940e02d12d179c1d53e25fc"}, +] + +[[package]] +name = "cryptography" +version = "43.0.1" +description = "cryptography is a package which provides cryptographic recipes and primitives to Python developers." +optional = false +python-versions = ">=3.7" +files = [ + {file = "cryptography-43.0.1-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:8385d98f6a3bf8bb2d65a73e17ed87a3ba84f6991c155691c51112075f9ffc5d"}, + {file = "cryptography-43.0.1-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:27e613d7077ac613e399270253259d9d53872aaf657471473ebfc9a52935c062"}, + {file = "cryptography-43.0.1-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:68aaecc4178e90719e95298515979814bda0cbada1256a4485414860bd7ab962"}, + {file = "cryptography-43.0.1-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:de41fd81a41e53267cb020bb3a7212861da53a7d39f863585d13ea11049cf277"}, + {file = "cryptography-43.0.1-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:f98bf604c82c416bc829e490c700ca1553eafdf2912a91e23a79d97d9801372a"}, + {file = "cryptography-43.0.1-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:61ec41068b7b74268fa86e3e9e12b9f0c21fcf65434571dbb13d954bceb08042"}, + {file = "cryptography-43.0.1-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:014f58110f53237ace6a408b5beb6c427b64e084eb451ef25a28308270086494"}, + {file = "cryptography-43.0.1-cp37-abi3-win32.whl", hash = "sha256:2bd51274dcd59f09dd952afb696bf9c61a7a49dfc764c04dd33ef7a6b502a1e2"}, + {file = "cryptography-43.0.1-cp37-abi3-win_amd64.whl", hash = "sha256:666ae11966643886c2987b3b721899d250855718d6d9ce41b521252a17985f4d"}, + {file = "cryptography-43.0.1-cp39-abi3-macosx_10_9_universal2.whl", hash = "sha256:ac119bb76b9faa00f48128b7f5679e1d8d437365c5d26f1c2c3f0da4ce1b553d"}, + {file = "cryptography-43.0.1-cp39-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1bbcce1a551e262dfbafb6e6252f1ae36a248e615ca44ba302df077a846a8806"}, + {file = "cryptography-43.0.1-cp39-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:58d4e9129985185a06d849aa6df265bdd5a74ca6e1b736a77959b498e0505b85"}, + {file = "cryptography-43.0.1-cp39-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:d03a475165f3134f773d1388aeb19c2d25ba88b6a9733c5c590b9ff7bbfa2e0c"}, + {file = "cryptography-43.0.1-cp39-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:511f4273808ab590912a93ddb4e3914dfd8a388fed883361b02dea3791f292e1"}, + {file = "cryptography-43.0.1-cp39-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:80eda8b3e173f0f247f711eef62be51b599b5d425c429b5d4ca6a05e9e856baa"}, + {file = "cryptography-43.0.1-cp39-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:38926c50cff6f533f8a2dae3d7f19541432610d114a70808f0926d5aaa7121e4"}, + {file = 
"cryptography-43.0.1-cp39-abi3-win32.whl", hash = "sha256:a575913fb06e05e6b4b814d7f7468c2c660e8bb16d8d5a1faf9b33ccc569dd47"}, + {file = "cryptography-43.0.1-cp39-abi3-win_amd64.whl", hash = "sha256:d75601ad10b059ec832e78823b348bfa1a59f6b8d545db3a24fd44362a1564cb"}, + {file = "cryptography-43.0.1-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:ea25acb556320250756e53f9e20a4177515f012c9eaea17eb7587a8c4d8ae034"}, + {file = "cryptography-43.0.1-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:c1332724be35d23a854994ff0b66530119500b6053d0bd3363265f7e5e77288d"}, + {file = "cryptography-43.0.1-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:fba1007b3ef89946dbbb515aeeb41e30203b004f0b4b00e5e16078b518563289"}, + {file = "cryptography-43.0.1-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:5b43d1ea6b378b54a1dc99dd8a2b5be47658fe9a7ce0a58ff0b55f4b43ef2b84"}, + {file = "cryptography-43.0.1-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:88cce104c36870d70c49c7c8fd22885875d950d9ee6ab54df2745f83ba0dc365"}, + {file = "cryptography-43.0.1-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:9d3cdb25fa98afdd3d0892d132b8d7139e2c087da1712041f6b762e4f807cc96"}, + {file = "cryptography-43.0.1-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:e710bf40870f4db63c3d7d929aa9e09e4e7ee219e703f949ec4073b4294f6172"}, + {file = "cryptography-43.0.1-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:7c05650fe8023c5ed0d46793d4b7d7e6cd9c04e68eabe5b0aeea836e37bdcec2"}, + {file = "cryptography-43.0.1.tar.gz", hash = "sha256:203e92a75716d8cfb491dc47c79e17d0d9207ccffcbcb35f598fbe463ae3444d"}, +] + +[package.dependencies] +cffi = {version = ">=1.12", markers = "platform_python_implementation != \"PyPy\""} + +[package.extras] +docs = ["sphinx (>=5.3.0)", "sphinx-rtd-theme (>=1.1.1)"] +docstest = ["pyenchant (>=1.6.11)", "readme-renderer", "sphinxcontrib-spelling (>=4.0.1)"] +nox = ["nox"] +pep8test = ["check-sdist", "click", "mypy", "ruff"] +sdist = ["build"] +ssh = ["bcrypt (>=3.1.5)"] +test = ["certifi", "cryptography-vectors (==43.0.1)", "pretend", "pytest (>=6.2.0)", "pytest-benchmark", "pytest-cov", "pytest-xdist"] +test-randomorder = ["pytest-randomly"] + +[[package]] +name = "fancycompleter" +version = "0.9.1" +description = "colorful TAB completion for Python prompt" +optional = false +python-versions = "*" +files = [ + {file = "fancycompleter-0.9.1-py3-none-any.whl", hash = "sha256:dd076bca7d9d524cc7f25ec8f35ef95388ffef9ef46def4d3d25e9b044ad7080"}, + {file = "fancycompleter-0.9.1.tar.gz", hash = "sha256:09e0feb8ae242abdfd7ef2ba55069a46f011814a80fe5476be48f51b00247272"}, +] + +[package.dependencies] +pyreadline = {version = "*", markers = "platform_system == \"Windows\""} +pyrepl = ">=0.8.2" + +[[package]] +name = "idna" +version = "3.7" +description = "Internationalized Domain Names in Applications (IDNA)" +optional = false +python-versions = ">=3.5" +files = [ + {file = "idna-3.7-py3-none-any.whl", hash = "sha256:82fee1fc78add43492d3a1898bfa6d8a904cc97d8427f683ed8e798d07761aa0"}, + {file = "idna-3.7.tar.gz", hash = "sha256:028ff3aadf0609c1fd278d8ea3089299412a7a8b9bd005dd08b9f8285bcb5cfc"}, +] + +[[package]] +name = "importlib-metadata" +version = "7.0.1" +description = "Read metadata from Python packages" +optional = false +python-versions = ">=3.8" +files = [ + {file = "importlib_metadata-7.0.1-py3-none-any.whl", hash = "sha256:4805911c3a4ec7c3966410053e9ec6a1fecd629117df5adee56dfc9432a1081e"}, + {file = "importlib_metadata-7.0.1.tar.gz", hash = 
"sha256:f238736bb06590ae52ac1fab06a3a9ef1d8dce2b7a35b5ab329371d6c8f5d2cc"}, +] + +[package.dependencies] +zipp = ">=0.5" + +[package.extras] +docs = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (<7.2.5)", "sphinx (>=3.5)", "sphinx-lint"] +perf = ["ipython"] +testing = ["flufl.flake8", "importlib-resources (>=1.3)", "packaging", "pyfakefs", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-mypy (>=0.9.1)", "pytest-perf (>=0.9.2)", "pytest-ruff"] + +[[package]] +name = "importlib-resources" +version = "6.1.1" +description = "Read resources from Python packages" +optional = false +python-versions = ">=3.8" +files = [ + {file = "importlib_resources-6.1.1-py3-none-any.whl", hash = "sha256:e8bf90d8213b486f428c9c39714b920041cb02c184686a3dee24905aaa8105d6"}, + {file = "importlib_resources-6.1.1.tar.gz", hash = "sha256:3893a00122eafde6894c59914446a512f728a0c1a45f9bb9b63721b6bacf0b4a"}, +] + +[package.dependencies] +zipp = {version = ">=3.1.0", markers = "python_version < \"3.10\""} + +[package.extras] +docs = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (<7.2.5)", "sphinx (>=3.5)", "sphinx-lint"] +testing = ["pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-mypy (>=0.9.1)", "pytest-ruff", "zipp (>=3.17)"] + +[[package]] +name = "jaraco-classes" +version = "3.3.1" +description = "Utility functions for Python class constructs" +optional = false +python-versions = ">=3.8" +files = [ + {file = "jaraco.classes-3.3.1-py3-none-any.whl", hash = "sha256:86b534de565381f6b3c1c830d13f931d7be1a75f0081c57dff615578676e2206"}, + {file = "jaraco.classes-3.3.1.tar.gz", hash = "sha256:cb28a5ebda8bc47d8c8015307d93163464f9f2b91ab4006e09ff0ce07e8bfb30"}, +] + +[package.dependencies] +more-itertools = "*" + +[package.extras] +docs = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (<7.2.5)", "sphinx (>=3.5)", "sphinx-lint"] +testing = ["pytest (>=6)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-mypy", "pytest-ruff (>=0.2.1)"] + +[[package]] +name = "jaraco-context" +version = "5.3.0" +description = "Useful decorators and context managers" +optional = false +python-versions = ">=3.8" +files = [ + {file = "jaraco.context-5.3.0-py3-none-any.whl", hash = "sha256:3e16388f7da43d384a1a7cd3452e72e14732ac9fe459678773a3608a812bf266"}, + {file = "jaraco.context-5.3.0.tar.gz", hash = "sha256:c2f67165ce1f9be20f32f650f25d8edfc1646a8aeee48ae06fb35f90763576d2"}, +] + +[package.dependencies] +"backports.tarfile" = {version = "*", markers = "python_version < \"3.12\""} + +[package.extras] +docs = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] +testing = ["portend", "pytest (>=6,!=8.1.1)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-mypy", "pytest-ruff (>=0.2.1)"] + +[[package]] +name = "jaraco-functools" +version = "4.0.1" +description = "Functools like those found in stdlib" +optional = false +python-versions = ">=3.8" +files = [ + {file = "jaraco.functools-4.0.1-py3-none-any.whl", hash = "sha256:3b24ccb921d6b593bdceb56ce14799204f473976e2a9d4b15b04d0f2c2326664"}, + {file = "jaraco_functools-4.0.1.tar.gz", hash = "sha256:d33fa765374c0611b52f8b3a795f8900869aa88c84769d4d1746cd68fb28c3e8"}, +] + +[package.dependencies] +more-itertools 
= "*" + +[package.extras] +docs = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (<7.2.5)", "sphinx (>=3.5)", "sphinx-lint"] +testing = ["jaraco.classes", "pytest (>=6)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-mypy", "pytest-ruff (>=0.2.1)"] + +[[package]] +name = "jeepney" +version = "0.8.0" +description = "Low-level, pure Python DBus protocol wrapper." +optional = false +python-versions = ">=3.7" +files = [ + {file = "jeepney-0.8.0-py3-none-any.whl", hash = "sha256:c0a454ad016ca575060802ee4d590dd912e35c122fa04e70306de3d076cce755"}, + {file = "jeepney-0.8.0.tar.gz", hash = "sha256:5efe48d255973902f6badc3ce55e2aa6c5c3b3bc642059ef3a91247bcfcc5806"}, +] + +[package.extras] +test = ["async-timeout", "pytest", "pytest-asyncio (>=0.17)", "pytest-trio", "testpath", "trio"] +trio = ["async_generator", "trio"] + +[[package]] +name = "keyring" +version = "25.5.0" +description = "Store and access your passwords safely." +optional = false +python-versions = ">=3.8" +files = [ + {file = "keyring-25.5.0-py3-none-any.whl", hash = "sha256:e67f8ac32b04be4714b42fe84ce7dad9c40985b9ca827c592cc303e7c26d9741"}, + {file = "keyring-25.5.0.tar.gz", hash = "sha256:4c753b3ec91717fe713c4edd522d625889d8973a349b0e582622f49766de58e6"}, +] + +[package.dependencies] +importlib-metadata = {version = ">=4.11.4", markers = "python_version < \"3.12\""} +importlib-resources = {version = "*", markers = "python_version < \"3.9\""} +"jaraco.classes" = "*" +"jaraco.context" = "*" +"jaraco.functools" = "*" +jeepney = {version = ">=0.4.2", markers = "sys_platform == \"linux\""} +pywin32-ctypes = {version = ">=0.2.0", markers = "sys_platform == \"win32\""} +SecretStorage = {version = ">=3.2", markers = "sys_platform == \"linux\""} + +[package.extras] +check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1)"] +completion = ["shtab (>=1.1.0)"] +cover = ["pytest-cov"] +doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] +enabler = ["pytest-enabler (>=2.2)"] +test = ["pyfakefs", "pytest (>=6,!=8.1.*)"] +type = ["pygobject-stubs", "pytest-mypy", "shtab", "types-pywin32"] + +[[package]] +name = "markdown-it-py" +version = "3.0.0" +description = "Python port of markdown-it. Markdown parsing, done right!" 
+optional = false +python-versions = ">=3.8" +files = [ + {file = "markdown-it-py-3.0.0.tar.gz", hash = "sha256:e3f60a94fa066dc52ec76661e37c851cb232d92f9886b15cb560aaada2df8feb"}, + {file = "markdown_it_py-3.0.0-py3-none-any.whl", hash = "sha256:355216845c60bd96232cd8d8c40e8f9765cc86f46880e43a8fd22dc1a1a8cab1"}, +] + +[package.dependencies] +mdurl = ">=0.1,<1.0" + +[package.extras] +benchmarking = ["psutil", "pytest", "pytest-benchmark"] +code-style = ["pre-commit (>=3.0,<4.0)"] +compare = ["commonmark (>=0.9,<1.0)", "markdown (>=3.4,<4.0)", "mistletoe (>=1.0,<2.0)", "mistune (>=2.0,<3.0)", "panflute (>=2.3,<3.0)"] +linkify = ["linkify-it-py (>=1,<3)"] +plugins = ["mdit-py-plugins"] +profiling = ["gprof2dot"] +rtd = ["jupyter_sphinx", "mdit-py-plugins", "myst-parser", "pyyaml", "sphinx", "sphinx-copybutton", "sphinx-design", "sphinx_book_theme"] +testing = ["coverage", "pytest", "pytest-cov", "pytest-regressions"] + +[[package]] +name = "mdurl" +version = "0.1.2" +description = "Markdown URL utilities" +optional = false +python-versions = ">=3.7" +files = [ + {file = "mdurl-0.1.2-py3-none-any.whl", hash = "sha256:84008a41e51615a49fc9966191ff91509e3c40b939176e643fd50a5c2196b8f8"}, + {file = "mdurl-0.1.2.tar.gz", hash = "sha256:bb413d29f5eea38f31dd4754dd7377d4465116fb207585f97bf925588687c1ba"}, +] + +[[package]] +name = "more-itertools" +version = "10.2.0" +description = "More routines for operating on iterables, beyond itertools" +optional = false +python-versions = ">=3.8" +files = [ + {file = "more-itertools-10.2.0.tar.gz", hash = "sha256:8fccb480c43d3e99a00087634c06dd02b0d50fbf088b380de5a41a015ec239e1"}, + {file = "more_itertools-10.2.0-py3-none-any.whl", hash = "sha256:686b06abe565edfab151cb8fd385a05651e1fdf8f0a14191e4439283421f8684"}, +] + +[[package]] +name = "pdbpp" +version = "0.10.3" +description = "pdb++, a drop-in replacement for pdb" +optional = false +python-versions = "*" +files = [ + {file = "pdbpp-0.10.3-py2.py3-none-any.whl", hash = "sha256:79580568e33eb3d6f6b462b1187f53e10cd8e4538f7d31495c9181e2cf9665d1"}, + {file = "pdbpp-0.10.3.tar.gz", hash = "sha256:d9e43f4fda388eeb365f2887f4e7b66ac09dce9b6236b76f63616530e2f669f5"}, +] + +[package.dependencies] +fancycompleter = ">=0.8" +pygments = "*" +wmctrl = "*" + +[package.extras] +funcsigs = ["funcsigs"] +testing = ["funcsigs", "pytest"] + +[[package]] +name = "pycparser" +version = "2.21" +description = "C parser in Python" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +files = [ + {file = "pycparser-2.21-py2.py3-none-any.whl", hash = "sha256:8ee45429555515e1f6b185e78100aea234072576aa43ab53aefcae078162fca9"}, + {file = "pycparser-2.21.tar.gz", hash = "sha256:e644fdec12f7872f86c58ff790da456218b10f863970249516d60a5eaca77206"}, +] + +[[package]] +name = "pygments" +version = "2.17.2" +description = "Pygments is a syntax highlighting package written in Python." +optional = false +python-versions = ">=3.7" +files = [ + {file = "pygments-2.17.2-py3-none-any.whl", hash = "sha256:b27c2826c47d0f3219f29554824c30c5e8945175d888647acd804ddd04af846c"}, + {file = "pygments-2.17.2.tar.gz", hash = "sha256:da46cec9fd2de5be3a8a784f434e4c4ab670b4ff54d605c4c2717e9d49c4c367"}, +] + +[package.extras] +plugins = ["importlib-metadata"] +windows-terminal = ["colorama (>=0.4.6)"] + +[[package]] +name = "pypandoc" +version = "1.14" +description = "Thin wrapper for pandoc." 
+optional = false +python-versions = ">=3.6" +files = [ + {file = "pypandoc-1.14-py3-none-any.whl", hash = "sha256:1315c7ad7fac7236dacf69a05b521ed2c3f1d0177f70e9b92bfffce6c023df22"}, + {file = "pypandoc-1.14.tar.gz", hash = "sha256:6b4c45f5f1b9fb5bb562079164806bdbbc3e837b5402bcf3f1139edc5730a197"}, +] + +[[package]] +name = "pyreadline" +version = "2.1" +description = "A python implmementation of GNU readline." +optional = false +python-versions = "*" +files = [ + {file = "pyreadline-2.1.zip", hash = "sha256:4530592fc2e85b25b1a9f79664433da09237c1a270e4d78ea5aa3a2c7229e2d1"}, +] + +[[package]] +name = "pyrepl" +version = "0.9.0" +description = "A library for building flexible command line interfaces" +optional = false +python-versions = "*" +files = [ + {file = "pyrepl-0.9.0.tar.gz", hash = "sha256:292570f34b5502e871bbb966d639474f2b57fbfcd3373c2d6a2f3d56e681a775"}, +] + +[[package]] +name = "python-dateutil" +version = "2.8.2" +description = "Extensions to the standard Python datetime module" +optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" +files = [ + {file = "python-dateutil-2.8.2.tar.gz", hash = "sha256:0123cacc1627ae19ddf3c27a5de5bd67ee4586fbdd6440d9748f8abb483d3e86"}, + {file = "python_dateutil-2.8.2-py2.py3-none-any.whl", hash = "sha256:961d03dc3453ebbc59dbdea9e4e11c5651520a876d0f4db161e8674aae935da9"}, +] + +[package.dependencies] +six = ">=1.5" + +[[package]] +name = "pytz" +version = "2024.1" +description = "World timezone definitions, modern and historical" +optional = false +python-versions = "*" +files = [ + {file = "pytz-2024.1-py2.py3-none-any.whl", hash = "sha256:328171f4e3623139da4983451950b28e95ac706e13f3f2630a879749e7a8b319"}, + {file = "pytz-2024.1.tar.gz", hash = "sha256:2a29735ea9c18baf14b448846bde5a48030ed267578472d8955cd0e7443a9812"}, +] + +[[package]] +name = "pywin32-ctypes" +version = "0.2.2" +description = "A (partial) reimplementation of pywin32 using ctypes/cffi" +optional = false +python-versions = ">=3.6" +files = [ + {file = "pywin32-ctypes-0.2.2.tar.gz", hash = "sha256:3426e063bdd5fd4df74a14fa3cf80a0b42845a87e1d1e81f6549f9daec593a60"}, + {file = "pywin32_ctypes-0.2.2-py3-none-any.whl", hash = "sha256:bf490a1a709baf35d688fe0ecf980ed4de11d2b3e37b51e5442587a75d9957e7"}, +] + +[[package]] +name = "requests" +version = "2.32.0" +description = "Python HTTP for Humans." 
+optional = false +python-versions = ">=3.8" +files = [ + {file = "requests-2.32.0-py3-none-any.whl", hash = "sha256:f2c3881dddb70d056c5bd7600a4fae312b2a300e39be6a118d30b90bd27262b5"}, + {file = "requests-2.32.0.tar.gz", hash = "sha256:fa5490319474c82ef1d2c9bc459d3652e3ae4ef4c4ebdd18a21145a47ca4b6b8"}, +] + +[package.dependencies] +certifi = ">=2017.4.17" +charset-normalizer = ">=2,<4" +idna = ">=2.5,<4" +urllib3 = ">=1.21.1,<3" + +[package.extras] +socks = ["PySocks (>=1.5.6,!=1.5.7)"] +use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"] + +[[package]] +name = "rich" +version = "13.9.4" +description = "Render rich text, tables, progress bars, syntax highlighting, markdown and more to the terminal" +optional = false +python-versions = ">=3.8.0" +files = [ + {file = "rich-13.9.4-py3-none-any.whl", hash = "sha256:6049d5e6ec054bf2779ab3358186963bac2ea89175919d699e378b99738c2a90"}, + {file = "rich-13.9.4.tar.gz", hash = "sha256:439594978a49a09530cff7ebc4b5c7103ef57baf48d5ea3184f21d9a2befa098"}, +] + +[package.dependencies] +markdown-it-py = ">=2.2.0" +pygments = ">=2.13.0,<3.0.0" +typing-extensions = {version = ">=4.0.0,<5.0", markers = "python_version < \"3.11\""} + +[package.extras] +jupyter = ["ipywidgets (>=7.5.1,<9)"] + +[[package]] +name = "secretstorage" +version = "3.3.3" +description = "Python bindings to FreeDesktop.org Secret Service API" +optional = false +python-versions = ">=3.6" +files = [ + {file = "SecretStorage-3.3.3-py3-none-any.whl", hash = "sha256:f356e6628222568e3af06f2eba8df495efa13b3b63081dafd4f7d9a7b7bc9f99"}, + {file = "SecretStorage-3.3.3.tar.gz", hash = "sha256:2403533ef369eca6d2ba81718576c5e0f564d5cca1b58f73a8b23e7d4eeebd77"}, +] + +[package.dependencies] +cryptography = ">=2.0" +jeepney = ">=0.6" + +[[package]] +name = "six" +version = "1.16.0" +description = "Python 2 and 3 compatibility utilities" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*" +files = [ + {file = "six-1.16.0-py2.py3-none-any.whl", hash = "sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254"}, + {file = "six-1.16.0.tar.gz", hash = "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926"}, +] + +[[package]] +name = "types-python-dateutil" +version = "2.8.19.20240106" +description = "Typing stubs for python-dateutil" +optional = false +python-versions = ">=3.8" +files = [ + {file = "types-python-dateutil-2.8.19.20240106.tar.gz", hash = "sha256:1f8db221c3b98e6ca02ea83a58371b22c374f42ae5bbdf186db9c9a76581459f"}, + {file = "types_python_dateutil-2.8.19.20240106-py3-none-any.whl", hash = "sha256:efbbdc54590d0f16152fa103c9879c7d4a00e82078f6e2cf01769042165acaa2"}, +] + +[[package]] +name = "typing-extensions" +version = "4.9.0" +description = "Backported and Experimental Type Hints for Python 3.8+" +optional = false +python-versions = ">=3.8" +files = [ + {file = "typing_extensions-4.9.0-py3-none-any.whl", hash = "sha256:af72aea155e91adfc61c3ae9e0e342dbc0cba726d6cba4b6c72c1f34e47291cd"}, + {file = "typing_extensions-4.9.0.tar.gz", hash = "sha256:23478f88c37f27d76ac8aee6c905017a143b0b1b886c3c9f66bc2fd94f9f5783"}, +] + +[[package]] +name = "urllib3" +version = "2.2.2" +description = "HTTP library with thread-safe connection pooling, file post, and more." 
+optional = false +python-versions = ">=3.8" +files = [ + {file = "urllib3-2.2.2-py3-none-any.whl", hash = "sha256:a448b2f64d686155468037e1ace9f2d2199776e17f0a46610480d311f73e3472"}, + {file = "urllib3-2.2.2.tar.gz", hash = "sha256:dd505485549a7a552833da5e6063639d0d177c04f23bc3864e41e5dc5f612168"}, +] + +[package.extras] +brotli = ["brotli (>=1.0.9)", "brotlicffi (>=0.8.0)"] +h2 = ["h2 (>=4,<5)"] +socks = ["pysocks (>=1.5.6,!=1.5.7,<2.0)"] +zstd = ["zstandard (>=0.18.0)"] + +[[package]] +name = "wmctrl" +version = "0.5" +description = "A tool to programmatically control windows inside X" +optional = false +python-versions = ">=2.7" +files = [ + {file = "wmctrl-0.5-py2.py3-none-any.whl", hash = "sha256:ae695c1863a314c899e7cf113f07c0da02a394b968c4772e1936219d9234ddd7"}, + {file = "wmctrl-0.5.tar.gz", hash = "sha256:7839a36b6fe9e2d6fd22304e5dc372dbced2116ba41283ea938b2da57f53e962"}, +] + +[package.dependencies] +attrs = "*" + +[package.extras] +test = ["pytest"] + +[[package]] +name = "zipp" +version = "3.19.1" +description = "Backport of pathlib-compatible object wrapper for zip files" +optional = false +python-versions = ">=3.8" +files = [ + {file = "zipp-3.19.1-py3-none-any.whl", hash = "sha256:2828e64edb5386ea6a52e7ba7cdb17bb30a73a858f5eb6eb93d8d36f5ea26091"}, + {file = "zipp-3.19.1.tar.gz", hash = "sha256:35427f6d5594f4acf82d25541438348c26736fa9b3afa2754bcd63cdb99d8e8f"}, +] + +[package.extras] +doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] +test = ["big-O", "jaraco.functools", "jaraco.itertools", "jaraco.test", "more-itertools", "pytest (>=6,!=8.1.*)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-ignore-flaky", "pytest-mypy", "pytest-ruff (>=0.2.1)"] + +[metadata] +lock-version = "2.0" +python-versions = "^3.8" +content-hash = "0e04cb6c380fa3066ca03f44309fd06d530c96efb5c42abe0d648af0c1d267a3" diff --git a/pyproject.toml b/pyproject.toml index a77e526..63879ea 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,7 +1,47 @@ -[build-system] -requires = [ - "setuptools>=42", - "wheel" +[tool.poetry] +name = "canvaslms" +version = "4.6" +description = "Command-line interface to Canvas LMS" +authors = ["Daniel Bosk "] +license = "MIT" +readme = "README.md" +repository = "https://github.com/dbosk/canvaslms" +classifiers = [ + "Programming Language :: Python :: 3", + "License :: OSI Approved :: MIT License", + "Operating System :: OS Independent", + "Development Status :: 4 - Beta", + "Environment :: Console", + "Intended Audience :: Education", + "Intended Audience :: Science/Research", + "Topic :: Utilities" ] -build-backend = "setuptools.build_meta" +include = ["*/**/*.py"] + +[tool.poetry.urls] +"Bug Tracker" = "https://github.com/dbosk/canvaslms/issues" +"Releases" = "https://github.com/dbosk/canvaslms/releases" + +[tool.poetry.scripts] +canvaslms = "canvaslms.cli:main" + +[tool.poetry.dependencies] +python = "^3.8" +appdirs = "^1.4.4" +argcomplete = ">=2,<4" +cachetools = "^5.3.1" +canvasapi = "^3.2.0" +keyring = ">=24.2,<26.0" +pypandoc = "^1.11" +arrow = "^1.2.3" +rich = "^13.0.0" + +[tool.poetry.dev-dependencies] + +[tool.poetry.group.dev.dependencies] +pdbpp = "^0.10.3" + +[build-system] +requires = ["poetry-core>=1.0.0"] +build-backend = "poetry.core.masonry.api" diff --git a/setup.py b/setup.py deleted file mode 100644 index 96cbeb4..0000000 --- a/setup.py +++ /dev/null @@ -1,45 +0,0 @@ -from setuptools import setup, find_packages - -setup( - name = "canvaslms", - version 
= "1.6", - author = "Daniel Bosk", - author_email = "dbosk@kth.se", - description = "Command-line interface for Canvas LMS", - long_description = open("README.md").read(), - long_description_content_type = "text/markdown", - url = "https://github.com/dbosk/canvaslms", - project_urls = { - "Bug Tracker": "https://github.com/dbosk/canvaslms/issues", - "Releases": "https://github.com/dbosk/canvaslms/releases" - }, - classifiers = [ - "Programming Language :: Python :: 3", - "License :: OSI Approved :: MIT License", - "Operating System :: OS Independent", - "Development Status :: 4 - Beta", - "Environment :: Console", - "Intended Audience :: Education", - "Intended Audience :: Science/Research", - "Topic :: Utilities" - ], - package_dir = { - "": "src" - }, - packages = find_packages(where="src"), - entry_points = { - "console_scripts": [ - "canvaslms = canvaslms.cli:main" - ] - }, - data_files = [ - ("/etc/bash_completion.d", ["canvaslms.bash"]) - ], - python_requires = ">=3.8", - install_requires = [ - "appdirs>=1.4.4", - "argcomplete>=1.12.3", - "canvasapi>=2.0.0", - "pypandoc>=1.6.4" - ] -) diff --git a/src/canvaslms/Makefile b/src/canvaslms/Makefile index 16d1ccb..5104bee 100644 --- a/src/canvaslms/Makefile +++ b/src/canvaslms/Makefile @@ -1,4 +1,6 @@ SUBDIR+= cli +SUBDIR+= grades +SUBDIR+= hacks INCLUDE_MAKEFILES=../../makefiles include ${INCLUDE_MAKEFILES}/subdir.mk diff --git a/src/canvaslms/cli/.gitignore b/src/canvaslms/cli/.gitignore index 536693f..45f1e61 100644 --- a/src/canvaslms/cli/.gitignore +++ b/src/canvaslms/cli/.gitignore @@ -18,3 +18,5 @@ grade.tex grade.py login.py login.tex +results.py +results.tex diff --git a/src/canvaslms/cli/Makefile b/src/canvaslms/cli/Makefile index 8af4f7a..f12b7b1 100644 --- a/src/canvaslms/cli/Makefile +++ b/src/canvaslms/cli/Makefile @@ -1,29 +1,25 @@ -NOWEAVEFLAGS.tex= -n -delay -t2 NOTANGLEFLAGS.py= +MODULES+= __init__.py cli.tex +MODULES+= login.py login.tex +MODULES+= courses.py courses.tex +MODULES+= users.py users.tex +MODULES+= assignments.py assignments.tex +MODULES+= submissions.py submissions.tex +MODULES+= grade.py grade.tex +MODULES+= results.py results.tex .PHONY: all -all: __init__.py cli.tex -all: login.py login.tex -all: courses.py courses.tex -all: users.py users.tex -all: assignments.py assignments.tex -all: submissions.py submissions.tex -all: grade.py grade.tex +all: ${MODULES} +.INTERMEDIATE: cli.py __init__.py: cli.py mv $< $@ .PHONY: clean clean: - ${RM} cli.tex cli.py __init__.py - ${RM} login.py login.tex - ${RM} courses.tex courses.py - ${RM} users.tex users.py - ${RM} assignments.tex assignments.py - ${RM} submissions.tex submissions.py - ${RM} grade.tex grade.py + ${RM} ${MODULES} INCLUDE_MAKEFILES=../../../makefiles diff --git a/src/canvaslms/cli/assignments.nw b/src/canvaslms/cli/assignments.nw index 38fd196..ba9cb69 100644 --- a/src/canvaslms/cli/assignments.nw +++ b/src/canvaslms/cli/assignments.nw @@ -13,12 +13,16 @@ The [[assignment]] command lists information about a given assignment. We outline the module: <>= import argparse +import canvasapi import canvaslms.cli.courses as courses import canvaslms.hacks.canvasapi import csv -import pydoc +import json +import os import pypandoc import re +import rich.console +import rich.markdown import sys <> @@ -38,6 +42,99 @@ def add_assignment_command(subp): @ +\section{Selecting assignments on the command line} + +We now provide two functions that sets up the options for selecting an +assignment on the command line. 
+These are used by the [[assignment_command]] and [[assignments_command]] +functions, but can also be used by other commands. + +When we select assignments, we have the option to only select those assignments +with ungraded submissions. +However, this option is not always relevant, so we provide a way to disable it. +But to select an assignment, we must first select a course. + +If the [[required]] option is set, we want to make all (relevant) options +required. +<>= +def add_assignment_option(parser, ungraded=True, required=False): + try: + courses.add_course_option(parser, required=required) + except argparse.ArgumentError: + pass + + <> + +def process_assignment_option(canvas, args): + course_list = courses.process_course_option(canvas, args) + <> + return list(assignments_list) +@ + +As mentioned above, we want to be able to select only assignments with ungraded +submissions. +Other than that, we want to select assignments in two alternative ways: by +assignment name or assignment group that an assignment belongs to. +In summary, we want the following options: +\begin{itemize} +\item [[-U]] will filter assignments that have ungraded submissions. +\item [[-a]] will take a regex for assignments. +\item [[-A]] will take a regex for assignment groups. + Then we can return all assignments in those assignment groups. +\end{itemize} +These regular expressions match either the name or the Canvas identifier. +This lets us add the following arguments. +Remember, we add only the ungraded option if that was requested. +Also, if we want the required version, we want to require either an assignment +or an assignment group to be specified. +<>= +if ungraded: + parser.add_argument("-U", "--ungraded", action="store_true", + help="Filter only assignments with ungraded submissions.") + +parser = parser.add_mutually_exclusive_group(required=required) + +parser.add_argument("-a", "--assignment", + required=False, default=".*", + help="Regex matching assignment title or Canvas identifier, " + "default: '.*'") + +parser.add_argument("-A", "--assignment-group", + required=False, default="", + help="Regex matching assignment group title or Canvas identifier.") +@ + +Now we iterate over the [[course_list]] to get to the assignment groups to then +filter out the assignments. +We must get all assignments for a course and all assignment groups. +Then we can filter out the matching assignments. +<>= +assignments_list = [] + +for course in course_list: + try: + ungraded = args.ungraded + except AttributeError: + ungraded = False + + all_assignments = list(filter_assignments([course], + args.assignment, + ungraded=ungraded)) + + try: + assignm_grp_regex = args.assignment_group + except AttributeError: + print("default to .* for group") + assignm_grp_regex = ".*" + + assignment_groups = filter_assignment_groups(course, assignm_grp_regex) + + for assignment_group in assignment_groups: + assignments_list += list(filter_assignments_by_group( + assignment_group, all_assignments)) +@ + + \section{The [[assignments]] subcommand and its options} We add the subparser for [[assignments]]. @@ -45,63 +142,37 @@ We add the subparser for [[assignments]]. assignments_parser = subp.add_parser("assignments", help="Lists assignments of a course", description="Lists assignments of a course. 
" - "Output, CSV-format: ") + <>) assignments_parser.set_defaults(func=assignments_command) -courses.add_course_option(assignments_parser) -<> -<> +add_assignment_option(assignments_parser) @ Now, that [[assignments_command]] function must take three arguments: [[config]], [[canvas]] and [[args]]. -We use [[process_course_option]] to parse the options that we added with the -[[add_course_option]] function above. +We use [[process_assignment_option]] to parse the options that we added with +the [[add_assignment_option]] function above. <>= def assignments_command(config, canvas, args): output = csv.writer(sys.stdout, delimiter=args.delimiter) - course_list = courses.process_course_option(canvas, args) - + assignment_list = process_assignment_option(canvas, args) <> @ -\subsection{Regex for filtering assignment groups} - -We want to be able to list only assignments in particular assignment groups. -<>= -assignments_parser.add_argument("regex", - default=".*", nargs="?", - help="Regex for filtering assignment groups, default: '.*'") -@ - -\subsection{Selecting the type of assignments} - -We also want to be able to filter out the type of assignments. -For now, we focus on all or ungraded. -<>= -assignments_parser.add_argument("-u", "--ungraded", action="store_true", - help="Show only ungraded assignments.") -@ We don't need to do any particular processing for this option. - - -\subsection{Filter and output the list of assignments} - -We then simply call the list-assignments function with the [[courses_list]] -object as a parameter. -Then we will print the most useful attributes (identifiers) of an assignment in -CSV format. +We then simply get the filtered list from the processing of the assignment +options, stored in [[assignment_list]] above. +Then we will print the most useful attributes. +<>= +"Output, CSV-format: " +" " +" " <>= -for course in course_list: - if args.ungraded: - all_assignments = list(list_ungraded_assignments([course])) - else: - all_assignments = list(list_assignments([course])) - - assignment_groups = filter_assignment_groups(course, args.regex) - - for assignment_group in assignment_groups: - assignments = filter_assignments_by_group( - assignment_group, all_assignments) - - for assignment in assignments: - output.writerow([assignment_group.name, assignment.name]) +for assignment in assignment_list: + output.writerow([ + assignment.course.course_code, + assignment.assignment_group.name, + assignment.name, + assignment.due_at, + assignment.unlock_at, + assignment.lock_at + ]) @ @@ -122,9 +193,10 @@ assignment group. <>= def filter_assignments_by_group(assignment_group, assignments): """Returns elements in assignments that are part of assignment_group""" - return filter( - lambda assignment: assignment.assignment_group_id == assignment_group.id, - assignments) + for assignment in assignments: + if assignment.assignment_group_id == assignment_group.id: + assignment.assignment_group = assignment_group + yield assignment @ @@ -146,68 +218,130 @@ the options that we added with the [[add_course_option]] and [[add_assignment_option]] functions above. 
<>= def assignment_command(config, canvas, args): + console = rich.console.Console() + assignment_list = process_assignment_option(canvas, args) for assignment in assignment_list: - pydoc.pager(format_assignment(assignment)) + output = format_assignment(assignment) + + if sys.stdout.isatty(): + <> + with console.pager(styles=styles, links=True): + console.print(rich.markdown.Markdown(output, + code_theme="manni")) + else: + print(output) +@ Note that we use the theme [[manni]] for the code, as this works in both dark +and light terminals. + +\subsection{Check if we should use styles} + +By default, [[rich.console.Console]] uses the [[pydoc.pager]], which uses the +system pager (as determined by environment variables etc.). +The default usually can't handle colours, so [[rich]] doesn't use colours when +paging. +We want to check if [[less -r]] or [[less -R]] is set as the pager, in that +case we can use styles. +<>= +pager = "" +if "MANPAGER" in os.environ: + pager = os.environ["MANPAGER"] +elif "PAGER" in os.environ: + pager = os.environ["PAGER"] + +styles = False +if "less" in pager and ("-R" in pager or "-r" in pager): + styles = True @ -\section{Selecting assignments on the command line} - -We now provide two functions that sets up the options for selecting an -assignment on the command line. -These are used above by the [[assignment_command]], but can also be used by -other commands. +\section{Formatting assignments} -To select an assignment, we must first select a course. +Sometimes we want to format the contents of an assignment in the terminal. +The assignment contents given by Canvas is HTML, we want to pipe that through +[[pandoc]] and convert it to markdown. <>= -def add_assignment_option(parser): - try: - courses.add_course_option(parser) - except argparse.ArgumentError: - pass +def format_assignment(assignment): + """Returns an assignment formatted for the terminal""" + text = f""" +<> - <> +""" -def process_assignment_option(canvas, args): - course_list = courses.process_course_option(canvas, args) - <> - return list(assignments_list) -@ + if assignment.description: + instruction = pypandoc.convert_text( + assignment.description, "md", format="html") + text += f"## Instruction\n\n{instruction}\n\n" + <> + else: + <> + text += f"## Assignment data\n\n```json\n{format_json(assignment)}\n```\n" -We add one option [[-a]] to select an assignment. -This is a regular expression which matches the assignment name and identifier. -<>= -parser.add_argument("-a", "--assignment", - required=False, default=".*", - help="Regex matching assignment title or Canvas identifier, " - "default: '.*'") + return text @ -Now we can use the [[course_list]] and [[filter_assignments]] to filter out the -desired assignments. -<>= -assignments_list = filter_assignments(course_list, args.assignment) +\subsection{Assignment metadata} + +Now let's look at the metadata to add. +<>= +# {assignment.name} + +## Metadata + +- Unlocks: {assignment.unlock_at if assignment.unlock_at else None} +- Due: {assignment.due_at if assignment.due_at else None} +- Locks: {assignment.lock_at if assignment.lock_at else None} +- Ungraded submissions: {assignment.needs_grading_count} +- Submission type: {assignment.submission_types} +- URL: {assignment.html_url} +- Submissions: {assignment.submissions_download_url} @ +\subsection{Assignment rubric} -\section{Formatting assignments} +We want to format the rubric as well. 
+<>= +try: + text += f"## Rubric\n\n{format_rubric(assignment.rubric)}\n\n" +except AttributeError: + pass +@ -Sometimes we want to format the contents of an assignment in the terminal. -The assignment contents given by Canvas is HTML, we want to pipe that through -[[pandoc]] and convert it to markdown. +We'll do this with [[format_rubric]]. +It should output a markdown representation of the rubric. <>= -def format_assignment(assignment): - """Returns an assignment formatted for the terminal""" - instruction = pypandoc.convert_text( - assignment.description, "md", format="html") - - return f"""# {assignment.name} +def format_rubric(rubric): + """ + Returns a markdown representation of the rubric + """ + if not rubric: + return "No rubric set." + + text = "" + for criterion in rubric: + text += f"- {criterion['description']}\n" + text += f" - Points: {criterion['points']}\n" + text += f" - Ratings: " + text += "; ".join([ + f"{rating['description'].strip()} ({rating['points']})" + for rating in criterion["ratings"] + ]) + "\n" + text += f"\n```\n{criterion['long_description']}\n```\n\n" + + return text +@ -{instruction} +\subsection{Assignment data as raw JSON} -URL: {assignment.html_url} -Submissions: {assignment.submissions_download_url}""" +We also want to format the assignment data as JSON. +We must extract all attributes from the assignment object. +<>= +def format_json(assignment): + """Returns a JSON representation of the assignment""" + return json.dumps({ + key: str(value) for key, value in assignment.__dict__.items() + if not key.startswith("_") + }, indent=2) @ @@ -224,25 +358,47 @@ We provide the following functions: We return the assignments for a list of courses, since we can match several courses with a regular expression (using [[filter_courses]]). <>= -def list_assignments(courses): - for course in courses: - for assignment in course.get_assignments(): - assignment.course = course - yield assignment +def list_assignments(assignments_containers, ungraded=False): + """Lists all assignments in all assignments containers (courses or + assignement groups)""" + for container in assignments_containers: + if isinstance(container, canvasapi.course.Course): + course = container + elif isinstance(container, canvasapi.assignment.AssignmentGroup): + assignment_group = container + course = assignment_group.course + + if ungraded: + assignments = container.get_assignments(bucket="ungraded") + else: + assignments = container.get_assignments() + + for assignment in assignments: + try: + assignment.course = course + except NameError: + pass + + try: + assignment.assignment_group = assignment_group + except NameError: + pass -def list_ungraded_assignments(courses): - for course in courses: - for assignment in course.get_assignments(bucket="ungraded"): - assignment.course = course yield assignment + +def list_ungraded_assignments(assignments_containers): + return list_assignments(assignments_containers, ungraded=True) @ We also want to filter out assignments on the title based on regex. +We also take an optional default argument to indicate whether we only want +ungraded assignments. 
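For orientation, here is a hypothetical use of the function defined in the chunk below (illustrative only; [[my_courses]] stands for a list of [[Course]] objects obtained elsewhere, \eg via the [[courses]] module):
\begin{minted}{python}
# List all ungraded assignments whose names start with "Lab", assuming
# my_courses is a list of canvasapi Course objects fetched elsewhere.
for assignment in filter_assignments(my_courses, r"^Lab", ungraded=True):
  # list_assignments() attaches the course to each yielded assignment.
  print(assignment.course.course_code, assignment.name)
\end{minted}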
<>= -def filter_assignments(courses, regex): - """Returns all assignments from courses whose title matches regex""" +def filter_assignments(assignments_containers, regex, ungraded=False): + """Returns all assignments from assignments_container whose + title matches regex""" p = re.compile(regex) - for assignment in list_assignments(courses): + for assignment in list_assignments(assignments_containers, ungraded=ungraded): if p.search(assignment.name): yield assignment elif p.search(str(assignment.id)): diff --git a/src/canvaslms/cli/cli.nw b/src/canvaslms/cli/cli.nw index 5beb438..f788c71 100644 --- a/src/canvaslms/cli/cli.nw +++ b/src/canvaslms/cli/cli.nw @@ -33,7 +33,9 @@ import appdirs import argcomplete, argparse from canvasapi import Canvas import canvaslms.cli.login +import json import os +import pathlib import sys <> @@ -86,12 +88,16 @@ The function gets the configuration file as an argument. We don't return any error if the file doesn't exist. We leave that to the caller to determine what to do if the configuration is empty. +Likewise, we issue a warning if the configuration file is malformed, \ie it +can't be processed as valid JSON. <>= try: with open(config_file, "r") as file: config.update(json.load(file)) except FileNotFoundError: pass +except json.decoder.JSONDecodeError as err: + warn(f"config file is malformed: {err}") @ We can read the credentials from the environment, this takes precedence over @@ -109,13 +115,24 @@ if "CANVAS_TOKEN" in os.environ: At times, we might want to update the configuration file. When we update the config file, we must ensure that any other part of the -configuration file is kept. -So we write the [[config]] dictionary to the config file. +configuration file is kept, so we write the [[config]] dictionary to the config +file. + +If we get a [[FileNotFoundError]], that means the config directory doesn't +exist and we must create it. <>= def update_config_file(config, config_file): """Updates the config file by writing the config dictionary back to it""" - with open(config_file, "w") as fd: - json.dump(config, fd) + try: + <> + except FileNotFoundError: + <> + <> +<>= +os.makedirs(pathlib.PurePath(config_file).parent) +<>= +with open(config_file, "w") as fd: + json.dump(config, fd) @ @@ -142,13 +159,6 @@ args = argp.parse_args() config = read_configuration(args.config_file) -hostname, token = canvaslms.cli.login.load_credentials(config) - -if not (hostname and token): - err("No hostname or token, rum 'canvaslms login'") - -canvas = Canvas(hostname, token) - <> @ @@ -182,26 +192,62 @@ argp.add_argument("-d", "--delimiter", \subsection{The subcommands} Each subcommand will have its own module in the package. -Each such module must have a function [[add_command]] that takes teh -[[subp]] parser as an argument and adds its command and options to that. +Each such module must have a function [[add_command]] that takes the [[subp]] +parser as an argument and adds its command and options to that. +For example, the [[login]] command: +The \texttt{login} command is located in [[canvaslms.cli.login]]. +<>= +import canvaslms.cli.login +<>= +canvaslms.cli.login.add_command(subp) +@ In short, each [[add_command]] must add a subparser ([[subp.add_parser]]) that will set the [[func]] attribute. Then we can execute the correct function and let that function check the remaining arguments. 
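To make the protocol concrete, a hypothetical extra subcommand module might look roughly like this (a sketch only; the command and module names are invented here, and the three-argument convention for command functions is spelled out right below):
\begin{minted}{python}
# Hypothetical module canvaslms/cli/whoami.py -- illustrative only.
def add_command(subp):
  """Adds the whoami command to the argparse subparser subp"""
  whoami_parser = subp.add_parser("whoami",
    help="Prints the name of the logged-in Canvas user",
    description="Prints the current user's name, "
      "mostly useful for checking that login works.")
  whoami_parser.set_defaults(func=whoami_command)

def whoami_command(config, canvas, args):
  """Takes the config dict, a Canvas object and the parsed CLI args"""
  print(canvas.get_current_user())
\end{minted}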
+ +Each command function must take three arguments: +\begin{enumerate} +\item the configuration, +\item a Canvas object to use for interaction with Canvas, +\item the processed command-line arguments. +\end{enumerate} +This means that we must read the credentials to create the Canvas object. +One exception is the [[login]] command: this command doesn't need the Canvas +object as it will be run before there are credentials. <>= +if args.func != canvaslms.cli.login.login_command: + <> +else: + canvas = None + if args.func: args.func(config, canvas, args) @ -\paragraph{The \texttt{login} command} +To create the Canvas object, we must read the credentials using the +[[canvaslms.cli.login]] module. +If there are no credentials, we give an error about running the [[login]] +command first. +Otherwise, we create the Canvas object named [[canvas]]. +<>= +hostname, token = canvaslms.cli.login.load_credentials(config) -The \texttt{login} command is located in [[canvaslms.cli.login]]. -<>= -import canvaslms.cli.login +if not (hostname and token): + err(1, "No hostname or token, run `canvaslms login`") + +<> + +canvas = Canvas(hostname, token) @ -<>= -canvaslms.cli.login.add_command(subp) + +Now, we must specify a URL to the Canvas server, not actually a hostname. +If the hostname already contains \enquote{https}, fine; if not, we should add +it. +<>= +if "://" not in hostname: + hostname = f"https://{hostname}" @ \paragraph{The \texttt{courses} command} @@ -210,7 +256,6 @@ The \texttt{courses} command resides in [[canvaslms.cli.courses]] and supports the protocol above. <>= import canvaslms.cli.courses -@ <>= canvaslms.cli.courses.add_command(subp) @ @@ -220,7 +265,6 @@ canvaslms.cli.courses.add_command(subp) The \texttt{users} command resides in [[canvaslms.cli.users]]. <>= import canvaslms.cli.users -@ <>= canvaslms.cli.users.add_command(subp) @ @@ -232,7 +276,6 @@ There are two commands related to assignments in the Both are added by the same function call. <>= import canvaslms.cli.assignments -@ <>= canvaslms.cli.assignments.add_command(subp) @ @@ -244,7 +287,6 @@ There are two commands related to submissions in the Both are added by one function call. <>= import canvaslms.cli.submissions -@ <>= canvaslms.cli.submissions.add_command(subp) @ @@ -254,7 +296,15 @@ canvaslms.cli.submissions.add_command(subp) The \texttt{grade} command is located in [[canvaslms.cli.grade]]. <>= import canvaslms.cli.grade -@ <>= canvaslms.cli.grade.add_command(subp) @ + +\paragraph{The \texttt{results} command} + +The \texttt{results} command is located in [[canvaslms.cli.results]]. +<>= +import canvaslms.cli.results +<>= +canvaslms.cli.results.add_command(subp) +@ diff --git a/src/canvaslms/cli/courses.nw b/src/canvaslms/cli/courses.nw index c853310..8dd486e 100644 --- a/src/canvaslms/cli/courses.nw +++ b/src/canvaslms/cli/courses.nw @@ -7,8 +7,10 @@ be used by other subcommands. We outline the module: <>= +import arrow import canvaslms.hacks.canvasapi import csv +import datetime import re import sys @@ -19,7 +21,7 @@ def add_command(subp): courses_parser = subp.add_parser("courses", help="Lists your courses", description="Lists your courses. Output, CSV-format: " - " ") + "<>") courses_parser.set_defaults(func=courses_command) <> @ @@ -32,6 +34,14 @@ courses_parser.add_argument("regex", help="Regex for filtering courses, default: '.*'") @ +We also take an option [[--all]] to show all courses. +By default we just want to show the courses that haven't ended. 
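Concretely, we'll treat a course as current if it has no start date at all, if it has no end date but started less than a year ago, or if its end date lies in the future. The chunk further below implements this as a one-line lambda; the following spelled-out version is only a sketch for readability, not the code that is used:
\begin{minted}{python}
import arrow

def is_current_course(course):
  """True if the course hasn't ended; mirrors the lambda used below."""
  if course.start_at is None:
    # No start date at all: include the course.
    return True
  if course.end_at is None:
    # No end date: include it if it started less than a year ago.
    return arrow.get(course.start_at) > arrow.now().shift(years=-1)
  # Otherwise, include it only if the end date is still in the future.
  return arrow.get(course.end_at) > arrow.now()
\end{minted}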
+<>= +courses_parser.add_argument("-a", "--all", + action="store_true", default=False, + help="List all courses, by default list only current courses.") +@ + \section{Producing a list of courses, [[courses_command]]}% \label{list-courses-function} @@ -44,23 +54,54 @@ def courses_command(config, canvas, args): output = csv.writer(sys.stdout, delimiter=args.delimiter) course_list = filter_courses(canvas, args.regex) + + <> + for course in course_list: <> @ We will cover the set up and processing of the options in the following sections. +\section{Filter the list of courses} + +We want to filter the courses depending on the [[--all]] argument. +<>= +if not args.all: + is_current_course = lambda x: \ + x.start_at is None \ + or (x.end_at is None and \ + arrow.get(x.start_at)-arrow.now().shift(years=-1) \ + > datetime.timedelta(0)) \ + or x.end_at is not None and arrow.get(x.end_at) > arrow.now() + course_list = filter(is_current_course, course_list) +@ + + \section{Output the course data} We have the course data in a [[course]] object. Now we just print the interesting data about it. +<>= +* <>= -output.writerow([ +row = [] +if args.id: + row.append(course.id) +row.extend([ course.course_code, course.name, course.start_at, course.end_at ]) +output.writerow(row) +@ + +We add the option [[--id]] to show the Canvas ID of the course. +<>= +courses_parser.add_argument("-i", "--id", + action="store_true", default=False, + help="Include Canvas ID of the course as first column.") @ @@ -97,8 +138,10 @@ name or Canvas ID against a regular expression. We provide a function that can be used by other subcommands to set up options for selecting a course in this way. +If the [[required]] option is specified, we want the course option to be +required (\eg for the [[grade]] command). <>= -def add_course_option(parser): +def add_course_option(parser, required=False): """Adds the -c option to argparse parser to filter out courses""" <> @@ -111,7 +154,7 @@ def process_course_option(canvas, args): We need a course, so we require a regular expression that matches the course title, course code or Canvas identifier. <>= -parser.add_argument("-c", "--course", required=False, default=".*", +parser.add_argument("-c", "--course", required=required, default=".*", help="Regex matching courses on title, course code or Canvas ID, " "default: '.*'") @ diff --git a/src/canvaslms/cli/grade.nw b/src/canvaslms/cli/grade.nw index 916d20b..32b6742 100644 --- a/src/canvaslms/cli/grade.nw +++ b/src/canvaslms/cli/grade.nw @@ -5,7 +5,7 @@ submission. We outline the module: <>= -import canvaslms.cli.submissions as submissions +from canvaslms.cli import submissions, users import webbrowser <> @@ -20,6 +20,7 @@ def add_command(subp): We add the subparser for [[grade]]. We must identify submissions, for this we use the options provided by [[add_submission_options]] (\cref{submission-options}). +We will add [[required=True]] so that we get all options as required. <>= grade_parser = subp.add_parser("grade", help="Grades assignments (hic sunt dracones!)", @@ -27,7 +28,7 @@ grade_parser = subp.add_parser("grade", "the regex matching is very powerful, " "be certain that you match what you think!") grade_parser.set_defaults(func=grade_command) -submissions.add_submission_options(grade_parser) +submissions.add_submission_options(grade_parser, required=True) <> @ Now, that [[grade_command]] function must take three arguments: [[config]], [[canvas]] and [[args]]. 
@@ -48,10 +49,13 @@ We introduce two options: This can be almost anything: Canvas accepts points, percentages or letter grades and will convert accordingly. \item [[-m]] or [[--message]], which sets a comment. +\item [[-v]] or [[--verbose]], which will cause [[canvaslms]] to print what +grade is set for which assignment and which student. \end{itemize} -Both options are optional. +Both [[-g]] and [[-m]] are optional. If neither is given, the SpeedGrader page of each submission is opened in the web browser. +In that case, [[-v]] make not much sense. <>= grade_options = grade_parser.add_argument_group( "arguments to set the grade and/or comment, " @@ -82,6 +86,34 @@ if not args.grade and not args.message: webbrowser.open(submissions.speedgrader(submission)) else: for submission in submission_list: + <> submission.edit(**results) @ +\subsection{Verbose output when setting grades} + +Now, we want a verbosity option to control whether or not to print what's +happening (even for non-errors). +Using the option turns verbose mode on, it's off by default. +<>= +grade_parser.add_argument("-v", "--verbose", + action="store_true", default=False, + help="Increases verbosity, prints what grade is set " + "for which assignment for which student.") +<>= +if args.verbose: + id = f"{submission.assignment.course.course_code} " \ + f"{submission.assignment.name} {submission.user}" + + event = "" + try: + event += f" grade = {args.grade}" + except: + pass + try: + event += f" msg = '{args.message}'" + except: + pass + + print(f"{id}:{event}") + diff --git a/src/canvaslms/cli/login.nw b/src/canvaslms/cli/login.nw index dc23f7e..2324388 100644 --- a/src/canvaslms/cli/login.nw +++ b/src/canvaslms/cli/login.nw @@ -1,10 +1,34 @@ -\chapter{Managing credentials} +\chapter{Managing credentials: the \texttt{login} command} We want a subcommand to handle the user's credentials for accessing Canvas. In particular, we need the user to be able to change the credentials in the system keyring, \eg in case the user wrote the wrong password. The rest we don't need to do much about, merely point out the possibilities to the user. +We summarize it like this: +<>= +Manages the user's Canvas login credentials. There are three ways to supply the +login credentials, in order of priority: + +1) Through the system keyring: Just run `canvaslms login` and you'll be guided + to enter the credentials (server name and token) and they will be stored in + the keyring. + +2) Through the environment: Just set the environment variables CANVAS_SERVER + and CANVAS_TOKEN. + +3) Through the configuration file: Just write + + {{ + "canvas": {{ + "host": "the actual hostname", + "access_token": "the actual token" + }} + }} + + to the file {dirs.user_config_dir}/config.json (default, or use the -f + option, see `canvaslms -h`). +@ We outline the module: <>= @@ -22,41 +46,20 @@ def add_command(subp): """Adds the login command to argparse parser""" login_parser = subp.add_parser("login", help="Manage login credentials", - description=""" -Manages the user's Canvas login credentials. There are three ways to supply the -login credentials, in order of priority: - -1) Through the system keyring: Just run `canvaslms login` and you'll be guided - to enter the credentials (server name and token) and they will be stored in - the keyring. - -2) Through the environment: Just set the environment variables CANVAS_SERVER - and CANVAS_TOKEN. 
- -3) Through the configuration file: Just write - - { - "canvas": { - "host": "the actual hostname", - "access_token": "the actual token" - } - } - - to the file """ + dirs.user_config_dir + """/config.json (default, or use - the -f option). + description=f""" +<> """) - login_parser.set_defaults(func=update_credentials_in_keyring) + login_parser.set_defaults(func=login_command) @ -\section{Updating the credentials in the keyring} +\section{The \texttt{login} command function} As stated, if the subcommand is run, we should update the credentials in the keyring. -If we run this subcommand, also want to clear the cache; otherwise, the cache -will keep the outdated credentials. +Or the config file, if there is no keyring. <>= -def update_credentials_in_keyring(config, canvas, args): +def login_command(config, canvas, args): """Guides the user to update credentials""" print("Enter the hostname for Canvas, " @@ -64,38 +67,56 @@ def update_credentials_in_keyring(config, canvas, args): hostname = input("Canvas hostname: ") print(f""" -Open - - https://{hostname}/profile/settings - -in your browser. Scroll down to approved integrations and click the -'+ New access token' button. Fill in the required data and click the -'Generate token' button. Enter the token here. +<> """) token = input("Canvas token: ") try: - keyring.set_password("canvaslms", "hostname", hostname) - keyring.set_password("canvaslms", "token", token) + <> except: canvaslms.cli.warn(f"You don't have a working keyring. " - "Will write hostname and token written to config file " - "{args.config_file}.") + f"Will write hostname and token to config file " + f"{args.config_file}.") config["canvas"]["host"] = hostname config["canvas"]["access_token"] = token canvaslms.cli.update_config_file(config, args.config_file) + +<>= +Open + + https://{hostname}/profile/settings + +in your browser. Scroll down to approved integrations and click the +'+ New access token' button. Fill in the required data and click the +'Generate token' button. Enter the token here. @ +Now, to keep this data in the keyring, we simply use [[canvaslms]] as the +service, then we store the hostname as the password of user +\enquote{hostname}. +And the same with the token. +<>= +keyring.set_password("canvaslms", "hostname", hostname) +keyring.set_password("canvaslms", "token", token) +@ + +When we need these again, we simply load them. +<>= +hostname = keyring.get_password("canvaslms", "hostname") +token = keyring.get_password("canvaslms", "token") +@ + + \section{Loading user credentials} The [[load_credentials]] function will try to get the user's LADOK credentials. There are three locations: \begin{enumerate} \item the system keyring, -\item the environment variables [[LADOK_USER]] and [[LADOK_PASS]], +\item the environment variables [[CANVAS_SERVER]] and [[CANVAS_TOKEN]], \item the configuration file. \end{enumerate} They are given the priority they are listed in above. @@ -107,8 +128,7 @@ If all fail, the function will return [[None]] for both. 
def load_credentials(config): """Load credentials from keyring, environment or config dictionary""" try: - hostname = keyring.get_password("canvaslms", "host") - token = keyring.get_password("canvaslms", "token") + <> if hostname and token: return hostname, token except: diff --git a/src/canvaslms/cli/results.nw b/src/canvaslms/cli/results.nw new file mode 100644 index 0000000..131a7b1 --- /dev/null +++ b/src/canvaslms/cli/results.nw @@ -0,0 +1,549 @@ +\chapter{The \texttt{results} command} +\label{results-command} + +This chapter provides the subcommand [[results]], which lists the results of a +course. +The purpose of the listing is to export results from Canvas. + +We want to export two types of results. +The first is grades for the students. +We want to turn the assignments in Canvas into grades that can be exported. +The format of the listing is compatible with the \texttt{ladok3} +package\footnote{% + URL: \url{https://github.com/dbosk/ladok3} +}. + +The second is a listing of all assignments that prevents a student from getting +a grade. +This is useful for reminding students to finish their missing assignments. + +We'll take a general approach and provide an option to switch between these two +cases. + + +\section{The [[results]] subcommand and its options} + +We outline the module: +<>= +import canvaslms.cli +from canvaslms.cli import assignments, courses, submissions, users +import canvaslms.hacks.canvasapi + +import argparse +import csv +import canvasapi.submission +import datetime as dt +import importlib +import importlib.machinery +import importlib.util +import os +import pathlib +import re +import sys + +<> + +def add_command(subp): + """Adds the results command to argparse parser subp""" + <> +@ + +We add the subparser for [[results]]. +The command requires two arguments: course and assignment. +We also want the option to filter on users. +We can add these by using [[add_assignment_option]], however, we don't need the +ungraded flag as we want to export results (\ie graded material). +Also, we can just add the [[add_user_or_group_option]] to be able to filter on +users or groups. +<>= +results_parser = subp.add_parser("results", + help="Lists results of a course", + description="""<>""", + epilog="""<>""") +results_parser.set_defaults(func=results_command) +assignments.add_assignment_option(results_parser, ungraded=False) +users.add_user_or_group_option(results_parser) +<> +<> +<> +@ + +Let's summarize what we want to do. +<>= +Lists results of a course for export, for instance to the `ladok report` +command. Output format, CSV: + + + +Can also export a list of missing assignment results (--missing option) that +prevent the student from getting a grade. Output format, CSV: + + + +The reason can be "not submitted" or "not graded". +<>= +If you specify an assignment group, the results of the assignments in that +group will be summarized. You can supply your own function for summarizing +grades through the -S option. See `pydoc3 canvaslms.grades` for different +options. +@ We will cover the option for and loading of the custom summary module later, +in \cref{custom-summary-modules}. + +Now, that [[results_command]] function must take three arguments: [[config]], +[[canvas]] and [[args]]. +However, unlike the other commands, we don't want to do the processing for the +assignment options using [[process_assignment_option]]. +We want to handle that ourselves, because we want slightly different handling. 
+<>=
+def results_command(config, canvas, args):
+  <>
+@
+
+Now we'd simply like to print the results.
+The user provides us with a set of courses and a set of assignments or
+assignment groups in those courses.
+If the user provides assignment groups, we will automatically summarize the
+results of all assignments in the assignment group.
+
+We will create a list of results, where each result is a tuple (actually a
+list, since the length might vary).
+These tuples will then be printed in CSV format to standard output.
+<>=
+output = csv.writer(sys.stdout, delimiter=args.delimiter)
+
+if args.assignment_group != "":
+  results = summarize_assignment_groups(canvas, args)
+else:
+  results = summarize_assignments(canvas, args)
+
+for result in results:
+  output.writerow(result)
+@
+
+
+\section{Filtering grades in output}
+
+We also want to let the user choose to not include Fs (or other grades) in the
+output.
+By default, we ignore F and Fx grades.
+If the user supplies [[-F]], we include all grades.
+If the user supplies [[-F regex]], we use regex to filter grades.
+<>=
+passing_regex = r"^([A-EP]|complete)$"
+all_grades_regex = r"^([A-FP]x?|(in)?complete)$"
+results_parser.add_argument("-F", "--filter-grades",
+  required=False, action="store", nargs="?",
+  const=all_grades_regex,
+  default=passing_regex,
+  help=f"Filter grades. By default we only output "
+    f"A--Es and Ps ({passing_regex}). "
+    f"If you want to include Fs ({all_grades_regex}), use this option. "
+    f"You can also supply an optional regex to this option "
+    f"to filter grades based on that.")
+@
+
+To make filtering easy, we provide a helper function.
+<>=
+def filter_grade(grade, regex):
+  """
+  Returns True if the grade matches the regex.
+  """
+  return re.search(regex, grade)
+@
+
+
+\section{Summarizing assignment results}
+
+In this case, we want to have one assignment per row in the output.
+We want to output course, assignment, student ID, grade, submission date and
+those who participated in the grading.
+
+We first get the list of courses.
+We do this to then get the list of all users in all courses.
+We need these to get the integration ID, which can be used for LADOK, for
+example.
+
+Then we get the list of assignments in all courses.
+We get the submissions for each assignment.
+These submissions are filtered by user.
+We do this because this attaches a [[user]] attribute to each submission with
+the details of each user.
+This gives a trivial [[yield]] statement at the end.
+<>=
+def summarize_assignments(canvas, args):
+  """
+  Turn submissions into results:
+  - canvas is a Canvas object,
+  - args is the command-line arguments, as parsed by argparse.
+  """
+
+  <>
+
+  for submission in submissions_list:
+    if submission.grade is not None:
+      if filter_grade(submission.grade, args.filter_grades):
+        yield [
+          submission.assignment.course.course_code,
+          submission.assignment.name,
+          submission.user.integration_id,
+          submission.grade,
+          round_to_day(submission.submitted_at or submission.graded_at),
+          *all_graders(submission)
+        ]
+@
+
+To create the list of submissions, [[submissions_list]], we have to do the
+following.
+First we need to list the courses.
+For each course we need to get all the users (students).
+Then, for each course, we also need all the assignments.
+When we have the assignments, we can get the submissions.
+Fortunately, we can use the filtering functions provided by the [[courses]],
+[[assignments]] and [[submissions]] modules.
+They will parse the CLI arguments and generate the lists.
+<>= +assignments_list = assignments.process_assignment_option(canvas, args) +users_list = users.process_user_or_group_option(canvas, args) + +submissions_list = submissions.filter_submissions( + submissions.list_submissions(assignments_list, + include=["submission_history"]), + users_list) +@ + +\section{Fixing the dates} + +We want the grade date to be a date, not the timestamp supplied by Canvas. +For instance, LADOK wants dates, not timestamps. +<>= +def round_to_day(timestamp): + """ + Takes a Canvas timestamp and returns the corresponding datetime.date object. + """ + return dt.date.fromisoformat(timestamp.split("T")[0]) +@ + +\section{Getting all graders for a submission} + +We need all graders who participated in the grading, meaning also those who +previously graded (since the last grader might just complement it). +<>= +def all_graders(submission): + """ + Returns a list of everyone who participated in the grading of the submission. + I.e. also those who graded previous submissions, when submission history is + available. + """ + graders = [] + + for prev_submission in submission.submission_history: + <> + <> + + return graders +@ + +To make the code easier, we'll turn the [[submission_history]] data into +[[Submission]] objects. +We also want to keep the added [[.assignment]] attribute, since we'll use it +later. +<>= +prev_submission = canvasapi.submission.Submission( + submission._requester, prev_submission) +prev_submission.assignment = submission.assignment +@ + +Now, we'd like to extract the grader. +We'll get the grader's Canvas user ID, so we'll need to resolve it to an actual +user. +Fortunately, we can use the [[resolve_grader]] function from the +[[submissions]] module to do all the work. +<>= +grader = submissions.resolve_grader(prev_submission) +if grader: + graders.append(grader) +@ + + +\section{Summarizing assignment group results} + +In this case, we want to have one assignment group per row in the output. +We want to output course, assignment group, student ID, summarized grade based +on all assignments in the group and the latest submission date. + +Unlike the previous case, here we must maintain the structure of which +assignments belong to which assignment group so that we can check easily that a +user has passed all assignments in the group. +<>= +def summarize_assignment_groups(canvas, args): + """ + Summarize assignment groups into a single grade: + - canvas is a Canvas object, + - args is the command-line arguments, as parsed by argparse. + """ + + courses_list = courses.process_course_option(canvas, args) + all_assignments = list(assignments.process_assignment_option(canvas, args)) + users_list = set(users.process_user_or_group_option(canvas, args)) + + for course in courses_list: + ag_list = assignments.filter_assignment_groups( + course, args.assignment_group) + + for assignment_group in ag_list: + assignments_list = list(assignments.filter_assignments_by_group( + assignment_group, all_assignments)) + if args.missing: + <> + else: + <> +@ + +\subsection{Producing a list of grades} + +Let's start with the case where we want to produce a list of grades. +We simply call the [[summary.summarize_group]] function with the assignments +and users and process the results. 
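Judging from how [[summary.summarize_group]] is called in the chunk below, a custom summary module (loaded through the [[-S]] option described later) only needs to provide a function [[summarize_group(assignments_list, users_list)]] that yields one tuple per student: the user, the summarized grade, the grade date and then any graders. A very rough sketch of such a module, under that assumption (the real implementations live in [[canvaslms.grades]], the default being [[canvaslms.grades.conjunctavg]]):
\begin{minted}{python}
# mysummary.py -- an illustrative custom summary module, not part of the package.
import datetime as dt

def summarize_group(assignments_list, users_list):
  """Yields (user, grade, grade date, graders ...) for each user."""
  for user in users_list:
    # A real module would inspect the user's submissions to the assignments
    # in assignments_list and merge them into one grade; here we just emit
    # a placeholder passing grade dated today, with no graders.
    yield (user, "P", dt.date.today())
\end{minted}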
+<>= +<> +for user, grade, grade_date, *graders in summary.summarize_group( + assignments_list, users_list): + <> + yield [ + course.course_code, + assignment_group.name, + user.integration_id, + grade, + grade_date, + *graders + ] +@ We will now cover the [[summarize_group]] function in the [[summary]] module. + +If a student hasn't done anything, the grade and date will be [[None]]. +There is no point in including this in the result. +Similarly, this is a good point to do the filtering of grades. +<>= +if grade is None or grade_date is None \ + or not filter_grade(grade, args.filter_grades): + continue +@ + +\subsection{Loading a custom summary module} +\label{custom-summary-modules} + +Different teachers have different policies for merging several assignments into +one grade. +We now want to provide a way to override the default function. +<>= +Name of Python module or file containing module to load with a custom +summarization function to summarize assignment groups. The default module is +part of the `canvaslms` package: `{default_summary_module}`. But it could be +any Python file in the file system or other built-in modules. See `pydoc3 +canvaslms.grades` for alternative modules or how to build your own. +<>= +default_summary_module = "canvaslms.grades.conjunctavg" +results_parser.add_argument("-S", "--summary-module", + required=False, default=default_summary_module, + help=f"""<>""") +@ + +Now, let's load the module into the identifier [[summary]] for the above code. +This is a very dangerous construction. +An attacker can potentially load their own module and have it execute when +reporting grades. +For instance, a malicious module could change grades, \eg always set +A's. + +Now to the loader, we first try to load a system module, then we look for a +module in the current working directory. +We provide a helper function to do this. +<>= +def load_module(module_name): + """ + Load a module from the file system or a built-in module. + """ + try: + return importlib.import_module(module_name) + except ModuleNotFoundError: + module_path = pathlib.Path.cwd() / module_name + module = module_path.stem + + loader = importlib.machinery.SourceFileLoader( + module, str(module_path)) + spec = importlib.util.spec_from_loader(module, loader) + module_obj = importlib.util.module_from_spec(spec) + loader.exec_module(module_obj) + return module_obj +<>= +try: + summary = load_module(args.summary_module) +except Exception as err: + canvaslms.cli.err(1, f"Error loading summary module " + f"'{args.summary_module}': {err}") +@ + +The available summary functions and the default one can be found in +\cref{summary-modules}. + +\subsection{Producing a list of missing assignments} + +Now we want to look at the missing option. +If the user supplies this option, we want to produce a list of missing +assignments. +Similarly to summarizing a group, we also want to use different modules to +produce the missing assignments. +We'll use an option missing which takes an optional name of such a module. +<>= +<> +results_parser.add_argument("-M", "--missing", + required=False, nargs="?", + const=default_missing_module, default=None, + help="Produce a list of missing assignments instead of grades. " + "You can supply a custom module to this option, the module must " + "contain a " + "function `missing_assignments(assignments_list, users_list). 
" + <> + "This option only has effect when working with assignment groups.") +@ + +This lets us load the module and use it to produce the missing assignments, in +a similar fashion as above. +<>= +if args.missing: + try: + missing = load_module(args.missing) + except Exception as err: + canvaslms.cli.err(1, f"Error loading missing module " + f"'{args.missing}': {err}") +@ + +Now, to the main part of the problem. +We simply load the module and call the [[missing_assignments]] function. +It should return a list of tuples, where each tuple is a user, an assignment +and a reason why the assignment is missing. +For instance, the reason could be \enquote{not submitted} or \enquote{not +graded} or \enquote{failed}. + +We output the user's login ID instead of the integration ID, since the login ID +can be used to contact the student (which is probably what we want this data +for). +<>= +<> +<> +for user, assignment, reason in missing_results: + yield [ + course.course_code, + assignment_group.name, + user.login_id, + assignment.name, + reason + ] +@ + +\subsubsection{The default missing module} + +We'll now cover a default function for the missing assignments. +We'll put it in the same module as the [[results]] CLI command, not in a +separate module. +<>= +def missing_assignments(assignments_list, users_list, + <>): + """ + Returns tuples of missing assignments. + + <> + """ + for user in users_list: + for assignment in assignments_list: + <> + <> +<>= +default_missing_module = "canvaslms.cli.results" +@ + +We'll add [[<>]] to the function to make +it accept useful arguments to modify its behaviour. +This way, when someone needs a specific function for their course, they can +just write a function that modifies the default arguments to this function. + +Let's outline what we want this function to do. +The default module checks if all things are graded or submitted. +<>= +"The default module checks if all things are graded or submitted. " +<>= +For each assignment that a student is not done with, we yield a tuple of the +user, the assignment and the reason why the assignment is missing. + +The reason can be "not submitted" or "not graded" or "not a passing grade". + +The only reason to use a different module is if you have optional assignments. +We only want to remind the students of the things they need to pass the course. +We don't want to make it sound like an optional assignment is mandatory. +@ + +This gives us something like this. +<>= +try: + submission = assignment.get_submission(user) +except canvasapi.exceptions.ResourceDoesNotExist: + continue + +if submission is None: + yield user, assignment, "not submitted" +elif submission.grade is None: + if submission.submitted_at: + yield user, assignment, \ + f"submitted on {submission.submitted_at}, but not graded" + else: + yield user, assignment, "not done" +elif not filter_grade(submission.grade, passing_regex): + if submission.submitted_at and \ + submission.submitted_at > submission.graded_at: + yield user, assignment, \ + f"not a passing grade ({submission.grade}), resubmission not graded" + else: + yield user, assignment, \ + f"not a passing grade ({submission.grade})" +@ + +Now, we need that [[passing_regex]], so we can add it to the optional +arguments, with a default value (same as above). +<>= +passing_regex=r"^([A-EP]|complete)$", +@ + +Next, if we want to be able to skip optional assignments, we can add an +optional argument for that. 
+<>= +optional_assignments = None, +@ + +This allows us to make the call to the function as follows. +We check if it's the default function or not, if it is we can pass additional +arguments from the CLI arguments. +<>= +if missing.missing_assignments == missing_assignments: + missing_results = missing.missing_assignments( + assignments_list, users_list, + passing_regex=args.filter_grades, + optional_assignments=args.optional_assignments) +else: + missing_results = missing.missing_assignments( + assignments_list, users_list) +@ + +All that is missing now is the optional assignments argument for the parser. +<>= +results_parser.add_argument("-O", "--optional-assignments", + required=False, nargs="+", default=None, + help="List of regexes matching optional assignments. The default missing " + "assignments will treat matching assignments as optional.") +@ + +Finally, we can do the skipping too. +<>= +if optional_assignments: + if any(re.search(optional, assignment.name) + for optional in optional_assignments): + continue +@ diff --git a/src/canvaslms/cli/submissions.nw b/src/canvaslms/cli/submissions.nw index 2f85da1..66bf846 100644 --- a/src/canvaslms/cli/submissions.nw +++ b/src/canvaslms/cli/submissions.nw @@ -6,18 +6,33 @@ handles an individual submission. We outline the module: <>= +import canvasapi.exceptions +import canvasapi.file +import canvasapi.submission + +import canvaslms.cli import canvaslms.cli.assignments as assignments import canvaslms.cli.users as users import canvaslms.hacks.canvasapi import argparse import csv -import pydoc +import json +import os +import pathlib +import subprocess import pypandoc import re +import rich.console +import rich.markdown +import rich.json +import shlex import sys +import tempfile +import textwrap import urllib.request +<> <> def add_command(subp): @@ -41,10 +56,12 @@ We add the subparser for [[submissions]]. submissions_parser = subp.add_parser("submissions", help="Lists submissions of an assignment", description="Lists submissions of assignment(s). Output format: " - " ") + " " + " ") submissions_parser.set_defaults(func=submissions_command) assignments.add_assignment_option(submissions_parser) add_submission_options(submissions_parser) +<> @ Now, that [[submissions_command]] function must take three arguments: [[config]], [[canvas]] and [[args]]. It must also do the processing for the assignment options using @@ -76,7 +93,17 @@ if args.user or args.category or args.group: output = csv.writer(sys.stdout, delimiter=args.delimiter) for submission in submissions: - output.writerow(format_submission_short(submission)) + if args.login_id: + output.writerow(format_submission_short_unique(submission)) + else: + output.writerow(format_submission_short(submission)) +@ + +Now, we must add that option [[args.login_id]]. +<>= +submissions_parser.add_argument("-l", "--login-id", + help="Print login ID instead of name.", + default=False, action="store_true") @ @@ -84,6 +111,8 @@ for submission in submissions: Sometimes we want to compute the URL for SpeedGrader for a submission. The [[Submission]] objects come with an attribute [[preview_url]]. +(However, previous submissions don't have this, so if it doesn't exist, we use +[[None]].) We want to turn that one into the SpeedGrader URL. The [[preview_url]] looks like this: \begin{minted}{text} @@ -99,7 +128,10 @@ We use Python's regex abilities to rewrite the URL. 
<>= def speedgrader(submission): """Returns the SpeedGrader URL of the submission""" - speedgrader_url = submission.preview_url + try: + speedgrader_url = submission.preview_url + except AttributeError: + return None speedgrader_url = re.sub("assignments/", "gradebook/speed_grader?assignment_id=", @@ -114,17 +146,25 @@ def speedgrader(submission): return speedgrader_url @ + \section{The [[submission]] subcommand and its options} Here we provide a subcommand [[submission]] which deals with an individual submission. +<>= +Prints data about matching submissions, including submission and grading time, +and any text-based attachments. + <>= submission_parser = subp.add_parser("submission", help="Prints information about a submission", - description="Prints data about matching submissions, " - "including submission and grading time, any text-based attachments.") + description=""" +<> +""") submission_parser.set_defaults(func=submission_command) add_submission_options(submission_parser) +<> +<> @ We also need the corresponding function. For now, we only print the most relevant data of a submission. <>= @@ -133,10 +173,311 @@ def submission_command(config, canvas, args): <> @ -Then we can fetch the submission. +Then we can fetch the submission, format it to markdown ([[format_submission]]) +and then print it. +We use the [[rich]] package to print it. +This prints the markdown output of [[format_submission]] nicer in the terminal. +It also adds syntax highlighting for the source code attachments. +However, we need to adapt the use of styles to the pager to be used. +If stdout is not a terminal, we don't use [[rich]], then we simply print the +raw markdown. + +However, we might want to write all data to the output directory. +In that case, we don't print anything to stdout. + +Finally, we might also want to open the directory of files, either in a shell +or in the system file manager. +We'll open the directory in the file manager so that the user can explore the +files while reading the output (containing the grading and comments). +So we must open this first, then we can proceed. + +When we spawn a shell, we don't want to do anything else. +But open in the file manager we can do no matter what the user wants to do, if +they pipe the output or skip the output altogether. <>= +console = rich.console.Console() +<> for submission in submission_list: - pydoc.pager(format_submission(submission, canvas)) + <> + output = format_submission(submission, + history=args.history, + tmpdir=tmpdir/subdir, + json_format=args.json) + + <> + + if args.open == "open": + <> + elif args.open == "all": + <> + + if <>: + <> + elif args.output_dir: + pass + elif sys.stdout.isatty(): + <> + with console.pager(styles=styles): + <> + else: + print(output) +@ Note that we use the theme [[manni]] for the code, as this works well in both +dark and light terminals. + +If we specify the [[output_dir]] we want to write the output to files and not +have it printed to stdout. +<>= +submission_parser.add_argument("-o", "--output-dir", + required=False, default=None, + help="Write output to files in directory the given directory. " + "If not specified, print to stdout. " + "If specified, do not print to stdout.") +@ + +We also have the open option, that has a choice of a few alternatives. +<>= +submission_parser.add_argument("--open", required=False, + nargs="?", default=None, const="open", + choices=["open", "all"]+choices_for_shells, + help="Open the directory containing the files using " + "the default file manager (`open`). 
" + "With `open`, the pager will be used to display the output as usual. " + "With `all`, all files (not the directory containing them) will be " + "opened in the default application for the file type. " + <> + "Default: %(const)s") +<>= +args.open in choices_for_shells +<>= +<> +@ + +Finally, we can add the [[json]] option to the [[submission]] command. +<>= +submission_parser.add_argument("--json", required=False, + action="store_true", default=False, + help="Print output as JSON, otherwise Markdown.") +<>= +if args.json: + console.print(rich.json.JSON(output)) +else: + console.print(rich.markdown.Markdown(output, + code_theme="manni")) +@ + +\subsection{Specifying an output directory} + +If the user specified an output directory, we will not create a temporary +directory, but rather let [[tmpdir]] be the output directory. +<>= +if args.output_dir: + tmpdir = pathlib.Path(args.output_dir) +else: + tmpdir = pathlib.Path(tempfile.mkdtemp()) +@ + +Finally, we can then write the output to a file in the output directory. +We need to structure the files in some way. +We have a list of submissions for various students. +The submissions can be submissions for various assignments (in assignment +groups) in various courses. +The two most interesting options are: +\begin{enumerate} +\item to group by student, that is the student is the top level directory, +[[student/course/assignment]]; or +\item to group by course, that is we get [[course/assignment/student]]. +\end{enumerate} +This affects both +[[<>]] +and +[[<<[[tmpdir]] for [[format_submission]]>>]]. + +We'll introduce an option that lets us choose between these two options. +<>= +submission_parser.add_argument("--sort-order", required=False, + choices=["student", "course"], default="student", + help="Determines the order in which directories are created " + "in `output_dir`. `student` results in `student/course/assignment` " + "and `course` results in `course/assignment/student`. " + "Default: %(default)s") +<>= +if args.sort_order == "student": + subdir = f"{submission.user.login_id}" \ + f"/{submission.assignment.course.course_code}" \ + f"/{submission.assignment.name}" +else: + subdir = f"{submission.assignment.course.course_code}" \ + f"/{submission.assignment.name}" \ + f"/{submission.user.login_id}" + +(tmpdir / subdir).mkdir(parents=True, exist_ok=True) +<>= +if args.json: + filename = "metadata.json" + output = json.dumps(output, indent=2) +else: + filename = "metadata.md" +with open(tmpdir/subdir/filename, "w") as f: + f.write(output) +@ + +\subsection{Opening the directory containing the files} + +Sometimes we want to open the directory. +There are several ways we can do this. +We can open the directory with the system file explorer, that way the user can +open files while reading the stdout output using a pager. +<>= +subprocess.run(["open", tmpdir/subdir]) +@ + +If we instead want to open all files contained in the directory, we can need to +iterate all the files and open them one by one. +<>= +for file in (tmpdir/subdir).iterdir(): + subprocess.run(["open", file]) +@ + +We can also spawn a shell in the directory so that the user can work with the +files, for instance run the Python code in the case of a Python lab submission. +Now, we could spawn a sub-shell of the user's shell, +we'll let this be the [[shell]] option. +Another approach would be to run a Docker image and mount the directory in the +container. +This would be the [[docker]] option. 
+<>= +choices_for_shells = ["shell", "docker"] +<>= +"With `shell`, we just drop into the shell (as set by $SHELL), " +"the output can be found in the metadata.{json,md} file in " +"the shell's working directory. " +"With `docker`, we run a Docker container with the " +"directory mounted in the container. " +"This way we can run the code in the submission in a " +"controlled environment. " +"Note that this requires Docker to be installed and running. " +<>= +if args.open == "shell": + <> +elif args.open == "docker": + <> +@ + +In both cases, we want to print some useful info for the user, so that they can +more easily orient themselves. +In the case of the sub-shell, we print a message and then spawn the shell in +the directory. +At exit, we print a message that the shell has terminated and that the files +are left in the directory, so the user can go back without executing the +command again. +<>= +print(f"---> Spawning a shell ({os.environ['SHELL']}) in {tmpdir/subdir}") + +subprocess.run([ + "sh", "-c", f"cd '{tmpdir/subdir}' && exec {os.environ['SHELL']}" +]) + +print(f"<--- canvaslms submission shell terminated.\n" + f"---- Files left in {tmpdir/subdir}.") +@ + +We want to do the same for Docker. +However, this is a bit more complicated. +We need to know which image to use and which command to run in the container. +We also need to know any other options that we might want to pass to Docker. +<>= +print(f"---> Running a Docker container, files mounted in /mnt.") + +cmd = [ + "docker", "run", "-it", "--rm" +] +if args.docker_args: + cmd += args.docker_args +cmd += [ + "-v", f"{tmpdir/subdir}:/mnt", + args.docker_image, args.docker_cmd +] + +subprocess.run(cmd) + +print(f"<--- canvaslms submission Docker container terminated.\n" + f"---- Files left in {tmpdir/subdir}.\n" + f"---- To rerun the container, run:\n" + f"`{' '.join(map(shlex.quote, cmd))}`") +@ + +This requires us to add an option for the Docker image to use and an option for +the command to run in the Docker container. +<>= +submission_parser.add_argument("--docker-image", required=False, + default="ubuntu", + help="The Docker image to use when running a Docker container. " + "This is used with the `docker` option for `--open`. " + "Default: %(default)s") +submission_parser.add_argument("--docker-cmd", required=False, + default="bash", + help="The command to run in the Docker container. " + "This is used with the `docker` option for `--open`. " + "Default: %(default)s") +@ + +For the last argument, [[args.docker_args]], we want to be able to pass any +arguments to the Docker command. +This should be a list of strings, so we can just pass it on to the +[[subprocess.run]] function. + +Using the [[argparse.REMAINDER]] option, we can pass the rest of the command +line to the Docker command. +This is useful since it saves us a lot of problems with escaping options that +we want to pass to Docker, instead of our argparser to parse it. +Normally, if we want to pass [[-e LADOK_USER]] to Docker, our argparser would +pick up that [[-e]] as an option, unless escaped. +<>= +submission_parser.add_argument("--docker-args", required=False, + default=[], nargs=argparse.REMAINDER, + help="Any additional arguments to pass to the Docker command. " + "This is used with the `docker` option for `--open`. 
" + "Note that this must be the last option on the command line, it takes " + "the rest of the line as arguments for Docker.") +@ + + +\subsection{Check if we should use styles} + +By default, [[rich.console.Console]] uses the [[pydoc.pager]], which uses the +system pager (as determined by environment variables etc.). +The default usually can't handle colours, so [[rich]] doesn't use colours when +paging. +We want to check if [[less -r]] or [[less -R]] is set as the pager, in that +case we can use styles. +<>= +pager = "" +if "MANPAGER" in os.environ: + pager = os.environ["MANPAGER"] +elif "PAGER" in os.environ: + pager = os.environ["PAGER"] + +styles = False +if "less" in pager and ("-R" in pager or "-r" in pager): + styles = True +<>= +Uses MANPAGER or PAGER environment variables for the pager to page output. If +the `-r` or `-R` flag is passed to `less`, it uses colours in the output. That +is, set `PAGER=less -r` or `PAGER=less -R` to get coloured output from this +command. + +@ + +\subsection{Optional history} + +Now, let's turn to that [[args.history]] argument. +We want to exclude it sometimes, for instance, when we want to get to the +comments only. +So we default to off, since it's only occasionally that we want to see the +history. +<>= +submission_parser.add_argument("-H", "--history", action="store_true", + help="Include submission history.") @ @@ -151,15 +492,24 @@ For this we need \item a user or group, \item to know if we aim for all or just ungraded submissions. \end{itemize} +We add the [[required]] parameter to specify if we want to have required +arguments, \eg for the [[grade]] command. <>= -def add_submission_options(parser): +def add_submission_options(parser, required=False): try: - assignments.add_assignment_option(parser) + assignments.add_assignment_option(parser, required=required) except argparse.ArgumentError: pass try: - users.add_user_or_group_option(parser) + users.add_user_or_group_option(parser, required=required) + except argparse.ArgumentError: + pass + + submissions_parser = parser.add_argument_group("filter submissions") + try: # to protect from this option already existing in add_assignment_option + submissions_parser.add_argument("-U", "--ungraded", action="store_true", + help="Only ungraded submissions.") except argparse.ArgumentError: pass @@ -172,9 +522,13 @@ def process_submission_options(canvas, args): user_list = users.process_user_or_group_option(canvas, args) if args.ungraded: - submissions = list_ungraded_submissions(assignment_list) + submissions = list_ungraded_submissions(assignment_list, + include=["submission_history", "submission_comments", + "rubric_assessment"]) else: - submissions = list_submissions(assignment_list) + submissions = list_submissions(assignment_list, + include=["submission_history", "submission_comments", + "rubric_assessment"]) return list(filter_submissions(submissions, user_list)) @ @@ -236,79 +590,587 @@ def filter_submissions(submission_list, user_list): \section{Printing a submission} We provide two functions to print a submission. -One to print a short identifier and one to print the submission data. +One to print a short summary (row in CSV data) and one to print the submission +data for rendering. +The first is to get an overview of all submissions, the latter to look into the +details of only one submission. We'll format the submission in short format. The most useful data is the identifier, the grade and the date of grading. 
<>= def format_submission_short(submission): - if submission.submitted_at: - date = submission.submitted_at - else: - date = submission.graded_at - return [submission.assignment.course.course_code, submission.assignment.name, - submission.user_id, submission.user.name, submission.grade, date] + return [ + submission.assignment.course.course_code, + submission.assignment.name, + submission.user.name, + submission.grade, submission.submitted_at, submission.graded_at + ] +@ + +Sometimes we want the short format to contain a unique identifier (such as +[[login_id]]) instead of the name. +<>= +def format_submission_short_unique(submission): + <> + + return [ + submission.assignment.course.course_code, + submission.assignment.name, + uid, + submission.grade, submission.submitted_at, submission.graded_at + ] +@ + +However, we note that sometimes the student doesn't have a [[login_id]] +attribute, so we can use their [[integration_id]] or [[sis_user_id]] instead +for uniqueness. +See \cref{UserUniqueID} for details. +<>= +uid = users.get_uid(submission.user) @ We provide the function [[format_submission]] to nicely format a submission. It prints metadata, downloads any text attachments to include in the output. -It also uses the [[canvas]] object to resolve course, assignment and user IDs. +We also output all the submission history at the end. +We no longer need the [[canvas]] object to resolve course, assignment and user +IDs, instead, we add these as attributes when fetching the objects. +So [[submission.assignment]] is the assignment it came from, we don't need to +resolve the assignment from the assignmend ID. + +We have a [[md_title_level]] argument to specify the level of the title in the +Markdown version of the output. +We want this to be able to recursively use [[format_submission]] to format the +submission history. +This must then be passed to the recursive call and the section formatting. <>= -def format_submission(submission, canvas): - """Formats submission for printing to stdout""" +def format_submission(submission, history=False, json_format=False, + md_title_level="#", + <<[[format_submission]] args>>): + """ + Formats submission for printing to stdout. Returns a string. + + If history is True, include all submission versions from history. + + If json_format is True, return a JSON string, otherwise Markdown. + + `md_title_level` is the level of the title in Markdown, by default `#`. This + is used to create a hierarchy of sections in the output. + + <<[[format_submission]] doc>> + """ student = submission.assignment.course.get_user(submission.user_id) - formatted_submission = f"""# Metadata + if json_format: + formatted_submission = {} + else: + formatted_submission = "" -{submission.assignment.course.course_code} > {submission.assignment.name} + <> + <> + <> + if history: + <> + else: + <> + <> + <> -- Student: {student.name} ({student.login_id or None}, {submission.user_id}) -- Submission ID: {submission.id} -- Submitted (graded): {submission.submitted_at} ({submission.graded_at}) -- Grade: {submission.grade} ({submission.score}) -- SpeedGrader: {speedgrader(submission)} -""" + return formatted_submission +@ - try: - if submission.submission_comments: - formatted_submission += f""" +\subsection{Some helper functions} -# Comments -""" +We want to format some sections. +The section has a title and a body. +<<[[format_section]] doc>>= +In the case of Markdown (default), we format the title as a header and the body +as a paragraph. 
If we don't do JSON, but receive a dictionary as the body, we +format it as a list of key-value pairs. - for comment in submission.submission_comments: - formatted_submission += f""" -{comment["author_name"]} ({comment["created_at"]}): +`md_title_level` is the level of the title in Markdown, by default `#`. +We'll use this to create a hierarchy of sections in the output. -{comment["comment"]} -""" - except AttributeError: - pass +In the case of JSON, we return a dictionary with the title as the key and the +body as the value. +<>= +def format_section(title, body, json_format=False, md_title_level="#"): + """ + <<[[format_section]] doc>> + """ + if json_format: + return {title: body} + + if isinstance(body, dict): + return "\n".join([ + f" - {key.capitalize().replace('_', ' ')}: {value}" + for key, value in body.items() + ]) + + return f"\n{md_title_level} {title}\n\n{body}\n\n" +@ - formatted_submission += f""" +\subsection{Metadata} -# Body +To format the metadata section, we simply pass the right strings to the section +formatting function. +<>= +metadata = { + "course": submission.assignment.course.course_code, + "assignment": submission.assignment.name, + "student": str(student), + "submission_id": submission.id, + "submitted_at": submission.submitted_at, + "graded_at": submission.graded_at, + "grade": submission.grade, + "graded_by": str(resolve_grader(submission)), + "speedgrader": speedgrader(submission) +} -{submission.body} -""" +if json_format: + formatted_submission.update(format_section("metadata", metadata, + json_format=True, + md_title_level=md_title_level)) +else: + formatted_submission += format_section("Metadata", metadata, + md_title_level=md_title_level) +@ + +Now to resolve the grader, we need to look up a user ID. +Fortunately, we can do that through the course that is included as part of the +assignment, as part of the submission. +(We add this manually in [[list_submissions]].) +The grader ID is negative if it was graded automatically, \eg by a quiz or LTI +integration. +If negative, it's either the quiz ID or LTI tool ID. +(Negate to get the ID.) +Finally, we look up the user object from the course. +In some rare cases, we might not find the user, in which case we return the +grader ID as a string. +<>= +def resolve_grader(submission): + """ + Returns a user object if the submission was graded by a human. + Otherwise returns None if ungraded or a descriptive string. 
+ """ + try: + if submission.grader_id is None: + return None + except AttributeError: + return None + + if submission.grader_id < 0: + return "autograded" try: - for attachment in submission.attachments: - if "text/" not in attachment["content-type"]: - continue + return submission.assignment.course.get_user(submission.grader_id) + except canvasapi.exceptions.ResourceDoesNotExist: + return f"unknown grader {submission.grader_id}" +@ - contents = urllib.request.urlopen(attachment["url"]).read().decode("utf-8") - formatted_submission += f""" +\subsection{Rubric data} -## {attachment["filename"]} +<>= +try: + if submission.rubric_assessment: + if json_format: + formatted_submission.update(format_section( + "rubric_assessment", + format_rubric(submission, json_format=True)), + json_format=True) + else: + formatted_submission += format_section( + "Rubric assessment", + format_rubric(submission)) +except AttributeError: + pass +@ -``` -{contents} -``` -""" - except AttributeError: +\subsection{General comments} + +<>= +try: + if submission.submission_comments: + if json_format: + formatted_submission.update(format_section( + "comments", submission.submission_comments, + json_format=True)) + else: + body = "" + for comment in submission.submission_comments: + body += f"{comment['author_name']} ({comment['created_at']}):\n\n" + body += comment["comment"] + "\n\n" + formatted_submission += format_section("Comments", body) +except AttributeError: + pass +@ + +\subsection{Body} + +<>= +try: + if submission.body: + if json_format: + formatted_submission.update(format_section( + "body", submission.body, json_format=True, + md_title_level=md_title_level)) + else: + formatted_submission += format_section("Body", submission.body, + md_title_level=md_title_level) +except AttributeError: + pass +@ + +\subsection{Quiz answers} + +<>= +try: + if submission.submission_data: + if json_format: + formatted_submission.update(format_section( + "quiz_answers", submission.submission_data, + json_format=True, md_title_level=md_title_level)) + else: + formatted_submission += format_section( + "Quiz answers", + json.dumps(submission.submission_data, indent=2), + md_title_level=md_title_level) +except AttributeError: + pass +@ + +\subsection{Attachments} + +For the attachment, we want to add it to the output. +In the case of Python code we want to have it as a Markdown code block. +If it's a text file, we want to have it as part of the plain Markdown. +We will try to convert the attachment to Markdown using [[pypandoc]]. + +Since we want the files to be their own sections, for easy navigation, we must +bumps [[md_title_level]] by one to make the files a subsection of the main +submission. + +In the JSON version, we want to introduce one more level: the attachments +should be found under the key [[attachments]]. +<>= +try: + <> + if json_format: + attachments = {} + for attachment in submission.attachments: + <> + formatted_attachment = format_section(attachment.filename, contents, + json_format=json_format, + md_title_level=md_title_level+"#") + + if json_format: + attachments.update(formatted_attachment) + else: + formatted_submission += formatted_attachment + + if json_format and attachments: + formatted_submission.update(format_section("attachments", attachments, + json_format=True, + md_title_level=md_title_level)) +except AttributeError: + pass +@ + +Let's look at the conversion. +If it's a text-based format, we want to include it as a Markdown code block. 
+Otherwise, we'll try to convert it to Markdown using [[pypandoc]]. +If the latter fails, we want to add a pointer to the file in the output, so +that the user can open it in an external viewer. + +In fact, having a copy of all the files locally is useful. +We need to download it anyways, so we might just as well put a copy in a local +temporary directory too. +We'll let the caller specify the directory to use, so we can use the same +directory for all attachments and potentially all users. +<<[[format_submission]] args>>= +tmpdir = None, +<<[[format_submission]] doc>>= +`tmpdir` is the directory to store all the submission files. Defaults to None, +which creates a temporary directory. +<>= +tmpdir = pathlib.Path(tmpdir or tempfile.mkdtemp()) +tmpdir.mkdir(parents=True, exist_ok=True) +@ + +We'll use [[tmpdir]] as the temporary directory to store the files when we try +to convert them. +<>= +def convert_to_md(attachment: canvasapi.file.File, + tmpdir: pathlib.Path) -> str: + """ + Converts `attachment` to Markdown. Returns the Markdown string. + + Store a file version in `tmpdir`. + """ + <> + content_type = getattr(attachment, "content-type") + <> + <> + <> +<>= +contents = convert_to_md(attachment, tmpdir) +@ + +The download is simple. +<>= +outfile = tmpdir / attachment.filename +attachment.download(outfile) +@ + +If the content type is text, we can just decode it and use it in a Markdown +code block with a suitable (Markdown) content type. +This means that we can set what type of data the code block contains. +We compute this text from the content type of the attachment. +For instance, Python source code is [[text/x-python]]. +We remove the [[text/]] prefix and check if there is any [[x-]] prefix left, in +which case we remove that as well. + +Now, we want to change the content type to the format expected by Markdown to +do proper syntax highlighting. +This requires some ugly processing since one [[.py]] file might have content +type [[text/x-python]] and another [[.py]] file might have +[[text/python-script]]. +<>= +def text_to_md(content_type): + """ + Takes a text-based content type, returns Markdown code block type. + Raises ValueError if not possible. + """ + if content_type.startswith("text/"): + content_type = content_type[len("text/"):] + else: + raise ValueError(f"Not text-based content type: {content_type}") + + if content_type.startswith("x-"): + content_type = content_type[2:] + if content_type == "python-script": + content_type = "python" + + return content_type +@ + +This leaves us with the following. +The advantage of reading the content from the file is that Python will solve +the encoding for us. +Instead of using an [[if]] statement, we'll go all Python and use a +[[try-except]] block. +<>= +try: + md_type = text_to_md(content_type) + with open(outfile, "r") as f: + contents = f.read() + return f"```{md_type}\n{contents}\n```" +except ValueError: + pass +@ + +Now we'll do the same for PDF files. +We'll use [[pdf2txt]] to convert the PDF to text. +However, here we'll use an if statement. +We'll check for the content type to end with [[pdf]], that will capture also +[[x-pdf]]. +<>= +if content_type.endswith("pdf"): + try: + return subprocess.check_output(["pdf2txt", str(outfile)], + text=True) + except subprocess.CalledProcessError: pass +@ - return formatted_submission +Finally, as a last attempt, we use [[pypandoc]] to try to convert it to +Markdown. +Here we'll use Pandoc's ability to infer the file type on its own. 
+This means we'll have to download the attachment as a file in a temporary +location and let Pandoc convert the file to Markdown. +<>= +try: + return pypandoc.convert_file(outfile, "markdown") +except Exception as err: + return f"Cannot convert this file. " \ + f"The file is located at\n\n {outfile}\n\n" +@ + + +\subsection{Submission history} + +The history contains all the student's submissions, including the current +submission. +We want to keep track of what belongs to the different versions. +This becomes important when we rely on the [[tmpdir]] to store the files. +We don't want the files of one version overwrite the files of another. +Then we can't be sure which version we're looking at. + +To produce the history, we'll modify [[tmpdir]] to create a subdirectory for +each version. +We'll write a metadata file for each version. +Then we'll return all of those in one main metadata file too. +<>= +try: + submission_history = submission.submission_history +except AttributeError: + pass +else: + if submission_history: + versions = {} + for version, prev_submission in enumerate(submission.submission_history): + version = str(version) + version_dir = tmpdir / f"version-{version}" + + <> + <> + + if json_format: + formatted_submission.update(format_section( + "submission_history", versions, json_format=True, + md_title_level=md_title_level)) + else: + formatted_versions = "" + for version, prev_metadata in versions.items(): + formatted_versions += format_section(f"Version {version}", + prev_metadata, + md_title_level=md_title_level+"#") + formatted_submission += format_section( + "Submission history", formatted_versions, + md_title_level=md_title_level) +@ + +The [[prev_submission]] that we get is in raw JSON format from Canvas. +We'll turn it into a [[Submission]] object and add the extra assignment +attribute to it. +Now we can reuse the [[format_submission]] function to format this version's +submission metadata. + +Note that we can't pass on the [[history]] argument to [[format_submission]], +we need to let it know that it's formatting a history version in another way to +get the sectioning levels right. +We'll use an argument [[md_title_level]]. +<>= +prev_submission = canvasapi.submission.Submission( + submission._requester, prev_submission) +prev_submission.assignment = submission.assignment + +prev_metadata = format_submission(prev_submission, + tmpdir=version_dir, + json_format=json_format, + md_title_level=md_title_level+"#") + +versions[version] = prev_metadata +@ + +When we write the metadata to a file, we'll either just write a string or JSON +data. +We can use [[json.dump]] to write the JSON data to a file. +<>= +if json_format: + with open(version_dir/"metadata.json", "w") as f: + json.dump(prev_metadata, f, indent=2) +else: + with open(version_dir/"metadata.md", "w") as f: + f.write(prev_metadata) +@ + + +\section{Formatting rubrics} + +For assignments that use rubrics, we want to format those rubrics so that we +can read the results instead of just the cumulated grade. +<>= +def format_rubric(submission, json_format=False): + """ + Format the rubric assessment of the `submission` in readable form. + + If `json_format` is True, return a JSON string, otherwise Markdown. 
+ """ + + if json_format: + result = {} + else: + result = "" + + for crit_id, rating_data in submission.rubric_assessment.items(): + criterion = get_criterion(crit_id, submission.assignment.rubric) + rating = get_rating(rating_data["rating_id"], criterion) + try: + comments = rating_data["comments"] + except KeyError: + comments = "" + + <> + + if not json_format: + result += "\n" + + return result.strip() +@ + +Sometimes Canvas is missing some data, +for instance, an individual rating. +So we add it only if it exists. +<>= +if json_format: + result[criterion["description"]] = { + "rating": rating["description"] if rating else None, + "points": rating["points"] if rating else None, + "comments": comments + } +else: + result += f"- {criterion['description']}: " + if rating: + result += f"{rating['description']} ({rating['points']})" + else: + result += "-" + result += "\n" + if comments: + result += textwrap.indent(textwrap.fill(f"- Comment: {comments}"), + " ") + result += "\n" +@ + +We can get the rating of a rubric from the rubric assessment. +We can get this data from [[submission.rubric_assessment]] and it looks like +this: +\begin{minted}{python} +{'_7957': {'rating_id': '_6397', 'comments': '', 'points': 1.0}, + '_1100': {'rating_id': '_8950', 'comments': '', 'points': 1.0}} +\end{minted} + +We get the rubric with the assignment. +So we can get it through [[submission.assignment.rubric]] and it looks like +this: +\begin{minted}{python} +[{'id': '_7957', 'points': 1.0, 'description': 'Uppfyller kraven i lydelsen', +'long_description': '', 'criterion_use_range': False, 'ratings': [{'id': +'_6397', 'points': 1.0, 'description': 'OK', 'long_description': ''}, {'id': +'_7836', 'points': 0.0, 'description': 'Påpekande', 'long_description': ''}]}, +{'id': '_1100', 'points': 1.0, 'description': 'Kan redogöra för alla detaljer', +'long_description': '', 'criterion_use_range': False, 'ratings': [{'id': +'_8950', 'points': 1.0, 'description': 'OK', 'long_description': ''}, {'id': +'_4428', 'points': 0.0, 'description': 'Påpekande', 'long_description': ''}]}] +\end{minted} +It's essentially a list of criterions. +We want to extract a criterion by ID from the rubric. +<>= +def get_criterion(criterion_id, rubric): + """Returns criterion with ID `criterion_id` from rubric `rubric`""" + for criterion in rubric: + if criterion["id"] == criterion_id: + return criterion + + return None +@ And in exactly the same fashion we want to extract a rating from the +criterion. 
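+Before defining [[get_rating]], let's make the lookups concrete.
+With the example rubric and rubric assessment shown above, we would expect
+roughly the following (an illustrative snippet, not part of the module):
+\begin{minted}{python}
+criterion = get_criterion("_7957", submission.assignment.rubric)
+rating = get_rating(
+    submission.rubric_assessment["_7957"]["rating_id"], criterion)
+
+criterion["description"]                 # 'Uppfyller kraven i lydelsen'
+rating["description"], rating["points"]  # ('OK', 1.0)
+\end{minted}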
+<>= +def get_rating(rating_id, criterion): + """Returns rating with ID `rating_id` from rubric criterion `criterion`""" + for rating in criterion["ratings"]: + if rating["id"] == rating_id: + return rating + + return None @ diff --git a/src/canvaslms/cli/users.nw b/src/canvaslms/cli/users.nw index c2d8fed..531bf9c 100644 --- a/src/canvaslms/cli/users.nw +++ b/src/canvaslms/cli/users.nw @@ -19,11 +19,13 @@ We outline the module: <>= import argparse import canvasapi.course +import canvasapi.exceptions import canvasapi.group import canvaslms.cli import canvaslms.cli.courses as courses import canvaslms.hacks.canvasapi import csv +import operator import re import sys @@ -154,34 +156,70 @@ def make_user_rows(canvas, args, roles): try: row = [] row.append(user.course.course_code) - if args.canvas_id: - row.append(user.id) - row.append(user.login_id) - if args.ladok: - row.append(user.integration_id) - if args.split_name: - lastnames, firstnames = user.sortable_name.split(", ") - row.append(firstnames.strip()) - row.append(lastnames.strip()) - else: - row.append(user.name) - if args.email: - row.append(user.email) + <> except AttributeError as err: canvaslms.cli.warn(f"skipped {user}: {err}") continue yield row -@ Note that in most cases, the [[user.login_id]] and [[user.email]] will be the +@ + +Note that in most cases, the [[user.login_id]] and [[user.email]] will be the same. However, we need both, because the students can change their email address to a non-KTH address. Also note, there are users for whom some attributes don't exist, hence we must first extract them in a try-except construction. This happens if they've been removed from the course or never registered. +<>= +if args.canvas_id: + row.append(user.id) +<> +<> +<> +<> +@ + +Now, some of those attributes are optional and we should check [[args]] whether +the user specified that an attribute should be included. +Some of the attributes might not exist, so we need to try-except them. +(This happens for instance to the [[login_id]] attribute at KTH when the +student is no longer a student with us.) +<>= +try: + row.append(user.login_id) +except AttributeError: + row.append(None) +<>= +if args.ladok: + try: + row.append(user.integration_id) + except AttributeError: + row.append(None) +<>= +if args.email: + try: + row.append(user.email) + except AttributeError: + row.append(None) +@ + +The name attribute always exists, but we must check if we want first and last +names separately or as one string. +To split them, we can use the [[sortable_name]] attribute and split on the [[, +]] following the last name and preceding the first name. +<>= +if args.split_name: + lastnames, firstnames = user.sortable_name.split(", ") + row.append(firstnames.strip()) + row.append(lastnames.strip()) +else: + row.append(user.name) +@ If we filter by groups, then we must iterate over the groups to include the group information. +Other than the group data, we include the user data as before. 
<>= def make_user_rows_w_groups(canvas, args, roles): """Takes a list of courses and returns a list of users in those courses, @@ -203,19 +241,7 @@ def make_user_rows_w_groups(canvas, args, roles): except AttributeError: pass row.append(group.name) - if args.canvas_id: - row.append(user.id) - row.append(user.login_id) - if args.ladok: - row.append(user.integration_id) - if args.split_name: - lastnames, firstnames = user.sortable_name.split(", ") - row.append(firstnames.strip()) - row.append(lastnames.strip()) - else: - row.append(user.name) - if args.email: - row.append(user.email) + <> except AttributeError as err: canvaslms.cli.warn(f"skipped {user}: {err}") continue @@ -235,7 +261,7 @@ def add_groups_command(subp): groups_parser = subp.add_parser("groups", help="Lists groups of a course", description="Lists groups of a course(s). Output, CSV-format: " - " <#members>") + " <#members>") groups_parser.set_defaults(func=groups_command) courses.add_course_option(groups_parser) <> @@ -273,7 +299,10 @@ else: for category in categories: for group in filter_groups([category], args.regex): - row = [group.id, category.name, group.name, group.members_count] + row = [ + category.course.course_code, category.name, + group.name, group.members_count + ] output.writerow(row) @ @@ -288,8 +317,10 @@ we can filter out groups; or filter out groups directly from courses. We want to filter out the group categories from a list of courses. <>= def filter_group_categories(course_list, regex): - """Filters out the group categories whose names match regex - in the courses in course_list""" + """ + Filters out the group categories whose names match regex + in the courses in course_list + """ name = re.compile(regex or ".*") @@ -309,9 +340,11 @@ method [[.get_groups()]], so thanks to Python's duck typing we can write short code. <>= def filter_groups(items, regex): - """Items is a list of either courses or group categories, + """ + Items is a list of either courses or group categories, regex is a regular expression. - Returns all groups whose name match regex.""" + Returns all groups whose name match regex. + """ name = re.compile(regex or ".*") @@ -341,13 +374,23 @@ We provide the following functions: First, we provide [[list_users]], which takes a list of courses and a list of Canvas roles as arguments. +Here we must be careful. +For some courses we can't list the users, because of lacking permissions. +Whenever we're searching for a particular user, we might look in all courses, +and then we must skip those where we can't list the users. +This way we can search through all other courses instead of stopping at the +first error. <>= def list_users(courses, roles): """List users in courses with roles""" users = list() for course in courses: - course_users = list(course.get_users(enrollment_type=roles)) + try: + course_users = list(course.get_users(enrollment_type=roles)) + except canvasapi.exceptions.CanvasException as err: + canvaslms.cli.warn(f"skipped {course}: {err}") + continue for user in course_users: user.course = course users.extend(course_users) @@ -357,35 +400,67 @@ def list_users(courses, roles): Second, we provide the most general function, [[filter_users]], which takes a list of courses, a list of Canvas roles and a regex as arguments. +It returns the matching users. + +We compile the regex to a pattern. +This is because it's slightly faster, since we reuse the regex for several +searches. +We'll ignore case. 
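+As a tiny illustration of what the compiled, case-insensitive pattern buys us
+(the names are made up):
+\begin{minted}{python}
+import re
+
+pattern = re.compile("anna", re.IGNORECASE)  # compiled once, reused below
+students = ["Anna Andersson", "Marianna Svensson", "Bob Babbage"]
+[s for s in students if pattern.search(s)]
+# ['Anna Andersson', 'Marianna Svensson']
+\end{minted}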
<>= def filter_users(course_list, regex, roles=[]): - """Filter users in courses with roles based on regex - - regex is matched on login ID, Canvas ID and name.""" - pattern = re.compile(regex or ".*") + """ + Filter users in courses with roles based on regex. `regex` is matched on + - Canvas ID (exact match), + - name (regex), + - login ID (regex), + - integration id (exact match), + - SIS user ID (exact match). + """ + pattern = re.compile(regex or ".*", re.IGNORECASE) for user in list_users(course_list, roles): - if pattern.search(user.name): - yield user - continue - - try: - if pattern.search(user.login_id): - yield user - continue - except AttributeError: - canvaslms.cli.warn(f"{user} has no login_id") - - if str(user.id) == regex: - yield user - continue + <> +@ - try: - if user.integration_id == regex: - yield user - continue - except AttributeError: - canvaslms.cli.warn(f"{user} has no integration_id") +Now to check if the user matches, we want to match some things by regular +expression, but other things exactly. +The reasoning is this: +If we use the name, we might match on parts of it; if the user has a unique +enough name. +But if we give the integration ID, we wouldn't try to give part of it; it's not +predicable how much of it will give a unique match, and it doesn't make sense +to look for students with a common prefix of their integration IDs. +Also, some attributes might not exist, so we must try-except them and issue a warning +if they don't exist. +<>= +if str(user.id) == regex: + yield user + continue + +if pattern.search(user.name): + yield user + continue + +try: + if pattern.search(user.login_id): + yield user + continue +except AttributeError: + canvaslms.cli.warn(f"{user} has no login_id") + +try: + if user.integration_id == regex: + yield user + continue +except AttributeError: + canvaslms.cli.warn(f"{user} has no integration_id") + +try: + if user.sis_user_id == regex: + yield user + continue +except AttributeError: + canvaslms.cli.warn(f"{user} has no sis_user_id") @ Now, we can define the function~[[list_students]] in terms of [[list_users]]. @@ -400,6 +475,49 @@ def list_teachers(courses): @ +\section{Getting a unique identifier}\label{UserUniqueID} + +In many cases, we want to get a unique identifier for a user. +The natural attribute would be [[login_id]]. +However, sometimes the [[login_id]] attribute doesn't exist. +So we want a function that does the following. +<>= +Takes a user object and returns a unique identifier. + +Returns one of login_id, integration_id, sis_user_id, id (Canvas ID); in that +order of preference. +@ + +This yields the following function. +Try returning the attributes in that order, try the next when failed. +<>= +def get_uid(user): + """ + <> + """ + attributes = ["login_id", "integration_id", "sis_user_id", "id"] + for attribute in attributes: + try: + <> + except AttributeError: + pass + + <> +@ + +To return the attribute, we simply fetch it. +We'll use [[attrgetter]]. +<>= +return operator.attrgetter(attribute)(user) +@ + +If no attribute existed (all failed), which should not happen, then we raise an +exception. +<>= +raise AttributeError(f"no unique user attribute existed, tried: {attributes}") +@ + + \section{Options for the command line} We provide two ways to filter out users by command-line option: @@ -416,33 +534,74 @@ which add these as mutually exclusive options. We provide the two helper functions for other modules to filter out users from a set of courses. 
This option requires the course option from [[canvaslms.cli.courses]]. + +The first function, [[add_user_option_wo_depends]], simply adds the user +option. +The more useful function, [[add_user_option]], will also try to add the other +required options, like the course(s) where to find users. <>= -def add_user_option_wo_depends(parser): - """Adds the -u option to argparse parser, - without adding other required options""" - user_parser = parser.add_argument_group("filter by user") - user_parser.add_argument("-u", "--user", metavar="user_regex", - required=False, default=".*", - help="Filter users on name, login ID or Canvas ID by user_regex, " - "default: '.*'") - -def add_user_option(parser): +def add_user_option_wo_depends(parser, required=False): + """ + Adds the -u option to argparse parser, without adding + other required options. + + <> + """ + help="Filter users on Canvas ID, name, login ID, integration ID, or " \ + "SIS ID by user_regex. " \ + "Integration ID and SIS ID match exactly, not by regex. " \ + "Note that for login ID, you should start with ^ and $ to avoid " \ + "matching on unintended substrings; c.f. son@institution.tld and " \ + "person@institution.tld, where the first will match both without " \ + "leading ^. The regex allows matching using ^son@, thus skipping " \ + "any domain in this case." + options = {"required": required} + if not required: + options["default"] = ".*" + help += ", default: '.*'" + else: + help += ", required: use '.*' to match all students" + + parser.add_argument("-u", "--user", metavar="user_regex", + help=help, **options) + + <> + +def add_user_option(parser, required=False): """Adds the -u option to argparse parser""" try: courses.add_course_option(parser) except argparse.ArgumentError: pass - add_user_option_wo_depends(parser) + add_user_option_wo_depends(parser, required) @ -When processing this option, we need to filter by course first. +When processing this option, we need to filter by course first, so we use the +processing from the [[courses]] module to get the list of courses matching the +courses options. +Then we simply filter all users. <>= def process_user_option(canvas, args): """Processes the user option from command line, returns a list of users""" return list(filter_users( courses.process_course_option(canvas, args), - args.user)) + args.user, + roles=args.role)) +@ + +We note that we need the roles. +Sometimes we want to be able to filter based on role. +However, most of the time, we just want the students, so we'll default to that. +<>= +The `role` option allows specifying which roles to include, for instance +students or TAs. 
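+@
+
+For example, to list the teaching assistants rather than the students in a
+course, we could run something along these lines (assuming [[-c]] is the usual
+course option; the course code is made up):
+\begin{minted}{text}
+canvaslms users -c datintro24 -r ta
+\end{minted}
+The option itself is a plain choice among the Canvas enrolment types.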
+<>= +parser.add_argument("-r", "--role", + choices={"teacher", "student", "student_view", + "ta", "observer", "designer"}, + default="student", + help="Includes only users in this role, defaults to student.") @ \subsection{The group option} @@ -471,14 +630,18 @@ def add_group_category_option(parser): add_group_category_option_wo_depends(parser) def add_group_option_wo_depends(parser): - """Adds group filtering option to argparse parser, - without adding required options""" + """ + Adds group filtering option to argparse parser, + without adding required options + """ + try: + add_group_category_option_wo_depends(parser) + except argparse.ArgumentError: + pass - group_parser = parser.add_argument_group("filter by group") - add_group_category_option_wo_depends(group_parser) - group_parser.add_argument("-G", "--group", metavar="group_regex", + parser.add_argument("-G", "--group", metavar="group_regex", required=False, - help="Filters groups whose name match group_regex") + help="Filters user groups whose name match group_regex") def add_group_option(parser): """Adds group filtering options to argparse parser, @@ -515,15 +678,24 @@ only one option can be used at a time. The processing must return a list of users, so in case of the group option we must extract the users. <>= -def add_user_or_group_option(parser): +def add_user_or_group_option(parser, required=False): """Adds user and group options as mutually exclusive options to parser""" try: courses.add_course_option(parser) except argparse.ArgumentError: pass - add_user_option_wo_depends(parser) - add_group_option_wo_depends(parser) + parser = parser.add_mutually_exclusive_group(required=required) + + try: + add_user_option_wo_depends(parser) + except argparse.ArgumentError: + pass + + try: + add_group_option_wo_depends(parser) + except argparse.ArgumentError: + pass def process_user_or_group_option(canvas, args): """Returns a list of users, filtered either by user regex or by groups""" diff --git a/src/canvaslms/grades/.gitignore b/src/canvaslms/grades/.gitignore new file mode 100644 index 0000000..4f0fda4 --- /dev/null +++ b/src/canvaslms/grades/.gitignore @@ -0,0 +1,13 @@ +grades.tex +__init__.py +mysum.py +conjunctavg.tex +conjunctavg.py +disjunctmax.tex +disjunctmax.py +maxgradesurvey.py +maxgradesurvey.tex +conjunctavgsurvey.py +conjunctavgsurvey.tex +tilkryLAB1.py +tilkryLAB1.tex diff --git a/src/canvaslms/grades/Makefile b/src/canvaslms/grades/Makefile new file mode 100644 index 0000000..9f4332a --- /dev/null +++ b/src/canvaslms/grades/Makefile @@ -0,0 +1,44 @@ +NOTANGLEFLAGS.py= + +MODULES+= __init__.py +MODULES+= conjunctavg.py +MODULES+= disjunctmax.py +MODULES+= maxgradesurvey.py +MODULES+= conjunctavgsurvey.py +MODULES+= tilkryLAB1.py + +.PHONY: all +all: grades.tex +all: conjunctavg.tex +all: disjunctmax.tex +all: maxgradesurvey.tex +all: conjunctavgsurvey.tex +all: tilkryLAB1.tex +all: ${MODULES} + +grades.tex: conjunctavg.tex +grades.tex: disjunctmax.tex +grades.tex: maxgradesurvey.tex +grades.tex: conjunctavgsurvey.tex +grades.tex: tilkryLAB1.tex + +__init__.py: init.py + ${MV} $^ $@ + +.INTERMEDIATE: init.py +init.py: grades.nw + ${NOTANGLE.py} + + +.PHONY: clean +clean: + ${RM} grades.tex + ${RM} ${MODULES} + ${RM} init.py + + +INCLUDE_MAKEFILES=../../../makefiles +include ${INCLUDE_MAKEFILES}/tex.mk +include ${INCLUDE_MAKEFILES}/noweb.mk +include ${INCLUDE_MAKEFILES}/pkg.mk + diff --git a/src/canvaslms/grades/conjunctavg.nw b/src/canvaslms/grades/conjunctavg.nw new file mode 100644 index 0000000..e8e4d6c --- /dev/null 
+++ b/src/canvaslms/grades/conjunctavg.nw @@ -0,0 +1,129 @@ +\section{Conjunctive average} + +We have one requirement on the summary module: it must contain a function +[[summarize_group]] that takes two arguments; +the first being a list of assignments, +the second being a list of users. +The [[summarize_group]] function is the function that the above code will call. +This gives the following outline of the module. +<>= +""" +Module that summarizes an assignment group by conjunctive average. + +Conjunctive average means: + + 1) We need all assignments to have a non-F grade. + 2) If there are A--F assignments present, we will compute the average of + those grades. For instance; an A and a C will result in a B; an A and a B + will result in an A, but an A with two Bs will become a B (standard + rounding). +""" + +import datetime as dt +from canvaslms.cli import results +from canvasapi.exceptions import ResourceDoesNotExist + +<> + +def summarize_group(assignments_list, users_list): + """Summarizes a particular set of assignments (assignments_list) for all + users in users_list""" + + for user in users_list: + grade, grade_date, graders = summarize(user, assignments_list) + yield [user, grade, grade_date, *graders] +@ + + +\subsection{Summarizing grades: assignment grades to component grade} + +Now we will describe the [[summarize]] helper function. +We want to establish three things: the most recent date, a suitable grade and +who graded. + +For the most recent date, we add all dates to a list and then take the +maximum. +If the list is empty, we don't report any grade, but returns [[None]] for both +grade and date. + +For the grade, as we iterate through we look for P/F and A--E grades. +We can then check for Fs among the P/F grades, if we find an F the summarized +grade will be an F. +If we find no Fs, then we can compute the average over all A--E grades and use +that as the final grade. + +For who graded, we simply extract the list of graders from the submissions. +<>= +def summarize(user, assignments_list): + """Extracts user's submissions for assignments in assingments_list to + summarize results into one grade and a grade date. Summarize by conjunctive + average.""" + + pf_grades = [] + a2e_grades = [] + dates = [] + graders = [] + + for assignment in assignments_list: + try: + submission = assignment.get_submission(user, + include=["submission_history"]) + except ResourceDoesNotExist: + pf_grades.append("F") + continue + + submission.assignment = assignment + graders += results.all_graders(submission) + + grade = submission.grade + + if grade is None: + grade = "F" + + if grade in "ABCDE": + a2e_grades.append(grade) + else: + pf_grades.append(grade) + + grade_date = submission.submitted_at or submission.graded_at + + if grade_date: + grade_date = dt.date.fromisoformat(grade_date.split("T")[0]) + dates.append(grade_date) + + if all(map(lambda x: x == "P", pf_grades)): + final_grade = "P" + if a2e_grades: + final_grade = a2e_average(a2e_grades) + else: + final_grade = "F" + + if dates: + final_date = max(dates) + else: + final_date = None + final_grade = None + + return (final_grade, final_date, graders) +@ + +\subsection{Computing averages} + +To compute the average for the A--E grades; we will convert the grades into +integers, compute the average, round the value to an integer and convert back. 
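+For instance, with the mapping defined below (A maps to 5 and E to 1), an A
+and a C average to 4, which maps back to a B; an A and two Bs average to about
+4.3, which also rounds to a B.
+As a doctest-style illustration of the function we are about to define:
+\begin{minted}{python}
+>>> a2e_average(["A", "C"])
+'B'
+>>> a2e_average(["A", "B", "B"])
+'B'
+\end{minted}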
+<>= +def a2e_average(grades): + """Takes a list of A--E grades, returns the average.""" + num_grades = map(grade_to_int, grades) + avg_grade = round(sum(num_grades)/len(grades)) + return int_to_grade(avg_grade) + +def grade_to_int(grade): + grade_map = {"E": 1, "D": 2, "C": 3, "B": 4, "A": 5} + return grade_map[grade] + +def int_to_grade(int_grade): + grade_map_inv = {1: "E", 2: "D", 3: "C", 4: "B", 5: "A"} + return grade_map_inv[int_grade] +@ + diff --git a/src/canvaslms/grades/conjunctavgsurvey.nw b/src/canvaslms/grades/conjunctavgsurvey.nw new file mode 100644 index 0000000..ed6ccf3 --- /dev/null +++ b/src/canvaslms/grades/conjunctavgsurvey.nw @@ -0,0 +1,149 @@ +\section{Conjunctive average with surveys} + +We have one requirement on the summary module: it must contain a function +[[summarize_group]] that takes two arguments; +the first being a list of assignments, +the second being a list of users. +The [[summarize_group]] function is the function that the above code will call. +This gives the following outline of the module. +<>= +""" +This module is the same as `canvaslms.grades.conjunctavg` except that any +submissions with grades other than A--F and P/F are treated as P. For instance, +numeric grades (like points). Also, all submissions must have a date. This +makes this module useful for including mandatory, ungraded surveys. +""" + +import datetime as dt +from canvaslms.grades.conjunctavg import a2e_average +from canvaslms.cli import results +from canvasapi.exceptions import ResourceDoesNotExist + +<> + +def summarize_group(assignments_list, users_list): + """Summarizes a particular set of assignments (assignments_list) for all + users in users_list""" + + for user in users_list: + grade, grade_date, graders = summarize(user, assignments_list) + yield [user, grade, grade_date, *graders] +@ + + +\subsection{Summarizing grades: assignment grades to component grade} + +Now we will describe the [[summarize]] helper function. +We want to establish two things: the most recent date and a suitable grade. + +For the most recent date, we just add them to a list as we iterate through +them. +Then we can simply take the maximum. + +For the grade, as we iterate through we look for P/F and A--E grades. +We can then check for Fs among the P/F grades, if we find an F the summarized +grade will be an F. +If we find no Fs, then we can compute the average over all A--E grades and use +that as the final grade. + +If we encounter a grade that is not A--F nor P/F, then it is assumed to be a +survey, which is treated as a P. +<>= +def summarize(user, assignments_list): + """ + Extracts user's submissions for assignments in assingments_list to summarize + results into one grade and a grade date. Summarize by conjunctive average. + + If some submission lacks date, return ("F", None). + """ + + pf_grades = [] + a2e_grades = [] + dates = [] + graders = [] + + for assignment in assignments_list: + try: + submission = assignment.get_submission(user, + include=["submission_history"]) + submission.assignment = assignment + except ResourceDoesNotExist: + pf_grades.append("F") + continue + + grade = submission.grade + graders += results.all_graders(submission) + + if grade is None: + grade = "F" + + <> + <> + + <> + <> + + <> + + return (final_grade, final_date, graders) +@ + +We look at the grade and add it to the appropriate list. +The grade Fx is treated as an F. +It's the only grade that should be more than one letter. 
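+For instance, a student whose three submissions are graded A, P and 3 (a
+point-graded survey) is treated as having the grades A, P and P: no grade is
+an F, so the final grade is the average of the A--E grades, in this case an A.
+The [[summarize]] function below does this bucketing.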
+<>= +if grade in "ABCDE": + a2e_grades.append(grade) +elif grade in "PF": + pf_grades.append(grade) +elif grade == "Fx": + pf_grades.append("F") +else: + pf_grades.append("P") +@ + +When we check, we check that all the P/F grades are P. +If that's the case, we can compute the average of the A--E grades---if there +are any. +If there are no A--E grades, the final grade is P. +<>= +if all(map(lambda x: x == "P", pf_grades)): + final_grade = "P" + if a2e_grades: + final_grade = a2e_average(a2e_grades) +else: + final_grade = "F" +@ + +When it comes to the date, we want primarily the submission date. +If there is no submission date, we use the grade date. +(However, when we require the student to present their work, we should probably +use the grade date as that best represents the date of presenting.) +<>= +grade_date = submission.submitted_at or submission.graded_at + +if grade_date: + grade_date = dt.date.fromisoformat(grade_date.split("T")[0]) + dates.append(grade_date) +@ + +When we check the dates, we want the final date to be the most recent date. +If there are no dates, the student hasn't done anything, then we set the final +grade (and date) to [[None]] instead of F. +<>= +if dates: + final_date = max(dates) +else: + final_date = None + final_grade = None +@ + +Finally, as a check, we can check that the number of dates and number of grades +are the same. +Otherwise, they have passed everything they have done, but simply not done some +assignment. +<>= +if len(dates) < len(pf_grades) + len(a2e_grades): + final_grade = "F" +@ + diff --git a/src/canvaslms/grades/disjunctmax.nw b/src/canvaslms/grades/disjunctmax.nw new file mode 100644 index 0000000..1a51a98 --- /dev/null +++ b/src/canvaslms/grades/disjunctmax.nw @@ -0,0 +1,131 @@ +\section{Disjunctive maximum} + +In this module we want to provide the disjunctive maximum way of computing +\emph{one grade} from \emph{several assignments}. +<>= +Module that summarizes an assignment group by disjunctive maximum. + +Disjunctive maximum means: + + 1) At least one assignment must have a non-F grade. + 2) If there are more than one assignment with a non-F grade, we take the + maximum as the grade. A--E are valued higher than P. The grade F is valued + the lowest. + +We fail if there is an assignment which doesn't have A--F or P/F grading +scales. +@ + +We have one requirement on the summary module: it must contain a function +[[summarize_group]] that takes two arguments; +the first being a list of assignments, +the second being a list of users. +The [[summarize_group]] function is the function that the above code will call. +This gives the following outline of the module. +<>= +""" +<> +""" + +import datetime as dt +from canvaslms.cli import results +from canvasapi.exceptions import ResourceDoesNotExist + +<> + +def summarize_group(assignments_list, users_list): + """Summarizes a particular set of assignments (assignments_list) for all + users in users_list""" + + for user in users_list: + grade, grade_date, graders = summarize(user, assignments_list) + yield [user, grade, grade_date, *graders] +@ + + +\subsection{Summarizing grades: assignment grades to component grade}% +\label{disjunctmax-summarize} + +Now we will describe the [[summarize]] helper function. +We want to establish two things: the most recent date and a suitable grade. + +For the most recent date, we just add them to a list as we iterate through the +submissions. +We do the same for grades, as we iterate through we add any grade to a list. 
+In the end we compute the maximum of each list.
+<>=
+def summarize(user, assignments_list):
+    """Extracts user's submissions for assignments in assignments_list to
+    summarize results into one grade and a grade date. Summarize by disjunctive
+    maximum."""
+
+    grades = []
+    dates = []
+    graders = []
+
+    for assignment in assignments_list:
+        try:
+            submission = assignment.get_submission(user,
+                include=["submission_history"])
+        except ResourceDoesNotExist:
+            grades.append("F")
+            continue
+
+        submission.assignment = assignment
+        graders += results.all_graders(submission)
+
+        grade = submission.grade
+
+        if grade is None:
+            grade = "F"
+
+        grades.append(grade)
+
+        grade_date = submission.submitted_at or submission.graded_at
+
+        if grade_date:
+            grade_date = dt.date.fromisoformat(grade_date.split("T")[0])
+            dates.append(grade_date)
+
+    if grades:
+        final_grade = grade_max(grades) or "F"
+    else:
+        final_grade = "F"
+
+    if dates:
+        final_date = max(dates)
+    else:
+        final_date = None
+        final_grade = None
+
+    return (final_grade, final_date, graders)
+@
+
+\subsection{Computing the maximum}
+
+To compute the maximum of the A--E grades, we convert the grades into
+integers, compute the maximum, and convert back.
+We also include P/F here, since we can count them as lower than A--E.
+<>=
+def grade_max(grades):
+    """Takes a list of A--E and P/F grades, returns the maximum."""
+    num_grades = list(map(grade_to_int, grades))
+
+    if num_grades:
+        max_grade = max(num_grades)
+        return int_to_grade(max_grade)
+
+    return None
+
+def grade_to_int(grade):
+    grade_map = {"F": -2, "Fx": -1,
+                 "P": 0,
+                 "E": 1, "D": 2, "C": 3, "B": 4, "A": 5}
+    return grade_map[grade]
+
+def int_to_grade(int_grade):
+    grade_map_inv = {-2: "F", -1: "Fx",
+                     0: "P",
+                     1: "E", 2: "D", 3: "C", 4: "B", 5: "A"}
+    return grade_map_inv[int_grade]
+
diff --git a/src/canvaslms/grades/grades.nw b/src/canvaslms/grades/grades.nw
new file mode 100644
index 0000000..6e5eef7
--- /dev/null
+++ b/src/canvaslms/grades/grades.nw
@@ -0,0 +1,137 @@
+\chapter{Computing grades from groups of assignments}
+\label{summary-modules}
+
+This is the documentation for the \texttt{canvaslms.grades} package.
+<>=
+This package contains modules to summarize assignment groups in different ways.
+These modules are used with the `-S` option of the `results` command.
+
+For a module to be used with the `canvaslms results -S module` option, the
+module must fulfil the following:
+
+  1) It must contain a function named `summarize_group`.
+  2) `summarize_group` must take two arguments:
+
+     I) `assignment_list`, a list of `canvasapi.assignment.Assignment`
+        objects. These assignments all belong to the same group, i.e. their
+        grades should be used to compute the student's final grade.
+
+     II) `users_list`, a list of `canvasapi.user.User` objects. This is a
+         list of users, i.e. students, for whom to compute the grades.
+
+  3) The return value should be a list of lists. Each list should have the
+     form `[user, grade, grade date, grader 1, ..., grader N]`.
+
+For more details, see Chapter 11 of the `canvaslms.pdf` file found among the
+release files at:
+
+  https://github.com/dbosk/canvaslms/releases
+@
+
+Also see \cref{results-command} for details on the [[results]] command.
+
+Now, this package's init module ([[__init__.py]], referred to by
+[[<>]]) only needs this:
+<>=
+"""
+<>
+"""
+@
+
+Let's look at a simple example module, [[mysum.py]].
+In the [[summarize_group]] function, we extract all the grades, the dates
+and the graders and return the expected tuple (list).
+<>=
+import datetime as dt
+from canvaslms.cli import results
+from canvasapi.exceptions import ResourceDoesNotExist
+
+def summarize_group(assignments, users):
+    """
+    Summarizes the grades for all assignments into one grade for each user.
+    """
+    for user in users:
+        grades = []
+        dates = []
+        graders = []
+
+        <>
+
+        yield [user, final_grade(grades), max(dates), *graders]
+@
+
+We leave it to the reader to imagine all the possibilities of the
+[[final_grade(grades)]] function, which is supposed to take the list of grades
+and turn them into one final grade.
+(For more elaborate and complete examples, where the [[final_grade]] function
+is actually implemented, see the summarizing functions implemented in the
+remaining sections of this chapter.)
+
+To extract the data we need, we simply iterate through all the assignments and
+fetch the user's (student's) submission.
+
+Note that we must add the option [[include=["submission_history"]]] to be able
+to extract everyone who participated in the grading, not just the last grader.
+This is important since the last grader might just check the parts that the
+previous grader said must be fixed by the student.
+So both are part of the grading.
+
+We must also handle the case that the submission doesn't exist.
+This happens in very rare cases.
+But to get correct behaviour, we must treat it as an F.
+Note the [[continue]]: without it we would fall through and try to process a
+submission that doesn't exist.
+<>=
+for assignment in assignments:
+    try:
+        submission = assignment.get_submission(user,
+            include=["submission_history"])
+    except ResourceDoesNotExist:
+        grades.append("F")
+        continue
+
+    <>
+@
+
+For each submission, we extract the grade and append it to the list of
+grades.
+<>=
+grades.append(submission.grade or "F")
+@
+
+Then we fetch the graders and append them to the list of graders.
+The function [[all_graders]] expects the submission to have an attribute
+[[.assignment]] pointing to the assignment in question.
+(We want this attribute so that we don't have to use a [[canvas]] object to
+resolve the [[.assignment_id]] attribute that is there by default.)
+<>=
+submission.assignment = assignment
+graders += results.all_graders(submission)
+@
+
+Finally, the date:
+we should primarily use the submission date.
+However, in some cases, like oral presentations, the student hasn't submitted
+anything (even if they should, in the case of labs).
+Then there is no submission date, so we have to resort to the grading date.
+<>=
+date = submission.submitted_at or submission.graded_at
+try:
+    grade_date = dt.date.fromisoformat(date.split("T")[0])
+except AttributeError:
+    pass
+else:
+    dates.append(grade_date)
+@
+
+To use this module we would run
+\begin{center}
+  \texttt{canvaslms results -S mysum.py}
+\end{center}
+in the directory where \texttt{mysum.py} is located.
+We can also give the relative or absolute path to \texttt{mysum.py} instead.
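+For reference, the pieces above assemble into a stand-alone module roughly
+like the following sketch of \texttt{mysum.py}.
+The [[final_grade]] policy used here (P only if every grade is a pass) is just
+a placeholder for whatever policy the reader actually needs.
+\begin{minted}{python}
+import datetime as dt
+from canvaslms.cli import results
+from canvasapi.exceptions import ResourceDoesNotExist
+
+def final_grade(grades):
+    """Placeholder policy: P if every grade is P or A--E, otherwise F."""
+    return "P" if all(g == "P" or g in "ABCDE" for g in grades) else "F"
+
+def summarize_group(assignments, users):
+    """Summarizes all assignments into one grade per user."""
+    for user in users:
+        grades = []
+        dates = []
+        graders = []
+
+        for assignment in assignments:
+            try:
+                submission = assignment.get_submission(user,
+                    include=["submission_history"])
+            except ResourceDoesNotExist:
+                grades.append("F")
+                continue
+
+            # all_graders needs the .assignment attribute, see above.
+            submission.assignment = assignment
+            graders += results.all_graders(submission)
+            grades.append(submission.grade or "F")
+
+            # Prefer the submission date, fall back to the grading date.
+            date = submission.submitted_at or submission.graded_at
+            if date:
+                dates.append(dt.date.fromisoformat(date.split("T")[0]))
+
+        yield [user, final_grade(grades),
+               max(dates) if dates else None, *graders]
+\end{minted}
+Saved as \texttt{mysum.py} in the current working directory, it is used
+exactly as shown above.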
+ +%%% Modules %%% + +\input{../src/canvaslms/grades/conjunctavg.tex} +\input{../src/canvaslms/grades/disjunctmax.tex} +\input{../src/canvaslms/grades/maxgradesurvey.tex} +\input{../src/canvaslms/grades/conjunctavgsurvey.tex} +\input{../src/canvaslms/grades/tilkryLAB1.tex} diff --git a/src/canvaslms/grades/maxgradesurvey.nw b/src/canvaslms/grades/maxgradesurvey.nw new file mode 100644 index 0000000..692c440 --- /dev/null +++ b/src/canvaslms/grades/maxgradesurvey.nw @@ -0,0 +1,103 @@ +\section{Maximum grade, latest date with surveys} + +In this module we want to provide the disjunctive maximum way of computing +\emph{one grade} from \emph{several assignments}. +But we also want to include ungraded surveys. +<>= +Module that summarizes an assignment group by maximizing grade and date. This +module is the same as `canvaslms.grades.disjunctmax`, but also includes +ungraded surveys (for instance quiz with points, where the number of points is +ignored). Consequently all assignments must have a date. + +This function also doen't fail when there is a grade other than A--F or P/F +present. Such grades are all treated as F. +@ + +We have one requirement on the summary module: it must contain a function +[[summarize_group]] that takes two arguments; +the first being a list of assignments, +the second being a list of users. +The [[summarize_group]] function is the function that the above code will call. +This gives the following outline of the module. +<>= +""" +<> +""" + +import datetime as dt +from canvaslms.grades.disjunctmax import grade_max +from canvaslms.cli import results +from canvasapi.exceptions import ResourceDoesNotExist + +<> + +def summarize_group(assignments_list, users_list): + """Summarizes a particular set of assignments (assignments_list) for all + users in users_list""" + + for user in users_list: + grade, grade_date, graders = summarize(user, assignments_list) + yield [user, grade, grade_date, *graders] +@ + + +\subsection{Summarizing grades: assignment grades to component grade} + +Now we will describe the [[summarize]] helper function. +We want to establish three things: the most recent date, a suitable grade and +the graders. + +For the most recent date, we just add them to a list as we iterate through the +submissions. +We do the same for grades, as we iterate through we add any grade to a list. +In the end we compute the maximums of both lists. + +The key difference to the [[summarize]] function in +\cref{disjunctmax-summarize} is the translation of other grades to F. +<>= +def summarize(user, assignments_list): + """Extracts user's submissions for assignments in assingments_list to + summarize results into one grade and a grade date. 
Summarize by disjunctive + maximum.""" + + grades = [] + dates = [] + graders = [] + + for assignment in assignments_list: + try: + submission = assignment.get_submission(user, + include=["submission_history"]) + except ResourceDoesNotExist: + grades.append("F") + continue + + submission.assignment = assignment + graders += results.all_graders(submission) + + grade = submission.grade + + if grade is None or grade not in "ABCDEPF": + grade = "F" + + grades.append(grade) + + grade_date = submission.submitted_at or submission.graded_at + + if grade_date: + grade_date = dt.date.fromisoformat(grade_date.split("T")[0]) + dates.append(grade_date) + + if len(dates) < len(grades): + final_grade = "F" + else: + final_grade = grade_max(grades) or "F" + + if dates: + final_date = max(dates) + else: + final_date = None + final_grade = None + + return (final_grade, final_date, graders) + diff --git a/src/canvaslms/grades/tilkryLAB1.nw b/src/canvaslms/grades/tilkryLAB1.nw new file mode 100644 index 0000000..d3676fe --- /dev/null +++ b/src/canvaslms/grades/tilkryLAB1.nw @@ -0,0 +1,310 @@ +\section{The Applied Crypto course} + +In this module we'll describe a more complicated way of calculating the grades. +This is used for the LAB1 module in DD2520 Applied Crypto at KTH. + +We have the following grading criteria for the intended learning outcomes. +\begin{longtable}[]{% +>{\raggedright}p{0.25\columnwidth}% +>{\raggedright}p{0.25\columnwidth}% +>{\raggedright}p{0.25\columnwidth}% +>{\raggedright}p{0.25\columnwidth}% +} +\caption{Grading criteria for the intended learning outcomes in DD2520 Applied +Crypto at KTH.} \tabularnewline +\toprule +ILO & +E/P & +C & +A \tabularnewline +\midrule +\endhead +describe cryptographic concepts and explain their security properties, & +from simple examples & +from simple system descriptions & +from complex system descriptions \tabularnewline +\midrule +%ASSESSMENT +% +%written assignments, formative: seminar exercises +use basic terminology in computer security and cryptography correctly & +with few mistakes & +with few and only minor mistakes & +with clear and concise explanations \tabularnewline +\midrule +%ASSESSMENT +% +%written assignments, labs submissions, lab solution presentations; +%formative: seminar exercises +find and use documentation of cryptographic libraries and standards, & +enough to solve labs and cover basics for discussions and hand-ins with +some scientific resources \tabularnewline +\midrule +%ASSESSMENT +% +%written assignments, lab submissions; formative: seminar exercises +identify and categorise threats against a cryptographic IT-system at a +conceptual level, suggest appropriate countermeasures and present the +reasoning to others & +with some appropriate counter-measures and basic argumentation, with +sufficient clarity for fellow students and for teachers to understand, +with few mistakes & +with some appropriate counter-measures and basic argumentation, with +demonstrated correct understanding, with enough relevant detail and few +tangents & +with arguably most appropriate counter-measures and nuanced +argumentation, with logical and pedagogical flow and concise expression +of all (and only) relevant and correct details \tabularnewline +%ASSESSMENT +% +%written assignments, labs submissions, lab solution presentations; +%formative: seminar exercises +\bottomrule +\end{longtable} + +Then we have some mandatory and some optional assignments. 
+\begin{itemize} +\item Cryptanalysis of Ciphertexts +\item Optional: Cryptopals (C, B, A) +\item Implement AES (Kattis Problem) +\item Optional: AES presentation (C, A) +\item MANDATORY Seminar (pick 1/2 or 2/2): usability (Sonja) ON CAMPUS +\item MANDATORY Seminar (pick 15/2 or 16/2): Impact considerations around crypto systems (Sonja) ON CAMPUS +\item MANDATORY Design Considerations (after the impact considerations seminar) +\item MANDATORY Lab (pick 23/2 or 1/3): Introduction to ProVerif (Karl and Jesper) ON CAMPUS +\item Optional: Side channels (C, B, A) +\item Optional: Secure multi-party computation (C, B, A) +\end{itemize} +The assignments are designed in such a way that the optional assignments let +the students show that they fulfil the higher criteria. +The mandatory ones just ensure the grading criteria for E. + +Each assignment in turn has assignment-specific grading criteria to map grading +to the general grading criteria above. +The assignment-specific grading criteria are mapped to points. + +\begin{longtable}[]{% +>{\raggedright}p{0.3\columnwidth}% +>{\raggedright}p{0.2\columnwidth}% +>{\raggedright}p{0.2\columnwidth}% +>{\raggedright}p{0.2\columnwidth}% +>{\raggedright}p{0.2\columnwidth}% +} +\caption{Assignment-oriented grading criteria for the assignments in DD2520 +Applied Crypto at KTH.}\tabularnewline +\toprule +Higher LAB1 grades given E of mandatory & +Optional assignments for D & +Optional assignments for C & +Optional assignments for B & +Optional assignments for A\tabularnewline +\midrule +\endhead +\(\ceil{\sum}\) is the rounded up sum of points from optional assignments, +where \(1A=2.5, 1B=1.5, 1C=1.\) & +\(\ceil{\sum} = 1\) & +\(\ceil{\sum} = 2\) & +\(\ceil{\sum} = 3\) & +\(\sum \geq 4\)\tabularnewline +\midrule +possible instantiations & 1C & 2C or 1B & 2B or 1A or (1B+1C) & 2A or +(1A+1B) or (1A+2C)\tabularnewline +\bottomrule +\end{longtable} + +We let the mandatory assignments have a grading scale that translates to P +whenever a mandatory assignment is passed. +The optional assignments will not have any grading scale, but we'll use the +points. +This way we can simply sum up the points of the optional assignments and +translate the points to a grade using the table above. + +We should not that this translation is not perfect. +As is pointed out in by the possible instantiations above: technically one can +get an A by doing all the four optional assignments at the lowest level (\(C = +1\)). +However, for now I'm stuck with this system that I inherited with the course. + +In this module we want to provide a summarizer for this grading system. +<>= +Summarizes the assignments of LAB1 in the course DD2520 Applied Crypto at KTH. + +There are some mandatory assignments graded P/F. They must all be P. + +There are also some optional assigments. They're given scores between 0 and +2.5. The sum of the scores is used to determine the grade. If the sum is more +than 4, the grade is A. +@ + +We have one requirement on the summary module: it must contain a function +[[summarize_group]] that takes two arguments; +the first being a list of assignments, +the second being a list of users. +The [[summarize_group]] function is the function that the above code will call. +This gives the following outline of the module. 
+<>= +""" +<> +""" + +import datetime as dt +from canvaslms.cli import results +from canvasapi.exceptions import ResourceDoesNotExist +import logging +from math import ceil + +<> + +def summarize_group(assignments_list, users_list): + """ + Summarizes a particular set of assignments (assignments_list) for all + users in users_list + """ + + for user in users_list: + grade, grade_date, graders = summarize(user, assignments_list) + yield [user, grade, grade_date, *graders] +@ + + +\subsection{Summarizing grades: assignment grades to component grade} + +Now we will describe the [[summarize]] helper function. +We want to establish three things: the most recent date, a suitable grade and +the graders. + +For the most recent date, we just add them to a list as we iterate through the +submissions. +Then we'll pick the most recent date from the list. + +We do the same for grades, as we iterate through we add any grade to either of +two lists: [[mandatory]] or [[optional]]. +The mandatory assignments have P/F grades. +The optional assignments have grades between 0 and 2.5. +<>= +def summarize(user, assignments_list): + """ + Extracts user's submissions for assignments in assingments_list to summarize + results into one grade and a grade date. + + Summarize according to tilkry grading scheme. + """ + + mandatory = [] + optional = [] + dates = [] + graders = [] + + for assignment in assignments_list: + try: + submission = assignment.get_submission(user, + include=["submission_history"]) + except ResourceDoesNotExist: + <> + continue + + submission.assignment = assignment + graders += results.all_graders(submission) + + grade = submission.grade + + <> + + grade_date = submission.submitted_at or submission.graded_at + + if grade_date: + grade_date = dt.date.fromisoformat(grade_date.split("T")[0]) + dates.append(grade_date) + + if not all(grade == "P" for grade in mandatory): + final_grade = "F" + else: + <> + + if dates: + final_date = max(dates) + else: + final_date = None + final_grade = None + + return (final_grade, final_date, graders) +@ + +\subsection{Sorting out mandatory and optional assignments} + +If an assignment is mandatory or optional determines what to do if there is no +submission. +This means that we can't use the grade of the submission (float or P/F) to +determine if the assignment is mandatory or optional. +Based on the names listed above, the system will be that an optional assignment +has its name prefixed with \enquote{Optional:}. +(However, we'll include a few different prefixes.) +<>= +if is_optional(assignment): + <> +else: + <> +<>= +def is_optional(assignment): + assignment_name = assignment.name.casefold() + return ( + assignment_name.startswith("optional:") + or assignment_name.startswith("(optional)") + ) +@ + +If the assignment is mandatory, we'll add the grade to [[mandatory]]. +If there is no grade or submission, we'll treat it as an F. + +We also added a grade P+, for the fun of it, on one assignment. +We treat it as a normal P though. +<>= +if grade is None: + grade = "F" +elif grade == "P+": + grade = "P" +elif grade not in "PF": + logging.warning(f"Invalid grade {grade} for {user} in {assignment}, " + "using F") + grade = "F" +mandatory.append(grade) +<>= +if not is_optional(assignment): + mandatory.append("F") +@ + +If the assignment is optional, we'll add the grade to [[optional]]---if there +is any. +If there is no grade or submission, we'll just skip it. 
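+Canvas reports point-graded submissions as strings (a submission worth 2.5
+points typically comes back as the string "2.5"), so we convert with
+[[float]] and skip anything that doesn't parse.
+As a small illustration (the values are hypothetical and this snippet is not
+part of the tangled module):
+\begin{minted}{python}
+raw_grades = ["2.5", "1.0", None, "complete"]
+
+optional = []
+for grade in raw_grades:
+    if grade is None:             # not submitted or not graded: skip
+        continue
+    try:
+        optional.append(float(grade))
+    except ValueError:            # not a point score, e.g. "complete": skip
+        pass
+
+# optional == [2.5, 1.0]
+\end{minted}
+The chunk in the module does the same, but logs a warning before skipping.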
+<>= +if grade is None: + continue +try: + grade = float(grade) + optional.append(grade) +except ValueError: + logging.warning(f"Invalid grade {grade} for {user} in {assignment}, " + "skipping.") + continue +@ + +\subsection{Calculating the final grade} + +The final grade is calculated based on the sum of the optional assignments. +<>= +if sum(optional) >= 4: + final_grade = "A" +elif ceil(sum(optional)) >= 3: + final_grade = "B" +elif ceil(sum(optional)) >= 2: + final_grade = "C" +elif ceil(sum(optional)) >= 1: + final_grade = "D" +else: + final_grade = "E" +@ + +\subsection{The complete module} + +All in all, the module looks like this. +\inputminted{python}{../src/canvaslms/grades/tilkryLAB1.py} diff --git a/src/canvaslms/hacks/Makefile b/src/canvaslms/hacks/Makefile index 07ecbad..5a7a9d9 100644 --- a/src/canvaslms/hacks/Makefile +++ b/src/canvaslms/hacks/Makefile @@ -1,4 +1,3 @@ -NOWEAVEFLAGS.tex= -n -delay -t2 NOTANGLEFLAGS.py= diff --git a/src/canvaslms/hacks/canvasapi.nw b/src/canvaslms/hacks/canvasapi.nw index be417f9..fe92ea0 100644 --- a/src/canvaslms/hacks/canvasapi.nw +++ b/src/canvaslms/hacks/canvasapi.nw @@ -109,6 +109,36 @@ for module_name, module in canvasapi_modules.items(): @ +\section{Improve User's [[__str__]] method} + +By default, [[canvasapi]]'s [[User]] class defines a [[__str__]] dunder method +that uses the user's name and Canvas ID. +We want to make it more useful, by using the user's name and login ID. +<>= +def make_useful_user_dunder_str(): + """Improves the user class by changing __str__""" + <> + <> +@ + +Now, we simply need to define a function to use as a drop-in replacement for +the [[__str__]] method. +<>= +def name_and_login(self): + try: + return f"{self.name} <{self.login_id}>" + except AttributeError as err: + return f"{self.name} <>" +@ + +Then we simply need to replace the current [[__str__]] method with the new one +above. +<>= +import canvasapi.user +canvasapi.user.User.__str__ = name_and_login +@ + + \section{Cacheable Canvas objects} We would like certain methods in certain Canvas objects to be cached. @@ -491,3 +521,4 @@ else: return self.__cache.values() @ +